diff --git a/localization/v2.4.x/site/de/adminGuide/clouds/aws/s3.json b/localization/v2.4.x/site/de/adminGuide/clouds/aws/s3.json index 4bf437271..6a7774766 100644 --- a/localization/v2.4.x/site/de/adminGuide/clouds/aws/s3.json +++ b/localization/v2.4.x/site/de/adminGuide/clouds/aws/s3.json @@ -1 +1 @@ -{"codeList":["milvus_bucket_name=\"milvus-bucket-$(openssl rand -hex 12)\"\n\naws s3api create-bucket --bucket \"$milvus_bucket_name\" --region 'us-east-2' --acl private --object-ownership ObjectWriter --create-bucket-configuration LocationConstraint='us-east-2'\n\n\n# Output\n#\n# \"Location\": \"http://milvus-bucket-039dd013c0712f085d60e21f.s3.amazonaws.com/\"\n","echo '{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n}' > milvus-s3-policy.json\n\naws iam create-policy --policy-name MilvusS3ReadWrite --policy-document file://milvus-s3-policy.json\n\n\n# Get the ARN from the command output as follows:\n# {\n# \"Policy\": {\n# \"PolicyName\": \"MilvusS3ReadWrite\",\n# \"PolicyId\": \"AN5QQVVPM1BVTFlBNkdZT\",\n# \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n# \"Path\": \"/\",\n# \"DefaultVersionId\": \"v1\",\n# \"AttachmentCount\": 0,\n# \"PermissionsBoundaryUsageCount\": 0,\n# \"IsAttachable\": true,\n# \"CreateDate\": \"2023-11-16T06:00:01+00:00\",\n# \"UpdateDate\": \"2023-11-16T06:00:01+00:00\"\n# }\n# } \n","eksctl create iamserviceaccount --name milvus-s3-access-sa --namespace milvus --cluster milvus-eks-cluster --role-name milvus-s3-access-sa \\\n --attach-policy-arn arn:aws:iam:::policy/MilvusS3ReadWrite --approve\n","aws iam get-role --role-name milvus-s3-access-sa --query Role.AssumeRolePolicyDocument\n# An example output is as follows\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Federated\": \"arn:aws:iam::111122223333:oidc-provider/oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE\"\n },\n \"Action\": \"sts:AssumeRoleWithWebIdentity\",\n \"Condition\": {\n \"StringEquals\": {\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:sub\": \"system:serviceaccount:default:my-service-account\",\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:aud\": \"sts.amazonaws.com\"\n }\n }\n }\n ]\n}\n","aws iam list-attached-role-policies --role-name milvus-s3-access-sa --query 'AttachedPolicies[].PolicyArn' --output text\n# An example output is as follows\narn:aws:iam::12345678901:policy/MilvusS3ReadWrite\n","export policy_arn='arn:aws:iam::12345678901:policy/MilvusS3ReadWrite'\naws iam get-policy --policy-arn $policy_arn\n# An example output is as follows\n{\n \"Policy\": {\n \"PolicyName\": \"MilvusS3ReadWrite\",\n \"PolicyId\": \"EXAMPLEBIOWGLDEXAMPLE\",\n \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n \"Path\": \"/\",\n \"DefaultVersionId\": \"v2\",\n [...]\n }\n}\n","aws iam get-policy-version --policy-arn $policy_arn --version-id v2\n# An example output is as follows\n{\n \"PolicyVersion\": {\n \"Document\": {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n },\n [...]\n }\n}\n","kubectl describe serviceaccount 
milvus-s3-access-sa -n milvus\n# An example output is as follows\nName: milvus-s3-access-sa\nNamespace: milvus\nLabels: app.kubernetes.io/managed-by=eksctl\nAnnotations: eks.amazonaws.com/role-arn: arn:aws:iam::12345678901:role/milvus-s3-access-sa\n[...]\n","helm repo add milvus https://zilliztech.github.io/milvus-helm/\nhelm repo update\n","cluster:\n enabled: true\n\nservice:\n type: LoadBalancer\n port: 19530\n annotations: \n service.beta.kubernetes.io/aws-load-balancer-type: external\n service.beta.kubernetes.io/aws-load-balancer-name: milvus-service\n service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing\n service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip\n\nserviceAccount:\n create: false\n name: milvus-s3-access-sa\n\nminio:\n enabled: false\n\nexternalS3:\n enabled: true\n host: \"s3.us-east-2.amazonaws.com\"\n port: \"443\"\n useSSL: true\n bucketName: \"\"\n useIAM: true\n cloudProvider: \"aws\"\n iamEndpoint: \"\"\n\nrootCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: 1\n memory: 2Gi\n\nindexCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nqueryCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\ndataCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nproxy:\n replicas: 2\n resources: \n limits:\n cpu: 1\n memory: 2Gi \n","helm upgrade --install milvus-demo milvus/milvus -n milvus -f milvus.yaml\n"],"headingContent":"","anchorList":[{"label":"Konfigurieren des S3-Zugriffs nach IAM-Rolle","href":"Configure-S3-Access-by-IAM-Role","type":1,"isActive":false},{"label":"Bevor Sie beginnen","href":"Before-you-start","type":2,"isActive":false},{"label":"Zuordnen einer IAM-Rolle zu einem Kubernetes-Servicekonto","href":"Associate-an-IAM-role-with-a-Kubernetes-service-account","type":2,"isActive":false},{"label":"Überprüfen Sie die Einrichtung der Rolle und des Dienstkontos","href":"Verify-the-role-and-service-account-setup","type":2,"isActive":false},{"label":"Milvus bereitstellen","href":"Deploy-Milvus","type":2,"isActive":false},{"label":"Überprüfen Sie die Installation","href":"Verify-the-installation","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["milvus_bucket_name=\"milvus-bucket-$(openssl rand -hex 12)\"\n\naws s3api create-bucket --bucket \"$milvus_bucket_name\" --region 'us-east-2' --acl private --object-ownership ObjectWriter --create-bucket-configuration LocationConstraint='us-east-2'\n\n\n# Output\n#\n# \"Location\": \"http://milvus-bucket-039dd013c0712f085d60e21f.s3.amazonaws.com/\"\n","echo '{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:ListBucket\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\"\n ]\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:DeleteObject\",\n \"s3:GetObject\",\n \"s3:PutObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n}' > milvus-s3-policy.json\n\naws iam create-policy --policy-name MilvusS3ReadWrite --policy-document file://milvus-s3-policy.json\n\n\n# Get the ARN from the command output as follows:\n# {\n# \"Policy\": {\n# \"PolicyName\": \"MilvusS3ReadWrite\",\n# \"PolicyId\": \"AN5QQVVPM1BVTFlBNkdZT\",\n# \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n# \"Path\": \"/\",\n# \"DefaultVersionId\": \"v1\",\n# \"AttachmentCount\": 0,\n# 
\"PermissionsBoundaryUsageCount\": 0,\n# \"IsAttachable\": true,\n# \"CreateDate\": \"2023-11-16T06:00:01+00:00\",\n# \"UpdateDate\": \"2023-11-16T06:00:01+00:00\"\n# }\n# } \n","eksctl create iamserviceaccount --name milvus-s3-access-sa --namespace milvus --cluster milvus-eks-cluster --role-name milvus-s3-access-sa \\\n --attach-policy-arn arn:aws:iam:::policy/MilvusS3ReadWrite --approve\n","aws iam get-role --role-name milvus-s3-access-sa --query Role.AssumeRolePolicyDocument\n# An example output is as follows\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Federated\": \"arn:aws:iam::111122223333:oidc-provider/oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE\"\n },\n \"Action\": \"sts:AssumeRoleWithWebIdentity\",\n \"Condition\": {\n \"StringEquals\": {\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:sub\": \"system:serviceaccount:default:my-service-account\",\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:aud\": \"sts.amazonaws.com\"\n }\n }\n }\n ]\n}\n","aws iam list-attached-role-policies --role-name milvus-s3-access-sa --query 'AttachedPolicies[].PolicyArn' --output text\n# An example output is as follows\narn:aws:iam::12345678901:policy/MilvusS3ReadWrite\n","export policy_arn='arn:aws:iam::12345678901:policy/MilvusS3ReadWrite'\naws iam get-policy --policy-arn $policy_arn\n# An example output is as follows\n{\n \"Policy\": {\n \"PolicyName\": \"MilvusS3ReadWrite\",\n \"PolicyId\": \"EXAMPLEBIOWGLDEXAMPLE\",\n \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n \"Path\": \"/\",\n \"DefaultVersionId\": \"v2\",\n [...]\n }\n}\n","aws iam get-policy-version --policy-arn $policy_arn --version-id v2\n# An example output is as follows\n{\n \"PolicyVersion\": {\n \"Document\": {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n },\n [...]\n }\n}\n","kubectl describe serviceaccount milvus-s3-access-sa -n milvus\n# An example output is as follows\nName: milvus-s3-access-sa\nNamespace: milvus\nLabels: app.kubernetes.io/managed-by=eksctl\nAnnotations: eks.amazonaws.com/role-arn: arn:aws:iam::12345678901:role/milvus-s3-access-sa\n[...]\n","helm repo add milvus https://zilliztech.github.io/milvus-helm/\nhelm repo update\n","cluster:\n enabled: true\n\nservice:\n type: LoadBalancer\n port: 19530\n annotations: \n service.beta.kubernetes.io/aws-load-balancer-type: external\n service.beta.kubernetes.io/aws-load-balancer-name: milvus-service\n service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing\n service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip\n\nserviceAccount:\n create: false\n name: milvus-s3-access-sa\n\nminio:\n enabled: false\n\nexternalS3:\n enabled: true\n host: \"s3.us-east-2.amazonaws.com\"\n port: \"443\"\n useSSL: true\n bucketName: \"\"\n useIAM: true\n cloudProvider: \"aws\"\n iamEndpoint: \"\"\n\nrootCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: 1\n memory: 2Gi\n\nindexCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nqueryCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\ndataCoordinator:\n replicas: 2\n activeStandby:\n 
enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nproxy:\n replicas: 2\n resources: \n limits:\n cpu: 1\n memory: 2Gi \n","helm upgrade --install milvus-demo milvus/milvus -n milvus -f milvus.yaml\n"],"headingContent":"Configure S3 Access by IAM Role","anchorList":[{"label":"Konfigurieren des S3-Zugriffs nach IAM-Rolle","href":"Configure-S3-Access-by-IAM-Role","type":1,"isActive":false},{"label":"Bevor Sie beginnen","href":"Before-you-start","type":2,"isActive":false},{"label":"Zuordnen einer IAM-Rolle zu einem Kubernetes-Servicekonto","href":"Associate-an-IAM-role-with-a-Kubernetes-service-account","type":2,"isActive":false},{"label":"Überprüfen Sie die Einrichtung der Rolle und des Dienstkontos","href":"Verify-the-role-and-service-account-setup","type":2,"isActive":false},{"label":"Milvus bereitstellen","href":"Deploy-Milvus","type":2,"isActive":false},{"label":"Überprüfen Sie die Installation","href":"Verify-the-installation","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/adminGuide/clouds/aws/s3.md b/localization/v2.4.x/site/de/adminGuide/clouds/aws/s3.md index 0c0a79da6..7e2628bd5 100644 --- a/localization/v2.4.x/site/de/adminGuide/clouds/aws/s3.md +++ b/localization/v2.4.x/site/de/adminGuide/clouds/aws/s3.md @@ -73,13 +73,20 @@ aws s3api create-bucket --bucket "$policy_arn }
-Sehen Sie sich den Inhalt der Richtlinie an, um sicherzustellen, dass die Richtlinie alle Berechtigungen enthält, die Ihr Pod benötigt. Ersetzen Sie ggf. 1 im folgenden Befehl durch die Version, die in der vorherigen Ausgabe zurückgegeben wurde.
+Überprüfen Sie den Inhalt der Richtlinie, um sicherzustellen, dass die Richtlinie alle Berechtigungen enthält, die Ihr Pod benötigt. Ersetzen Sie ggf. 1 im folgenden Befehl durch die Version, die in der vorherigen Ausgabe zurückgegeben wurde.
aws iam get-policy-version --policy-arn $policy_arn --version-id v2
 # An example output is as follows
@@ -229,7 +236,7 @@ Annotations:         eks.amazonaws.com/role-arn: arn:aws:iam::12345678901:role/m
           d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
         >
       
-In diesem Leitfaden werden wir Milvus Helm Charts verwenden, um einen Milvus-Cluster bereitzustellen. Sie finden die Charts hier.
+In dieser Anleitung werden wir Milvus Helm Charts verwenden, um einen Milvus-Cluster bereitzustellen. Sie finden die Charts hier.
  • Milvus Helm Chart Repo hinzufügen.
diff --git a/localization/v2.4.x/site/de/adminGuide/clouds/openshift/openshift.json b/localization/v2.4.x/site/de/adminGuide/clouds/openshift/openshift.json index 2bd1b43c3..3068619df 100644 --- a/localization/v2.4.x/site/de/adminGuide/clouds/openshift/openshift.json +++ b/localization/v2.4.x/site/de/adminGuide/clouds/openshift/openshift.json @@ -1 +1 @@ -{"codeList":["# milvus-operator-certificate.yaml\napiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\nname: milvus-operator-serving-cert\nnamespace: milvus-operator\nspec:\ndnsNames:\n- milvus-operator-webhook-service.milvus-operator.svc\n- milvus-operator-webhook-service.milvus-operator.svc.cluster.local\nissuerRef:\n kind: Issuer\n name: milvus-operator-selfsigned-issuer\nsecretName: milvus-operator-webhook-cert\n---\napiVersion: cert-manager.io/v1\nkind: Issuer\nmetadata:\nname: milvus-operator-selfsigned-issuer\nnamespace: milvus-operator\nspec:\nselfSigned: {}\n","kubectl apply -f milvus-operator-certificate.yaml\n","helm repo add milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update milvus-operator\n","helm -n milvus-operator upgrade --install --create-namespace milvus-operator milvus-operator/milvus-operator\n"],"headingContent":"","anchorList":[{"label":"Bereitstellung eines Milvus-Clusters auf OpenShift","href":"Deploy-a-Milvus-Cluster-on-OpenShift","type":1,"isActive":false},{"label":"Voraussetzungen","href":"Prerequisites","type":2,"isActive":false},{"label":"Schritt 1: Installieren Sie Cert Manager","href":"Step-1-Install-Cert-Manager","type":2,"isActive":false},{"label":"Schritt 2: Ausstellen eines selbstsignierten Zertifikats für Milvus Operator","href":"Step-2-Issue-a-Self-Signed-Certificate-for-Milvus-Operator","type":2,"isActive":false},{"label":"Schritt 3: Installieren Sie Milvus Operator","href":"Step-3-Install-Milvus-Operator","type":2,"isActive":false},{"label":"Schritt 4: Milvus bereitstellen","href":"Step-4-Deploy-Milvus","type":2,"isActive":false},{"label":"Was kommt als nächstes?","href":"Whats-Next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["# milvus-operator-certificate.yaml\napiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\n name: milvus-operator-serving-cert\n namespace: milvus-operator\nspec:\n dnsNames:\n - milvus-operator-webhook-service.milvus-operator.svc\n - milvus-operator-webhook-service.milvus-operator.svc.cluster.local\n issuerRef:\n kind: Issuer\n name: milvus-operator-selfsigned-issuer\n secretName: milvus-operator-webhook-cert\n---\napiVersion: cert-manager.io/v1\nkind: Issuer\nmetadata:\n name: milvus-operator-selfsigned-issuer\n namespace: milvus-operator\nspec:\n selfSigned: {}\n","kubectl apply -f milvus-operator-certificate.yaml\n","helm repo add milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update milvus-operator\n","helm -n milvus-operator upgrade --install --create-namespace milvus-operator milvus-operator/milvus-operator\n"],"headingContent":"Deploy a Milvus Cluster on OpenShift","anchorList":[{"label":"Bereitstellung eines Milvus-Clusters auf OpenShift","href":"Deploy-a-Milvus-Cluster-on-OpenShift","type":1,"isActive":false},{"label":"Voraussetzungen","href":"Prerequisites","type":2,"isActive":false},{"label":"Schritt 1: Installieren Sie Cert Manager","href":"Step-1-Install-Cert-Manager","type":2,"isActive":false},{"label":"Schritt 2: Ausstellen eines selbstsignierten Zertifikats für Milvus 
Operator","href":"Step-2-Issue-a-Self-Signed-Certificate-for-Milvus-Operator","type":2,"isActive":false},{"label":"Schritt 3: Installieren Sie Milvus Operator","href":"Step-3-Install-Milvus-Operator","type":2,"isActive":false},{"label":"Schritt 4: Milvus bereitstellen","href":"Step-4-Deploy-Milvus","type":2,"isActive":false},{"label":"Was kommt als nächstes?","href":"Whats-Next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/adminGuide/clouds/openshift/openshift.md b/localization/v2.4.x/site/de/adminGuide/clouds/openshift/openshift.md index 4e3a777b6..cd932405f 100644 --- a/localization/v2.4.x/site/de/adminGuide/clouds/openshift/openshift.md +++ b/localization/v2.4.x/site/de/adminGuide/clouds/openshift/openshift.md @@ -89,31 +89,31 @@ summary: 'Erfahren Sie, wie Sie einen Milvus-Cluster auf OpenShift bereitstellen d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

-Stellen Sie sicher, dass Sie als kubeadmin oder mit gleichwertigen Rechten angemeldet sind.
+Vergewissern Sie sich, dass Sie als kubeadmin oder mit gleichwertigen Rechten angemeldet sind.

  1. Erstellen Sie die folgende Manifestdatei mit dem Namen milvus-operator-certificate.yaml:

    # milvus-operator-certificate.yaml
     apiVersion: cert-manager.io/v1
     kind: Certificate
     metadata:
    -name: milvus-operator-serving-cert
    -namespace: milvus-operator
    +  name: milvus-operator-serving-cert
    +  namespace: milvus-operator
     spec:
    -dnsNames:
    -- milvus-operator-webhook-service.milvus-operator.svc
    -- milvus-operator-webhook-service.milvus-operator.svc.cluster.local
    -issuerRef:
    +  dnsNames:
    +  - milvus-operator-webhook-service.milvus-operator.svc
    +  - milvus-operator-webhook-service.milvus-operator.svc.cluster.local
    +  issuerRef:
         kind: Issuer
         name: milvus-operator-selfsigned-issuer
    -secretName: milvus-operator-webhook-cert
    +  secretName: milvus-operator-webhook-cert
     ---
     apiVersion: cert-manager.io/v1
     kind: Issuer
     metadata:
    -name: milvus-operator-selfsigned-issuer
    -namespace: milvus-operator
    +  name: milvus-operator-selfsigned-issuer
    +  namespace: milvus-operator
     spec:
    -selfSigned: {}
    +  selfSigned: {}
     
  2. Wenden Sie die Datei an:

    kubectl apply -f milvus-operator-certificate.yaml
    diff --git a/localization/v2.4.x/site/de/adminGuide/configure-docker.json b/localization/v2.4.x/site/de/adminGuide/configure-docker.json
    index 91bdf6b73..2d3e14c66 100644
    --- a/localization/v2.4.x/site/de/adminGuide/configure-docker.json
    +++ b/localization/v2.4.x/site/de/adminGuide/configure-docker.json
    @@ -1 +1 @@
    -{"codeList":["$ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.9/configs/milvus.yaml\n","# For Milvus standalone\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml\n","...\n  standalone:\n    container_name: milvus-standalone\n    image: milvusdb/milvus:v2.2.13\n    command: [\"milvus\", \"run\", \"standalone\"]\n    environment:\n      ETCD_ENDPOINTS: etcd:2379\n      MINIO_ADDRESS: minio:9000\n    volumes:\n      - /local/path/to/your/milvus.yaml:/milvus/configs/milvus.yaml   # Map the local path to the container path\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n    ports:\n      - \"19530:19530\"\n      - \"9091:9091\"\n    depends_on:\n      - \"etcd\"\n      - \"minio\"\n...\n","$ sudo docker compose up -d\n"],"headingContent":"","anchorList":[{"label":"Konfigurieren von Milvus mit Docker Compose","href":"Configure-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"Herunterladen einer Konfigurationsdatei","href":"Download-a-configuration-file","type":2,"isActive":false},{"label":"Ändern Sie die Konfigurationsdatei","href":"Modify-the-configuration-file","type":2,"isActive":false},{"label":"Download einer Installationsdatei","href":"Download-an-installation-file","type":2,"isActive":false},{"label":"Ändern Sie die Installationsdatei","href":"Modify-the-installation-file","type":2,"isActive":false},{"label":"Starten Sie Milvus","href":"Start-Milvus","type":2,"isActive":false},{"label":"Wie geht es weiter?","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.13-hotfix/configs/milvus.yaml\n","# For Milvus standalone\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml\n","...\n  standalone:\n    container_name: milvus-standalone\n    image: milvusdb/milvus:v2.2.13\n    command: [\"milvus\", \"run\", \"standalone\"]\n    environment:\n      ETCD_ENDPOINTS: etcd:2379\n      MINIO_ADDRESS: minio:9000\n    volumes:\n      - /local/path/to/your/milvus.yaml:/milvus/configs/milvus.yaml   # Map the local path to the container path\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n    ports:\n      - \"19530:19530\"\n      - \"9091:9091\"\n    depends_on:\n      - \"etcd\"\n      - \"minio\"\n...\n","$ sudo docker compose up -d\n"],"headingContent":"Configure Milvus with Docker Compose","anchorList":[{"label":"Konfigurieren von Milvus mit Docker Compose","href":"Configure-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"Herunterladen einer Konfigurationsdatei","href":"Download-a-configuration-file","type":2,"isActive":false},{"label":"Ändern Sie die Konfigurationsdatei","href":"Modify-the-configuration-file","type":2,"isActive":false},{"label":"Download einer Installationsdatei","href":"Download-an-installation-file","type":2,"isActive":false},{"label":"Ändern Sie die Installationsdatei","href":"Modify-the-installation-file","type":2,"isActive":false},{"label":"Starten Sie Milvus","href":"Start-Milvus","type":2,"isActive":false},{"label":"Wie geht es weiter?","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/de/adminGuide/configure-docker.md b/localization/v2.4.x/site/de/adminGuide/configure-docker.md
    index 2f607f480..09908e933 100644
    --- a/localization/v2.4.x/site/de/adminGuide/configure-docker.md
    +++ b/localization/v2.4.x/site/de/adminGuide/configure-docker.md
    @@ -3,7 +3,7 @@ id: configure-docker.md
     label: Docker Compose
     related_key: configure
     summary: Konfigurieren Sie Milvus mit Docker Compose.
    -title: Milvus mit Docker Compose konfigurieren
    +title: Konfigurieren von Milvus mit Docker Compose
     ---
     

    Konfigurieren von Milvus mit Docker Compose

-In diesem Thema wird beschrieben, wie Sie Milvus-Komponenten und die Abhängigkeiten von Drittanbietern mit Docker Compose konfigurieren.
+In diesem Thema wird beschrieben, wie Sie Milvus-Komponenten und deren Abhängigkeiten von Drittanbietern mit Docker Compose konfigurieren.

    In der aktuellen Version werden alle Parameter erst nach einem Neustart von Milvus wirksam.

    Herunterladen einer Konfigurationsdatei

-Laden Sie milvus.yaml direkt oder mit dem folgenden Befehl herunter.
-$ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.9/configs/milvus.yaml
+Laden Sie milvus.yaml direkt oder mit dem folgenden Befehl herunter.
+$ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.13-hotfix/configs/milvus.yaml
     

    Ändern Sie die Konfigurationsdatei

-Laden Sie die Installationsdatei für Milvus standalone herunter und speichern Sie sie als docker-compose.yml.
+Laden Sie die Installationsdatei für Milvus standalone herunter und speichern Sie sie als docker-compose.yml.

    Sie können auch einfach den folgenden Befehl ausführen.

    # For Milvus standalone
    -$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml
    +$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml
     

    Ändern Sie die Installationsdatei

-Dieser Artikel beschreibt, wie Sie Milvus QueryNode so konfigurieren, dass es lokalen Plattenspeicher verwendet.
+Dieser Artikel beschreibt, wie Sie Milvus QueryNode für die Verwendung von lokalem Plattenspeicher konfigurieren.

    Überblick

Konfigurieren von Kubernetes zur Verwendung einer lokalen Festplatte

-Weitere Informationen finden Sie unter Bereitstellen von lokalem SSD-Speicher auf GKE.
+Einzelheiten finden Sie unter Bereitstellung von lokalem SSD-Speicher auf GKE.

    Azure

    Um ein Virtual Machine Scale Set (VMSS) mit lokalem NVMe-Plattenspeicher zu erstellen, müssen Sie benutzerdefinierte Daten an die VM-Instanzen übergeben. Im Folgenden finden Sie ein Beispiel dafür, wie Sie eine NVMe-Festplatte an die VM-Instanzen in der VMSS anhängen:

    mdadm -Cv /dev/md0 -l0 -n2 /dev/nvme0n1 /dev/nvme1n1
     mdadm -Ds > /etc/mdadm/mdadm.conf 
    @@ -183,7 +183,7 @@ state = "/mnt/nvme/containerd/state"
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
-Es wird empfohlen, die Festplattenleistung mit Fio zu überprüfen. Fio ist ein beliebtes Tool zum Benchmarking der Festplattenleistung. Im Folgenden finden Sie ein Beispiel dafür, wie Sie Fio zum Testen der Festplattenleistung ausführen.
+Es wird empfohlen, die Festplattenleistung mit Fio zu überprüfen. Fio ist ein beliebtes Tool zum Benchmarking der Festplattenleistung. Im Folgenden finden Sie ein Beispiel dafür, wie Sie Fio zum Testen der Festplattenleistung ausführen.

    • Setzen Sie den Test-Pod auf dem Knoten mit der NVMe-Festplatte ein.

      kubectl create -f ubuntu.yaml
      @@ -217,7 +217,7 @@ apt-get install fio -y
       cd /data
       
       # write 10GB
      -fio -direct=1-iodepth=128 -rw=randwrite -ioengine=libaio -bs=4K -size=10G -numjobs=10 -runtime=600 -group_reporting -filename=test -name=Rand_Write_IOPS_Test
      +fio -direct=1 -iodepth=128 -rw=randwrite -ioengine=libaio -bs=4K -size=10G -numjobs=10 -runtime=600 -group_reporting -filename=test -name=Rand_Write_IOPS_Test
       
       # verify the read speed
       # compare with the disk performance indicators provided by various cloud providers.
      diff --git a/localization/v2.4.x/site/de/adminGuide/limit_collection_counts.json b/localization/v2.4.x/site/de/adminGuide/limit_collection_counts.json
      index b682443d5..97217f1a2 100644
      --- a/localization/v2.4.x/site/de/adminGuide/limit_collection_counts.json
      +++ b/localization/v2.4.x/site/de/adminGuide/limit_collection_counts.json
      @@ -1 +1 @@
      -{"codeList":["rootCoord:\n    maxGeneralCapacity: 1024\n","60 (collections) x 2 (shards) x 4 (partitions) + 40 (collections) x 1 (shard) x 12 (partitions) = 960\n","failed checking constraint: sum_collections(parition*shard) exceeding the max general capacity:\n"],"headingContent":"","anchorList":[{"label":"Begrenzung der Anzahl der Sammlungen","href":"Limit-Collection-Counts","type":1,"isActive":false},{"label":"Konfigurationsoptionen","href":"Configuration-options","type":2,"isActive":false},{"label":"Berechnen der Anzahl der Sammlungen","href":"Calculating-the-number-of-collections","type":2,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":["rootCoord:\n    maxGeneralCapacity: 65536\n","60 (collections) x 2 (shards) x 4 (partitions) + 40 (collections) x 1 (shard) x 12 (partitions) = 960\n","failed checking constraint: sum_collections(parition*shard) exceeding the max general capacity:\n"],"headingContent":"Limit Collection Counts","anchorList":[{"label":"Begrenzung der Anzahl der Sammlungen","href":"Limit-Collection-Counts","type":1,"isActive":false},{"label":"Konfigurationsoptionen","href":"Configuration-options","type":2,"isActive":false},{"label":"Berechnen der Anzahl der Sammlungen","href":"Calculating-the-number-of-collections","type":2,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/de/adminGuide/limit_collection_counts.md b/localization/v2.4.x/site/de/adminGuide/limit_collection_counts.md
      index 75e560770..ea5a6c70f 100644
      --- a/localization/v2.4.x/site/de/adminGuide/limit_collection_counts.md
      +++ b/localization/v2.4.x/site/de/adminGuide/limit_collection_counts.md
      @@ -1,7 +1,6 @@
       ---
       id: limit_collection_counts.md
       title: Begrenzung der Erfassungsanzahl festlegen
      -summary: ''
       ---
       

      Begrenzung der Anzahl der Sammlungen

      rootCoord:
      -    maxGeneralCapacity: 1024
      +    maxGeneralCapacity: 65536
       
-Der Parameter maxGeneralCapacity legt die maximale Anzahl von Sammlungen fest, die die aktuelle Milvus-Instanz enthalten kann. Der Standardwert ist 1024.
+Der Parameter maxGeneralCapacity legt die maximale Anzahl von Sammlungen fest, die die aktuelle Milvus-Instanz enthalten kann. Der Standardwert ist 65536.

      Berechnen der Anzahl der Sammlungen

-Im obigen Beispiel haben Sie bereits 960 der Standardgrenzen genutzt. Wenn Sie nun eine neue Sammlung mit 4 Scherben und 20 Partitionen erstellen wollen, erhalten Sie folgende Fehlermeldung, da die Gesamtzahl der Sammlungen die maximale Kapazität überschreitet:
+Im obigen Beispiel haben Sie bereits 960 der Standardgrenze genutzt. Wenn Sie nun eine neue Sammlung mit 4 Shards und 20 Partitionen erstellen möchten, erhalten Sie folgende Fehlermeldung, da die Gesamtzahl der Sammlungen die maximale Kapazität überschreitet:

      failed checking constraint: sum_collections(parition*shard) exceeding the max general capacity:
       

      Um diesen Fehler zu vermeiden, können Sie entweder die Anzahl der Shards oder Partitionen in bestehenden oder neuen Sammlungen reduzieren, einige Sammlungen löschen oder den Wert maxGeneralCapacity erhöhen.
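Zur Veranschaulichung der obigen Rechnung eine kleine Python-Skizze. Die Hilfsfunktion general_capacity_used ist hypothetisch und dient nur dazu, die Formel Sammlungen × Shards × Partitionen nachzurechnen:

```python
# Hypothetical helper: recompute the general-capacity usage described above.
# Each entry: (number of collections, shards per collection, partitions per collection)
def general_capacity_used(groups):
    return sum(n * shards * partitions for n, shards, partitions in groups)

existing = [(60, 2, 4), (40, 1, 12)]
print(general_capacity_used(existing))                 # 960

# One more collection with 4 shards and 20 partitions:
print(general_capacity_used(existing + [(1, 4, 20)]))  # 1040 -> exceeds a limit of 1024
```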

      diff --git a/localization/v2.4.x/site/de/adminGuide/rbac.json b/localization/v2.4.x/site/de/adminGuide/rbac.json index 65aedbb8f..7788471ed 100644 --- a/localization/v2.4.x/site/de/adminGuide/rbac.json +++ b/localization/v2.4.x/site/de/adminGuide/rbac.json @@ -1 +1 @@ -{"codeList":["from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri='http://localhost:19530', # replace with your own Milvus server address\n token='root:Milvus' # replace with your own Milvus server token\n)\n","client.create_user(\n user_name='user_1',\n password='P@ssw0rd'\n)\n","client.update_password(\n user_name='user_1',\n old_password='P@ssw0rd',\n new_password='P@ssw0rd123'\n)\n","client.list_users()\n\n# output:\n# ['root', 'user_1']\n","client.describe_user(user_name='user_1')\n\n# output:\n# {'user_name': 'user_1', 'roles': ()}\n","client.create_role(\n role_name=\"roleA\",\n)\n","client.list_roles()\n\n# output:\n# ['admin', 'public', 'roleA']\n","# grant privilege to a role\n\nclient.grant_privilege(\n role_name='roleA',\n object_type='User', # value here can be Global, Collection or User, object type also depends on the API defined in privilegeName\n object_name='user_1', # value here can be * or a specific user name if object type is 'User'\n privilege='SelectUser'\n)\n","client.describe_role(\n role_name='roleA'\n)\n\n# output:\n# {'role': 'roleA',\n# 'privileges': [{'object_type': 'User',\n# 'object_name': 'user_1',\n# 'db_name': 'default',\n# 'role_name': 'roleA',\n# 'privilege': 'SelectUser',\n# 'grantor_name': 'root'}]}\n","# grant a role to a user\n\nclient.grant_role(\n user_name='user_1',\n role_name='roleA'\n)\n","client.describe_user(\n user_name='user_1'\n)\n\n# output:\n# {'user_name': 'user_1', 'roles': ('roleA')}\n","client.revoke_privilege(\n role_name='roleA',\n object_type='User', # value here can be Global, Collection or User, object type also depends on the API defined in privilegeName\n object_name='user_1', # value here can be * or a specific user name if object type is 'User'\n privilege='SelectUser'\n)\n","client.revoke_role(\n user_name='user_1',\n role_name='roleA'\n)\n","client.drop_role(role_name='roleA')\n","client.drop_user(user_name='user_1')\n"],"headingContent":"","anchorList":[{"label":"RBAC aktivieren","href":"Enable-RBAC","type":1,"isActive":false},{"label":"1. Initiieren Sie einen Milvus-Client, um eine Verbindung herzustellen","href":"1-Initiate-a-Milvus-client-to-establish-a-connection","type":2,"isActive":false},{"label":"2. Erstellen Sie einen Benutzer","href":"2-Create-a-user","type":2,"isActive":false},{"label":"3. Erstellen Sie eine Rolle","href":"3-Create-a-role","type":2,"isActive":false},{"label":"4. Einer Rolle ein Privileg erteilen","href":"4-Grant-a-privilege-to-a-role","type":2,"isActive":false},{"label":"5. Einem Benutzer eine Rolle gewähren","href":"5-Grant-a-role-to-a-user","type":2,"isActive":false},{"label":"6. 
Entziehen von Privilegien","href":"6-Revoke-privileges","type":2,"isActive":false},{"label":"Was kommt als nächstes?","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri='http://localhost:19530', # replace with your own Milvus server address\n token='root:Milvus' # replace with your own Milvus server token\n)\n","client.create_user(\n user_name='user_1',\n password='P@ssw0rd'\n)\n","client.update_password(\n user_name='user_1',\n old_password='P@ssw0rd',\n new_password='P@ssw0rd123'\n)\n","client.list_users()\n\n# output:\n# ['root', 'user_1']\n","client.describe_user(user_name='user_1')\n\n# output:\n# {'user_name': 'user_1', 'roles': ()}\n","client.create_role(\n role_name=\"roleA\",\n)\n","client.list_roles()\n\n# output:\n# ['admin', 'public', 'roleA']\n","# grant privilege to a role\n\nclient.grant_privilege(\n role_name='roleA',\n object_type='User', # value here can be Global, Collection or User, object type also depends on the API defined in privilegeName\n object_name='user_1', # value here can be * or a specific user name if object type is 'User'\n privilege='SelectUser'\n)\n","client.describe_role(\n role_name='roleA'\n)\n\n# output:\n# {'role': 'roleA',\n# 'privileges': [{'object_type': 'User',\n# 'object_name': 'user_1',\n# 'db_name': 'default',\n# 'role_name': 'roleA',\n# 'privilege': 'SelectUser',\n# 'grantor_name': 'root'}]}\n","# grant a role to a user\n\nclient.grant_role(\n user_name='user_1',\n role_name='roleA'\n)\n","client.describe_user(\n user_name='user_1'\n)\n\n# output:\n# {'user_name': 'user_1', 'roles': ('roleA')}\n","client.revoke_privilege(\n role_name='roleA',\n object_type='User', # value here can be Global, Collection or User, object type also depends on the API defined in privilegeName\n object_name='user_1', # value here can be * or a specific user name if object type is 'User'\n privilege='SelectUser'\n)\n","client.revoke_role(\n user_name='user_1',\n role_name='roleA'\n)\n","client.drop_role(role_name='roleA')\n","client.drop_user(user_name='user_1')\n"],"headingContent":"Enable RBAC","anchorList":[{"label":"RBAC aktivieren","href":"Enable-RBAC","type":1,"isActive":false},{"label":"1. Initiieren Sie einen Milvus-Client, um eine Verbindung herzustellen","href":"1-Initiate-a-Milvus-client-to-establish-a-connection","type":2,"isActive":false},{"label":"2. Erstellen Sie einen Benutzer","href":"2-Create-a-user","type":2,"isActive":false},{"label":"3. Erstellen Sie eine Rolle","href":"3-Create-a-role","type":2,"isActive":false},{"label":"4. Einer Rolle ein Privileg erteilen","href":"4-Grant-a-privilege-to-a-role","type":2,"isActive":false},{"label":"5. Einem Benutzer eine Rolle gewähren","href":"5-Grant-a-role-to-a-user","type":2,"isActive":false},{"label":"6. Entziehen von Privilegien","href":"6-Revoke-privileges","type":2,"isActive":false},{"label":"Was kommt als nächstes?","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/adminGuide/rbac.md b/localization/v2.4.x/site/de/adminGuide/rbac.md index 1e27fe647..e36be5767 100644 --- a/localization/v2.4.x/site/de/adminGuide/rbac.md +++ b/localization/v2.4.x/site/de/adminGuide/rbac.md @@ -2,7 +2,7 @@ id: rbac.md related_key: enable RBAC summary: 'Erfahren Sie, wie Sie Benutzer, Rollen und Berechtigungen verwalten können.' -title: RBAC einschalten +title: RBAC aktivieren ---

      RBAC aktivieren

-Wenn Sie RBAC aktivieren, können Sie den Zugriff auf bestimmte Milvus-Ressourcen (z. B. eine Sammlung oder eine Partition) oder Berechtigungen auf der Grundlage von Benutzerrollen und -rechten steuern. Derzeit ist diese Funktion nur in Python und Java verfügbar.
+Wenn Sie RBAC aktivieren, können Sie den Zugriff auf bestimmte Milvus-Ressourcen (z. B. eine Sammlung oder eine Partition) oder Berechtigungen auf der Grundlage von Benutzerrollen und Privilegien steuern. Derzeit ist diese Funktion nur in Python und Java verfügbar.

      Dieses Thema beschreibt, wie man RBAC aktiviert und Benutzer und Rollen verwaltet.

      Die Codeschnipsel auf dieser Seite verwenden den neuen MilvusClient (Python) zur Interaktion mit Milvus. Neue MilvusClient SDKs für andere Sprachen werden in zukünftigen Updates veröffentlicht.
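Zur Einordnung eine kompakte Python-Skizze, die die einzelnen Snippets aus der Codeliste dieser Seite zu einem Ablauf zusammenfasst; Adresse und Token sind Platzhalter und müssen an die eigene Bereitstellung angepasst werden:

```python
from pymilvus import MilvusClient

# Connect to Milvus (replace with your own server address and token).
client = MilvusClient(
    uri='http://localhost:19530',
    token='root:Milvus'
)

# Create a user, create a role, then grant the role to the user.
client.create_user(user_name='user_1', password='P@ssw0rd')
client.create_role(role_name='roleA')
client.grant_role(user_name='user_1', role_name='roleA')

# Verify the binding, e.g. {'user_name': 'user_1', 'roles': ('roleA')}
print(client.describe_user(user_name='user_1'))
```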

      @@ -190,7 +190,7 @@ client.grant_role( role_name='roleA' )
-Nach der Erteilung der Rolle überprüfen, ob sie erteilt wurde:
+Überprüfen Sie nach der Vergabe der Rolle, ob sie vergeben wurde:

    client.describe_user(
         user_name='user_1'
     )
    diff --git a/localization/v2.4.x/site/de/adminGuide/resource_group.json b/localization/v2.4.x/site/de/adminGuide/resource_group.json
    index f30dd7063..99611838c 100644
    --- a/localization/v2.4.x/site/de/adminGuide/resource_group.json
    +++ b/localization/v2.4.x/site/de/adminGuide/resource_group.json
    @@ -1 +1 @@
    -{"codeList":["{\n    \"requests\": { \"nodeNum\": 1 },\n    \"limits\": { \"nodeNum\": 1 },\n    \"transfer_from\": [{ \"resource_group\": \"rg1\" }],\n    \"transfer_to\": [{ \"resource_group\": \"rg2\" }]\n}\n","import pymilvus\n\n# A resource group name should be a string of 1 to 255 characters, starting with a letter or an underscore (_) and containing only numbers, letters, and underscores (_).\nname = \"rg\"\nnode_num = 0\n\n# create a resource group that exactly hold no query node.\ntry:\n    utility.create_resource_group(name, config=utility.ResourceGroupConfig(\n        requests={\"node_num\": node_num},\n        limits={\"node_num\": node_num},\n    ), using='default')\n    print(f\"Succeeded in creating resource group {name}.\")\nexcept Exception:\n    print(\"Failed to create the resource group.\")\n","rgs = utility.list_resource_groups(using='default')\nprint(f\"Resource group list: {rgs}\")\n\n# Resource group list: ['__default_resource_group', 'rg']\n","info = utility.describe_resource_group(name, using=\"default\")\nprint(f\"Resource group description: {info}\")\n\n# Resource group description: \n#        ,           // string, rg name\n#        ,            // int, num_node which has been transfer to this rg\n#        ,  // int, available node_num, some node may shutdown\n#        , // map[string]int, from collection_name to loaded replica of each collecion in this rg\n#        ,  // map[string]int, from collection_name to outgoging accessed node num by replica loaded in this rg \n#        .  // map[string]int, from collection_name to incoming accessed node num by replica loaded in other rg\n","source = '__default_resource_group'\ntarget = 'rg'\nexpected_num_nodes_in_default = 0\nexpected_num_nodes_in_rg = 1\n\ntry:\n    utility.update_resource_groups({\n        source: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_default},\n            limits={\"node_num\": expected_num_nodes_in_default},\n        ),\n        target: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_rg},\n            limits={\"node_num\": expected_num_nodes_in_rg},\n        )\n    }, using=\"default\")\n    print(f\"Succeeded in move 1 node(s) from {source} to {target}.\")\nexcept Exception:\n    print(\"Something went wrong while moving nodes.\")\n\n# After a while, succeeded in moving 1 node(s) from __default_resource_group to rg.\n","from pymilvus import Collection\n\ncollection = Collection('demo')\n\n# Milvus loads the collection to the default resource group.\ncollection.load(replica_number=2)\n\n# Or, you can ask Milvus load the collection to the desired resource group.\n# make sure that query nodes num should be greater or equal to replica_number\nresource_groups = ['rg']\ncollection.load(replica_number=2, _resource_groups=resource_groups) \n","collection = Collection(\"Books\")\n\n# Use the load method of a collection to load one of its partition\ncollection.load([\"Novels\"], replica_number=2, _resource_groups=resource_groups)\n\n# Or, you can use the load method of a partition directly\npartition = Partition(collection, \"Novels\")\npartition.load(replica_number=2, _resource_groups=resource_groups)\n","source = '__default_resource_group'\ntarget = 'rg'\ncollection_name = 'c'\nnum_replicas = 1\n\ntry:\n    utility.transfer_replica(source, target, collection_name, num_replicas, using=\"default\")\n    print(f\"Succeeded in moving {num_node} replica(s) of {collection_name} from {source} to {target}.\")\nexcept Exception:\n    
print(\"Something went wrong while moving replicas.\")\n\n# Succeeded in moving 1 replica(s) of c from __default_resource_group to rg.\n","try:\n    utility.update_resource_groups({\n        \"rg\": utility.ResourceGroupConfig(\n            requests={\"node_num\": 0},\n            limits={\"node_num\": 0},\n        ),\n    }, using=\"default\")\n    utility.drop_resource_group(\"rg\", using=\"default\")\n    print(f\"Succeeded in dropping {source}.\")\nexcept Exception:\n    print(f\"Something went wrong while dropping {source}.\")\n","from pymilvus import utility\nfrom pymilvus.client.types import ResourceGroupConfig\n\n_PENDING_NODES_RESOURCE_GROUP=\"__pending_nodes\"\n\ndef init_cluster(node_num: int):\n    print(f\"Init cluster with {node_num} nodes, all nodes will be put in default resource group\")\n    # create a pending resource group, which can used to hold the pending nodes that do not hold any data.\n    utility.create_resource_group(name=_PENDING_NODES_RESOURCE_GROUP, config=ResourceGroupConfig(\n        requests={\"node_num\": 0}, # this resource group can hold 0 nodes, no data will be load on it.\n        limits={\"node_num\": 10000}, # this resource group can hold at most 10000 nodes \n    ))\n\n    # update default resource group, which can used to hold the nodes that all initial node in it.\n    utility.update_resource_groups({\n        \"__default_resource_group\": ResourceGroupConfig(\n            requests={\"node_num\": node_num},\n            limits={\"node_num\": node_num},\n            transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover missing node from pending resource group at high priority.\n            transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover redundant node to pending resource group at low priority.\n        )})\n    utility.create_resource_group(name=\"rg1\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n    utility.create_resource_group(name=\"rg2\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n\ninit_cluster(1)\n","\ndef scale_to(node_num: int):\n    # scale the querynode number in Milvus into node_num.\n    pass\n","# scale rg1 into 3 nodes, rg2 into 1 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 3},\n        limits={\"node_num\": 3},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n    \"rg2\": ResourceGroupConfig(\n        requests={\"node_num\": 1},\n        limits={\"node_num\": 1},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\nscale_to(5)\n# rg1 has 3 nodes, rg2 has 1 node, __default_resource_group has 1 node.\n","# scale rg1 from 3 nodes into 2 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 2},\n        limits={\"node_num\": 2},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    
    transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\n\n# rg1 has 2 nodes, rg2 has 1 node, __default_resource_group has 1 node, __pending_nodes has 1 node.\nscale_to(4)\n# scale the node in __pending_nodes\n"],"headingContent":"","anchorList":[{"label":"Verwalten von Ressourcengruppen","href":"Manage-Resource-Groups","type":1,"isActive":false},{"label":"Was ist eine Ressourcengruppe?","href":"What-is-a-resource-group","type":2,"isActive":false},{"label":"Konzepte der Ressourcengruppe","href":"Concepts-of-resource-group","type":2,"isActive":false},{"label":"Verwendung der deklarativen API zur Verwaltung der Ressourcengruppe","href":"Use-declarative-api-to-manage-resource-group","type":2,"isActive":false},{"label":"Eine gute Praxis zur Verwaltung der Clusterskalierung","href":"A-good-practice-to-manage-cluster-scaling","type":2,"isActive":false},{"label":"Wie Ressourcengruppen mit mehreren Replikaten interagieren","href":"How-resource-groups-interacts-with-multiple-replicas","type":2,"isActive":false},{"label":"Wie geht es weiter?","href":"Whats-next","type":1,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["{\n    \"requests\": { \"nodeNum\": 1 },\n    \"limits\": { \"nodeNum\": 1 },\n    \"transfer_from\": [{ \"resource_group\": \"rg1\" }],\n    \"transfer_to\": [{ \"resource_group\": \"rg2\" }]\n}\n","import pymilvus\n\n# A resource group name should be a string of 1 to 255 characters, starting with a letter or an underscore (_) and containing only numbers, letters, and underscores (_).\nname = \"rg\"\nnode_num = 0\n\n# create a resource group that exactly hold no query node.\ntry:\n    utility.create_resource_group(name, config=utility.ResourceGroupConfig(\n        requests={\"node_num\": node_num},\n        limits={\"node_num\": node_num},\n    ), using='default')\n    print(f\"Succeeded in creating resource group {name}.\")\nexcept Exception:\n    print(\"Failed to create the resource group.\")\n","rgs = utility.list_resource_groups(using='default')\nprint(f\"Resource group list: {rgs}\")\n\n# Resource group list: ['__default_resource_group', 'rg']\n","info = utility.describe_resource_group(name, using=\"default\")\nprint(f\"Resource group description: {info}\")\n\n# Resource group description: \n#        ,           // string, rg name\n#        ,            // int, num_node which has been transfer to this rg\n#        ,  // int, available node_num, some node may shutdown\n#        , // map[string]int, from collection_name to loaded replica of each collecion in this rg\n#        ,  // map[string]int, from collection_name to outgoging accessed node num by replica loaded in this rg \n#        .  // map[string]int, from collection_name to incoming accessed node num by replica loaded in other rg\n","source = '__default_resource_group'\ntarget = 'rg'\nexpected_num_nodes_in_default = 0\nexpected_num_nodes_in_rg = 1\n\ntry:\n    utility.update_resource_groups({\n        source: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_default},\n            limits={\"node_num\": expected_num_nodes_in_default},\n        ),\n        target: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_rg},\n            limits={\"node_num\": expected_num_nodes_in_rg},\n        )\n    }, using=\"default\")\n    print(f\"Succeeded in move 1 node(s) from {source} to {target}.\")\nexcept Exception:\n    print(\"Something went wrong while moving nodes.\")\n\n# After a while, succeeded in moving 1 node(s) from __default_resource_group to rg.\n","from pymilvus import Collection\n\ncollection = Collection('demo')\n\n# Milvus loads the collection to the default resource group.\ncollection.load(replica_number=2)\n\n# Or, you can ask Milvus load the collection to the desired resource group.\n# make sure that query nodes num should be greater or equal to replica_number\nresource_groups = ['rg']\ncollection.load(replica_number=2, _resource_groups=resource_groups) \n","collection = Collection(\"Books\")\n\n# Use the load method of a collection to load one of its partition\ncollection.load([\"Novels\"], replica_number=2, _resource_groups=resource_groups)\n\n# Or, you can use the load method of a partition directly\npartition = Partition(collection, \"Novels\")\npartition.load(replica_number=2, _resource_groups=resource_groups)\n","source = '__default_resource_group'\ntarget = 'rg'\ncollection_name = 'c'\nnum_replicas = 1\n\ntry:\n    utility.transfer_replica(source, target, collection_name, num_replicas, using=\"default\")\n    print(f\"Succeeded in moving {num_node} replica(s) of {collection_name} from {source} to {target}.\")\nexcept Exception:\n    
print(\"Something went wrong while moving replicas.\")\n\n# Succeeded in moving 1 replica(s) of c from __default_resource_group to rg.\n","try:\n    utility.update_resource_groups({\n        \"rg\": utility.ResourceGroupConfig(\n            requests={\"node_num\": 0},\n            limits={\"node_num\": 0},\n        ),\n    }, using=\"default\")\n    utility.drop_resource_group(\"rg\", using=\"default\")\n    print(f\"Succeeded in dropping {source}.\")\nexcept Exception:\n    print(f\"Something went wrong while dropping {source}.\")\n","from pymilvus import utility\nfrom pymilvus.client.types import ResourceGroupConfig\n\n_PENDING_NODES_RESOURCE_GROUP=\"__pending_nodes\"\n\ndef init_cluster(node_num: int):\n    print(f\"Init cluster with {node_num} nodes, all nodes will be put in default resource group\")\n    # create a pending resource group, which can used to hold the pending nodes that do not hold any data.\n    utility.create_resource_group(name=_PENDING_NODES_RESOURCE_GROUP, config=ResourceGroupConfig(\n        requests={\"node_num\": 0}, # this resource group can hold 0 nodes, no data will be load on it.\n        limits={\"node_num\": 10000}, # this resource group can hold at most 10000 nodes \n    ))\n\n    # update default resource group, which can used to hold the nodes that all initial node in it.\n    utility.update_resource_groups({\n        \"__default_resource_group\": ResourceGroupConfig(\n            requests={\"node_num\": node_num},\n            limits={\"node_num\": node_num},\n            transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover missing node from pending resource group at high priority.\n            transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover redundant node to pending resource group at low priority.\n        )})\n    utility.create_resource_group(name=\"rg1\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n    utility.create_resource_group(name=\"rg2\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n\ninit_cluster(1)\n","\ndef scale_to(node_num: int):\n    # scale the querynode number in Milvus into node_num.\n    pass\n","# scale rg1 into 3 nodes, rg2 into 1 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 3},\n        limits={\"node_num\": 3},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n    \"rg2\": ResourceGroupConfig(\n        requests={\"node_num\": 1},\n        limits={\"node_num\": 1},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\nscale_to(5)\n# rg1 has 3 nodes, rg2 has 1 node, __default_resource_group has 1 node.\n","# scale rg1 from 3 nodes into 2 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 2},\n        limits={\"node_num\": 2},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    
    transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\n\n# rg1 has 2 nodes, rg2 has 1 node, __default_resource_group has 1 node, __pending_nodes has 1 node.\nscale_to(4)\n# scale the node in __pending_nodes\n"],"headingContent":"Manage Resource Groups","anchorList":[{"label":"Verwalten von Ressourcengruppen","href":"Manage-Resource-Groups","type":1,"isActive":false},{"label":"Was ist eine Ressourcengruppe?","href":"What-is-a-resource-group","type":2,"isActive":false},{"label":"Konzepte der Ressourcengruppe","href":"Concepts-of-resource-group","type":2,"isActive":false},{"label":"Verwendung der deklarativen API zur Verwaltung der Ressourcengruppe","href":"Use-declarative-api-to-manage-resource-group","type":2,"isActive":false},{"label":"Eine gute Praxis zur Verwaltung der Clusterskalierung","href":"A-good-practice-to-manage-cluster-scaling","type":2,"isActive":false},{"label":"Wie Ressourcengruppen mit mehreren Replikaten interagieren","href":"How-resource-groups-interacts-with-multiple-replicas","type":2,"isActive":false},{"label":"Wie geht es weiter?","href":"Whats-next","type":1,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/de/adminGuide/resource_group.md b/localization/v2.4.x/site/de/adminGuide/resource_group.md
    index 93ba1f223..80d795a93 100644
    --- a/localization/v2.4.x/site/de/adminGuide/resource_group.md
    +++ b/localization/v2.4.x/site/de/adminGuide/resource_group.md
    @@ -62,7 +62,7 @@ title: Verwalten von Ressourcengruppen
     }
     
-• Das requests-Attribut legt die Bedingungen fest, die eine Ressourcengruppe erfüllen muss.
+• Das Attribut requests gibt die Bedingungen an, die eine Ressourcengruppe erfüllen muss.
    • Das limits-Attribut legt die Höchstgrenzen für eine Ressourcengruppe fest.
    • Die Attribute transfer_from und transfer_to beschreiben, von welchen Ressourcengruppen eine Ressourcengruppe vorzugsweise Ressourcen beziehen bzw. an welche Ressourcengruppen sie Ressourcen übertragen soll.
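Als Skizze dazu, wie sich diese vier Attribute in PyMilvus als ResourceGroupConfig ausdrücken lassen (angelehnt an die Codebeispiele dieser Seite; die Gruppennamen rg1 und rg2 stammen aus dem JSON-Beispiel oben):

```python
from pymilvus.client.types import ResourceGroupConfig

# requests/limits bound the query-node count for this resource group;
# transfer_from/transfer_to name the preferred source and sink resource
# groups for node movement.
config = ResourceGroupConfig(
    requests={"node_num": 1},
    limits={"node_num": 1},
    transfer_from=[{"resource_group": "rg1"}],
    transfer_to=[{"resource_group": "rg2"}],
)
```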
@@ -90,7 +90,7 @@ title: Verwalten von Ressourcengruppen
-Alle Codebeispiele auf dieser Seite sind in PyMilvus 2.4.5. Aktualisieren Sie Ihre PyMilvus-Installation, bevor Sie sie ausführen.
+Alle Codebeispiele auf dieser Seite verwenden PyMilvus 2.4.8. Aktualisieren Sie Ihre PyMilvus-Installation, bevor Sie sie ausführen.

    1. Erstellen Sie eine Ressourcengruppe.
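Since the note above bumps the samples to PyMilvus 2.4.8, here is a hedged sketch of this create step, assuming a pip-managed client upgraded via `pip install "pymilvus==2.4.8"` and a placeholder group name `rg`:

```python
from pymilvus import connections, utility
from pymilvus.client.types import ResourceGroupConfig

connections.connect("default", uri="http://localhost:19530")  # assumed endpoint

# Create an empty resource group; nodes join it later once `requests` is raised.
utility.create_resource_group(
    name="rg",
    config=ResourceGroupConfig(
        requests={"node_num": 0},
        limits={"node_num": 0},
    ),
    using="default",
)
print(utility.list_resource_groups(using="default"))
```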

      diff --git a/localization/v2.4.x/site/de/adminGuide/tls.json b/localization/v2.4.x/site/de/adminGuide/tls.json index 57b4fa02f..5ab4dc0f5 100644 --- a/localization/v2.4.x/site/de/adminGuide/tls.json +++ b/localization/v2.4.x/site/de/adminGuide/tls.json @@ -1 +1 @@ -{"codeList":["openssl version\n","sudo apt install openssl\n","mkdir cert && cd cert\ntouch openssl.cnf gen.sh\n","#\n# OpenSSL example configuration file.\n# This is mostly being used for generation of certificate requests.\n#\n\n# This definition stops the following lines choking if HOME isn't\n# defined.\nHOME = .\nRANDFILE = $ENV::HOME/.rnd\n\n# Extra OBJECT IDENTIFIER info:\n#oid_file = $ENV::HOME/.oid\noid_section = new_oids\n\n# To use this configuration file with the \"-extfile\" option of the\n# \"openssl x509\" utility, name here the section containing the\n# X.509v3 extensions to use:\n# extensions = \n# (Alternatively, use a configuration file that has only\n# X.509v3 extensions in its main [= default] section.)\n\n[ new_oids ]\n\n# We can add new OIDs in here for use by 'ca', 'req' and 'ts'.\n# Add a simple OID like this:\n# testoid1=1.2.3.4\n# Or use config file substitution like this:\n# testoid2=${testoid1}.5.6\n\n# Policies used by the TSA examples.\ntsa_policy1 = 1.2.3.4.1\ntsa_policy2 = 1.2.3.4.5.6\ntsa_policy3 = 1.2.3.4.5.7\n\n####################################################################\n[ ca ]\ndefault_ca = CA_default # The default ca section\n\n####################################################################\n[ CA_default ]\n\ndir = ./demoCA # Where everything is kept\ncerts = $dir/certs # Where the issued certs are kept\ncrl_dir = $dir/crl # Where the issued crl are kept\ndatabase = $dir/index.txt # database index file.\n#unique_subject = no # Set to 'no' to allow creation of\n # several ctificates with same subject.\nnew_certs_dir = $dir/newcerts # default place for new certs.\n\ncertificate = $dir/cacert.pem # The CA certificate\nserial = $dir/serial # The current serial number\ncrlnumber = $dir/crlnumber # the current crl number\n # must be commented out to leave a V1 CRL\ncrl = $dir/crl.pem # The current CRL\nprivate_key = $dir/private/cakey.pem# The private key\nRANDFILE = $dir/private/.rand # private random number file\n\nx509_extensions = usr_cert # The extentions to add to the cert\n\n# Comment out the following two lines for the \"traditional\"\n# (and highly broken) format.\nname_opt = ca_default # Subject Name options\ncert_opt = ca_default # Certificate field options\n\n# Extension copying option: use with caution.\ncopy_extensions = copy\n\n# Extensions to add to a CRL. 
Note: Netscape communicator chokes on V2 CRLs\n# so this is commented out by default to leave a V1 CRL.\n# crlnumber must also be commented out to leave a V1 CRL.\n# crl_extensions = crl_ext\n\ndefault_days = 365 # how long to certify for\ndefault_crl_days= 30 # how long before next CRL\ndefault_md = default # use public key default MD\npreserve = no # keep passed DN ordering\n\n# A few difference way of specifying how similar the request should look\n# For type CA, the listed attributes must be the same, and the optional\n# and supplied fields are just that :-)\npolicy = policy_match\n\n# For the CA policy\n[ policy_match ]\ncountryName = match\nstateOrProvinceName = match\norganizationName = match\norganizationalUnitName = optional\ncommonName = supplied\nemailAddress = optional\n\n# For the 'anything' policy\n# At this point in time, you must list all acceptable 'object'\n# types.\n[ policy_anything ]\ncountryName = optional\nstateOrProvinceName = optional\nlocalityName = optional\norganizationName = optional\norganizationalUnitName = optional\ncommonName = supplied\nemailAddress = optional\n\n####################################################################\n[ req ]\ndefault_bits = 2048\ndefault_keyfile = privkey.pem\ndistinguished_name = req_distinguished_name\nattributes = req_attributes\nx509_extensions = v3_ca # The extentions to add to the self signed cert\n\n# Passwords for private keys if not present they will be prompted for\n# input_password = secret\n# output_password = secret\n\n# This sets a mask for permitted string types. There are several options. \n# default: PrintableString, T61String, BMPString.\n# pkix : PrintableString, BMPString (PKIX recommendation before 2004)\n# utf8only: only UTF8Strings (PKIX recommendation after 2004).\n# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).\n# MASK:XXXX a literal mask value.\n# WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings.\nstring_mask = utf8only\n\nreq_extensions = v3_req # The extensions to add to a certificate request\n\n[ req_distinguished_name ]\ncountryName = Country Name (2 letter code)\ncountryName_default = AU\ncountryName_min = 2\ncountryName_max = 2\n\nstateOrProvinceName = State or Province Name (full name)\nstateOrProvinceName_default = Some-State\n\nlocalityName = Locality Name (eg, city)\n\n0.organizationName = Organization Name (eg, company)\n0.organizationName_default = Internet Widgits Pty Ltd\n\n# we can do this but it is not needed normally :-)\n#1.organizationName = Second Organization Name (eg, company)\n#1.organizationName_default = World Wide Web Pty Ltd\n\norganizationalUnitName = Organizational Unit Name (eg, section)\n#organizationalUnitName_default =\n\ncommonName = Common Name (e.g. server FQDN or YOUR name)\ncommonName_max = 64\n\nemailAddress = Email Address\nemailAddress_max = 64\n\n# SET-ex3 = SET extension number 3\n\n[ req_attributes ]\nchallengePassword = A challenge password\nchallengePassword_min = 4\nchallengePassword_max = 20\n\nunstructuredName = An optional company name\n\n[ usr_cert ]\n\n# These extensions are added when 'ca' signs a request.\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. 
If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This is required for TSA certificates.\n# extendedKeyUsage = critical,timeStamping\n\n[ v3_req ]\n\n# Extensions to add to a certificate request\n\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n\n[ v3_ca ]\n\n\n# Extensions for a typical CA\n\n\n# PKIX recommendation.\n\nsubjectKeyIdentifier=hash\n\nauthorityKeyIdentifier=keyid:always,issuer\n\n# This is what PKIX recommends but some broken software chokes on critical\n# extensions.\n#basicConstraints = critical,CA:true\n# So we do this instead.\nbasicConstraints = CA:true\n\n# Key usage: this is typical for a CA certificate. However since it will\n# prevent it being used as an test self-signed certificate it is best\n# left out by default.\n# keyUsage = cRLSign, keyCertSign\n\n# Some might want this also\n# nsCertType = sslCA, emailCA\n\n# Include email address in subject alt name: another PKIX recommendation\n# subjectAltName=email:copy\n# Copy issuer details\n# issuerAltName=issuer:copy\n\n# DER hex encoding of an extension: beware experts only!\n# obj=DER:02:03\n# Where 'obj' is a standard or added object\n# You can even override a supported extension:\n# basicConstraints= critical, DER:30:03:01:01:FF\n\n[ crl_ext ]\n\n# CRL extensions.\n# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.\n\n# issuerAltName=issuer:copy\nauthorityKeyIdentifier=keyid:always\n\n[ proxy_cert_ext ]\n# These extensions should be added when creating a proxy certificate\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. 
If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This really needs to be in place for it to be a proxy certificate.\nproxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo\n\n####################################################################\n[ tsa ]\n\ndefault_tsa = tsa_config1 # the default TSA section\n\n[ tsa_config1 ]\n\n# These are used by the TSA reply generation only.\ndir = ./demoCA # TSA root directory\nserial = $dir/tsaserial # The current serial number (mandatory)\ncrypto_device = builtin # OpenSSL engine to use for signing\nsigner_cert = $dir/tsacert.pem # The TSA signing certificate\n # (optional)\ncerts = $dir/cacert.pem # Certificate chain to include in reply\n # (optional)\nsigner_key = $dir/private/tsakey.pem # The TSA private key (optional)\n\ndefault_policy = tsa_policy1 # Policy if request did not specify it\n # (optional)\nother_policies = tsa_policy2, tsa_policy3 # acceptable policies (optional)\ndigests = md5, sha1 # Acceptable message digests (mandatory)\naccuracy = secs:1, millisecs:500, microsecs:100 # (optional)\nclock_precision_digits = 0 # number of digits after dot. 
(optional)\nordering = yes # Is ordering defined for timestamps?\n # (optional, default: no)\ntsa_name = yes # Must the TSA name be included in the reply?\n # (optional, default: no)\ness_cert_id_chain = no # Must the ESS cert id chain be included?\n # (optional, default: no)\n","#!/usr/bin/env sh\n# your variables\nCountry=\"CN\"\nState=\"Shanghai\"\nLocation=\"Shanghai\"\nOrganization=\"milvus\"\nOrganizational=\"milvus\"\nCommonName=\"localhost\"\n\necho \"generate ca.key\"\nopenssl genrsa -out ca.key 2048\n\necho \"generate ca.pem\"\nopenssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n\necho \"generate server SAN certificate\"\nopenssl genpkey -algorithm RSA -out server.key\nopenssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\necho \"generate client SAN certificate\"\nopenssl genpkey -algorithm RSA -out client.key\nopenssl req -new -nodes -key client.key -out client.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in client.csr -out client.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\n","chmod +x gen.sh\n./gen.sh\n","openssl genpkey -algorithm RSA -out ca.key\n","openssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n","openssl genpkey -algorithm RSA -out server.key\n","openssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\n","openssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n","tls:\n serverPemPath: /milvus/tls/server.pem\n serverKeyPath: /milvus/tls/server.key\n caPemPath: /milvus/tls/ca.pem\n\ncommon:\n security:\n tlsMode: 1\n","├── docker-compose.yml\n├── milvus.yaml\n└── tls\n ├── server.pem\n ├── server.key\n └── ca.pem\n"," standalone:\n container_name: milvus-standalone\n image: milvusdb/milvus:latest\n command: [\"milvus\", \"run\", \"standalone\"]\n security_opt:\n - seccomp:unconfined\n environment:\n ETCD_ENDPOINTS: etcd:2379\n MINIO_ADDRESS: minio:9000\n volumes:\n - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n - ${DOCKER_VOLUME_DIRECTORY:-.}/tls:/milvus/tls\n - ${DOCKER_VOLUME_DIRECTORY:-.}/milvus.yaml:/milvus/configs/milvus.yaml\n","sudo docker compose up -d\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri=\"http://localhost:19530\",\n secure=True,\n server_pem_path=\"path_to/server.pem\",\n server_name=\"localhost\"\n)\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri=\"http://localhost:19530\",\n secure=True,\n client_pem_path=\"path_to/client.pem\",\n client_key_path=\"path_to/client.key\",\n ca_pem_path=\"path_to/ca.pem\",\n server_name=\"localhost\"\n)\n"],"headingContent":"","anchorList":[{"label":"Verschlüsselung bei der Übermittlung","href":"Encryption-in-Transit","type":1,"isActive":false},{"label":"Erstellen Sie Ihr eigenes 
Zertifikat","href":"Create-your-own-certificate","type":2,"isActive":false},{"label":"Einrichten eines Milvus-Servers mit TLS","href":"Set-up-a-Milvus-server-with-TLS","type":2,"isActive":false},{"label":"Verbinden Sie sich mit dem Milvus-Server mit TLS","href":"Connect-to-the-Milvus-server-with-TLS","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["openssl version\n","sudo apt install openssl\n","mkdir cert && cd cert\ntouch openssl.cnf gen.sh\n","#\n# OpenSSL example configuration file.\n# This is mostly being used for generation of certificate requests.\n#\n\n# This definition stops the following lines choking if HOME isn't\n# defined.\nHOME = .\nRANDFILE = $ENV::HOME/.rnd\n\n# Extra OBJECT IDENTIFIER info:\n#oid_file = $ENV::HOME/.oid\noid_section = new_oids\n\n# To use this configuration file with the \"-extfile\" option of the\n# \"openssl x509\" utility, name here the section containing the\n# X.509v3 extensions to use:\n# extensions = \n# (Alternatively, use a configuration file that has only\n# X.509v3 extensions in its main [= default] section.)\n\n[ new_oids ]\n\n# We can add new OIDs in here for use by 'ca', 'req' and 'ts'.\n# Add a simple OID like this:\n# testoid1=1.2.3.4\n# Or use config file substitution like this:\n# testoid2=${testoid1}.5.6\n\n# Policies used by the TSA examples.\ntsa_policy1 = 1.2.3.4.1\ntsa_policy2 = 1.2.3.4.5.6\ntsa_policy3 = 1.2.3.4.5.7\n\n####################################################################\n[ ca ]\ndefault_ca = CA_default # The default ca section\n\n####################################################################\n[ CA_default ]\n\ndir = ./demoCA # Where everything is kept\ncerts = $dir/certs # Where the issued certs are kept\ncrl_dir = $dir/crl # Where the issued crl are kept\ndatabase = $dir/index.txt # database index file.\n#unique_subject = no # Set to 'no' to allow creation of\n # several ctificates with same subject.\nnew_certs_dir = $dir/newcerts # default place for new certs.\n\ncertificate = $dir/cacert.pem # The CA certificate\nserial = $dir/serial # The current serial number\ncrlnumber = $dir/crlnumber # the current crl number\n # must be commented out to leave a V1 CRL\ncrl = $dir/crl.pem # The current CRL\nprivate_key = $dir/private/cakey.pem# The private key\nRANDFILE = $dir/private/.rand # private random number file\n\nx509_extensions = usr_cert # The extentions to add to the cert\n\n# Comment out the following two lines for the \"traditional\"\n# (and highly broken) format.\nname_opt = ca_default # Subject Name options\ncert_opt = ca_default # Certificate field options\n\n# Extension copying option: use with caution.\ncopy_extensions = copy\n\n# Extensions to add to a CRL. 
Note: Netscape communicator chokes on V2 CRLs\n# so this is commented out by default to leave a V1 CRL.\n# crlnumber must also be commented out to leave a V1 CRL.\n# crl_extensions = crl_ext\n\ndefault_days = 365 # how long to certify for\ndefault_crl_days= 30 # how long before next CRL\ndefault_md = default # use public key default MD\npreserve = no # keep passed DN ordering\n\n# A few difference way of specifying how similar the request should look\n# For type CA, the listed attributes must be the same, and the optional\n# and supplied fields are just that :-)\npolicy = policy_match\n\n# For the CA policy\n[ policy_match ]\ncountryName = match\nstateOrProvinceName = match\norganizationName = match\norganizationalUnitName = optional\ncommonName = supplied\nemailAddress = optional\n\n# For the 'anything' policy\n# At this point in time, you must list all acceptable 'object'\n# types.\n[ policy_anything ]\ncountryName = optional\nstateOrProvinceName = optional\nlocalityName = optional\norganizationName = optional\norganizationalUnitName = optional\ncommonName = supplied\nemailAddress = optional\n\n####################################################################\n[ req ]\ndefault_bits = 2048\ndefault_keyfile = privkey.pem\ndistinguished_name = req_distinguished_name\nattributes = req_attributes\nx509_extensions = v3_ca # The extentions to add to the self signed cert\n\n# Passwords for private keys if not present they will be prompted for\n# input_password = secret\n# output_password = secret\n\n# This sets a mask for permitted string types. There are several options. \n# default: PrintableString, T61String, BMPString.\n# pkix : PrintableString, BMPString (PKIX recommendation before 2004)\n# utf8only: only UTF8Strings (PKIX recommendation after 2004).\n# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).\n# MASK:XXXX a literal mask value.\n# WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings.\nstring_mask = utf8only\n\nreq_extensions = v3_req # The extensions to add to a certificate request\n\n[ req_distinguished_name ]\ncountryName = Country Name (2 letter code)\ncountryName_default = AU\ncountryName_min = 2\ncountryName_max = 2\n\nstateOrProvinceName = State or Province Name (full name)\nstateOrProvinceName_default = Some-State\n\nlocalityName = Locality Name (eg, city)\n\n0.organizationName = Organization Name (eg, company)\n0.organizationName_default = Internet Widgits Pty Ltd\n\n# we can do this but it is not needed normally :-)\n#1.organizationName = Second Organization Name (eg, company)\n#1.organizationName_default = World Wide Web Pty Ltd\n\norganizationalUnitName = Organizational Unit Name (eg, section)\n#organizationalUnitName_default =\n\ncommonName = Common Name (e.g. server FQDN or YOUR name)\ncommonName_max = 64\n\nemailAddress = Email Address\nemailAddress_max = 64\n\n# SET-ex3 = SET extension number 3\n\n[ req_attributes ]\nchallengePassword = A challenge password\nchallengePassword_min = 4\nchallengePassword_max = 20\n\nunstructuredName = An optional company name\n\n[ usr_cert ]\n\n# These extensions are added when 'ca' signs a request.\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. 
If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This is required for TSA certificates.\n# extendedKeyUsage = critical,timeStamping\n\n[ v3_req ]\n\n# Extensions to add to a certificate request\n\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n\n[ v3_ca ]\n\n\n# Extensions for a typical CA\n\n\n# PKIX recommendation.\n\nsubjectKeyIdentifier=hash\n\nauthorityKeyIdentifier=keyid:always,issuer\n\n# This is what PKIX recommends but some broken software chokes on critical\n# extensions.\n#basicConstraints = critical,CA:true\n# So we do this instead.\nbasicConstraints = CA:true\n\n# Key usage: this is typical for a CA certificate. However since it will\n# prevent it being used as an test self-signed certificate it is best\n# left out by default.\n# keyUsage = cRLSign, keyCertSign\n\n# Some might want this also\n# nsCertType = sslCA, emailCA\n\n# Include email address in subject alt name: another PKIX recommendation\n# subjectAltName=email:copy\n# Copy issuer details\n# issuerAltName=issuer:copy\n\n# DER hex encoding of an extension: beware experts only!\n# obj=DER:02:03\n# Where 'obj' is a standard or added object\n# You can even override a supported extension:\n# basicConstraints= critical, DER:30:03:01:01:FF\n\n[ crl_ext ]\n\n# CRL extensions.\n# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.\n\n# issuerAltName=issuer:copy\nauthorityKeyIdentifier=keyid:always\n\n[ proxy_cert_ext ]\n# These extensions should be added when creating a proxy certificate\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. 
If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This really needs to be in place for it to be a proxy certificate.\nproxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo\n\n####################################################################\n[ tsa ]\n\ndefault_tsa = tsa_config1 # the default TSA section\n\n[ tsa_config1 ]\n\n# These are used by the TSA reply generation only.\ndir = ./demoCA # TSA root directory\nserial = $dir/tsaserial # The current serial number (mandatory)\ncrypto_device = builtin # OpenSSL engine to use for signing\nsigner_cert = $dir/tsacert.pem # The TSA signing certificate\n # (optional)\ncerts = $dir/cacert.pem # Certificate chain to include in reply\n # (optional)\nsigner_key = $dir/private/tsakey.pem # The TSA private key (optional)\n\ndefault_policy = tsa_policy1 # Policy if request did not specify it\n # (optional)\nother_policies = tsa_policy2, tsa_policy3 # acceptable policies (optional)\ndigests = md5, sha1 # Acceptable message digests (mandatory)\naccuracy = secs:1, millisecs:500, microsecs:100 # (optional)\nclock_precision_digits = 0 # number of digits after dot. 
(optional)\nordering = yes # Is ordering defined for timestamps?\n # (optional, default: no)\ntsa_name = yes # Must the TSA name be included in the reply?\n # (optional, default: no)\ness_cert_id_chain = no # Must the ESS cert id chain be included?\n # (optional, default: no)\n","#!/usr/bin/env sh\n# your variables\nCountry=\"CN\"\nState=\"Shanghai\"\nLocation=\"Shanghai\"\nOrganization=\"milvus\"\nOrganizational=\"milvus\"\nCommonName=\"localhost\"\n\necho \"generate ca.key\"\nopenssl genrsa -out ca.key 2048\n\necho \"generate ca.pem\"\nopenssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n\necho \"generate server SAN certificate\"\nopenssl genpkey -algorithm RSA -out server.key\nopenssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\necho \"generate client SAN certificate\"\nopenssl genpkey -algorithm RSA -out client.key\nopenssl req -new -nodes -key client.key -out client.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in client.csr -out client.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\n","chmod +x gen.sh\n./gen.sh\n","openssl genpkey -algorithm RSA -out ca.key\n","openssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n","openssl genpkey -algorithm RSA -out server.key\n","openssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\n","openssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n","tls:\n serverPemPath: /milvus/tls/server.pem\n serverKeyPath: /milvus/tls/server.key\n caPemPath: /milvus/tls/ca.pem\n\ncommon:\n security:\n tlsMode: 1\n","├── docker-compose.yml\n├── milvus.yaml\n└── tls\n ├── server.pem\n ├── server.key\n └── ca.pem\n"," standalone:\n container_name: milvus-standalone\n image: milvusdb/milvus:latest\n command: [\"milvus\", \"run\", \"standalone\"]\n security_opt:\n - seccomp:unconfined\n environment:\n ETCD_ENDPOINTS: etcd:2379\n MINIO_ADDRESS: minio:9000\n volumes:\n - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n - ${DOCKER_VOLUME_DIRECTORY:-.}/tls:/milvus/tls\n - ${DOCKER_VOLUME_DIRECTORY:-.}/milvus.yaml:/milvus/configs/milvus.yaml\n","sudo docker compose up -d\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri=\"https://localhost:19530\",\n secure=True,\n server_pem_path=\"path_to/server.pem\",\n server_name=\"localhost\"\n)\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri=\"https://localhost:19530\",\n secure=True,\n client_pem_path=\"path_to/client.pem\",\n client_key_path=\"path_to/client.key\",\n ca_pem_path=\"path_to/ca.pem\",\n server_name=\"localhost\"\n)\n","curl --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list\n","curl --cert path_to/client.pem --key path_to/client.key --cacert path_to/ca.pem 
https://localhost:19530/v2/vectordb/collections/list\n"],"headingContent":"Encryption in Transit","anchorList":[{"label":"Verschlüsselung bei der Übermittlung","href":"Encryption-in-Transit","type":1,"isActive":false},{"label":"Erstellen Sie Ihr eigenes Zertifikat","href":"Create-your-own-certificate","type":2,"isActive":false},{"label":"Einrichten eines Milvus-Servers mit TLS","href":"Set-up-a-Milvus-server-with-TLS","type":2,"isActive":false},{"label":"Verbinden Sie sich mit dem Milvus-Server mit TLS","href":"Connect-to-the-Milvus-server-with-TLS","type":2,"isActive":false},{"label":"Verbinden mit dem Milvus RESTful Server mit TLS","href":"Connect-to-the-Milvus-RESTful-server-with-TLS","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/adminGuide/tls.md b/localization/v2.4.x/site/de/adminGuide/tls.md index 925d58e32..94b0b60f5 100644 --- a/localization/v2.4.x/site/de/adminGuide/tls.md +++ b/localization/v2.4.x/site/de/adminGuide/tls.md @@ -1,6 +1,6 @@ --- id: tls.md -title: Verschlüsselung im Transit +title: Verschlüsselung bei der Übermittlung summary: 'Erfahren Sie, wie Sie den TLS-Proxy in Milvus aktivieren.' ---

 Encryption in Transit
-TLS (Transport Layer Security) is an encryption protocol to ensure communication security. The Milvus proxy uses TLS for one-way and two-way authentication.
-This topic describes how to enable TLS proxy in Milvus.
+TLS (Transport Layer Security) is an encryption protocol to ensure communication security. Milvus proxy uses TLS for one-way and two-way authentication.
+This topic describes how to enable TLS in Milvus proxy for both gRPC and RESTful traffic.
 TLS and user authentication are two distinct security approaches. If you have enabled both user authentication and TLS in your Milvus system, you must provide a username, a password, and certificate file paths. For information on enabling user authentication, see Authenticate User Access.

@@ -474,7 +474,7 @@ openssl x509 -req -days 3650 -in client.csr -o
 1. Sign the certificate.
-Open the server.csr, ca.key, and ca.pem files to sign the certificate. The CAcreateserial command option is used to create a CA serial number file if it does not exist; you will get a ca.srl file when you choose this command option.
+Open the server.csr, ca.key, and ca.pem files to sign the certificate. The CAcreateserial command option is used to create a CA serial number file if it does not exist; you will get a ca.srl file after choosing this command option.
 openssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req
       

 Set up a Milvus server with TLS
 See example_tls1.py and example_tls2.py for more information.
+
+Connect to the Milvus RESTful server with TLS
+
+For RESTful APIs, you can verify TLS with the curl command.
+
+One-way TLS connection
+
+curl --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list
+
+Two-way TLS connection
+
+curl --cert path_to/client.pem --key path_to/client.key --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list
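If you would rather script these checks than run curl by hand, the following is a hedged Python equivalent using the third-party `requests` package; the certificate paths and the localhost:19530 endpoint mirror the curl calls above and are assumptions:

```python
import requests  # third-party HTTP client: pip install requests

URL = "https://localhost:19530/v2/vectordb/collections/list"

# One-way TLS: verify the server certificate against our own CA.
resp = requests.get(URL, verify="path_to/ca.pem")
print(resp.status_code, resp.text)

# Two-way (mutual) TLS: additionally present the client certificate and key.
resp = requests.get(
    URL,
    verify="path_to/ca.pem",
    cert=("path_to/client.pem", "path_to/client.key"),
)
print(resp.status_code, resp.text)
```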
diff --git a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-docker.json b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-docker.json index 57096721b..dc6c3b51e 100644 --- a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-docker.json +++ b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-docker.json @@ -1 +1 @@ -{"codeList":["...\nrootcoord:\n container_name: milvus-rootcoord\n image: milvusdb/milvus:v2.4.9\n...\nproxy:\n container_name: milvus-proxy\n image: milvusdb/milvus:v2.4.9\n...\nquerycoord:\n container_name: milvus-querycoord\n image: milvusdb/milvus:v2.4.9 \n...\nquerynode:\n container_name: milvus-querynode\n image: milvusdb/milvus:v2.4.9\n...\nindexcoord:\n container_name: milvus-indexcoord\n image: milvusdb/milvus:v2.4.9\n...\nindexnode:\n container_name: milvus-indexnode\n image: milvusdb/milvus:v2.4.9 \n...\ndatacoord:\n container_name: milvus-datacoord\n image: milvusdb/milvus:v2.4.9 \n...\ndatanode:\n container_name: milvus-datanode\n image: milvusdb/milvus:v2.4.9\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n # Option: run/backup/rollback\n type: run\n runWithBackup: true\nconfig:\n sourceVersion: 2.1.4 # Specify your milvus version\n targetVersion: 2.4.9\n backupFilePath: /tmp/migration.bak\nmetastore:\n type: etcd\netcd:\n endpoints:\n - milvus-etcd:2379 # Use the etcd container name\n rootPath: by-dev # The root path where data is stored in etcd\n metaSubPath: meta\n kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvus/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","Update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"","anchorList":[{"label":"Upgrade des Milvus-Clusters mit Docker Compose","href":"Upgrade-Milvus-Cluster-with-Docker-Compose","type":1,"isActive":false},{"label":"Upgrade von Milvus durch Ändern des Images","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrieren Sie die Metadaten","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"Wie geht es weiter?","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["...\nrootcoord:\n container_name: milvus-rootcoord\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nproxy:\n container_name: milvus-proxy\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nquerycoord:\n container_name: milvus-querycoord\n image: milvusdb/milvus:v2.4.13-hotfix \n...\nquerynode:\n container_name: milvus-querynode\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nindexcoord:\n container_name: milvus-indexcoord\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nindexnode:\n container_name: milvus-indexnode\n image: milvusdb/milvus:v2.4.13-hotfix \n...\ndatacoord:\n container_name: milvus-datacoord\n image: milvusdb/milvus:v2.4.13-hotfix \n...\ndatanode:\n container_name: milvus-datanode\n image: milvusdb/milvus:v2.4.13-hotfix\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n # Option: run/backup/rollback\n type: run\n runWithBackup: true\nconfig:\n sourceVersion: 2.1.4 # Specify your milvus version\n targetVersion: 2.4.13-hotfix\n backupFilePath: /tmp/migration.bak\nmetastore:\n type: etcd\netcd:\n endpoints:\n - 
milvus-etcd:2379 # Use the etcd container name\n rootPath: by-dev # The root path where data is stored in etcd\n metaSubPath: meta\n kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvus/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","Update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"Upgrade Milvus Cluster with Docker Compose","anchorList":[{"label":"Upgrade des Milvus-Clusters mit Docker Compose","href":"Upgrade-Milvus-Cluster-with-Docker-Compose","type":1,"isActive":false},{"label":"Upgrade von Milvus durch Ändern des Images","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrieren Sie die Metadaten","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"Wie geht es weiter?","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-docker.md b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-docker.md index 8171af77e..8517b4ef2 100644 --- a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-docker.md +++ b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-docker.md @@ -1,8 +1,6 @@ --- id: upgrade_milvus_cluster-docker.md -summary: >- - Erfahren Sie, wie Sie den Milvus-Cluster mit Docker Compose aktualisieren - können. +summary: 'Erfahren Sie, wie Sie Milvus-Cluster mit Docker Compose aktualisieren können.' title: Upgrade des Milvus-Clusters mit Docker Compose --- @@ -22,7 +20,7 @@ title: Upgrade des Milvus-Clusters mit Docker Compose >

 This topic describes how to upgrade your Milvus with Docker Compose.
-In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before upgrading from v2.1.x to v2.4.9.
+In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before upgrading from v2.1.x to v2.4.13-hotfix.
 Upgrade Milvus by changing its image
 Run the following commands to perform the upgrade.
 docker compose down
    @@ -107,7 +105,7 @@ cmd:
       runWithBackup: true
     config:
       sourceVersion: 2.1.4   # Specify your milvus version
    -  targetVersion: 2.4.9
    +  targetVersion: 2.4.13-hotfix
       backupFilePath: /tmp/migration.bak
     metastore:
       type: etcd
    diff --git a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-helm.json b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-helm.json
    index 9264ea255..fcab7fc2f 100644
    --- a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-helm.json
    +++ b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-helm.json
    @@ -1 +1 @@
    -{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME                    CHART VERSION   APP VERSION             DESCRIPTION                                       \nzilliztech/milvus       4.1.34          2.4.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.33          2.4.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.32          2.4.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.31          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.30          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.29          2.4.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.24          2.3.11                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.23          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.22          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.21          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.20          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.18          2.3.10                  Milvus is an open-source vector database built ... \nzilliztech/milvus       4.1.18          2.3.9                   Milvus is an open-source vector database built ...                                       
\nzilliztech/milvus       4.1.17          2.3.8                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.16          2.3.7                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.15          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.14          2.3.6                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.13          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.12          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.11          2.3.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.10          2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.9           2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.8           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.7           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.6           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.5           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.4           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.3           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.2           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.1           2.3.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'\n","helm repo update\nhelm upgrade my-release zilliztech/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME                NAMESPACE   REVISION    UPDATED                                 STATUS      CHART           APP VERSION    \nnew-release         default     1           2022-11-21 15:41:25.51539 +0800 CST     deployed    milvus-3.2.18   2.1.4 \n","NAME                                             READY   STATUS      RESTARTS   AGE\nmy-release-etcd-0                               1/1     Running     0          21m\nmy-release-etcd-1                               1/1     Running     0          21m\nmy-release-etcd-2                               1/1     Running     0          21m\nmy-release-milvus-datacoord-664c58798d-fl75s    1/1     Running     0          21m\nmy-release-milvus-datanode-5f75686c55-xfg2r     1/1     Running     0          21m\nmy-release-milvus-indexcoord-5f98b97589-2l48r   1/1     Running     0          21m\nmy-release-milvus-indexnode-857b4ddf98-vmd75    1/1     Running     0          21m\nmy-release-milvus-proxy-6c548f787f-scspp        1/1     Running     0          21m\nmy-release-milvus-querycoord-c454f44cd-dwmwq    1/1     Running     0          21m\nmy-release-milvus-querynode-76bb4946d-lbrz6     1/1     Running     0          21m\nmy-release-milvus-rootcoord-7764c5b686-62msm    1/1     
Running     0          21m\nmy-release-minio-0                              1/1     Running     0          21m\nmy-release-minio-1                              1/1     Running     0          21m\nmy-release-minio-2                              1/1     Running     0          21m\nmy-release-minio-3                              1/1     Running     0          21m\nmy-release-pulsar-bookie-0                      1/1     Running     0          21m\nmy-release-pulsar-bookie-1                      1/1     Running     0          21m\nmy-release-pulsar-bookie-2                      1/1     Running     0          21m\nmy-release-pulsar-bookie-init-tjxpj             0/1     Completed   0          21m\nmy-release-pulsar-broker-0                      1/1     Running     0          21m\nmy-release-pulsar-proxy-0                       1/1     Running     0          21m\nmy-release-pulsar-pulsar-init-c8vvc             0/1     Completed   0          21m\nmy-release-pulsar-recovery-0                    1/1     Running     0          21m\nmy-release-pulsar-zookeeper-0                   1/1     Running     0          21m\nmy-release-pulsar-zookeeper-1                   1/1     Running     0          20m\nmy-release-pulsar-zookeeper-2                   1/1     Running     0          20m\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9\n"],"headingContent":"","anchorList":[{"label":"Milvus-Cluster mit Helm-Diagramm aufrüsten","href":"Upgrade-Milvus-Cluster-with-Helm-Chart","type":1,"isActive":false},{"label":"Milvus Helm Chart prüfen","href":"Check-Milvus-Helm-Chart","type":2,"isActive":false},{"label":"Durchführen eines rollenden Upgrades","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Upgrade von Milvus mit Helm","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"Migrieren Sie die Metadaten","href":"Migrate-the-metadata","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME                    CHART VERSION   APP VERSION             DESCRIPTION                                       \nzilliztech/milvus       4.1.34          2.4.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.33          2.4.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.32          2.4.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.31          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.30          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.29          2.4.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.24          2.3.11                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.23          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.22          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.21          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.20          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.18          2.3.10                  Milvus is an open-source vector database built ... \nzilliztech/milvus       4.1.18          2.3.9                   Milvus is an open-source vector database built ...                                       
\nzilliztech/milvus       4.1.17          2.3.8                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.16          2.3.7                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.15          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.14          2.3.6                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.13          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.12          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.11          2.3.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.10          2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.9           2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.8           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.7           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.6           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.5           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.4           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.3           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.2           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.1           2.3.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'\n","helm repo update\nhelm upgrade my-release zilliztech/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME                NAMESPACE   REVISION    UPDATED                                 STATUS      CHART           APP VERSION    \nnew-release         default     1           2022-11-21 15:41:25.51539 +0800 CST     deployed    milvus-3.2.18   2.1.4 \n","NAME                                             READY   STATUS      RESTARTS   AGE\nmy-release-etcd-0                               1/1     Running     0          21m\nmy-release-etcd-1                               1/1     Running     0          21m\nmy-release-etcd-2                               1/1     Running     0          21m\nmy-release-milvus-datacoord-664c58798d-fl75s    1/1     Running     0          21m\nmy-release-milvus-datanode-5f75686c55-xfg2r     1/1     Running     0          21m\nmy-release-milvus-indexcoord-5f98b97589-2l48r   1/1     Running     0          21m\nmy-release-milvus-indexnode-857b4ddf98-vmd75    1/1     Running     0          21m\nmy-release-milvus-proxy-6c548f787f-scspp        1/1     Running     0          21m\nmy-release-milvus-querycoord-c454f44cd-dwmwq    1/1     Running     0          21m\nmy-release-milvus-querynode-76bb4946d-lbrz6     1/1     Running     0          
21m\nmy-release-milvus-rootcoord-7764c5b686-62msm    1/1     Running     0          21m\nmy-release-minio-0                              1/1     Running     0          21m\nmy-release-minio-1                              1/1     Running     0          21m\nmy-release-minio-2                              1/1     Running     0          21m\nmy-release-minio-3                              1/1     Running     0          21m\nmy-release-pulsar-bookie-0                      1/1     Running     0          21m\nmy-release-pulsar-bookie-1                      1/1     Running     0          21m\nmy-release-pulsar-bookie-2                      1/1     Running     0          21m\nmy-release-pulsar-bookie-init-tjxpj             0/1     Completed   0          21m\nmy-release-pulsar-broker-0                      1/1     Running     0          21m\nmy-release-pulsar-proxy-0                       1/1     Running     0          21m\nmy-release-pulsar-pulsar-init-c8vvc             0/1     Completed   0          21m\nmy-release-pulsar-recovery-0                    1/1     Running     0          21m\nmy-release-pulsar-zookeeper-0                   1/1     Running     0          21m\nmy-release-pulsar-zookeeper-1                   1/1     Running     0          20m\nmy-release-pulsar-zookeeper-2                   1/1     Running     0          20m\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix\n"],"headingContent":"Upgrade Milvus Cluster with Helm Chart","anchorList":[{"label":"Milvus-Cluster mit Helm-Diagramm aufrüsten","href":"Upgrade-Milvus-Cluster-with-Helm-Chart","type":1,"isActive":false},{"label":"Milvus Helm Chart prüfen","href":"Check-Milvus-Helm-Chart","type":2,"isActive":false},{"label":"Durchführen eines rollenden Upgrades","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Upgrade von Milvus mit Helm","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"Migrieren Sie die Metadaten","href":"Migrate-the-metadata","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-helm.md b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-helm.md
    index 1656e8481..95a7c8e6e 100644
    --- a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-helm.md
    +++ b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-helm.md
    @@ -5,7 +5,7 @@ order: 1
     group: upgrade_milvus_cluster-operator.md
     related_key: upgrade Milvus Cluster
 summary: 'Learn how to upgrade Milvus clusters with Helm Chart.'
-title: Upgrade Milvus Cluster with Helm Chart
+title: Upgrade a Milvus cluster with a Helm chart
     ---
     
     

    Milvus-Cluster mit Helm-Diagramm aufrüsten

This guide describes how to upgrade your Milvus cluster with Milvus Helm charts.

Check Milvus Helm Chart

You can choose the upgrade path for your Milvus as follows:

-- [Conduct a rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.9.
+- [Conduct a rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.13-hotfix.

Conduct a rolling upgrade

Since Milvus 2.2.3, you can configure Milvus coordinators to work in active-standby mode and enable the rolling upgrade feature for them, so that Milvus can respond to incoming requests during coordinator upgrades. In previous releases, coordinators had to be removed and then recreated during an upgrade, which could cause certain downtime of the service.

Rolling upgrades require coordinators to work in active-standby mode. You can use the script we provide to configure the coordinators to work in active-standby mode and start the rolling upgrade.
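As a minimal sketch of that prerequisite, active-standby mode can typically be turned on through Helm values before running the script. The value keys below follow the chart's coordinator layout but are an assumption to verify against your chart version:

# Sketch: enable active-standby for the root coordinator via Helm values.
# The keys rootCoordinator.replicas and rootCoordinator.activeStandby.enabled
# are assumptions based on the Milvus Helm chart layout; verify before use.
helm upgrade my-release zilliztech/milvus --reuse-values \
  --set rootCoordinator.replicas=2 \
  --set rootCoordinator.activeStandby.enabled=true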

Based on the rolling update capabilities provided by Kubernetes, the script above enforces an ordered update of the deployments according to their dependencies. In addition, Milvus implements a mechanism to ensure that its components remain compatible with those that depend on them during the upgrade, significantly reducing potential service downtime.

The script applies only to the upgrade of Milvus installed with Helm. The following table lists the command flags available in the scripts.

    @@ -123,13 +123,13 @@ zilliztech/milvus 4.1.0 2.3.0 Milvus is an ope
o | Operation | update | False
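If the script is not present locally, it can be fetched from the Milvus repository first; the URL below is an assumed location and should be verified:

# Assumed location of rollingUpdate.sh in the milvus repository; verify first.
wget https://raw.githubusercontent.com/milvus-io/milvus/master/deployments/upgrade/rollingUpdate.sh
chmod +x rollingUpdate.sh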
-Once you have ensured that all deployments in your Milvus instance are in their normal status, you can run the following command to upgrade the Milvus instance to 2.4.9.

-sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'

+Once you have ensured that all deployments in your Milvus instance are in their normal status, you can run the following command to upgrade the Milvus instance to 2.4.13-hotfix.

+sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'
     
1. The script hardcodes the upgrade order of the deployments and it cannot be changed.
2. The script uses kubectl patch to update the deployments, and kubectl rollout status to watch their status.
3. The script uses kubectl patch to update the app.kubernetes.io/version label of the deployments to the one specified after the -t flag in the command.
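To make the note about kubectl patch and kubectl rollout status concrete, here is a hedged sketch of the equivalent manual step for a single deployment; the deployment name is an example for a release named my-release:

# Roughly what the script automates per deployment, in dependency order.
# The deployment name is an example and will differ in your cluster.
kubectl patch deployment my-release-milvus-rootcoord -n default --type='json' \
  -p='[{"op":"replace","path":"/spec/template/spec/containers/0/image","value":"milvusdb/milvus:v2.4.13-hotfix"}]'
kubectl rollout status deployment/my-release-milvus-rootcoord -n default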
    @@ -235,25 +235,25 @@ my-release-pulsar-zookeeper-2
• Migrate the Milvus metadata.
• Start the Milvus components with a new image.
-2. Upgrade Milvus from v2.1.x to 2.4.9

-The following commands assume that you upgrade Milvus from v2.1.4 to 2.4.9. Change them to the versions that fit your needs.

+2. Upgrade Milvus from v2.1.x to 2.4.13-hotfix

+The following commands assume that you upgrade Milvus from v2.1.4 to 2.4.13-hotfix. Change them to the versions that fit your needs.

1. Specify the Milvus instance name, the source Milvus version, and the target Milvus version.

-./migrate.sh -i my-release -s 2.1.4 -t 2.4.9
+./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix
       
2. Specify the namespace with -n if your Milvus is not installed in the default K8s namespace.

-./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9
+./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix
       
3. Specify the root path with -r if your Milvus is installed with a custom rootpath.

-./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev
+./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev
       
4. Specify the image tag with -w if your Milvus is installed with a custom image.

-./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9
+./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix
       
5. Set -d true if you want to automatically remove the migration pod after the migration is completed.

-./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true
+./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true
       
6. Roll back and migrate again if the migration fails.

-./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
-./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9
+./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
+./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix
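After the migration finishes, the running image can be spot-checked the same way as earlier in this guide; the pod name below is an example and will differ in your cluster:

# Verify that a component now runs the target image.
kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -n milvus \
  -o=jsonpath='{$.spec.containers[0].image}'
# expected output: milvusdb/milvus:v2.4.13-hotfix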
       
    diff --git a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-operator.json b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-operator.json index 6055a685f..6ffc9f054 100644 --- a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-operator.json +++ b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-operator.json @@ -1 +1 @@ -{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.9\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.9\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.9\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://github.com/zilliztech/milvus-operator/blob/main/config/samples/beta/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"","anchorList":[{"label":"Milvus Cluster mit Milvus Operator aufrüsten","href":"Upgrade-Milvus-Cluster-with-Milvus-Operator","type":1,"isActive":false},{"label":"Aktualisieren Sie Ihren Milvus Operator","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"Durchführen eines rollenden Upgrades","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Upgrade von Milvus durch Änderung des Images","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrieren Sie die Metadaten","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n","kubectl 
apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.13-hotfix\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.13-hotfix\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.13-hotfix\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://github.com/zilliztech/milvus-operator/blob/main/config/samples/beta/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"Upgrade Milvus Cluster with Milvus Operator","anchorList":[{"label":"Milvus Cluster mit Milvus Operator aufrüsten","href":"Upgrade-Milvus-Cluster-with-Milvus-Operator","type":1,"isActive":false},{"label":"Aktualisieren Sie Ihren Milvus Operator","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"Durchführen eines rollenden Upgrades","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Upgrade von Milvus durch Änderung des Images","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrieren Sie die Metadaten","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-operator.md b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-operator.md index 22145ce17..7baf5e0c9 100644 --- a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-operator.md +++ b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_cluster-operator.md @@ -46,9 +46,9 @@ helm -n milvus-operator upgrade milvus-

Once you have upgraded your Milvus operator to the latest version, you have the following choices:

Conduct a rolling upgrade

Since Milvus 2.2.3, you can configure Milvus coordinators to work in active-standby mode and enable the rolling upgrade feature for them, so that Milvus can respond to incoming requests during coordinator upgrades. In previous releases, coordinators had to be removed and then recreated during an upgrade, which could cause certain downtime of the service.

Based on the rolling update capabilities provided by Kubernetes, the Milvus operator enforces an ordered update of the deployments according to their dependencies. In addition, Milvus implements a mechanism to ensure that its components remain compatible with those that depend on them during the upgrade, significantly reducing potential service downtime.

    Die Funktion "Rolling Upgrade" ist standardmäßig deaktiviert. Sie müssen sie explizit über eine Konfigurationsdatei aktivieren.

    apiVersion: milvus.io/v1beta1
     kind: Milvus
    @@ -76,7 +76,7 @@ spec:
       components:
         enableRollingUpdate: true
         imageUpdateMode: rollingUpgrade # Default value, can be omitted
    -    image: milvusdb/milvus:v2.4.9
    +    image: milvusdb/milvus:v2.4.13-hotfix
     

In the configuration file above, set spec.components.enableRollingUpdate to true and spec.components.image to the desired Milvus version.

By default, Milvus conducts rolling upgrades for coordinators in an ordered way, in which it replaces the coordinator pod images one after another. To reduce the upgrade time, consider setting spec.components.imageUpdateMode to all so that Milvus replaces all pod images at the same time.

@@ -88,7 +88,7 @@ spec:
   components:
     enableRollingUpdate: true
     imageUpdateMode: all
-    image: milvusdb/milvus:v2.4.9
+    image: milvusdb/milvus:v2.4.13-hotfix

You can set spec.components.imageUpdateMode to rollingDowngrade to have Milvus replace the coordinator pod images with a lower version.

    apiVersion: milvus.io/v1beta1
    @@ -128,7 +128,7 @@ metadata:
     spec:
       # Omit other fields ...
       components:
    -   image: milvusdb/milvus:v2.4.9
    +   image: milvusdb/milvus:v2.4.13-hotfix
     

Then run the following to perform the upgrade:

    kubectl apply -f milvusupgrade.yaml
    @@ -148,8 +148,8 @@ spec:
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
-Since Milvus 2.2.0, the metadata is incompatible with that in previous releases. The following example snippets assume an upgrade from Milvus 2.1.4 to Milvus 2.4.9.

-1. Create a .yaml file for the metadata migration

-Create a metadata migration file. The following is an example. You need to specify name, sourceVersion, and targetVersion in the configuration file. The following example sets name to my-release-upgrade, sourceVersion to v2.1.4, and targetVersion to v2.4.9. This means that your Milvus cluster will be upgraded from v2.1.4 to v2.4.9.

+Since Milvus 2.2.0, the metadata is incompatible with that in previous releases. The following example snippets assume an upgrade from Milvus 2.1.4 to Milvus 2.4.13-hotfix.

+1. Create a .yaml file for the metadata migration

+Create a metadata migration file. The following is an example. You need to specify name, sourceVersion, and targetVersion in the configuration file. The following example sets name to my-release-upgrade, sourceVersion to v2.1.4, and targetVersion to v2.4.13-hotfix. This means that your Milvus cluster will be upgraded from v2.1.4 to v2.4.13-hotfix.

    apiVersion: milvus.io/v1beta1
     kind: MilvusUpgrade
     metadata:
    @@ -159,9 +159,9 @@ spec:
         namespace: default
         name: my-release
       sourceVersion: "v2.1.4"
    -  targetVersion: "v2.4.9"
    +  targetVersion: "v2.4.13-hotfix"
       # below are some omit default values:
    -  # targetImage: "milvusdb/milvus:v2.4.9"
    +  # targetImage: "milvusdb/milvus:v2.4.13-hotfix"
       # toolImage: "milvusdb/meta-migration:v2.2.0"
       # operation: upgrade
       # rollbackIfFailed: true
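Once saved (for example as milvusupgrade.yaml), the resource can be applied and inspected; treating milvusupgrade as a queryable resource name is an assumption to confirm via kubectl api-resources:

kubectl apply -f milvusupgrade.yaml
# 'milvusupgrade' as a resource name is an assumption; check with:
#   kubectl api-resources | grep -i milvus
kubectl describe milvusupgrade my-release-upgrade -n default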
    diff --git a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-docker.json b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-docker.json
    index 9084b80bc..fa6ba73c7 100644
    --- a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-docker.json
    +++ b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-docker.json
    @@ -1 +1 @@
    -{"codeList":["...\nstandalone:\n  container_name: milvus-standalone\n  image: milvusdb/milvus:v2.4.9\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n  # Option: run/backup/rollback\n  type: run\n  runWithBackup: true\nconfig:\n  sourceVersion: 2.1.4   # Specify your milvus version\n  targetVersion: 2.4.9\n  backupFilePath: /tmp/migration.bak\nmetastore:\n  type: etcd\netcd:\n  endpoints:\n    - milvus-etcd:2379  # Use the etcd container name\n  rootPath: by-dev # The root path where data is stored in etcd\n  metaSubPath: meta\n  kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvusdb/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","// Run the following only after update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"","anchorList":[{"label":"Upgrade von Milvus Standalone mit Docker Compose","href":"Upgrade-Milvus-Standalone-with-Docker-Compose","type":1,"isActive":false},{"label":"Upgrade von Milvus durch Ändern des Images","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrieren Sie die Metadaten","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"Wie geht es weiter?","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["...\nstandalone:\n  container_name: milvus-standalone\n  image: milvusdb/milvus:v2.4.13-hotfix\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n  # Option: run/backup/rollback\n  type: run\n  runWithBackup: true\nconfig:\n  sourceVersion: 2.1.4   # Specify your milvus version\n  targetVersion: 2.4.13-hotfix\n  backupFilePath: /tmp/migration.bak\nmetastore:\n  type: etcd\netcd:\n  endpoints:\n    - milvus-etcd:2379  # Use the etcd container name\n  rootPath: by-dev # The root path where data is stored in etcd\n  metaSubPath: meta\n  kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvusdb/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","// Run the following only after update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"Upgrade Milvus Standalone with Docker Compose","anchorList":[{"label":"Upgrade von Milvus Standalone mit Docker Compose","href":"Upgrade-Milvus-Standalone-with-Docker-Compose","type":1,"isActive":false},{"label":"Upgrade von Milvus durch Ändern des Images","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrieren Sie die Metadaten","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"Wie geht es weiter?","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-docker.md b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-docker.md
    index de5d9dd76..a0fbacaf6 100644
    --- a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-docker.md
    +++ b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-docker.md
    @@ -26,9 +26,9 @@ title: Upgrade von Milvus Standalone mit Docker Compose

This topic describes how to upgrade your Milvus using Docker Compose.

-In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before any upgrade from v2.1.x to v2.4.9.
+In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before any upgrade from v2.1.x to v2.4.13-hotfix.

Due to security concerns, Milvus upgrades its MinIO to RELEASE.2023-03-20T20-16-18Z with the release of v2.2.5. Before any upgrades from previous Milvus Standalone releases installed using Docker Compose, you should create a Single-Node Single-Drive MinIO deployment and migrate the existing MinIO settings and content to the new deployment. For details, refer to this guide.

Upgrade Milvus by changing its image

Run the following commands to perform the upgrade.

docker compose down
docker compose up -d
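A quick, hedged way to confirm the restart picked up the new image (milvus-standalone is the container name from the compose file shown in the code list above):

# Confirm the standalone container is running the new image.
docker compose ps
docker inspect milvus-standalone --format '{{.Config.Image}}'
# expected output: milvusdb/milvus:v2.4.13-hotfix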
    @@ -85,7 +85,7 @@ cmd:
       runWithBackup: true
     config:
       sourceVersion: 2.1.4   # Specify your milvus version
    -  targetVersion: 2.4.9
    +  targetVersion: 2.4.13-hotfix
       backupFilePath: /tmp/migration.bak
     metastore:
       type: etcd
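For reference, the code list above runs this migration config through the meta-migration container as follows:

# Run the metadata migration; assumes the default milvus compose network and
# migration.yaml sitting next to docker-compose.yaml, as noted above.
docker run --rm -it --network milvus \
  -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml \
  milvusdb/meta-migration:v2.2.0 \
  /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml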
    diff --git a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-helm.json b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-helm.json
    index 798859b02..d767c8f80 100644
    --- a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-helm.json
    +++ b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-helm.json
    @@ -1 +1 @@
    -{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME                    CHART VERSION   APP VERSION             DESCRIPTION                                       \nzilliztech/milvus       4.1.34          2.4.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.33          2.4.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.32          2.4.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.31          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.30          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.29          2.4.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.24          2.3.11                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.23          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.22          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.21          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.20          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.18          2.3.10                  Milvus is an open-source vector database built ... \nzilliztech/milvus       4.1.18          2.3.9                   Milvus is an open-source vector database built ...                                       
\nzilliztech/milvus       4.1.17          2.3.8                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.16          2.3.7                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.15          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.14          2.3.6                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.13          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.12          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.11          2.3.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.10          2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.9           2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.8           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.7           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.6           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.5           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.4           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.3           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.2           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.1           2.3.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'\n","helm repo update\nhelm upgrade my-release milvus/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME                NAMESPACE   REVISION    UPDATED                                 STATUS      CHART           APP VERSION     \nmy-release          default     1           2022-11-21 15:41:25.51539 +0800 CST     deployed    milvus-3.2.18   2.1.4\n","NAME                                            READY   STATUS    RESTARTS   AGE\nmy-release-etcd-0                               1/1     Running   0          84s\nmy-release-milvus-standalone-75c599fffc-6rwlj   1/1     Running   0          84s\nmy-release-minio-744dd9586f-qngzv               1/1     Running   0          84s\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 
-r by-dev -o migrate -w milvusdb/milvus:v2.4.9\n"],"headingContent":"","anchorList":[{"label":"Upgrade von Milvus Standalone mit Helm Chart","href":"Upgrade-Milvus-Standalone-with-Helm-Chart","type":1,"isActive":false},{"label":"Überprüfen Sie die Milvus-Version","href":"Check-the-Milvus-version","type":2,"isActive":false},{"label":"Durchführen eines rollenden Upgrades","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Upgrade von Milvus mit Helm","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"Migrieren Sie die Metadaten","href":"Migrate-the-metadata","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME                    CHART VERSION   APP VERSION             DESCRIPTION                                       \nzilliztech/milvus       4.1.34          2.4.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.33          2.4.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.32          2.4.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.31          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.30          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.29          2.4.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.24          2.3.11                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.23          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.22          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.21          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.20          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.18          2.3.10                  Milvus is an open-source vector database built ... \nzilliztech/milvus       4.1.18          2.3.9                   Milvus is an open-source vector database built ...                                       
\nzilliztech/milvus       4.1.17          2.3.8                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.16          2.3.7                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.15          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.14          2.3.6                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.13          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.12          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.11          2.3.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.10          2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.9           2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.8           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.7           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.6           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.5           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.4           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.3           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.2           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.1           2.3.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'\n","helm repo update\nhelm upgrade my-release milvus/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME                NAMESPACE   REVISION    UPDATED                                 STATUS      CHART           APP VERSION     \nmy-release          default     1           2022-11-21 15:41:25.51539 +0800 CST     deployed    milvus-3.2.18   2.1.4\n","NAME                                            READY   STATUS    RESTARTS   AGE\nmy-release-etcd-0                               1/1     Running   0          84s\nmy-release-milvus-standalone-75c599fffc-6rwlj   1/1     Running   0          84s\nmy-release-minio-744dd9586f-qngzv               1/1     Running   0          84s\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback 
-w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix\n"],"headingContent":"Upgrade Milvus Standalone with Helm Chart","anchorList":[{"label":"Upgrade von Milvus Standalone mit Helm Chart","href":"Upgrade-Milvus-Standalone-with-Helm-Chart","type":1,"isActive":false},{"label":"Überprüfen Sie die Milvus-Version","href":"Check-the-Milvus-version","type":2,"isActive":false},{"label":"Durchführen eines rollenden Upgrades","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Upgrade von Milvus mit Helm","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"Migrieren Sie die Metadaten","href":"Migrate-the-metadata","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-helm.md b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-helm.md
    index d74df087b..1e34c3f37 100644
    --- a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-helm.md
    +++ b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-helm.md
    @@ -5,7 +5,7 @@ order: 1
     group: upgrade_milvus_standalone-operator.md
     related_key: upgrade Milvus Standalone
     summary: 'Erfahren Sie, wie Sie Milvus Standalone mit Helm Chart aktualisieren können.'
    -title: Upgrade Milvus Standalone mit Helm Chart
    +title: Upgrade von Milvus Standalone mit Helm Chart
     ---
     
     

Upgrade Milvus Standalone with Helm Chart

You can choose the upgrade path for your Milvus as follows:

-- [Conduct a rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.9.
+- [Conduct a rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.13-hotfix.

Conduct a rolling upgrade

Since Milvus 2.2.3, you can configure Milvus coordinators to work in active-standby mode and enable the rolling upgrade feature for them, so that Milvus can respond to incoming requests during coordinator upgrades. In previous releases, coordinators had to be removed and then recreated during an upgrade, which could cause certain downtime of the service.

Rolling upgrades require coordinators to work in active-standby mode. You can use the script we provide to configure the coordinators to work in active-standby mode and start the rolling upgrade.
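Before starting the rolling upgrade, it is worth confirming that the coordinators really run in active-standby mode; a simple sketch (pod names vary by release name and namespace):

# Expect two rootcoord pods (one active, one standby) after enabling
# active-standby mode; adjust the namespace and release name as needed.
kubectl get pods -n default | grep rootcoord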

Based on the rolling update capabilities provided by Kubernetes, the script above enforces an ordered update of the deployments according to their dependencies. In addition, Milvus implements a mechanism to ensure that its components remain compatible with those that depend on them during the upgrade, significantly reducing potential service downtime.

The script applies only to the upgrade of Milvus installed with Helm. The following table lists the command flags available in the scripts.

    @@ -123,14 +123,14 @@ zilliztech/milvus 4.1.0 2.3.0 Milvus is an ope
o | Operation | update | False
-Once you have ensured that all deployments in your Milvus instance are in their normal status, you can run the following command to upgrade the Milvus instance to 2.4.9.

-sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'

+Once you have ensured that all deployments in your Milvus instance are in their normal status, you can run the following command to upgrade the Milvus instance to 2.4.13-hotfix.

+sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'
     
1. The script does not apply to Milvus instances installed with RocksMQ.
2. The script hardcodes the upgrade order of the deployments and it cannot be changed.
3. The script uses kubectl patch to update the deployments, and kubectl rollout status to watch their status.
4. The script uses kubectl patch to update the app.kubernetes.io/version label of the deployments to the one specified after the -t flag in the command.
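To see the version label the script maintains, here is a hedged one-liner; the deployment name is an example for a standalone install:

# Read the app.kubernetes.io/version label updated by the script; the
# deployment name is an example and will differ in your cluster.
kubectl get deployment my-release-milvus-standalone -n default \
  -o jsonpath='{.metadata.labels.app\.kubernetes\.io/version}'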
    @@ -187,7 +187,7 @@ my-release-minio-744dd9586f-qngzv 1/1 Running 0 84s

Migrate the metadata

A major change in Milvus 2.2 is the metadata structure of segment indexes. Therefore, you need to use Helm to migrate the metadata while upgrading Milvus from v2.1.x to v2.2.0. Here is a script for you to safely migrate your metadata.

The script applies only to Milvus installed on a K8s cluster. If an error occurs during the process, first roll back to the previous version with the rollback operation.
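The migration script is typically fetched from the Milvus repository; the path below is an assumed location and should be verified:

# Assumed location of migrate.sh in the milvus repository; verify first.
wget https://raw.githubusercontent.com/milvus-io/milvus/master/deployments/migrate-meta/migrate.sh
chmod +x migrate.sh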

The following table lists the operations you can do for meta migration.

    @@ -213,25 +213,25 @@ my-release-minio-744dd9586f-qngzv 1/1 Running 0 84s
• Migrate the Milvus metadata.
• Start the Milvus components with a new image.
-2. Upgrade Milvus from v2.1.x to 2.4.9

-The following commands assume that you upgrade Milvus from v2.1.4 to 2.4.9. Change them to the versions that fit your needs.

+2. Upgrade Milvus from v2.1.x to 2.4.13-hotfix

+The following commands assume that you upgrade Milvus from v2.1.4 to 2.4.13-hotfix. Change them to the versions that fit your needs.

1. Specify the Milvus instance name, the source Milvus version, and the target Milvus version.

-./migrate.sh -i my-release -s 2.1.4 -t 2.4.9
+./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix
       
2. Specify the namespace with -n if your Milvus is not installed in the default K8s namespace.

-./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9
+./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix
       
3. Specify the root path with -r if your Milvus is installed with a custom rootpath.

-./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev
+./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev
       
4. Specify the image tag with -w if your Milvus is installed with a custom image.

-./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9
+./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix
       
5. Set -d true if you want to automatically remove the migration pod after the migration is completed.

-./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true
+./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true
       
6. Roll back and migrate again if the migration fails.

-./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
-./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9
+./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
+./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix
       
    diff --git a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-operator.json b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-operator.json index 629741ac7..6ef97e823 100644 --- a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-operator.json +++ b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-operator.json @@ -1 +1 @@ -{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nlabels:\n app: milvus\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.9\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.9\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.9\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://raw.githubusercontent.com/zilliztech/milvus-operator/main/config/samples/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"","anchorList":[{"label":"Aufrüstung von Milvus Standalone mit Milvus Operator","href":"Upgrade-Milvus-Standalone-with-Milvus-Operator","type":1,"isActive":false},{"label":"Aktualisieren Sie Ihren Milvus Operator","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"Durchführen eines rollenden Upgrades","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Upgrade von Milvus durch Änderung des Images","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrieren Sie die Metadaten","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: 
rollingDowngrade\n image: milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nlabels:\n app: milvus\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.13-hotfix\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.13-hotfix\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.13-hotfix\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://raw.githubusercontent.com/zilliztech/milvus-operator/main/config/samples/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"Upgrade Milvus Standalone with Milvus Operator","anchorList":[{"label":"Aufrüstung von Milvus Standalone mit Milvus Operator","href":"Upgrade-Milvus-Standalone-with-Milvus-Operator","type":1,"isActive":false},{"label":"Aktualisieren Sie Ihren Milvus Operator","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"Durchführen eines rollenden Upgrades","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Upgrade von Milvus durch Änderung des Images","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrieren Sie die Metadaten","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-operator.md b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-operator.md index e38c5cf66..9e725d8f3 100644 --- a/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-operator.md +++ b/localization/v2.4.x/site/de/adminGuide/upgrade_milvus_standalone-operator.md @@ -48,9 +48,9 @@ helm -n milvus-operator upgrade milvus-

Once you have upgraded your Milvus operator to the latest version, you have the following choices:

Conduct a rolling upgrade

In the configuration file above, set spec.components.enableRollingUpdate to true and spec.components.image to the desired Milvus version.

By default, Milvus conducts a rolling upgrade for coordinators in an ordered way, replacing the coordinator pod images one after another. To reduce the upgrade time, consider setting spec.components.imageUpdateMode to all so that Milvus replaces all pod images at the same time.

@@ -90,7 +90,7 @@ spec:
   components:
     enableRollingUpdate: true
     imageUpdateMode: all
-    image: milvusdb/milvus:v2.4.9
+    image: milvusdb/milvus:v2.4.13-hotfix

You can set spec.components.imageUpdateMode to rollingDowngrade to have Milvus replace the coordinator pod images with a lower version.

    apiVersion: milvus.io/v1beta1
    @@ -103,7 +103,7 @@ spec:
         imageUpdateMode: rollingDowngrade
         image: milvusdb/milvus:<some-older-version>
     
Then save your configuration as a YAML file (for example, milvusupgrade.yml) and apply this configuration file to your Milvus instance as follows:

    kubectl apply -f milvusupgrade.yml
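After applying the file, the operator-driven upgrade can be followed on the custom resource; kubectl describe milvus appears elsewhere in this guide, while the -w watch flag is standard kubectl:

# Watch the Milvus custom resource while the operator rolls out the upgrade.
kubectl get milvus my-release -n default -w
# Or inspect detailed status conditions:
kubectl describe milvus my-release -n default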
     

Upgrade Milvus by changing its image

Then run the following to perform the upgrade:

    kubectl apply -f milvusupgrade.yaml
    @@ -152,8 +152,8 @@ spec:
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
-Since Milvus 2.2.0, the metadata is incompatible with that in previous releases. The following example snippets assume an upgrade from Milvus 2.1.4 to Milvus v2.4.9.

-1. Create a .yaml file for the metadata migration

-Create a metadata migration file. The following is an example. You need to specify name, sourceVersion, and targetVersion in the configuration file. The following example sets name to my-release-upgrade, sourceVersion to v2.1.4, and targetVersion to v2.4.9. This means that your Milvus instance will be upgraded from v2.1.4 to v2.4.9.

+Since Milvus 2.2.0, the metadata is incompatible with that in previous releases. The following example snippets assume an upgrade from Milvus 2.1.4 to Milvus v2.4.13-hotfix.

+1. Create a .yaml file for the metadata migration

+Create a metadata migration file. The following is an example. You need to specify name, sourceVersion, and targetVersion in the configuration file. The following example sets name to my-release-upgrade, sourceVersion to v2.1.4, and targetVersion to v2.4.13-hotfix. This means that your Milvus instance will be upgraded from v2.1.4 to v2.4.13-hotfix.

    apiVersion: milvus.io/v1beta1
     kind: MilvusUpgrade
     metadata:
    @@ -163,9 +163,9 @@ spec:
         namespace: default
         name: my-release
       sourceVersion: "v2.1.4"
    -  targetVersion: "v2.4.9"
    +  targetVersion: "v2.4.13-hotfix"
       # below are some omit default values:
    -  # targetImage: "milvusdb/milvus:v2.4.9"
    +  # targetImage: "milvusdb/milvus:v2.4.13-hotfix"
       # toolImage: "milvusdb/meta-migration:v2.2.0"
       # operation: upgrade
       # rollbackIfFailed: true
    diff --git a/localization/v2.4.x/site/de/embeddings/embed-with-cohere.json b/localization/v2.4.x/site/de/embeddings/embed-with-cohere.json
    index 40401b602..3ce35a476 100644
    --- a/localization/v2.4.x/site/de/embeddings/embed-with-cohere.json
    +++ b/localization/v2.4.x/site/de/embeddings/embed-with-cohere.json
    @@ -1 +1 @@
    -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","cohere_ef = CohereEmbeddingFunction(\n    model_name=\"embed-english-light-v3.0\",\n    api_key=\"YOUR_COHERE_API_KEY\",\n    input_type=\"search_document\",\n    embedding_types=[\"float\"]\n)\n","docs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = cohere_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", cohere_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 3.43322754e-02,  1.16252899e-03, -5.25207520e-02,  1.32846832e-03,\n       -6.80541992e-02,  6.10961914e-02, -7.06176758e-02,  1.48925781e-01,\n        1.54174805e-01,  1.98516846e-02,  2.43835449e-02,  3.55224609e-02,\n        1.82952881e-02,  7.57446289e-02, -2.40783691e-02,  4.40063477e-02,\n...\n        0.06359863, -0.01971436, -0.02253723,  0.00354195,  0.00222015,\n        0.00184727,  0.03408813, -0.00777817,  0.04919434,  0.01519775,\n       -0.02862549,  0.04760742, -0.07891846,  0.0124054 ], dtype=float32)]\nDim: 384 (384,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = cohere_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", cohere_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-1.33361816e-02,  9.79423523e-04, -7.28759766e-02, -1.93786621e-02,\n       -9.71679688e-02,  4.34875488e-02, -9.81445312e-02,  1.16882324e-01,\n        5.89904785e-02, -4.19921875e-02,  4.95910645e-02,  5.83496094e-02,\n        3.47595215e-02, -5.87463379e-03, -7.30514526e-03,  2.92816162e-02,\n...\n        0.00749969, -0.01192474,  0.02719116,  0.03347778,  0.07696533,\n        0.01409149,  0.00964355, -0.01681519, -0.0073204 ,  0.00043154,\n       -0.04577637,  0.03591919, -0.02807617, -0.04812622], dtype=float32)]\nDim 384 (384,)\n"],"headingContent":"","anchorList":[{"label":"Cohere","href":"Cohere","type":1,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import CohereEmbeddingFunction\n\ncohere_ef = CohereEmbeddingFunction(\n    model_name=\"embed-english-light-v3.0\",\n    api_key=\"YOUR_COHERE_API_KEY\",\n    input_type=\"search_document\",\n    embedding_types=[\"float\"]\n)\n","docs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = cohere_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", cohere_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 3.43322754e-02,  1.16252899e-03, -5.25207520e-02,  1.32846832e-03,\n       -6.80541992e-02,  6.10961914e-02, -7.06176758e-02,  1.48925781e-01,\n        1.54174805e-01,  1.98516846e-02,  2.43835449e-02,  3.55224609e-02,\n        1.82952881e-02,  7.57446289e-02, -2.40783691e-02,  4.40063477e-02,\n...\n        0.06359863, -0.01971436, -0.02253723,  0.00354195,  0.00222015,\n        0.00184727,  0.03408813, -0.00777817,  0.04919434,  0.01519775,\n       -0.02862549,  0.04760742, -0.07891846,  0.0124054 ], dtype=float32)]\nDim: 384 (384,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = cohere_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", cohere_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-1.33361816e-02,  9.79423523e-04, -7.28759766e-02, -1.93786621e-02,\n       -9.71679688e-02,  4.34875488e-02, -9.81445312e-02,  1.16882324e-01,\n        5.89904785e-02, -4.19921875e-02,  4.95910645e-02,  5.83496094e-02,\n        3.47595215e-02, -5.87463379e-03, -7.30514526e-03,  2.92816162e-02,\n...\n        0.00749969, -0.01192474,  0.02719116,  0.03347778,  0.07696533,\n        0.01409149,  0.00964355, -0.01681519, -0.0073204 ,  0.00043154,\n       -0.04577637,  0.03591919, -0.02807617, -0.04812622], dtype=float32)]\nDim 384 (384,)\n"],"headingContent":"Cohere","anchorList":[{"label":"Cohere","href":"Cohere","type":1,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/de/embeddings/embed-with-cohere.md b/localization/v2.4.x/site/de/embeddings/embed-with-cohere.md
    index 65b14acc2..6c9efa579 100644
    --- a/localization/v2.4.x/site/de/embeddings/embed-with-cohere.md
    +++ b/localization/v2.4.x/site/de/embeddings/embed-with-cohere.md
    @@ -2,8 +2,8 @@
     id: embed-with-cohere.md
     order: 9
     summary: >-
    -  Dieser Artikel beschreibt die Verwendung der CohereEmbeddingFunction, um
    -  Dokumente und Abfragen mit dem Cohere-Einbettungsmodell zu kodieren.
    +  Dieser Artikel beschreibt die Verwendung der CohereEmbeddingFunction zur
    +  Kodierung von Dokumenten und Abfragen mit dem Cohere-Einbettungsmodell.
     title: Cohere einbetten
     ---
     

    Cohere

    Die Einbettungsmodelle von Cohere dienen zur Erzeugung von Texteinbettungen, d. h. von Listen mit Fließkommazahlen, die semantische Informationen über den Text enthalten. Diese Einbettungen können für Aufgaben wie Textklassifizierung und semantische Suche verwendet werden.

    Milvus lässt sich mit den Einbettungsmodellen von Cohere über die Klasse CohereEmbeddingFunction integrieren. Diese Klasse übernimmt die Berechnung der Einbettungen und gibt sie in einem mit Milvus kompatiblen Format zur Indizierung und Suche zurück.

-Um diese Funktion zu nutzen, installieren Sie die erforderlichen Abhängigkeiten:
+Um diese Funktion zu nutzen, installieren Sie die notwendigen Abhängigkeiten:

    pip install --upgrade pymilvus
     pip install "pymilvus[model]"
     

    Dann instanziieren Sie die Klasse CohereEmbeddingFunction:

-cohere_ef = CohereEmbeddingFunction(
+from pymilvus.model.dense import CohereEmbeddingFunction
+
+cohere_ef = CohereEmbeddingFunction(
         model_name="embed-english-light-v3.0",
         api_key="YOUR_COHERE_API_KEY",
         input_type="search_document",
    diff --git a/localization/v2.4.x/site/de/embeddings/embed-with-jina.json b/localization/v2.4.x/site/de/embeddings/embed-with-jina.json
    index 3ba04b590..4cee744ff 100644
    --- a/localization/v2.4.x/site/de/embeddings/embed-with-jina.json
    +++ b/localization/v2.4.x/site/de/embeddings/embed-with-jina.json
    @@ -1 +1 @@
    -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n    model_name=\"jina-embeddings-v2-base-en\", # Defaults to `jina-embeddings-v2-base-en`\n    api_key=JINAAI_API_KEY # Provide your Jina AI API key\n)\n","docs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = jina_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", jina_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([-4.88487840e-01, -4.28095880e-01,  4.90086500e-01, -1.63274320e-01,\n        3.43437800e-01,  3.21476880e-01,  2.83173790e-02, -3.10403670e-01,\n        4.76985040e-01, -1.77410420e-01, -3.84803180e-01, -2.19224200e-01,\n       -2.52898000e-01,  6.62411900e-02, -8.58173100e-01,  1.05221800e+00,\n...\n       -2.04462400e-01,  7.14229800e-01, -1.66823000e-01,  8.72551440e-01,\n        5.53560140e-01,  8.92506300e-01, -2.39408610e-01, -4.22413560e-01,\n       -3.19551350e-01,  5.59153850e-01,  2.44338100e-01, -8.60452100e-01])]\nDim: 768 (768,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = jina_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", jina_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-5.99164660e-01, -3.49827350e-01,  8.22405160e-01, -1.18632730e-01,\n        5.78107540e-01,  1.09789170e-01,  2.91604200e-01, -3.29306450e-01,\n        2.93779640e-01, -2.17880800e-01, -6.84535440e-01, -3.79752000e-01,\n       -3.47541800e-01,  9.20846100e-02, -6.13804400e-01,  6.31312800e-01,\n...\n       -1.84993740e-02,  9.38629150e-01,  2.74858470e-02,  1.09396360e+00,\n        3.96270750e-01,  7.44445800e-01, -1.95404050e-01, -6.08383200e-01,\n       -3.75076300e-01,  3.87512200e-01,  8.11889650e-01, -3.76407620e-01])]\nDim 768 (768,)\n"],"headingContent":"","anchorList":[{"label":"Jina AI","href":"Jina-AI","type":1,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n    model_name=\"jina-embeddings-v3\", # Defaults to `jina-embeddings-v3`\n    api_key=JINAAI_API_KEY, # Provide your Jina AI API key\n    task=\"retrieval.passage\", # Specify the task\n    dimensions=1024, # Defaults to 1024\n)\n","\n```python\ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = jina_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", jina_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([9.80641991e-02, -8.51697400e-02,  7.36531913e-02,  1.42558888e-02,\n       -2.23589484e-02,  1.68494112e-03, -3.50753777e-02, -3.11530549e-02,\n       -3.26012149e-02,  5.04568312e-03,  3.69836427e-02,  3.48948985e-02,\n        8.19722563e-03,  5.88679723e-02, -6.71099266e-03, -1.82369724e-02,\n...\n        2.48654783e-02,  3.43279652e-02, -1.66154150e-02, -9.90478322e-03,\n       -2.96043139e-03, -8.57473817e-03, -7.39028037e-04,  6.25024503e-03,\n       -1.08831357e-02, -4.00776342e-02,  3.25369164e-02, -1.42691191e-03])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = jina_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", jina_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([8.79201014e-03,  1.47551354e-02,  4.02722731e-02, -2.52991207e-02,\n        1.12719582e-02,  3.75947170e-02,  3.97946090e-02, -7.36681819e-02,\n       -2.17952449e-02, -1.16298944e-02, -6.83426252e-03, -5.12507409e-02,\n        5.26071340e-02,  6.75181448e-02,  3.92445624e-02, -1.40817231e-02,\n...\n        8.81703943e-03,  4.24629413e-02, -2.32944116e-02, -2.05193572e-02,\n       -3.22035812e-02,  2.81896023e-03,  3.85326855e-02,  3.64372656e-02,\n       -1.65050142e-02, -4.26847413e-02,  2.02664156e-02, -1.72684863e-02])]\nDim 1024 (1024,)\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n    model_name=\"jina-embeddings-v3\", # Defaults to `jina-embeddings-v3`\n    api_key=JINA_API_KEY, # Provide your Jina AI API key\n    task=\"text-matching\",\n    dimensions=1024, # Defaults to 1024\n)\n\ntexts = [\n    \"Follow the white rabbit.\",  # English\n    \"Sigue al conejo blanco.\",  # Spanish\n    \"Suis le lapin blanc.\",  # French\n    \"跟着白兔走。\",  # Chinese\n    \"اتبع الأرنب الأبيض.\",  # Arabic\n    \"Folge dem weißen Kaninchen.\",  # German\n]\n\nembeddings = jina_ef(texts)\n\n# Compute similarities\nprint(embeddings[0] @ embeddings[1].T)\n"],"headingContent":"Jina AI","anchorList":[{"label":"Jina AI","href":"Jina-AI","type":1,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/de/embeddings/embed-with-jina.md b/localization/v2.4.x/site/de/embeddings/embed-with-jina.md
    index 21e13c8dd..ee968f304 100644
    --- a/localization/v2.4.x/site/de/embeddings/embed-with-jina.md
    +++ b/localization/v2.4.x/site/de/embeddings/embed-with-jina.md
    @@ -23,7 +23,7 @@ title: Jina AI - Einbetten
           
         

    Die Einbettungsmodelle von Jina AI sind leistungsstarke Modelle zur Texteinbettung, die Texteingaben in numerische Darstellungen übersetzen können und dabei die Semantik des Textes erfassen. Diese Modelle eignen sich hervorragend für Anwendungen wie dichtes Retrieval, semantische Textähnlichkeit und mehrsprachiges Verständnis.

    Milvus lässt sich über die Klasse JinaEmbeddingFunction mit den Einbettungsmodellen von Jina AI integrieren. Diese Klasse bietet Methoden zur Kodierung von Dokumenten und Abfragen unter Verwendung der Jina AI Einbettungsmodelle und gibt die Einbettungen als dichte Vektoren zurück, die mit der Milvus-Indizierung kompatibel sind. Um diese Funktionalität nutzen zu können, müssen Sie einen API-Schlüssel von Jina AI erhalten.

-Um diese Funktion zu nutzen, installieren Sie die erforderlichen Abhängigkeiten:
+Um diese Funktion zu nutzen, installieren Sie die notwendigen Abhängigkeiten:

    pip install --upgrade pymilvus
     pip install "pymilvus[model]"
     
    @@ -31,19 +31,36 @@ pip install "pymilvus[model]"
    from pymilvus.model.dense import JinaEmbeddingFunction
     
     jina_ef = JinaEmbeddingFunction(
    -    model_name="jina-embeddings-v2-base-en", # Defaults to `jina-embeddings-v2-base-en`
    -    api_key=JINAAI_API_KEY # Provide your Jina AI API key
    +    model_name="jina-embeddings-v3", # Defaults to `jina-embeddings-v3`
    +    api_key=JINAAI_API_KEY, # Provide your Jina AI API key
    +    task="retrieval.passage", # Specify the task
    +    dimensions=1024, # Defaults to 1024
     )
     

    Parameter:

    • model_name (String)

-  Der Name des Jina AI Einbettungsmodells, das für die Kodierung verwendet werden soll. Sie können jeden der verfügbaren Namen des Jina AI-Einbettungsmodells angeben, z. B. jina-embeddings-v2-base-en, jina-embeddings-v2-small-en, usw. Wenn Sie diesen Parameter nicht angeben, wird jina-embeddings-v2-base-en verwendet. Eine Liste der verfügbaren Modelle finden Sie unter Jina Embeddings.
+  Der Name des Jina AI Einbettungsmodells, das für die Kodierung verwendet werden soll. Sie können jeden der verfügbaren Namen des Jina AI-Einbettungsmodells angeben, z. B. jina-embeddings-v3, jina-embeddings-v2-base-en, usw. Wenn Sie diesen Parameter nicht angeben, wird jina-embeddings-v3 verwendet. Eine Liste der verfügbaren Modelle finden Sie unter Jina Embeddings.

    • api_key (Zeichenkette)

      Der API-Schlüssel für den Zugriff auf die Jina AI API.

• task (Zeichenkette)

  Der Typ der an das Modell übergebenen Eingabe. Erforderlich für Einbettungsmodelle v3 und höher.

  • "retrieval.passage": Wird verwendet, um große Dokumente in Retrieval-Aufgaben zum Zeitpunkt der Indizierung zu kodieren.
  • "retrieval.query": Wird verwendet, um Benutzeranfragen oder Fragen in Retrieval-Aufgaben zu kodieren.
  • "classification": Zur Kodierung von Text für Textklassifizierungsaufgaben.
  • "text-matching": Kodierung von Text für Ähnlichkeitsvergleiche, z. B. zur Messung der Ähnlichkeit zwischen zwei Sätzen.
  • "clustering": Wird für Clustering- oder Reranking-Aufgaben verwendet.

• dimensions (int)

  Die Anzahl der Dimensionen, die die resultierenden Ausgabeeinbettungen haben sollen. Der Standardwert ist 1024. Wird nur für Einbettungsmodelle v3 und höher unterstützt.

• late_chunking (bool)

  Dieser Parameter steuert, ob die neue Chunking-Methode verwendet werden soll, die Jina AI kürzlich für die Kodierung einer Reihe von Sätzen eingeführt hat. Der Standardwert ist False. Wenn er auf True gesetzt wird, verkettet die Jina AI API alle Sätze im Eingabefeld und übergibt sie als eine einzige Zeichenkette an das Modell. Intern bettet das Modell diese lange, verkettete Zeichenkette ein und führt dann ein Late Chunking durch, wobei es eine Liste von Einbettungen zurückgibt, die der Größe der Eingabeliste entspricht.
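Eine minimale Skizze, wie late_chunking bei der Instanziierung gesetzt wird (hypothetisches Beispiel auf Basis der obigen Parameterbeschreibung; der API-Schlüssel ist ein Platzhalter):

from pymilvus.model.dense import JinaEmbeddingFunction

# Late Chunking aktivieren: Sätze werden verkettet eingebettet und erst danach gechunkt
jina_ef = JinaEmbeddingFunction(
    model_name="jina-embeddings-v3",
    api_key="YOUR_JINA_API_KEY",  # Platzhalter
    task="retrieval.passage",
    dimensions=1024,
    late_chunking=True,
)

# Die Rückgabeliste hat dieselbe Länge wie die Eingabeliste
sentences = ["Milvus ist eine Vektordatenbank.", "Sie speichert Einbettungen."]
embeddings = jina_ef.encode_documents(sentences)
print(len(embeddings), embeddings[0].shape)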

-Um Einbettungen für Dokumente zu erstellen, verwenden Sie die Methode encode_documents():
+Um Einbettungen für Dokumente zu erstellen, verwenden Sie die Methode encode_documents(). Diese Methode wurde für die Einbettung von Dokumenten in asymmetrischen Retrieval-Aufgaben entwickelt, z. B. die Indizierung von Dokumenten für Such- oder Empfehlungsaufgaben. Diese Methode verwendet retrieval.passage als Aufgabe.

 docs = [
         "Artificial intelligence was founded as an academic discipline in 1956.",
         "Alan Turing was the first person to conduct substantial research in AI.",
         "Born in Maida Vale, London, Turing was raised in southern England.",
    @@ -57,17 +74,17 @@ docs_embeddings = jina_ef.encode_documents(docs)
     print("Dim:", jina_ef.dim, docs_embeddings[0].shape)
     

    Die erwartete Ausgabe ist ähnlich wie die folgende:

-Embeddings: [array([-4.88487840e-01, -4.28095880e-01,  4.90086500e-01, -1.63274320e-01,
    -        3.43437800e-01,  3.21476880e-01,  2.83173790e-02, -3.10403670e-01,
    -        4.76985040e-01, -1.77410420e-01, -3.84803180e-01, -2.19224200e-01,
    -       -2.52898000e-01,  6.62411900e-02, -8.58173100e-01,  1.05221800e+00,
+Embeddings: [array([9.80641991e-02, -8.51697400e-02,  7.36531913e-02,  1.42558888e-02,
    +       -2.23589484e-02,  1.68494112e-03, -3.50753777e-02, -3.11530549e-02,
    +       -3.26012149e-02,  5.04568312e-03,  3.69836427e-02,  3.48948985e-02,
    +        8.19722563e-03,  5.88679723e-02, -6.71099266e-03, -1.82369724e-02,
     ...
    -       -2.04462400e-01,  7.14229800e-01, -1.66823000e-01,  8.72551440e-01,
    -        5.53560140e-01,  8.92506300e-01, -2.39408610e-01, -4.22413560e-01,
    -       -3.19551350e-01,  5.59153850e-01,  2.44338100e-01, -8.60452100e-01])]
    -Dim: 768 (768,)
    +        2.48654783e-02,  3.43279652e-02, -1.66154150e-02, -9.90478322e-03,
    +       -2.96043139e-03, -8.57473817e-03, -7.39028037e-04,  6.25024503e-03,
    +       -1.08831357e-02, -4.00776342e-02,  3.25369164e-02, -1.42691191e-03])]
    +Dim: 1024 (1024,)
     
-Um Einbettungen für Abfragen zu erstellen, verwenden Sie die Methode encode_queries():
+Um Einbettungen für Abfragen zu erstellen, verwenden Sie die Methode encode_queries(). Diese Methode wurde für die Einbettung von Abfragen in asymmetrischen Retrieval-Aufgaben entwickelt, wie z. B. Suchanfragen oder Fragen. Diese Methode verwendet retrieval.query als Aufgabe.

    queries = ["When was artificial intelligence founded", 
                "Where was Alan Turing born?"]
     
    @@ -77,13 +94,37 @@ query_embeddings = jina_ef.encode_queries(queries)
     print("Dim", jina_ef.dim, query_embeddings[0].shape)
     

    Die erwartete Ausgabe ist ähnlich wie die folgende:

-Embeddings: [array([-5.99164660e-01, -3.49827350e-01,  8.22405160e-01, -1.18632730e-01,
    -        5.78107540e-01,  1.09789170e-01,  2.91604200e-01, -3.29306450e-01,
    -        2.93779640e-01, -2.17880800e-01, -6.84535440e-01, -3.79752000e-01,
    -       -3.47541800e-01,  9.20846100e-02, -6.13804400e-01,  6.31312800e-01,
+Embeddings: [array([8.79201014e-03,  1.47551354e-02,  4.02722731e-02, -2.52991207e-02,
    +        1.12719582e-02,  3.75947170e-02,  3.97946090e-02, -7.36681819e-02,
    +       -2.17952449e-02, -1.16298944e-02, -6.83426252e-03, -5.12507409e-02,
    +        5.26071340e-02,  6.75181448e-02,  3.92445624e-02, -1.40817231e-02,
     ...
    -       -1.84993740e-02,  9.38629150e-01,  2.74858470e-02,  1.09396360e+00,
    -        3.96270750e-01,  7.44445800e-01, -1.95404050e-01, -6.08383200e-01,
    -       -3.75076300e-01,  3.87512200e-01,  8.11889650e-01, -3.76407620e-01])]
    -Dim 768 (768,)
    +        8.81703943e-03,  4.24629413e-02, -2.32944116e-02, -2.05193572e-02,
    +       -3.22035812e-02,  2.81896023e-03,  3.85326855e-02,  3.64372656e-02,
    +       -1.65050142e-02, -4.26847413e-02,  2.02664156e-02, -1.72684863e-02])]
    +Dim 1024 (1024,)
+
+Um Einbettungen von Eingaben für den Ähnlichkeitsabgleich (z. B. STS- oder symmetrische Retrieval-Aufgaben), die Textklassifizierung, das Clustering oder Reranking-Aufgaben zu erstellen, verwenden Sie bei der Instanziierung der Klasse JinaEmbeddingFunction den entsprechenden Parameterwert task.
+
+from pymilvus.model.dense import JinaEmbeddingFunction
    +
    +jina_ef = JinaEmbeddingFunction(
    +    model_name="jina-embeddings-v3", # Defaults to `jina-embeddings-v3`
    +    api_key=JINA_API_KEY, # Provide your Jina AI API key
    +    task="text-matching",
    +    dimensions=1024, # Defaults to 1024
    +)
    +
    +texts = [
    +    "Follow the white rabbit.",  # English
    +    "Sigue al conejo blanco.",  # Spanish
    +    "Suis le lapin blanc.",  # French
    +    "跟着白兔走。",  # Chinese
    +    "اتبع الأرنب الأبيض.",  # Arabic
    +    "Folge dem weißen Kaninchen.",  # German
    +]
    +
    +embeddings = jina_ef(texts)
    +
    +# Compute similarities
    +print(embeddings[0] @ embeddings[1].T)
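Ergänzend eine kleine Skizze zur paarweisen Ähnlichkeitsmatrix (Annahme: die bei text-matching zurückgegebenen Vektoren sind normalisiert, sodass das Skalarprodukt der Kosinus-Ähnlichkeit entspricht):

import numpy as np

# embeddings: Liste der oben berechneten Vektoren (Form je Vektor: (1024,))
matrix = np.stack(embeddings)     # Form: (6, 1024)
similarities = matrix @ matrix.T  # Form: (6, 6)
print(np.round(similarities, 3))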
     
diff --git a/localization/v2.4.x/site/de/embeddings/embed-with-voyage.json b/localization/v2.4.x/site/de/embeddings/embed-with-voyage.json
index c5fe18415..ba7547304 100644
--- a/localization/v2.4.x/site/de/embeddings/embed-with-voyage.json
+++ b/localization/v2.4.x/site/de/embeddings/embed-with-voyage.json
@@ -1 +1 @@
-{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import VoyageEmbeddingFunction\n\nvoyage_ef = VoyageEmbeddingFunction(\n model_name=\"voyage-lite-02-instruct\", # Defaults to `voyage-2`\n api_key=VOYAGE_API_KEY # Provide your Voyage API key\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = voyage_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", voyage_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 0.02582654, -0.00907086, -0.04604037, ..., -0.01227521,\n 0.04420955, -0.00038829]), array([ 0.03844212, -0.01597065, -0.03728884, ..., -0.02118733,\n 0.03349845, 0.0065346 ]), array([ 0.05143557, -0.01096631, -0.02690451, ..., -0.02416254,\n 0.07658645, 0.03064499])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = voyage_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", voyage_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([ 0.01733501, -0.0230672 , -0.05208827, ..., -0.00957995,\n 0.04493361, 0.01485138]), array([ 0.05937521, -0.00729363, -0.02184347, ..., -0.02107683,\n 0.05706626, 0.0263358 ])]\nDim 1024 (1024,)\n"],"headingContent":"","anchorList":[{"label":"Voyage","href":"Voyage","type":1,"isActive":false}]}
\ No newline at end of file
+{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import VoyageEmbeddingFunction\n\nvoyage_ef = VoyageEmbeddingFunction(\n model_name=\"voyage-3\", # Defaults to `voyage-3`\n api_key=VOYAGE_API_KEY # Provide your Voyage API key\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = voyage_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", voyage_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 0.02582654, -0.00907086, -0.04604037, ..., -0.01227521,\n 0.04420955, -0.00038829]), array([ 0.03844212, -0.01597065, -0.03728884, ..., -0.02118733,\n 0.03349845, 0.0065346 ]), array([ 0.05143557, -0.01096631, -0.02690451, ..., -0.02416254,\n 0.07658645, 0.03064499])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = voyage_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", voyage_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([ 0.01733501, -0.0230672 , -0.05208827, ..., -0.00957995,\n 0.04493361, 0.01485138]), array([ 0.05937521, -0.00729363, -0.02184347, ..., -0.02107683,\n 0.05706626, 0.0263358 ])]\nDim 1024 (1024,)\n"],"headingContent":"Voyage","anchorList":[{"label":"Voyage","href":"Voyage","type":1,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.4.x/site/de/embeddings/embed-with-voyage.md b/localization/v2.4.x/site/de/embeddings/embed-with-voyage.md
index d57a3135d..9be249ec5 100644
--- a/localization/v2.4.x/site/de/embeddings/embed-with-voyage.md
+++ b/localization/v2.4.x/site/de/embeddings/embed-with-voyage.md
@@ -30,13 +30,13 @@ pip install "pymilvus[model]"
    from pymilvus.model.dense import VoyageEmbeddingFunction
     
     voyage_ef = VoyageEmbeddingFunction(
    -    model_name="voyage-lite-02-instruct", # Defaults to `voyage-2`
    +    model_name="voyage-3", # Defaults to `voyage-3`
         api_key=VOYAGE_API_KEY # Provide your Voyage API key
     )
     

    Parameter:

-• model_name (string) Der Name des Voyage-Modells, das für die Kodierung verwendet werden soll. Sie können jeden der verfügbaren Voyage-Modellnamen angeben, z. B. voyage-law-2, voyage-code-2, usw. Wenn Sie diesen Parameter nicht angeben, wird voyage-2 verwendet. Eine Liste der verfügbaren Modelle finden Sie in der offiziellen Voyage-Dokumentation.
+• model_name (string) Der Name des Voyage-Modells, das für die Kodierung verwendet werden soll. Sie können jeden der verfügbaren Voyage-Modellnamen angeben, z. B. voyage-3-lite, voyage-finance-2, usw. Wenn Sie diesen Parameter nicht angeben, wird voyage-3 verwendet. Eine Liste der verfügbaren Modelle finden Sie in der offiziellen Voyage-Dokumentation.
    • api_key (string) Der API-Schlüssel für den Zugriff auf die Voyage-API. Informationen zur Erstellung eines API-Schlüssels finden Sie unter API-Schlüssel und Python-Client.

    Um Einbettungen für Dokumente zu erstellen, verwenden Sie die Methode encode_documents():
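Die zugehörigen Schritte aus der codeList oben, hier als kompakte Skizze:

docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Alan Turing was the first person to conduct substantial research in AI.",
    "Born in Maida Vale, London, Turing was raised in southern England.",
]

# Dokumente kodieren; voyage-3 liefert 1024-dimensionale Vektoren
docs_embeddings = voyage_ef.encode_documents(docs)
print("Dim:", voyage_ef.dim, docs_embeddings[0].shape)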

diff --git a/localization/v2.4.x/site/de/embeddings/embeddings.json b/localization/v2.4.x/site/de/embeddings/embeddings.json
index 5bf724d74..e7288be4c 100644
--- a/localization/v2.4.x/site/de/embeddings/embeddings.json
+++ b/localization/v2.4.x/site/de/embeddings/embeddings.json
@@ -1 +1 @@
-{"codeList":["pip install \"pymilvus[model]\"\n","from pymilvus import model\n\n# This will download \"all-MiniLM-L6-v2\", a light weight model.\nef = model.DefaultEmbeddingFunction()\n\n# Data from which embeddings are to be generated \ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nembeddings = ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", ef.dim, embeddings[0].shape)\n","Embeddings: [array([-3.09392996e-02, -1.80662833e-02, 1.34775648e-02, 2.77156215e-02,\n -4.86349640e-03, -3.12581174e-02, -3.55921760e-02, 5.76934684e-03,\n 2.80773244e-03, 1.35783911e-01, 3.59678417e-02, 6.17732145e-02,\n...\n -4.61330153e-02, -4.85207550e-02, 3.13997865e-02, 7.82178566e-02,\n -4.75336798e-02, 5.21207601e-02, 9.04406682e-02, -5.36676683e-02],\n dtype=float32)]\nDim: 384 (384,)\n","from pymilvus.model.hybrid import BGEM3EmbeddingFunction\nfrom pymilvus import (\n utility,\n FieldSchema, CollectionSchema, DataType,\n Collection, AnnSearchRequest, RRFRanker, connections,\n)\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Who started AI research?\"\n\n# BGE-M3 model can embed texts as dense and sparse vectors.\n# It is included in the optional `model` module in pymilvus, to install it,\n# simply run \"pip install pymilvus[model]\".\n\nbge_m3_ef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\n\ndocs_embeddings = bge_m3_ef(docs)\nquery_embeddings = bge_m3_ef([query])\n","from pymilvus.model.sparse import BM25EmbeddingFunction\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Where was Turing born?\"\nbm25_ef = BM25EmbeddingFunction()\n\n# 2. fit the corpus to get BM25 model parameters on your documents.\nbm25_ef.fit(docs)\n\n# 3. store the fitted parameters to disk to expedite future processing.\nbm25_ef.save(\"bm25_params.json\")\n\n# 4. load the saved params\nnew_bm25_ef = BM25EmbeddingFunction()\nnew_bm25_ef.load(\"bm25_params.json\")\n\ndocs_embeddings = new_bm25_ef.encode_documents(docs)\nquery_embeddings = new_bm25_ef.encode_queries([query])\nprint(\"Dim:\", new_bm25_ef.dim, list(docs_embeddings)[0].shape)\n","Dim: 21 (1, 21)\n"],"headingContent":"","anchorList":[{"label":"Einbettung - Überblick","href":"Embedding-Overview","type":1,"isActive":false},{"label":"Beispiel 1: Verwendung der Standard-Einbettungsfunktion zur Erzeugung dichter Vektoren","href":"Example-1-Use-default-embedding-function-to-generate-dense-vectors","type":2,"isActive":false},{"label":"Beispiel 2: Erzeugen von dichten und spärlichen Vektoren in einem Aufruf mit dem BGE M3-Modell","href":"Example-2-Generate-dense-and-sparse-vectors-in-one-call-with-BGE-M3-model","type":2,"isActive":false},{"label":"Beispiel 3: Generierung von spärlichen Vektoren mit dem BM25-Modell","href":"Example-3-Generate--sparse-vectors-using-BM25-model","type":2,"isActive":false}]}
\ No newline at end of file
+{"codeList":["pip install \"pymilvus[model]\"\n","from pymilvus import model\n\n# This will download \"all-MiniLM-L6-v2\", a light weight model.\nef = model.DefaultEmbeddingFunction()\n\n# Data from which embeddings are to be generated \ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nembeddings = ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", ef.dim, embeddings[0].shape)\n","Embeddings: [array([-3.09392996e-02, -1.80662833e-02, 1.34775648e-02, 2.77156215e-02,\n -4.86349640e-03, -3.12581174e-02, -3.55921760e-02, 5.76934684e-03,\n 2.80773244e-03, 1.35783911e-01, 3.59678417e-02, 6.17732145e-02,\n...\n -4.61330153e-02, -4.85207550e-02, 3.13997865e-02, 7.82178566e-02,\n -4.75336798e-02, 5.21207601e-02, 9.04406682e-02, -5.36676683e-02],\n dtype=float32)]\nDim: 384 (384,)\n","from pymilvus.model.hybrid import BGEM3EmbeddingFunction\nfrom pymilvus import (\n utility,\n FieldSchema, CollectionSchema, DataType,\n Collection, AnnSearchRequest, RRFRanker, connections,\n)\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Who started AI research?\"\n\n# BGE-M3 model can embed texts as dense and sparse vectors.\n# It is included in the optional `model` module in pymilvus, to install it,\n# simply run \"pip install pymilvus[model]\".\n\nbge_m3_ef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\n\ndocs_embeddings = bge_m3_ef(docs)\nquery_embeddings = bge_m3_ef([query])\n","from pymilvus.model.sparse import BM25EmbeddingFunction\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Where was Turing born?\"\nbm25_ef = BM25EmbeddingFunction()\n\n# 2. fit the corpus to get BM25 model parameters on your documents.\nbm25_ef.fit(docs)\n\n# 3. store the fitted parameters to disk to expedite future processing.\nbm25_ef.save(\"bm25_params.json\")\n\n# 4. load the saved params\nnew_bm25_ef = BM25EmbeddingFunction()\nnew_bm25_ef.load(\"bm25_params.json\")\n\ndocs_embeddings = new_bm25_ef.encode_documents(docs)\nquery_embeddings = new_bm25_ef.encode_queries([query])\nprint(\"Dim:\", new_bm25_ef.dim, list(docs_embeddings)[0].shape)\n","Dim: 21 (1, 21)\n"],"headingContent":"Embedding Overview","anchorList":[{"label":"Einbettung Überblick","href":"Embedding-Overview","type":1,"isActive":false},{"label":"Beispiel 1: Verwendung der Standard-Einbettungsfunktion zur Erzeugung dichter Vektoren","href":"Example-1-Use-default-embedding-function-to-generate-dense-vectors","type":2,"isActive":false},{"label":"Beispiel 2: Erzeugen von dichten und spärlichen Vektoren in einem Aufruf mit dem BGE M3-Modell","href":"Example-2-Generate-dense-and-sparse-vectors-in-one-call-with-BGE-M3-model","type":2,"isActive":false},{"label":"Beispiel 3: Generierung von spärlichen Vektoren mit dem BM25-Modell","href":"Example-3-Generate--sparse-vectors-using-BM25-model","type":2,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.4.x/site/de/embeddings/embeddings.md b/localization/v2.4.x/site/de/embeddings/embeddings.md
index bd70a0275..7115b904a 100644
--- a/localization/v2.4.x/site/de/embeddings/embeddings.md
+++ b/localization/v2.4.x/site/de/embeddings/embeddings.md
@@ -2,9 +2,9 @@
 id: embeddings.md
 order: 1
 summary: 'Erfahren Sie, wie Sie Einbettungen für Ihre Daten erstellen können.'
-title: Übersicht über die Einbettung
+title: Einbettung Überblick
 ---

-Einbettung - Überblick
+Einbettung Überblick

-Embedding ist ein Konzept des maschinellen Lernens zur Abbildung von Daten in einem hochdimensionalen Raum, in dem Daten mit ähnlicher Semantik nahe beieinander angeordnet werden. Bei dem Einbettungsmodell handelt es sich in der Regel um ein Deep Neural Network von BERT oder anderen Transformer-Familien, das die Semantik von Text, Bildern und anderen Datentypen durch eine Reihe von Zahlen, die als Vektoren bezeichnet werden, effektiv darstellen kann. Ein wesentliches Merkmal dieser Modelle ist, dass der mathematische Abstand zwischen Vektoren im hochdimensionalen Raum die Ähnlichkeit der Semantik von Originaltexten oder -bildern anzeigen kann. Diese Eigenschaft ermöglicht zahlreiche Anwendungen für die Informationsgewinnung, z. B. Web-Suchmaschinen wie Google und Bing, Produktsuche und -empfehlungen auf E-Commerce-Websites und das kürzlich populäre Retrieval Augmented Generation (RAG)-Paradigma in der generativen KI.
+Embedding ist ein Konzept des maschinellen Lernens zur Abbildung von Daten in einem hochdimensionalen Raum, in dem Daten mit ähnlicher Semantik nahe beieinander angeordnet werden. Bei dem Einbettungsmodell handelt es sich in der Regel um ein Deep Neural Network von BERT oder anderen Transformer-Familien, das die Semantik von Text, Bildern und anderen Datentypen durch eine Reihe von Zahlen, die als Vektoren bezeichnet werden, effektiv darstellen kann. Ein wesentliches Merkmal dieser Modelle ist, dass der mathematische Abstand zwischen Vektoren im hochdimensionalen Raum die Ähnlichkeit der Semantik von Originaltexten oder -bildern anzeigen kann. Diese Eigenschaft ermöglicht zahlreiche Anwendungen für die Informationsbeschaffung, z. B. Web-Suchmaschinen wie Google und Bing, Produktsuche und -empfehlungen auf E-Commerce-Websites und das kürzlich populäre Paradigma Retrieval Augmented Generation (RAG) in der generativen KI.

    Es gibt zwei Hauptkategorien von Einbettungen, die jeweils eine andere Art von Vektor erzeugen:

• Dense Embedding: Die meisten Einbettungsmodelle stellen Informationen als Gleitkomma-Vektor mit Hunderten bis Tausenden von Dimensionen dar. Die Ausgabe wird als "dichte" Vektoren bezeichnet, da die meisten Dimensionen Nicht-Null-Werte haben. Das beliebte Open-Source-Einbettungsmodell BAAI/bge-base-en-v1.5 beispielsweise gibt Vektoren mit 768 Fließkommazahlen aus (768-dimensionaler Float-Vektor).

• Spärliche Einbettung: Im Gegensatz dazu sind bei den Ausgabevektoren spärlicher Einbettungen die meisten Dimensionen Null; man spricht daher von "spärlichen" Vektoren. Diese Vektoren haben oft viel höhere Dimensionen (Zehntausende oder mehr), was durch die Größe des Token-Vokabulars bestimmt wird. Spärliche Vektoren können durch Deep Neural Networks oder statistische Analyse von Textkorpora erzeugt werden. Aufgrund ihrer Interpretierbarkeit und der beobachteten besseren Verallgemeinerungsfähigkeiten außerhalb der Domäne werden Sparse Embeddings von Entwicklern zunehmend als Ergänzung zu Dense Embeddings eingesetzt.
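Zur Veranschaulichung des Unterschieds eine schematische Skizze mit vereinfachten, hypothetischen Werten:

# Dichter Vektor: fast alle Dimensionen sind ungleich Null (hier stark verkürzt)
dense_vector = [0.12, -0.53, 0.07, 0.91, -0.24]

# Spärlicher Vektor: nur wenige von Zehntausenden Dimensionen sind belegt,
# daher kompakt als {Index: Wert} dargestellt
sparse_vector = {17: 0.84, 4096: 0.21, 28403: 0.66}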

-Milvus ist eine Vektordatenbank, die für die Verwaltung, Speicherung und Abfrage von Vektordaten entwickelt wurde. Durch die Integration von Mainstream-Embedding- und Reranking-Modellen können Sie den Originaltext einfach in durchsuchbare Vektoren umwandeln oder die Ergebnisse mithilfe leistungsstarker Modelle neu ordnen, um genauere Ergebnisse für RAG zu erzielen. Diese Integration vereinfacht die Textumwandlung und macht zusätzliche Komponenten für die Einbettung oder das Reranking überflüssig, wodurch die Entwicklung und Validierung von RAGs rationalisiert wird.
+Milvus ist eine Vektordatenbank, die für die Verwaltung, Speicherung und Abfrage von Vektordaten entwickelt wurde. Durch die Integration von Mainstream-Embedding- und Reranking-Modellen können Sie den Originaltext einfach in durchsuchbare Vektoren umwandeln oder die Ergebnisse mithilfe leistungsstarker Modelle ranken, um genauere Ergebnisse für RAG zu erzielen. Diese Integration vereinfacht die Textumwandlung und macht zusätzliche Komponenten für die Einbettung oder das Reranking überflüssig, wodurch die Entwicklung und Validierung von RAGs rationalisiert wird.

    Um Einbettungen in Aktion zu erstellen, siehe Verwendung des PyMilvus-Modells zur Generierung von Texteinbettungen.

Parameter | Beschreibung | Standardwert | Erforderlich

@@ -40,6 +40,10 @@ title: Übersicht über die Einbettung

Einbettungsfunktion | Typ    | API oder Open Source
--------------------|--------|---------------------
voyageai            | Dicht  | API
jina                | Dicht  | API
cohere              | Dicht  | API
Instructor          | Dicht  | Open Source
Mistral AI          | Dicht  | API
Nomic               | Dicht  | API
mGTE                | Hybrid | Open Source

    Beispiel 1: Verwendung der Standard-Einbettungsfunktion zur Erzeugung dichter Vektoren
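Der zu dieser Überschrift gehörende Fließtext fehlt in diesem Ausschnitt; der Beispielcode aus der codeList der zugehörigen JSON-Datei zeigt die Kernschritte:

from pymilvus import model  # erfordert "pip install pymilvus[model]"

# Lädt "all-MiniLM-L6-v2", ein leichtgewichtiges Modell (384 Dimensionen)
ef = model.DefaultEmbeddingFunction()

docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Alan Turing was the first person to conduct substantial research in AI.",
]

embeddings = ef.encode_documents(docs)
print("Dim:", ef.dim, embeddings[0].shape)  # 384 (384,)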

Die Trefferquote (Recall) wird hauptsächlich durch den Indextyp und die Suchparameter beeinflusst.

Bei FLAT-Indizes führt Milvus einen vollständigen Scan innerhalb einer Sammlung durch, mit einer Trefferquote von 100 %.

Bei IVF-Indizes bestimmt der Parameter nprobe den Umfang einer Suche innerhalb der Sammlung. Eine Erhöhung von nprobe erhöht den Anteil der durchsuchten Vektoren und die Trefferquote, verschlechtert aber die Abfrageleistung.

-Beim HNSW-Index bestimmt der Parameter ef die Breite der Graphensuche. Eine Erhöhung von ef erhöht die Anzahl der gesuchten Punkte im Graphen und die Wiederauffindbarkeit, verschlechtert jedoch die Abfrageleistung.
+Beim HNSW-Index bestimmt der Parameter ef die Breite der Graphensuche. Eine Erhöhung von ef erhöht die Anzahl der im Graphen gesuchten Punkte und die Wiederauffindbarkeit, verschlechtert jedoch die Abfrageleistung.

    Weitere Informationen finden Sie unter Vektorindizierung.
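Eine minimale Skizze, wie diese Suchparameter in PyMilvus gesetzt werden (Verbindungsdaten, Sammlungs- und Feldnamen sind hypothetisch):

from pymilvus import Collection, connections

connections.connect(host="localhost", port="19530")
collection = Collection("demo_collection")  # hypothetischer Name
collection.load()

query_vector = [0.0] * 768  # hypothetischer Platzhalter für einen Abfragevektor

# IVF-Index: nprobe steuert, wie viele Cluster durchsucht werden
ivf_results = collection.search(
    data=[query_vector], anns_field="vector",
    param={"metric_type": "L2", "params": {"nprobe": 64}},
    limit=10,
)

# HNSW-Index: ef steuert die Breite der Graphensuche
hnsw_results = collection.search(
    data=[query_vector], anns_field="vector",
    param={"metric_type": "L2", "params": {"ef": 128}},
    limit=10,
)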

    Warum sind meine Änderungen an den Konfigurationsdateien nicht wirksam geworden?

    Milvus unterstützt keine Änderungen an den Konfigurationsdateien während der Laufzeit. Sie müssen Milvus Docker neu starten, damit Änderungen an den Konfigurationsdateien wirksam werden.

Woher weiß ich, ob Milvus erfolgreich gestartet wurde?

-Wenn Milvus mit Docker Compose gestartet wurde, führen Sie docker ps aus, um zu beobachten, wie viele Docker-Container laufen und um zu überprüfen, ob die Milvus-Dienste korrekt gestartet wurden.
+Wenn Milvus unter Verwendung von Docker Compose gestartet wurde, führen Sie docker ps aus, um zu beobachten, wie viele Docker-Container ausgeführt werden, und um zu überprüfen, ob die Milvus-Dienste korrekt gestartet wurden.

    Bei Milvus standalone sollten Sie mindestens drei laufende Docker-Container beobachten können, von denen einer der Milvus-Dienst und die beiden anderen der etcd-Verwaltungs- und Speicherdienst sind. Weitere Informationen finden Sie unter Installieren von Milvus Standalone.
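Ergänzend zu docker ps lässt sich die Erreichbarkeit des Dienstes auch aus Python prüfen (Skizze; Host und Port je nach Deployment):

from pymilvus import connections, utility

# Verbindung zum lokalen Milvus-Dienst aufbauen
connections.connect(host="localhost", port="19530")

# Gibt die Serverversion zurück, wenn der Dienst korrekt gestartet wurde
print(utility.get_server_version())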

    Warum weicht die Zeit in den Protokolldateien von der Systemzeit ab?

    Der Zeitunterschied ist in der Regel darauf zurückzuführen, dass der Host-Rechner nicht die Coordinated Universal Time (UTC) verwendet.

    Die Protokolldateien im Docker-Image verwenden standardmäßig die UTC-Zeit. Wenn Ihr Host-Rechner nicht UTC verwendet, kann dieses Problem auftreten.

    @@ -45,7 +45,7 @@ title: Betriebliche FAQ

    Führen Sie den Befehl lscpu aus, um zu überprüfen, ob Ihre CPU die oben genannten SIMD-Befehlssätze unterstützt:

    $ lscpu | grep -e sse4_2 -e avx -e avx2 -e avx512
     
Warum gibt Milvus während des Starts illegal instruction zurück?

-Milvus setzt voraus, dass Ihre CPU einen SIMD-Befehlssatz unterstützt: SSE4.2, AVX, AVX2, oder AVX512. Die CPU muss mindestens einen dieser Befehle unterstützen, um sicherzustellen, dass Milvus normal funktioniert. Ein illegal instruction Fehler, der während des Starts zurückgegeben wird, deutet darauf hin, dass Ihre CPU keinen der vier oben genannten Befehlssätze unterstützt.
+Milvus erfordert, dass Ihre CPU einen SIMD-Befehlssatz unterstützt: SSE4.2, AVX, AVX2, oder AVX512. Die CPU muss mindestens einen dieser Befehle unterstützen, um sicherzustellen, dass Milvus normal funktioniert. Ein illegal instruction Fehler, der während des Starts zurückgegeben wird, deutet darauf hin, dass Ihre CPU keinen der vier oben genannten Befehlssätze unterstützt.

    Siehe CPU-Unterstützung für SIMD-Befehlssatz.

    Kann ich Milvus unter Windows installieren?

    Ja. Sie können Milvus unter Windows entweder durch Kompilieren aus dem Quellcode oder aus einem Binärpaket installieren.

    Siehe Milvus unter Windows ausführen, um zu erfahren, wie man Milvus unter Windows installiert.

    @@ -72,6 +72,31 @@ title: Betriebliche FAQ
  • Abfrage (exakte Übereinstimmung): Milvus wählt die letzte Entität mit dem passenden PK aus. ANN-Suche: Milvus wählt die Entität mit dem höchsten Ähnlichkeitsscore aus, auch wenn die Entitäten denselben PK haben. Diese Priorisierung kann zu weniger eindeutigen Ergebnissen als dem Limit führen, wenn Ihre Sammlung viele doppelte Primärschlüssel hat.

  • Unzureichende Übereinstimmungen: Ihre Suchfilterausdrücke könnten zu streng sein, was dazu führt, dass weniger Entitäten den Ähnlichkeitsschwellenwert erfüllen. Wenn die für die Suche festgelegten Bedingungen zu restriktiv sind, werden nicht genügend Entitäten übereinstimmen, was zu weniger Ergebnissen als erwartet führt.

MilvusClient("milvus_demo.db") gives an error: ModuleNotFoundError: No module named 'milvus_lite'. Was ist die Ursache für diesen Fehler und wie kann er behoben werden?

Dieser Fehler tritt auf, wenn Sie versuchen, Milvus Lite auf einer Windows-Plattform zu verwenden. Milvus Lite wurde in erster Linie für Linux-Umgebungen entwickelt und bietet möglicherweise keine native Unterstützung für Windows.

Die Lösung besteht darin, eine Linux-Umgebung zu verwenden:

• Verwenden Sie ein Linux-basiertes Betriebssystem oder eine virtuelle Maschine, um Milvus Lite auszuführen.
• Auf diese Weise wird die Kompatibilität mit den Abhängigkeiten und Funktionen der Bibliothek sichergestellt.

    Was sind die "Länge überschreitet maximale Länge"-Fehler in Milvus, und wie können sie verstanden und behoben werden?

    "Length exceeds max length"-Fehler treten in Milvus auf, wenn die Größe eines Datenelements die maximal zulässige Größe für eine Sammlung oder ein Feld überschreitet. Hier sind einige Beispiele und Erklärungen:

• JSON-Feld-Fehler: <MilvusException: (code=1100, message=the length (398324) of json field (metadata) exceeds max length (65536): expected=valid length json string, actual=length exceeds max length: invalid parameter)>
• String-Längenfehler: <ParamError: (code=1, message=invalid input, length of string exceeds max length. length: 74238, max length: 60535)>
• VarChar-Feldfehler: <MilvusException: (code=1100, message=the length (60540) of 0th VarChar paragraph exceeds max length (0)%!(EXTRA int64=60535): invalid parameter)>

Um diese Fehler zu verstehen und zu beheben:

• Verstehen Sie, dass len(str) in Python die Anzahl der Zeichen und nicht die Größe in Bytes angibt.
• Für String-basierte Datentypen wie VARCHAR und JSON verwenden Sie len(bytes(str, encoding='utf-8')), um die tatsächliche Größe in Bytes zu bestimmen, die Milvus für "max-length" verwendet.

Beispiel in Python:

# Python Example: result of len() str cannot be used as "max-length" in Milvus
>>> s = "你好,世界!"
>>> len(s) # Number of characters of s.
6
>>> len(bytes(s, "utf-8")) # Size in bytes of s, max-length in Milvus.
18

    Haben Sie noch Fragen?

    Sie können:

    • Schauen Sie sich Milvus auf GitHub an. Sie können Fragen stellen, Ideen austauschen und anderen helfen.
diff --git a/localization/v2.4.x/site/de/faq/performance_faq.json b/localization/v2.4.x/site/de/faq/performance_faq.json
index 39bf70612..930ad4d1b 100644
--- a/localization/v2.4.x/site/de/faq/performance_faq.json
+++ b/localization/v2.4.x/site/de/faq/performance_faq.json
@@ -1 +1 @@
-{"codeList":[],"headingContent":"","anchorList":[{"label":"Leistung FAQ","href":"Performance-FAQ","type":1,"isActive":false}]}
\ No newline at end of file
+{"codeList":[],"headingContent":"Performance FAQ","anchorList":[{"label":"Leistung FAQ","href":"Performance-FAQ","type":1,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.4.x/site/de/faq/performance_faq.md b/localization/v2.4.x/site/de/faq/performance_faq.md
index 16da901b2..11052c28b 100644
--- a/localization/v2.4.x/site/de/faq/performance_faq.md
+++ b/localization/v2.4.x/site/de/faq/performance_faq.md
@@ -32,15 +32,15 @@ title: Leistung FAQ

      Daher dauert die Abfrage eines kleinen Datensatzes (einer Sammlung) in der Regel länger, da kein Index erstellt wurde. Der Grund dafür ist, dass die Größe der Segmente nicht den von rootCoord.minSegmentSizeToEnableindex festgelegten Schwellenwert für die Indexerstellung erreicht hat. Rufen Sie create_index() auf, um Milvus zu zwingen, Segmente zu indizieren, die den Schwellenwert erreicht haben, aber noch nicht automatisch indiziert wurden, was die Abfrageleistung erheblich verbessert.

      Welche Faktoren beeinflussen die CPU-Auslastung?

      Die CPU-Auslastung steigt, wenn Milvus Indizes aufbaut oder Abfragen ausführt. Im Allgemeinen ist die Indexerstellung CPU-intensiv, außer bei Verwendung von Annoy, das auf einem einzigen Thread läuft.

      Bei der Ausführung von Abfragen wird die CPU-Auslastung durch nq und nprobe beeinflusst. Wenn nq und nprobe klein sind, ist die Gleichzeitigkeit gering und die CPU-Auslastung bleibt niedrig.

Wirkt sich das gleichzeitige Einfügen von Daten und Suchen auf die Abfrageleistung aus?

-Einfügevorgänge sind nicht CPU-intensiv. Da jedoch neue Segmente möglicherweise noch nicht den Schwellenwert für den Indexaufbau erreicht haben, greift Milvus auf eine Brute-Force-Suche zurück, was die Abfrageleistung erheblich beeinträchtigt.
+Einfügevorgänge sind nicht CPU-intensiv. Da jedoch neue Segmente möglicherweise noch nicht den Schwellenwert für den Indexaufbau erreicht haben, greift Milvus auf die Brute-Force-Suche zurück, was die Abfrageleistung erheblich beeinträchtigt.

      Der Parameter rootcoord.minSegmentSizeToEnableIndex bestimmt den Schwellenwert für die Indexerstellung für ein Segment und ist standardmäßig auf 1024 Zeilen eingestellt. Siehe Systemkonfiguration für weitere Informationen.

      Wird der Speicherplatz nach dem Löschen von Daten in Milvus sofort wieder freigegeben?

      Nein, der Speicherplatz wird nicht sofort freigegeben, wenn Sie Daten in Milvus löschen. Obwohl das Löschen von Daten Entitäten als "logisch gelöscht" kennzeichnet, wird der tatsächliche Speicherplatz möglicherweise nicht sofort freigegeben. Dies ist der Grund:

      • Verdichtung: Milvus komprimiert Daten automatisch im Hintergrund. Bei diesem Prozess werden kleinere Datensegmente zu größeren zusammengeführt und logisch gelöschte Daten (zum Löschen markierte Entitäten) oder Daten, die ihre Time-To-Live (TTL) überschritten haben, entfernt. Bei der Verdichtung werden jedoch neue Segmente erstellt, während alte als "fallengelassen" markiert werden.
-• Garbage Collection: Ein separater Prozess namens Garbage Collection (GC) entfernt diese "Dropped"-Segmente in regelmäßigen Abständen und gibt so den von ihnen belegten Speicherplatz wieder frei. Dies gewährleistet eine effiziente Nutzung des Speichers, kann aber zu einer leichten Verzögerung zwischen dem Löschen und der Wiedergewinnung von Speicherplatz führen.
+• Garbage Collection: Ein separater Prozess namens Garbage Collection (GC) entfernt diese "Dropped"-Segmente in regelmäßigen Abständen und gibt den von ihnen belegten Speicherplatz wieder frei. Dies gewährleistet eine effiziente Nutzung des Speichers, kann aber zu einer leichten Verzögerung zwischen dem Löschen und der Wiedergewinnung von Speicherplatz führen.

      Kann ich eingefügte, gelöschte oder hochgeladene Daten sofort nach dem Vorgang sehen, ohne auf einen Flush zu warten?

      Ja, in Milvus ist die Datentransparenz aufgrund der Disaggregationsarchitektur von Storage und Compute nicht direkt an Flush-Vorgänge gebunden. Sie können die Lesbarkeit der Daten über Konsistenzstufen verwalten.

-Bei der Auswahl einer Konsistenzstufe sollten Sie die Kompromisse zwischen Konsistenz und Leistung berücksichtigen. Für Operationen, die eine sofortige Sichtbarkeit erfordern, verwenden Sie eine "starke" Konsistenzstufe. Für schnellere Schreibvorgänge sollten Sie eine schwächere Konsistenz bevorzugen (die Daten sind möglicherweise nicht sofort sichtbar). Weitere Informationen finden Sie unter Konsistenz.
+Bei der Auswahl einer Konsistenzstufe sollten Sie die Kompromisse zwischen Konsistenz und Leistung berücksichtigen. Für Vorgänge, die eine sofortige Sichtbarkeit erfordern, sollten Sie eine "starke" Konsistenzstufe verwenden. Für schnellere Schreibvorgänge sollten Sie eine schwächere Konsistenz bevorzugen (die Daten sind möglicherweise nicht sofort sichtbar). Weitere Informationen finden Sie unter Konsistenz.

      Kann die Indizierung eines VARCHAR-Feldes die Löschgeschwindigkeit verbessern?

      Die Indizierung eines VARCHAR-Feldes kann die "Delete By Expression"-Operationen beschleunigen, aber nur unter bestimmten Bedingungen:

      • INVERTED Index: Dieser Index hilft bei IN oder == Ausdrücken auf VARCHAR-Feldern mit nicht primären Schlüsseln.
diff --git a/localization/v2.4.x/site/de/faq/product_faq.json b/localization/v2.4.x/site/de/faq/product_faq.json
index 186741457..92ccb79da 100644
--- a/localization/v2.4.x/site/de/faq/product_faq.json
+++ b/localization/v2.4.x/site/de/faq/product_faq.json
@@ -1 +1 @@
-{"codeList":["60 * 2 * 4 + 40 * 1 * 12 = 960\n"],"headingContent":"","anchorList":[{"label":"Produkt-FAQ","href":"Product-FAQ","type":1,"isActive":false}]}
\ No newline at end of file
+{"codeList":["60 * 2 * 4 + 40 * 1 * 12 = 960\n"],"headingContent":"Product FAQ","anchorList":[{"label":"Produkt-FAQ","href":"Product-FAQ","type":1,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.4.x/site/de/faq/product_faq.md b/localization/v2.4.x/site/de/faq/product_faq.md
index 0cd23208c..da819b5b3 100644
--- a/localization/v2.4.x/site/de/faq/product_faq.md
+++ b/localization/v2.4.x/site/de/faq/product_faq.md
@@ -22,7 +22,7 @@ title: Produkt-FAQ

        Wie viel kostet Milvus?

        Milvus ist ein 100% kostenloses Open-Source-Projekt.

        Bitte halten Sie sich an die Apache License 2.0, wenn Sie Milvus für Produktions- oder Vertriebszwecke verwenden.

-Zilliz, das Unternehmen hinter Milvus, bietet auch eine vollständig verwaltete Cloud-Version der Plattform für diejenigen an, die keine eigene verteilte Instanz aufbauen und warten möchten. Zilliz Cloud sorgt automatisch für die Zuverlässigkeit der Daten und ermöglicht es den Nutzern, nur für das zu zahlen, was sie verwenden.
+Zilliz, das Unternehmen hinter Milvus, bietet auch eine vollständig verwaltete Cloud-Version der Plattform für diejenigen an, die keine eigene verteilte Instanz erstellen und warten möchten. Zilliz Cloud sorgt automatisch für die Zuverlässigkeit der Daten und ermöglicht es den Nutzern, nur für das zu zahlen, was sie verwenden.

        Unterstützt Milvus nicht-x86-Architekturen?

        Milvus kann nicht auf nicht-x86 Plattformen installiert oder ausgeführt werden.

        Ihre CPU muss einen der folgenden Befehlssätze unterstützen, um Milvus auszuführen: SSE4.2, AVX, AVX2, AVX512. Dies sind alles x86-spezifische SIMD-Befehlssätze.

        Wie groß ist die maximale Datenmenge, die Milvus verarbeiten kann?

        Theoretisch wird die maximale Größe der Datenmenge, die Milvus verarbeiten kann, durch die Hardware bestimmt, auf der es läuft, insbesondere durch den Systemspeicher und den Speicher:

        @@ -36,7 +36,7 @@ title: Produkt-FAQ

        Warum gibt es keine Vektordaten in etcd?

        etcd speichert Milvus-Modul-Metadaten; MinIO speichert Entitäten.

        Unterstützt Milvus das gleichzeitige Einfügen und Suchen von Daten?

        Ja. Einfügeoperationen und Abfrageoperationen werden von zwei separaten Modulen gehandhabt, die voneinander unabhängig sind. Aus der Sicht des Clients ist ein Einfügevorgang abgeschlossen, wenn die eingefügten Daten in die Nachrichtenwarteschlange gelangen. Die eingefügten Daten sind jedoch nicht durchsuchbar, bis sie in den Abfrageknoten geladen werden. Erreicht die Segmentgröße nicht den Schwellenwert für die Indexerstellung (standardmäßig 512 MB), greift Milvus auf die Brute-Force-Suche zurück, und die Abfrageleistung kann beeinträchtigt werden.

        Können Vektoren mit doppelten Primärschlüsseln in Milvus eingefügt werden?

        Ja. Milvus prüft nicht, ob es sich bei den Primärschlüsseln der Vektoren um Duplikate handelt.

Wenn Vektoren mit doppelten Primärschlüsseln eingefügt werden, behandelt Milvus dies dann als Aktualisierungsvorgang?

-Nein. Milvus unterstützt derzeit keine Aktualisierungsoperationen und prüft nicht, ob Entity-Primärschlüssel doppelt vorhanden sind. Sie sind dafür verantwortlich, dass die Primärschlüssel der Entitäten eindeutig sind, und wenn sie es nicht sind, kann Milvus mehrere Entitäten mit doppelten Primärschlüsseln enthalten.
+Nein. Milvus unterstützt derzeit keine Aktualisierungsoperationen und prüft nicht, ob Entity-Primärschlüssel Duplikate sind. Sie sind dafür verantwortlich, dass die Primärschlüssel der Entitäten eindeutig sind, und wenn sie es nicht sind, kann Milvus mehrere Entitäten mit doppelten Primärschlüsseln enthalten.

        In diesem Fall bleibt unbekannt, welche Datenkopie bei einer Abfrage zurückgegeben wird. Diese Einschränkung wird in zukünftigen Versionen behoben werden.

        Wie lang dürfen selbst definierte Entitätsprimärschlüssel maximal sein?

        Entitäts-Primärschlüssel müssen nicht-negative 64-Bit-Ganzzahlen sein.

        Wie groß ist die maximale Datenmenge, die pro Einfügevorgang hinzugefügt werden kann?

        Ein Einfügevorgang darf eine Größe von 1.024 MB nicht überschreiten. Dies ist eine von gRPC festgelegte Grenze.

        @@ -64,14 +64,14 @@ title: Produkt-FAQ

        Um dies zu vermeiden, versuchen Sie, nprobe größer und nlist und k kleiner zu setzen.

        Weitere Informationen finden Sie unter Vektorindex.

        Was ist die maximale Vektordimension, die Milvus unterstützt?

        Milvus kann standardmäßig Vektoren mit bis zu 32.768 Dimensionen verwalten. Sie können den Wert von Proxy.maxDimension erhöhen, um einen Vektor mit einer größeren Dimension zu ermöglichen.
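Zur Einordnung eine kurze Skizze mit dem MilvusClient (URI und Sammlungsname sind hypothetisch); für Dimensionen oberhalb von 32.768 muss zuvor Proxy.maxDimension erhöht werden:

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")

# Bis zu 32.768 Dimensionen funktionieren mit der Standardkonfiguration
client.create_collection(collection_name="wide_vectors", dimension=32768)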

Unterstützt Milvus die Apple M1 CPU?

-Die aktuelle Milvus-Version unterstützt keine Apple M1 CPU.
+Die aktuelle Milvus-Version unterstützt die Apple M1-CPU nicht direkt. Nach Milvus 2.3 bietet Milvus Docker-Images für die ARM64-Architektur.

        Welche Datentypen unterstützt Milvus für das Primärschlüsselfeld?

        In der aktuellen Version unterstützt Milvus sowohl INT64 als auch String.

        Ist Milvus skalierbar?

        Ja. Sie können Milvus-Cluster mit mehreren Knoten über Helm Chart auf Kubernetes bereitstellen. Weitere Anweisungen finden Sie im Scale Guide.

        Wird die Abfrage im Speicher ausgeführt? Was sind inkrementelle Daten und historische Daten?

        Ja. Bei einer Abfrage durchsucht Milvus sowohl inkrementelle Daten als auch historische Daten, indem es sie in den Speicher lädt. Inkrementelle Daten befinden sich in den wachsenden Segmenten, die im Speicher gepuffert werden, bevor sie den Schwellenwert für die Persistenz im Speichermodul erreichen, während historische Daten aus den versiegelten Segmenten stammen, die im Objektspeicher gespeichert sind. Inkrementelle Daten und historische Daten bilden zusammen den gesamten zu durchsuchenden Datensatz.

        Ja. Bei Abfragen in derselben Sammlung durchsucht Milvus gleichzeitig die inkrementellen und historischen Daten. Abfragen zu verschiedenen Sammlungen werden jedoch nacheinander durchgeführt. Da es sich bei den historischen Daten um einen extrem großen Datenbestand handeln kann, sind die Abfragen auf den historischen Daten relativ zeitaufwändiger und werden im Wesentlichen in Serien durchgeführt.

        Warum bleiben die Daten in MinIO erhalten, nachdem die entsprechende Sammlung gelöscht wurde?

        Die Daten in MinIO sind so konzipiert, dass sie für einen bestimmten Zeitraum gespeichert bleiben, um ein Rollback der Daten zu ermöglichen.

        Unterstützt Milvus auch andere Message Engines als Pulsar?

        Ja. Kafka wird in Milvus 2.1.0 unterstützt.

Was ist der Unterschied zwischen einer Suche und einer Abfrage?

-In Milvus findet eine Vektorähnlichkeitssuche Vektoren auf der Grundlage von Ähnlichkeitsberechnungen und Vektorindexbeschleunigung. Im Gegensatz zu einer Vektorähnlichkeitssuche werden bei einer Vektorabfrage Vektoren durch skalare Filterung auf der Grundlage eines booleschen Ausdrucks abgerufen. Der boolesche Ausdruck filtert nach skalaren Feldern oder dem Primärschlüsselfeld und ruft alle Ergebnisse ab, die den Filtern entsprechen. Bei einer Abfrage sind weder Ähnlichkeitsmetriken noch ein Vektorindex beteiligt.
+In Milvus findet eine Vektorähnlichkeitssuche Vektoren auf der Grundlage von Ähnlichkeitsberechnungen und Vektorindexbeschleunigung. Im Gegensatz zu einer Vektorähnlichkeitssuche werden bei einer Vektorabfrage Vektoren durch skalare Filterung auf der Grundlage eines booleschen Ausdrucks abgerufen. Der boolesche Ausdruck filtert auf skalare Felder oder das Primärschlüsselfeld und ruft alle Ergebnisse ab, die den Filtern entsprechen. Bei einer Abfrage sind weder Ähnlichkeitsmetriken noch ein Vektorindex beteiligt.

        Why does a float vector value in Milvus have a precision of 7 decimal digits?

        Milvus supports storing vectors as Float32 arrays. A Float32 value has a precision of 7 decimal digits. Even with a Float64 value, such as 1.3476964684980388, Milvus stores it as 1.347696. Therefore, when you retrieve such a vector from Milvus, the precision of the Float64 value is lost.
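
        The effect is easy to reproduce outside Milvus with NumPy (a quick sketch; NumPy is only used here for demonstration):

        import numpy as np

        x = 1.3476964684980388   # a float64 literal
        print(np.float32(x))     # 1.3476964 -- roughly 7 significant decimal digits survive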

        How does Milvus handle vector data types and precision?

        Milvus supports the Binary, Float32, Float16, and BFloat16 vector types.
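
        For illustration, a hedged schema sketch (field names and dimensions are hypothetical) showing how these types appear in pymilvus's DataType enum:

        from pymilvus import MilvusClient, DataType

        schema = MilvusClient.create_schema(auto_id=True)
        schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
        schema.add_field(field_name="vec_f32", datatype=DataType.FLOAT_VECTOR, dim=128)
        schema.add_field(field_name="vec_f16", datatype=DataType.FLOAT16_VECTOR, dim=128)
        schema.add_field(field_name="vec_bf16", datatype=DataType.BFLOAT16_VECTOR, dim=128)
        schema.add_field(field_name="vec_bin", datatype=DataType.BINARY_VECTOR, dim=128)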

          diff --git a/localization/v2.4.x/site/de/getstarted/install_SDKs/install-java.json b/localization/v2.4.x/site/de/getstarted/install_SDKs/install-java.json index 62dff1b91..c41ae88ac 100644 --- a/localization/v2.4.x/site/de/getstarted/install_SDKs/install-java.json +++ b/localization/v2.4.x/site/de/getstarted/install_SDKs/install-java.json @@ -1 +1 @@ -{"codeList":["\n io.milvus\n milvus-sdk-java\n 2.4.3\n\n","implementation 'io.milvus:milvus-sdk-java:2.4.3'\n"],"headingContent":"","anchorList":[{"label":"Milvus Java SDK installieren","href":"Install-Milvus-Java-SDK","type":1,"isActive":false},{"label":"Anforderung","href":"Requirement","type":2,"isActive":false},{"label":"Milvus Java SDK installieren","href":"Install-Milvus-Java-SDK","type":2,"isActive":false},{"label":"Wie geht es weiter?","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["\n io.milvus\n milvus-sdk-java\n 2.4.5\n\n","implementation 'io.milvus:milvus-sdk-java:2.4.5'\n"],"headingContent":"Install Milvus Java SDK","anchorList":[{"label":"Milvus Java SDK installieren","href":"Install-Milvus-Java-SDK","type":1,"isActive":false},{"label":"Anforderung","href":"Requirement","type":2,"isActive":false},{"label":"Milvus Java SDK installieren","href":"Install-Milvus-Java-SDK","type":2,"isActive":false},{"label":"Wie geht es weiter?","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/getstarted/install_SDKs/install-java.md b/localization/v2.4.x/site/de/getstarted/install_SDKs/install-java.md index 47ff30840..2e9c9745b 100644 --- a/localization/v2.4.x/site/de/getstarted/install_SDKs/install-java.md +++ b/localization/v2.4.x/site/de/getstarted/install_SDKs/install-java.md @@ -63,13 +63,13 @@ title: Milvus Java SDK installieren
          <dependency>
               <groupId>io.milvus</groupId>
               <artifactId>milvus-sdk-java</artifactId>
          -    <version>2.4.3</version>
          +    <version>2.4.5</version>
           </dependency>
           
          • Gradle/Grails
          -
          implementation 'io.milvus:milvus-sdk-java:2.4.3'
          +
          implementation 'io.milvus:milvus-sdk-java:2.4.5'
           

          What's next?

          PyMilvus is available in the Python Package Index.

          It is recommended to install a PyMilvus version that matches the version of the Milvus server you have installed. For more information, see the Release Notes.
          -
          $ python3 -m pip install pymilvus==2.4.5
          +
          $ python3 -m pip install pymilvus==2.4.8
           

          Verify the installation

          Open In Colab

          +

          Open In Colab +GitHub Repository

          Vectors, the output data format of neural network models, can effectively encode information and play a pivotal role in AI applications such as knowledge bases, semantic search, Retrieval-Augmented Generation (RAG) and more.

          Milvus is an open-source vector database that suits AI applications of every size, from running a demo chatbot in a Jupyter notebook to building web-scale search that serves billions of users. In this guide, we will walk you through how to set up Milvus locally within minutes and use the Python client library to generate, store and search vectors.

          Install Milvus

          You can also conduct vector search while taking the values of the metadata into consideration (called "scalar" fields in Milvus, as scalar refers to non-vector data). This is done with a filter expression specifying certain criteria. The following example shows how to search and filter with the subject field.

          +

          You can also conduct vector search while taking the values of the metadata (called "scalar" fields in Milvus, as scalar refers to non-vector data) into consideration. This is done with a filter expression specifying certain criteria. The following example shows how to search and filter with the subject field.

          # Insert more docs in another subject.
           docs = [
               "Machine learning has been used for drug design.",
          @@ -299,7 +301,7 @@ res = client.search(
           
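
          A hedged sketch of the filtered search itself (the query embedding is a placeholder; client and demo_collection are assumed from the quickstart steps above):

          # Search only among docs whose subject matches the filter expression.
          res = client.search(
              collection_name="demo_collection",
              data=[[0.1] * 768],                # hypothetical query embedding
              filter="subject == 'biology'",     # scalar filter expression
              limit=2,
              output_fields=["text", "subject"],
          )
          print(res)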

          By default, the scalar fields are not indexed. If you need to perform a metadata-filtered search in a large dataset, you can consider using a fixed schema and also turn on the index to improve the search performance.

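          A hedged sketch of turning on such a scalar index (assuming subject is declared in a fixed schema, as the paragraph recommends; INVERTED is one of the scalar index types in Milvus 2.4):

          from pymilvus import MilvusClient

          client = MilvusClient("milvus_demo.db")

          # Build an inverted index on the scalar field used in filter expressions.
          index_params = client.prepare_index_params()
          index_params.add_index(field_name="subject", index_type="INVERTED")
          client.create_index(collection_name="demo_collection", index_params=index_params)
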
          In addition to vector search, you can also perform other types of searches:

          -

          Query

          A query() is an operation that retrieves all the entities matching a criterion, such as a filter expression or some IDs.

          +

          Query

          A query() is an operation that retrieves all the entities matching a criterion, such as a filter expression or some IDs.

          For example, to retrieve all entities whose scalar field has a particular value:

          res = client.query(
               collection_name="demo_collection",
          @@ -400,7 +402,7 @@ client.drop_collection(collection_name="demo_coll
                     d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
                   >
                 
          -    

          Milvus Lite is great for getting started with a local Python program. If you have large-scale data or would like to use Milvus in production, you can learn about deploying Milvus on Docker and Kubernetes. All deployment modes of Milvus share the same API, so your client-side code doesn't need to change much when moving to another deployment mode. Simply specify the URI and token of a Milvus server deployed anywhere:

          +

          Milvus Lite is great for getting started with a local Python program. If you have large-scale data or would like to use Milvus in production, you can learn more about deploying Milvus on Docker and Kubernetes. All deployment modes of Milvus share the same API, so your client-side code doesn't need to change much when moving to another deployment mode. Simply specify the URI and token of a Milvus server deployed anywhere:

          client = MilvusClient(uri="http://localhost:19530", token="root:Milvus")
           

          Milvus provides a REST and gRPC API, with client libraries in languages such as Python, Java, Go, C# and Node.js.

          diff --git a/localization/v2.4.x/site/de/getstarted/run-milvus-docker/install_standalone-docker-compose.json b/localization/v2.4.x/site/de/getstarted/run-milvus-docker/install_standalone-docker-compose.json index b008a06cb..824b1f2c9 100644 --- a/localization/v2.4.x/site/de/getstarted/run-milvus-docker/install_standalone-docker-compose.json +++ b/localization/v2.4.x/site/de/getstarted/run-milvus-docker/install_standalone-docker-compose.json @@ -1 +1 @@ -{"codeList":["# Download the configuration file\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml\n\n# Start Milvus\n$ sudo docker compose up -d\n\nCreating milvus-etcd ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker compose ps\n\n Name Command State Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd etcd -advertise-client-url ... Up 2379/tcp, 2380/tcp\nmilvus-minio /usr/bin/docker-entrypoint ... Up (healthy) 9000/tcp\nmilvus-standalone /tini -- milvus run standalone Up 0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","# Stop Milvus\n$ sudo docker compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n"],"headingContent":"","anchorList":[{"label":"Milvus mit Docker Compose starten","href":"Run-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"Voraussetzungen","href":"Prerequisites","type":2,"isActive":false},{"label":"Milvus installieren","href":"Install-Milvus","type":2,"isActive":false},{"label":"Wie geht es weiter?","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["# Download the configuration file\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml\n\n# Start Milvus\n$ sudo docker-compose up -d\n\nCreating milvus-etcd ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker-compose ps\n\n Name Command State Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd etcd -advertise-client-url ... Up 2379/tcp, 2380/tcp\nmilvus-minio /usr/bin/docker-entrypoint ... 
Up (healthy) 9000/tcp\nmilvus-standalone /tini -- milvus run standalone Up 0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","# Stop Milvus\n$ sudo docker-compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n"],"headingContent":"Run Milvus with Docker Compose","anchorList":[{"label":"Milvus mit Docker Compose starten","href":"Run-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"Voraussetzungen","href":"Prerequisites","type":2,"isActive":false},{"label":"Milvus installieren","href":"Install-Milvus","type":2,"isActive":false},{"label":"Wie geht es weiter?","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/getstarted/run-milvus-docker/install_standalone-docker-compose.md b/localization/v2.4.x/site/de/getstarted/run-milvus-docker/install_standalone-docker-compose.md index 6ac1c3a3f..f697980d7 100644 --- a/localization/v2.4.x/site/de/getstarted/run-milvus-docker/install_standalone-docker-compose.md +++ b/localization/v2.4.x/site/de/getstarted/run-milvus-docker/install_standalone-docker-compose.md @@ -3,7 +3,7 @@ id: install_standalone-docker-compose.md label: Docker Compose related_key: Docker Compose summary: 'Erfahren Sie, wie Sie Milvus eigenständig mit Docker Compose installieren.' -title: Milvus mit Docker Compose ausführen +title: Milvus mit Docker Compose starten ---

          Run Milvus with Docker Compose

          This page illustrates how to launch a Milvus instance in Docker using Docker Compose.

          +

          This page illustrates how you can launch a Milvus instance in Docker using Docker Compose.

          Prerequisites

          Install Milvus

          Milvus provides a Docker Compose configuration file in the Milvus repository. To install Milvus using Docker Compose, simply run

          # Download the configuration file
          -$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml
          +$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml
           
           # Start Milvus
          -$ sudo docker compose up -d
          +$ sudo docker-compose up -d
           
           Creating milvus-etcd  ... done
           Creating milvus-minio ... done
          @@ -81,7 +81,7 @@ Creating milvus-standalone ... done
           

      You can check whether the containers are up and running with the following command:

      -
      $ sudo docker compose ps
      +
      $ sudo docker-compose ps
       
             Name                     Command                  State                            Ports
       --------------------------------------------------------------------------------------------------------------------
      @@ -91,7 +91,7 @@ milvus-standalone   /tini -- milvus run standalone   Up             0.0.0.0:1953
       

      You can stop and delete this container as follows

      # Stop Milvus
      -$ sudo docker compose down
      +$ sudo docker-compose down
       
       # Delete service data
       $ sudo rm -rf volumes
      diff --git a/localization/v2.4.x/site/de/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json b/localization/v2.4.x/site/de/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json
      index 24a8619fa..003c7983f 100644
      --- a/localization/v2.4.x/site/de/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json
      +++ b/localization/v2.4.x/site/de/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json
      @@ -1 +1 @@
      -{"codeList":["$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml\n","...\nstandalone:\n  ...\n  deploy:\n    resources:\n      reservations:\n        devices:\n          - driver: nvidia\n            capabilities: [\"gpu\"]\n            device_ids: [\"0\"]\n...\n","...\nstandalone:\n  ...\n  deploy:\n    resources:\n      reservations:\n        devices:\n          - driver: nvidia\n            capabilities: [\"gpu\"]\n            device_ids: ['0', '1']\n...\n","$ sudo docker compose up -d\n\nCreating milvus-etcd  ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker compose ps\n\n      Name                     Command                  State                            Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd         etcd -advertise-client-url ...   Up             2379/tcp, 2380/tcp\nmilvus-minio        /usr/bin/docker-entrypoint ...   Up (healthy)   9000/tcp\nmilvus-standalone   /tini -- milvus run standalone   Up             0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","$ CUDA_VISIBLE_DEVICES=0 ./milvus run standalone\n","$ CUDA_VISIBLE_DEVICES=0,1 ./milvus run standalone\n","# Stop Milvus\n$ sudo docker compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n","docker cp :/milvus/configs/milvus.yaml milvus.yaml\n","vim milvus.yaml\n","...\ngpu:\n  initMemSize: 0\n  maxMemSize: 0\n...\n","docker cp milvus.yaml :/milvus/configs/milvus.yaml\n","docker stop \ndocker start \n"],"headingContent":"","anchorList":[{"label":"Milvus mit GPU-Unterstützung mit Docker Compose starten","href":"Run-Milvus-with-GPU-Support-Using-Docker-Compose","type":1,"isActive":false},{"label":"Voraussetzungen","href":"Prerequisites","type":2,"isActive":false},{"label":"Milvus installieren","href":"Install-Milvus","type":2,"isActive":false},{"label":"Speicherpool konfigurieren","href":"Configure-memory-pool","type":2,"isActive":false},{"label":"Wie geht es weiter?","href":"Whats-next","type":2,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":["$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml\n","...\nstandalone:\n  ...\n  deploy:\n    resources:\n      reservations:\n        devices:\n          - driver: nvidia\n            capabilities: [\"gpu\"]\n            device_ids: [\"0\"]\n...\n","...\nstandalone:\n  ...\n  deploy:\n    resources:\n      reservations:\n        devices:\n          - driver: nvidia\n            capabilities: [\"gpu\"]\n            device_ids: ['0', '1']\n...\n","$ sudo docker compose up -d\n\nCreating milvus-etcd  ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker compose ps\n\n      Name                     Command                  State                            Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd         etcd -advertise-client-url ...   Up             2379/tcp, 2380/tcp\nmilvus-minio        /usr/bin/docker-entrypoint ...   Up (healthy)   9000/tcp\nmilvus-standalone   /tini -- milvus run standalone   Up             0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","$ CUDA_VISIBLE_DEVICES=0 ./milvus run standalone\n","$ CUDA_VISIBLE_DEVICES=0,1 ./milvus run standalone\n","# Stop Milvus\n$ sudo docker compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n","docker cp :/milvus/configs/milvus.yaml milvus.yaml\n","vim milvus.yaml\n","...\ngpu:\n  initMemSize: 0\n  maxMemSize: 0\n...\n","docker cp milvus.yaml :/milvus/configs/milvus.yaml\n","docker stop \ndocker start \n"],"headingContent":"Run Milvus with GPU Support Using Docker Compose","anchorList":[{"label":"Milvus mit GPU-Unterstützung mit Docker Compose starten","href":"Run-Milvus-with-GPU-Support-Using-Docker-Compose","type":1,"isActive":false},{"label":"Voraussetzungen","href":"Prerequisites","type":2,"isActive":false},{"label":"Milvus installieren","href":"Install-Milvus","type":2,"isActive":false},{"label":"Speicherpool konfigurieren","href":"Configure-memory-pool","type":2,"isActive":false},{"label":"Wie geht es weiter?","href":"Whats-next","type":2,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/de/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md b/localization/v2.4.x/site/de/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md
      index 74224933b..33fba599b 100644
      --- a/localization/v2.4.x/site/de/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md
      +++ b/localization/v2.4.x/site/de/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md
      @@ -3,7 +3,7 @@ id: install_standalone-docker-compose-gpu.md
       label: Standalone (Docker Compose)
       related_key: Kubernetes
       summary: 'Learn how to install Milvus cluster on Kubernetes.'
      -title: Running Milvus with GPU Support Using Docker Compose
      +title: Run Milvus with GPU Support Using Docker Compose
       ---
       

      Run Milvus with GPU Support Using Docker Compose

      To install Milvus with GPU support using Docker Compose, follow the steps below.

      -

      1. Download and configure the YAML file

      Download milvus-standalone-docker-compose-gpu.yml and save it as docker-compose.yml manually, or with the following command.

      -
      $ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml
      +

      1. Download and configure the YAML file

      Download milvus-standalone-docker-compose-gpu.yml and save it as docker-compose.yml manually, or with the following command.

      +
      $ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml
       

      You need to make some changes to the environment variables of the standalone service in the YAML file as follows:

        diff --git a/localization/v2.4.x/site/de/integrations/evaluation_with_deepeval.md b/localization/v2.4.x/site/de/integrations/evaluation_with_deepeval.md index 560732433..68a65bcad 100644 --- a/localization/v2.4.x/site/de/integrations/evaluation_with_deepeval.md +++ b/localization/v2.4.x/site/de/integrations/evaluation_with_deepeval.md @@ -20,7 +20,8 @@ title: Evaluation with DeepEval

        Open In Colab

        +

        Open In Colab +GitHub Repository

        This guide demonstrates how to use DeepEval to evaluate a Retrieval-Augmented Generation (RAG) pipeline built upon Milvus.

        The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

        DeepEval is a framework that helps you evaluate your RAG pipelines. There are existing tools and frameworks that help you build these pipelines, but evaluating them and quantifying your pipeline performance can be hard. This is where DeepEval comes in.

        @@ -394,7 +395,7 @@ Evaluating 3 test case(s) in parallel: |██████████|100% (3/3

        To evaluate the quality of the generated outputs in large language models (LLMs), it is important to focus on two key aspects:

        1. Relevancy: Evaluate whether the prompt effectively guides the LLM to generate helpful and contextually appropriate responses.

        -

        2. Faithfulness: Measure the accuracy of the output, ensuring the model produces information that is factually accurate and free of hallucinations or contradictions. The generated content should align with the factual information provided in the retrieval context.

        +

        2. Faithfulness: Measure the accuracy of the output, ensuring the model produces information that is factually correct and free of hallucinations or contradictions. The generated content should align with the factual information provided in the retrieval context.

        Together, these factors ensure that the outputs are both relevant and reliable.

        from deepeval.metrics import AnswerRelevancyMetric, FaithfulnessMetric
        @@ -422,7 +423,7 @@ result = evaluate(
             print_results=False,  # Change to True to see detailed metric results
         )
         
        -
        ✨ You're using DeepEval's latest Answer Relevancy Metric! (using gpt-4o, strict=False, async_mode=True)...
        +
        ✨ You're using DeepEval's latest Answer Relevancy Metric! (using gpt-4o, strict=False, async_mode=True)...
        ✨ You're running DeepEval's latest Faithfulness Metric! (using gpt-4o, strict=False, async_mode=True)...
        Event loop is already running. Applying nest_asyncio patch to allow async execution...
         
        @@ -430,4 +431,4 @@ result = evaluate(
         Evaluating 3 test case(s) in parallel: |██████████|100% (3/3) [Time Taken: 00:11,  3.97s/test case]
         
         Tests finished 🎉! Run 'deepeval login' to view evaluation results on Confident AI. 
        -‼️ NOTE: You can also run evaluations for ALL deepeval metrics directly on Confident AI instead.
        +‼️ NOTE: You can also run evaluations on ALL deepeval metrics directly on Confident AI instead.
      diff --git a/localization/v2.4.x/site/de/integrations/evaluation_with_phoenix.md b/localization/v2.4.x/site/de/integrations/evaluation_with_phoenix.md index 57935a637..097ab98bf 100644 --- a/localization/v2.4.x/site/de/integrations/evaluation_with_phoenix.md +++ b/localization/v2.4.x/site/de/integrations/evaluation_with_phoenix.md @@ -21,9 +21,10 @@ title: Evaluation with Arize Phoenix

      Open In Colab

      +

      Open In Colab +GitHub Repository

      This guide demonstrates how to use Arize Phoenix to evaluate a Retrieval-Augmented Generation (RAG) pipeline built upon Milvus.

      -

      The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first searches a corpus for relevant documents using Milvus, and then uses a generative model to generate new text based on the documents found.

      +

      The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

      Arize Phoenix is a framework that helps you evaluate your RAG pipelines. There are existing tools and frameworks that help you build these pipelines, but evaluating them and quantifying your pipeline performance can be hard. This is where Arize Phoenix comes in.

      Prerequisites

      - Alt Text + Alt Text Alt-Text

      import nest_asyncio
       
      @@ -460,7 +461,7 @@ results_df.head()
             2
             What should be done before running code coverage...
             [Code coverage\n\nBefore submitting your pull ...
      -      Before the code coverage is run, it should...
      +      Before the code coverage is executed, it should...
             Before you run code coverage, you should ...
             [Code coverage\n\nBefore submitting your pull ...
             [Codeabdeckung\n\nBefore submitting your pull ...
      diff --git a/localization/v2.4.x/site/de/integrations/integrate_with_bentoml.json b/localization/v2.4.x/site/de/integrations/integrate_with_bentoml.json
      index c2998057a..237772a65 100644
      --- a/localization/v2.4.x/site/de/integrations/integrate_with_bentoml.json
      +++ b/localization/v2.4.x/site/de/integrations/integrate_with_bentoml.json
      @@ -1 +1 @@
      -{"codeList":["$ pip install -U pymilvus bentoml\n","import bentoml\n\nBENTO_EMBEDDING_MODEL_END_POINT = \"BENTO_EMBEDDING_MODEL_END_POINT\"\nBENTO_API_TOKEN = \"BENTO_API_TOKEN\"\n\nembedding_client = bentoml.SyncHTTPClient(\n    BENTO_EMBEDDING_MODEL_END_POINT, token=BENTO_API_TOKEN\n)\n","# naively chunk on newlines\ndef chunk_text(filename: str) -> list:\n    with open(filename, \"r\") as f:\n        text = f.read()\n    sentences = text.split(\"\\n\")\n    return sentences\n","import os\nimport requests\nimport urllib.request\n\n# set up the data source\nrepo = \"ytang07/bento_octo_milvus_RAG\"\ndirectory = \"data\"\nsave_dir = \"./city_data\"\napi_url = f\"https://api.github.com/repos/{repo}/contents/{directory}\"\n\n\nresponse = requests.get(api_url)\ndata = response.json()\n\nif not os.path.exists(save_dir):\n    os.makedirs(save_dir)\n\nfor item in data:\n    if item[\"type\"] == \"file\":\n        file_url = item[\"download_url\"]\n        file_path = os.path.join(save_dir, item[\"name\"])\n        urllib.request.urlretrieve(file_url, file_path)\n","# please upload your data directory under this file's folder\ncities = os.listdir(\"city_data\")\n# store chunked text for each of the cities in a list of dicts\ncity_chunks = []\nfor city in cities:\n    chunked = chunk_text(f\"city_data/{city}\")\n    cleaned = []\n    for chunk in chunked:\n        if len(chunk) > 7:\n            cleaned.append(chunk)\n    mapped = {\"city_name\": city.split(\".\")[0], \"chunks\": cleaned}\n    city_chunks.append(mapped)\n","def get_embeddings(texts: list) -> list:\n    if len(texts) > 25:\n        splits = [texts[x : x + 25] for x in range(0, len(texts), 25)]\n        embeddings = []\n        for split in splits:\n            embedding_split = embedding_client.encode(sentences=split)\n            embeddings += embedding_split\n        return embeddings\n    return embedding_client.encode(\n        sentences=texts,\n    )\n","entries = []\nfor city_dict in city_chunks:\n    # No need for the embeddings list if get_embeddings already returns a list of lists\n    embedding_list = get_embeddings(city_dict[\"chunks\"])  # returns a list of lists\n    # Now match texts with embeddings and city name\n    for i, embedding in enumerate(embedding_list):\n        entry = {\n            \"embedding\": embedding,\n            \"sentence\": city_dict[\"chunks\"][\n                i\n            ],  # Assume \"chunks\" has the corresponding texts for the embeddings\n            \"city\": city_dict[\"city_name\"],\n        }\n        entries.append(entry)\n    print(entries)\n","from pymilvus import MilvusClient\n\nCOLLECTION_NAME = \"Bento_Milvus_RAG\"  # random name for your collection\nDIMENSION = 384\n\n# Initialize a Milvus Lite client\nmilvus_client = MilvusClient(\"milvus_demo.db\")\n","from pymilvus import connections\n\nconnections.connect(uri=\"milvus_demo.db\")\n","from pymilvus import MilvusClient, DataType, Collection\n\n# Create schema\nschema = MilvusClient.create_schema(\n    auto_id=True,\n    enable_dynamic_field=True,\n)\n\n# 3.2. 
Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"embedding\", datatype=DataType.FLOAT_VECTOR, dim=DIMENSION)\n","# prepare index parameters\nindex_params = milvus_client.prepare_index_params()\n\n# add index\nindex_params.add_index(\n    field_name=\"embedding\",\n    index_type=\"AUTOINDEX\",  # use autoindex instead of other complex indexing method\n    metric_type=\"COSINE\",  # L2, COSINE, or IP\n)\n\n# create collection\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n    collection_name=COLLECTION_NAME, schema=schema, index_params=index_params\n)\n\n# Outside the loop, now you upsert all the entries at once\nmilvus_client.insert(collection_name=COLLECTION_NAME, data=entries)\n","BENTO_LLM_END_POINT = \"BENTO_LLM_END_POINT\"\n\nllm_client = bentoml.SyncHTTPClient(BENTO_LLM_END_POINT, token=BENTO_API_TOKEN)\n","def dorag(question: str, context: str):\n\n    prompt = (\n        f\"You are a helpful assistant. The user has a question. Answer the user question based only on the context: {context}. \\n\"\n        f\"The user question is {question}\"\n    )\n\n    results = llm_client.generate(\n        max_tokens=1024,\n        prompt=prompt,\n    )\n\n    res = \"\"\n    for result in results:\n        res += result\n\n    return res\n","question = \"What state is Cambridge in?\"\n\n\ndef ask_a_question(question):\n    embeddings = get_embeddings([question])\n    res = milvus_client.search(\n        collection_name=COLLECTION_NAME,\n        data=embeddings,  # search for the one (1) embedding returned as a list of lists\n        anns_field=\"embedding\",  # Search across embeddings\n        limit=5,  # get me the top 5 results\n        output_fields=[\"sentence\"],  # get the sentence/chunk and city\n    )\n\n    sentences = []\n    for hits in res:\n        for hit in hits:\n            print(hit)\n            sentences.append(hit[\"entity\"][\"sentence\"])\n    context = \". \".join(sentences)\n    return context\n\n\ncontext = ask_a_question(question=question)\nprint(context)\n","print(dorag(question=question, context=context))\n"],"headingContent":"","anchorList":[{"label":"Retrieval-Augmented Generation (RAG) mit Milvus und BentoML","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-BentoML","type":1,"isActive":false},{"label":"Einführung","href":"Introduction","type":2,"isActive":false},{"label":"Bevor Sie beginnen","href":"Before-you-begin","type":2,"isActive":false},{"label":"Servieren von Einbettungen mit BentoML/BentoCloud","href":"Serving-Embeddings-with-BentoMLBentoCloud","type":2,"isActive":false},{"label":"Einfügen von Daten in eine Vektordatenbank für den Abruf","href":"Inserting-Data-into-a-Vector-Database-for-Retrieval","type":2,"isActive":false},{"label":"Erstellen Ihrer Milvus-Lite-Sammlung","href":"Creating-Your-Milvus-Lite-Collection","type":2,"isActive":false},{"label":"Richten Sie Ihren LLM für RAG ein","href":"Set-up-Your-LLM-for-RAG","type":2,"isActive":false},{"label":"LLM-Anweisungen","href":"LLM-Instructions","type":2,"isActive":false},{"label":"Ein RAG-Beispiel","href":"A-RAG-Example","type":2,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":["$ pip install -U pymilvus bentoml\n","import bentoml\n\nBENTO_EMBEDDING_MODEL_END_POINT = \"BENTO_EMBEDDING_MODEL_END_POINT\"\nBENTO_API_TOKEN = \"BENTO_API_TOKEN\"\n\nembedding_client = bentoml.SyncHTTPClient(\n    BENTO_EMBEDDING_MODEL_END_POINT, token=BENTO_API_TOKEN\n)\n","# naively chunk on newlines\ndef chunk_text(filename: str) -> list:\n    with open(filename, \"r\") as f:\n        text = f.read()\n    sentences = text.split(\"\\n\")\n    return sentences\n","import os\nimport requests\nimport urllib.request\n\n# set up the data source\nrepo = \"ytang07/bento_octo_milvus_RAG\"\ndirectory = \"data\"\nsave_dir = \"./city_data\"\napi_url = f\"https://api.github.com/repos/{repo}/contents/{directory}\"\n\n\nresponse = requests.get(api_url)\ndata = response.json()\n\nif not os.path.exists(save_dir):\n    os.makedirs(save_dir)\n\nfor item in data:\n    if item[\"type\"] == \"file\":\n        file_url = item[\"download_url\"]\n        file_path = os.path.join(save_dir, item[\"name\"])\n        urllib.request.urlretrieve(file_url, file_path)\n","# please upload your data directory under this file's folder\ncities = os.listdir(\"city_data\")\n# store chunked text for each of the cities in a list of dicts\ncity_chunks = []\nfor city in cities:\n    chunked = chunk_text(f\"city_data/{city}\")\n    cleaned = []\n    for chunk in chunked:\n        if len(chunk) > 7:\n            cleaned.append(chunk)\n    mapped = {\"city_name\": city.split(\".\")[0], \"chunks\": cleaned}\n    city_chunks.append(mapped)\n","def get_embeddings(texts: list) -> list:\n    if len(texts) > 25:\n        splits = [texts[x : x + 25] for x in range(0, len(texts), 25)]\n        embeddings = []\n        for split in splits:\n            embedding_split = embedding_client.encode(sentences=split)\n            embeddings += embedding_split\n        return embeddings\n    return embedding_client.encode(\n        sentences=texts,\n    )\n","entries = []\nfor city_dict in city_chunks:\n    # No need for the embeddings list if get_embeddings already returns a list of lists\n    embedding_list = get_embeddings(city_dict[\"chunks\"])  # returns a list of lists\n    # Now match texts with embeddings and city name\n    for i, embedding in enumerate(embedding_list):\n        entry = {\n            \"embedding\": embedding,\n            \"sentence\": city_dict[\"chunks\"][\n                i\n            ],  # Assume \"chunks\" has the corresponding texts for the embeddings\n            \"city\": city_dict[\"city_name\"],\n        }\n        entries.append(entry)\n    print(entries)\n","from pymilvus import MilvusClient\n\nCOLLECTION_NAME = \"Bento_Milvus_RAG\"  # random name for your collection\nDIMENSION = 384\n\n# Initialize a Milvus Lite client\nmilvus_client = MilvusClient(\"milvus_demo.db\")\n","from pymilvus import connections\n\nconnections.connect(uri=\"milvus_demo.db\")\n","from pymilvus import MilvusClient, DataType, Collection\n\n# Create schema\nschema = MilvusClient.create_schema(\n    auto_id=True,\n    enable_dynamic_field=True,\n)\n\n# 3.2. 
Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"embedding\", datatype=DataType.FLOAT_VECTOR, dim=DIMENSION)\n","# prepare index parameters\nindex_params = milvus_client.prepare_index_params()\n\n# add index\nindex_params.add_index(\n    field_name=\"embedding\",\n    index_type=\"AUTOINDEX\",  # use autoindex instead of other complex indexing method\n    metric_type=\"COSINE\",  # L2, COSINE, or IP\n)\n\n# create collection\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n    collection_name=COLLECTION_NAME, schema=schema, index_params=index_params\n)\n\n# Outside the loop, now you upsert all the entries at once\nmilvus_client.insert(collection_name=COLLECTION_NAME, data=entries)\n","BENTO_LLM_END_POINT = \"BENTO_LLM_END_POINT\"\n\nllm_client = bentoml.SyncHTTPClient(BENTO_LLM_END_POINT, token=BENTO_API_TOKEN)\n","def dorag(question: str, context: str):\n\n    prompt = (\n        f\"You are a helpful assistant. The user has a question. Answer the user question based only on the context: {context}. \\n\"\n        f\"The user question is {question}\"\n    )\n\n    results = llm_client.generate(\n        max_tokens=1024,\n        prompt=prompt,\n    )\n\n    res = \"\"\n    for result in results:\n        res += result\n\n    return res\n","question = \"What state is Cambridge in?\"\n\n\ndef ask_a_question(question):\n    embeddings = get_embeddings([question])\n    res = milvus_client.search(\n        collection_name=COLLECTION_NAME,\n        data=embeddings,  # search for the one (1) embedding returned as a list of lists\n        anns_field=\"embedding\",  # Search across embeddings\n        limit=5,  # get me the top 5 results\n        output_fields=[\"sentence\"],  # get the sentence/chunk and city\n    )\n\n    sentences = []\n    for hits in res:\n        for hit in hits:\n            print(hit)\n            sentences.append(hit[\"entity\"][\"sentence\"])\n    context = \". \".join(sentences)\n    return context\n\n\ncontext = ask_a_question(question=question)\nprint(context)\n","print(dorag(question=question, context=context))\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and BentoML","anchorList":[{"label":"Retrieval-Augmented Generation (RAG) mit Milvus und BentoML","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-BentoML","type":1,"isActive":false},{"label":"Einführung","href":"Introduction","type":2,"isActive":false},{"label":"Bevor Sie beginnen","href":"Before-you-begin","type":2,"isActive":false},{"label":"Servieren von Einbettungen mit BentoML/BentoCloud","href":"Serving-Embeddings-with-BentoMLBentoCloud","type":2,"isActive":false},{"label":"Einfügen von Daten in eine Vektordatenbank für den Abruf","href":"Inserting-Data-into-a-Vector-Database-for-Retrieval","type":2,"isActive":false},{"label":"Erstellen Ihrer Milvus-Lite-Sammlung","href":"Creating-Your-Milvus-Lite-Collection","type":2,"isActive":false},{"label":"Richten Sie Ihren LLM für RAG ein","href":"Set-up-Your-LLM-for-RAG","type":2,"isActive":false},{"label":"LLM-Anweisungen","href":"LLM-Instructions","type":2,"isActive":false},{"label":"Ein RAG-Beispiel","href":"A-RAG-Example","type":2,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/de/integrations/integrate_with_bentoml.md b/localization/v2.4.x/site/de/integrations/integrate_with_bentoml.md
      index d31d93a42..282fb9686 100644
      --- a/localization/v2.4.x/site/de/integrations/integrate_with_bentoml.md
      +++ b/localization/v2.4.x/site/de/integrations/integrate_with_bentoml.md
      @@ -2,8 +2,8 @@
       id: integrate_with_bentoml.md
       summary: >-
         This guide demonstrates how to use an open-source embedding model and
      -  large language model on BentoCloud with a Milvus vector database to
      -  build a Retrieval Augmented Generation (RAG) application.
      +  large language model on BentoCloud with the Milvus vector database to
      +  build a Retrieval Augmented Generation (RAG) application.
       title: Retrieval-Augmented Generation (RAG) mit Milvus und BentoML
       ---
       

      Retrieval-Augmented Generation (RAG) with Milvus and BentoML

      Open In Colab

      +

      Open In Colab +GitHub Repository

      Introduction

      This guide demonstrates how to use an open-source embedding model and large language model on BentoCloud with the Milvus vector database to build a Retrieval Augmented Generation (RAG) application. BentoCloud is an AI inference platform for fast-moving AI teams, offering fully managed infrastructure tailored for model inference. It works in conjunction with BentoML, an open-source model serving framework, to facilitate the easy creation and deployment of high-performance model services. In this demo, we use Milvus Lite as the vector database, a lightweight version of Milvus that can be embedded into your Python application.

      +

      This guide demonstrates how to use an open-source embedding model and large language model on BentoCloud with the Milvus vector database to build a Retrieval Augmented Generation (RAG) application. BentoCloud is an AI inference platform for fast-moving AI teams, offering fully managed infrastructure tailored for model inference. It works in conjunction with BentoML, an open-source model serving framework, to facilitate the easy creation and deployment of high-performance model services. In this demo, we use Milvus Lite as the vector database, the lightweight version of Milvus that can be embedded into your Python application.

      Before you begin

      -

      If you are using Google Colab, you may need to restart the runtime to enable the dependencies just installed (click on the "Runtime" menu at the top of the screen, and select "Restart session" from the dropdown menu).

      +

      If you are using Google Colab, you may need to restart the runtime to enable the dependencies just installed (click on the "Runtime" menu at the top of the screen and select "Restart session" from the dropdown menu).

      After you sign in to BentoCloud, you can interact with deployed BentoCloud services in Deployments, and the corresponding END_POINT and API are located in Playground -> Python. You can download the city data here.

      Serving Embeddings with BentoML/BentoCloud

      Now that we have our embeddings and data prepared, we can insert the vectors together with the metadata into Milvus Lite for later vector search. The first step in this section is to start a client that connects to Milvus Lite. We simply import the MilvusClient module and initialize a Milvus Lite client that connects to your Milvus Lite vector database. The dimension size comes from the size of the embedding model; for example, the Sentence Transformer model all-MiniLM-L6-v2 produces vectors of 384 dimensions.

      +

      Now that we have our embeddings and data prepared, we can insert the vectors together with the metadata into Milvus Lite for later vector search. The first step in this section is to start a client that connects to Milvus Lite. We simply import the MilvusClient module and initialize a Milvus Lite client that connects to your Milvus Lite vector database. The dimension size comes from the size of the embedding model; for example, the Sentence Transformer model all-MiniLM-L6-v2 produces vectors with 384 dimensions.

      from pymilvus import MilvusClient
       
       COLLECTION_NAME = "Bento_Milvus_RAG"  # random name for your collection
      @@ -187,7 +188,7 @@ DIMENSION = 384
       milvus_client = MilvusClient("milvus_demo.db")
       
      -

      As for the argument of MilvusClient, the following applies:

      +

      As for the argument of MilvusClient:

      • Setting the uri as a local file, e.g. ./milvus.db, is the most convenient method, as it automatically utilizes Milvus Lite to store all data in this file.
      • If you have a large scale of data, you can set up a more performant Milvus server on Docker or Kubernetes. In this setup, please use the server URI, e.g. http://localhost:19530, as your uri.
      • diff --git a/localization/v2.4.x/site/de/integrations/integrate_with_camel.json b/localization/v2.4.x/site/de/integrations/integrate_with_camel.json index 4d96fe61e..0db51530a 100644 --- a/localization/v2.4.x/site/de/integrations/integrate_with_camel.json +++ b/localization/v2.4.x/site/de/integrations/integrate_with_camel.json @@ -1 +1 @@ -{"codeList":["$ pip install -U \"camel-ai[all]\" pymilvus\n","import os\nimport requests\n\nos.makedirs(\"local_data\", exist_ok=True)\n\nurl = \"https://arxiv.org/pdf/2303.17760.pdf\"\nresponse = requests.get(url)\nwith open(\"local_data/camel paper.pdf\", \"wb\") as file:\n file.write(response.content)\n","os.environ[\"OPENAI_API_KEY\"] = \"Your Key\"\n","from camel.embeddings import OpenAIEmbedding\n\nembedding_instance = OpenAIEmbedding()\n","from camel.storages import MilvusStorage\n\nstorage_instance = MilvusStorage(\n vector_dim=embedding_instance.get_output_dim(),\n url_and_api_key=(\n \"./milvus_demo.db\", # Your Milvus connection URI\n \"\", # Your Milvus token\n ),\n collection_name=\"camel_paper\",\n)\n","from camel.retrievers import VectorRetriever\n\nvector_retriever = VectorRetriever(\n embedding_model=embedding_instance, storage=storage_instance\n)\n","vector_retriever.process(content_input_path=\"local_data/camel paper.pdf\")\n","retrieved_info = vector_retriever.query(query=\"What is CAMEL?\", top_k=1)\nprint(retrieved_info)\n","retrieved_info_irrelevant = vector_retriever.query(\n query=\"Compared with dumpling and rice, which should I take for dinner?\", top_k=1\n)\n\nprint(retrieved_info_irrelevant)\n","from camel.retrievers import AutoRetriever\nfrom camel.types import StorageType\n\nauto_retriever = AutoRetriever(\n url_and_api_key=(\n \"./milvus_demo.db\", # Your Milvus connection URI\n \"\", # Your Milvus token\n ),\n storage_type=StorageType.MILVUS,\n embedding_model=embedding_instance,\n)\n\nretrieved_info = auto_retriever.run_vector_retriever(\n query=\"What is CAMEL-AI\",\n content_input_paths=[\n \"local_data/camel paper.pdf\", # example local path\n \"https://www.camel-ai.org/\", # example remote url\n ],\n top_k=1,\n return_detailed_info=True,\n)\n\nprint(retrieved_info)\n","from camel.agents import ChatAgent\nfrom camel.messages import BaseMessage\nfrom camel.types import RoleType\nfrom camel.retrievers import AutoRetriever\nfrom camel.types import StorageType\n\n\ndef single_agent(query: str) -> str:\n # Set agent role\n assistant_sys_msg = BaseMessage(\n role_name=\"Assistant\",\n role_type=RoleType.ASSISTANT,\n meta_dict=None,\n content=\"\"\"You are a helpful assistant to answer question,\n I will give you the Original Query and Retrieved Context,\n answer the Original Query based on the Retrieved Context,\n if you can't answer the question just say I don't know.\"\"\",\n )\n\n # Add auto retriever\n auto_retriever = AutoRetriever(\n url_and_api_key=(\n \"./milvus_demo.db\", # Your Milvus connection URI\n \"\", # Your Milvus token\n ),\n storage_type=StorageType.MILVUS,\n embedding_model=embedding_instance,\n )\n\n retrieved_info = auto_retriever.run_vector_retriever(\n query=query,\n content_input_paths=[\n \"local_data/camel paper.pdf\", # example local path\n \"https://www.camel-ai.org/\", # example remote url\n ],\n # vector_storage_local_path=\"storage_default_run\",\n top_k=1,\n return_detailed_info=True,\n )\n\n # Pass the retrieved infomation to agent\n user_msg = BaseMessage.make_user_message(role_name=\"User\", content=retrieved_info)\n agent = ChatAgent(assistant_sys_msg)\n\n # Get response\n 
assistant_response = agent.step(user_msg)\n return assistant_response.msg.content\n\n\nprint(single_agent(\"What is CAMEL-AI\"))\n","from typing import List\nfrom colorama import Fore\n\nfrom camel.agents.chat_agent import FunctionCallingRecord\nfrom camel.configs import ChatGPTConfig\nfrom camel.functions import (\n MATH_FUNCS,\n RETRIEVAL_FUNCS,\n)\nfrom camel.societies import RolePlaying\nfrom camel.types import ModelType\nfrom camel.utils import print_text_animated\n\n\ndef role_playing_with_rag(\n task_prompt, model_type=ModelType.GPT_4O, chat_turn_limit=10\n) -> None:\n task_prompt = task_prompt\n\n user_model_config = ChatGPTConfig(temperature=0.0)\n\n function_list = [\n *MATH_FUNCS,\n *RETRIEVAL_FUNCS,\n ]\n assistant_model_config = ChatGPTConfig(\n tools=function_list,\n temperature=0.0,\n )\n\n role_play_session = RolePlaying(\n assistant_role_name=\"Searcher\",\n user_role_name=\"Professor\",\n assistant_agent_kwargs=dict(\n model_type=model_type,\n model_config=assistant_model_config,\n tools=function_list,\n ),\n user_agent_kwargs=dict(\n model_type=model_type,\n model_config=user_model_config,\n ),\n task_prompt=task_prompt,\n with_task_specify=False,\n )\n\n print(\n Fore.GREEN\n + f\"AI Assistant sys message:\\n{role_play_session.assistant_sys_msg}\\n\"\n )\n print(Fore.BLUE + f\"AI User sys message:\\n{role_play_session.user_sys_msg}\\n\")\n\n print(Fore.YELLOW + f\"Original task prompt:\\n{task_prompt}\\n\")\n print(\n Fore.CYAN\n + f\"Specified task prompt:\\n{role_play_session.specified_task_prompt}\\n\"\n )\n print(Fore.RED + f\"Final task prompt:\\n{role_play_session.task_prompt}\\n\")\n\n n = 0\n input_msg = role_play_session.init_chat()\n while n < chat_turn_limit:\n n += 1\n assistant_response, user_response = role_play_session.step(input_msg)\n\n if assistant_response.terminated:\n print(\n Fore.GREEN\n + (\n \"AI Assistant terminated. Reason: \"\n f\"{assistant_response.info['termination_reasons']}.\"\n )\n )\n break\n if user_response.terminated:\n print(\n Fore.GREEN\n + (\n \"AI User terminated. \"\n f\"Reason: {user_response.info['termination_reasons']}.\"\n )\n )\n break\n\n # Print output from the user\n print_text_animated(Fore.BLUE + f\"AI User:\\n\\n{user_response.msg.content}\\n\")\n\n # Print output from the assistant, including any function\n # execution information\n print_text_animated(Fore.GREEN + \"AI Assistant:\")\n tool_calls: List[FunctionCallingRecord] = assistant_response.info[\"tool_calls\"]\n for func_record in tool_calls:\n print_text_animated(f\"{func_record}\")\n print_text_animated(f\"{assistant_response.msg.content}\\n\")\n\n if \"CAMEL_TASK_DONE\" in user_response.msg.content:\n break\n\n input_msg = assistant_response.msg\n","role_playing_with_rag(\n task_prompt=\"\"\"What is the main termination reasons for AI Society\n dataset, how many number of messages did camel decided to\n limit, what's the value plus 100? You should refer to the\n content in path camel/local_data/camel paper.pdf\"\"\"\n)\n"],"headingContent":"","anchorList":[{"label":"Retrieval-erweiterte Generierung (RAG) mit Milvus und Camel","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Camel","type":1,"isActive":false},{"label":"Daten laden","href":"Load-Data","type":2,"isActive":false},{"label":"1. Angepasste RAG","href":"1-Customized-RAG","type":2,"isActive":false},{"label":"2. Auto RAG","href":"2-Auto-RAG","type":2,"isActive":false},{"label":"3. Einzelagent mit Auto RAG","href":"3-Single-Agent-with-Auto-RAG","type":2,"isActive":false},{"label":"4. 
Rollenspiele mit Auto RAG","href":"4-Role-playing-with-Auto-RAG","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install -U \"camel-ai[all]\" pymilvus\n","import os\nimport requests\n\nos.makedirs(\"local_data\", exist_ok=True)\n\nurl = \"https://arxiv.org/pdf/2303.17760.pdf\"\nresponse = requests.get(url)\nwith open(\"local_data/camel paper.pdf\", \"wb\") as file:\n file.write(response.content)\n","os.environ[\"OPENAI_API_KEY\"] = \"Your Key\"\n","from camel.embeddings import OpenAIEmbedding\n\nembedding_instance = OpenAIEmbedding()\n","from camel.storages import MilvusStorage\n\nstorage_instance = MilvusStorage(\n vector_dim=embedding_instance.get_output_dim(),\n url_and_api_key=(\n \"./milvus_demo.db\", # Your Milvus connection URI\n \"\", # Your Milvus token\n ),\n collection_name=\"camel_paper\",\n)\n","from camel.retrievers import VectorRetriever\n\nvector_retriever = VectorRetriever(\n embedding_model=embedding_instance, storage=storage_instance\n)\n","vector_retriever.process(content_input_path=\"local_data/camel paper.pdf\")\n","retrieved_info = vector_retriever.query(query=\"What is CAMEL?\", top_k=1)\nprint(retrieved_info)\n","retrieved_info_irrelevant = vector_retriever.query(\n query=\"Compared with dumpling and rice, which should I take for dinner?\", top_k=1\n)\n\nprint(retrieved_info_irrelevant)\n","from camel.retrievers import AutoRetriever\nfrom camel.types import StorageType\n\nauto_retriever = AutoRetriever(\n url_and_api_key=(\n \"./milvus_demo.db\", # Your Milvus connection URI\n \"\", # Your Milvus token\n ),\n storage_type=StorageType.MILVUS,\n embedding_model=embedding_instance,\n)\n\nretrieved_info = auto_retriever.run_vector_retriever(\n query=\"What is CAMEL-AI\",\n content_input_paths=[\n \"local_data/camel paper.pdf\", # example local path\n \"https://www.camel-ai.org/\", # example remote url\n ],\n top_k=1,\n return_detailed_info=True,\n)\n\nprint(retrieved_info)\n","from camel.agents import ChatAgent\nfrom camel.messages import BaseMessage\nfrom camel.types import RoleType\nfrom camel.retrievers import AutoRetriever\nfrom camel.types import StorageType\n\n\ndef single_agent(query: str) -> str:\n # Set agent role\n assistant_sys_msg = BaseMessage(\n role_name=\"Assistant\",\n role_type=RoleType.ASSISTANT,\n meta_dict=None,\n content=\"\"\"You are a helpful assistant to answer question,\n I will give you the Original Query and Retrieved Context,\n answer the Original Query based on the Retrieved Context,\n if you can't answer the question just say I don't know.\"\"\",\n )\n\n # Add auto retriever\n auto_retriever = AutoRetriever(\n url_and_api_key=(\n \"./milvus_demo.db\", # Your Milvus connection URI\n \"\", # Your Milvus token\n ),\n storage_type=StorageType.MILVUS,\n embedding_model=embedding_instance,\n )\n\n retrieved_info = auto_retriever.run_vector_retriever(\n query=query,\n content_input_paths=[\n \"local_data/camel paper.pdf\", # example local path\n \"https://www.camel-ai.org/\", # example remote url\n ],\n # vector_storage_local_path=\"storage_default_run\",\n top_k=1,\n return_detailed_info=True,\n )\n\n # Pass the retrieved infomation to agent\n user_msg = BaseMessage.make_user_message(role_name=\"User\", content=retrieved_info)\n agent = ChatAgent(assistant_sys_msg)\n\n # Get response\n assistant_response = agent.step(user_msg)\n return assistant_response.msg.content\n\n\nprint(single_agent(\"What is CAMEL-AI\"))\n","from typing import List\nfrom colorama import Fore\n\nfrom camel.agents.chat_agent import 
FunctionCallingRecord\nfrom camel.configs import ChatGPTConfig\nfrom camel.functions import (\n MATH_FUNCS,\n RETRIEVAL_FUNCS,\n)\nfrom camel.societies import RolePlaying\nfrom camel.types import ModelType\nfrom camel.utils import print_text_animated\n\n\ndef role_playing_with_rag(\n task_prompt, model_type=ModelType.GPT_4O, chat_turn_limit=10\n) -> None:\n task_prompt = task_prompt\n\n user_model_config = ChatGPTConfig(temperature=0.0)\n\n function_list = [\n *MATH_FUNCS,\n *RETRIEVAL_FUNCS,\n ]\n assistant_model_config = ChatGPTConfig(\n tools=function_list,\n temperature=0.0,\n )\n\n role_play_session = RolePlaying(\n assistant_role_name=\"Searcher\",\n user_role_name=\"Professor\",\n assistant_agent_kwargs=dict(\n model_type=model_type,\n model_config=assistant_model_config,\n tools=function_list,\n ),\n user_agent_kwargs=dict(\n model_type=model_type,\n model_config=user_model_config,\n ),\n task_prompt=task_prompt,\n with_task_specify=False,\n )\n\n print(\n Fore.GREEN\n + f\"AI Assistant sys message:\\n{role_play_session.assistant_sys_msg}\\n\"\n )\n print(Fore.BLUE + f\"AI User sys message:\\n{role_play_session.user_sys_msg}\\n\")\n\n print(Fore.YELLOW + f\"Original task prompt:\\n{task_prompt}\\n\")\n print(\n Fore.CYAN\n + f\"Specified task prompt:\\n{role_play_session.specified_task_prompt}\\n\"\n )\n print(Fore.RED + f\"Final task prompt:\\n{role_play_session.task_prompt}\\n\")\n\n n = 0\n input_msg = role_play_session.init_chat()\n while n < chat_turn_limit:\n n += 1\n assistant_response, user_response = role_play_session.step(input_msg)\n\n if assistant_response.terminated:\n print(\n Fore.GREEN\n + (\n \"AI Assistant terminated. Reason: \"\n f\"{assistant_response.info['termination_reasons']}.\"\n )\n )\n break\n if user_response.terminated:\n print(\n Fore.GREEN\n + (\n \"AI User terminated. \"\n f\"Reason: {user_response.info['termination_reasons']}.\"\n )\n )\n break\n\n # Print output from the user\n print_text_animated(Fore.BLUE + f\"AI User:\\n\\n{user_response.msg.content}\\n\")\n\n # Print output from the assistant, including any function\n # execution information\n print_text_animated(Fore.GREEN + \"AI Assistant:\")\n tool_calls: List[FunctionCallingRecord] = assistant_response.info[\"tool_calls\"]\n for func_record in tool_calls:\n print_text_animated(f\"{func_record}\")\n print_text_animated(f\"{assistant_response.msg.content}\\n\")\n\n if \"CAMEL_TASK_DONE\" in user_response.msg.content:\n break\n\n input_msg = assistant_response.msg\n","role_playing_with_rag(\n task_prompt=\"\"\"What is the main termination reasons for AI Society\n dataset, how many number of messages did camel decided to\n limit, what's the value plus 100? You should refer to the\n content in path camel/local_data/camel paper.pdf\"\"\"\n)\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and Camel","anchorList":[{"label":"Retrieval-erweiterte Generierung (RAG) mit Milvus und Camel","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Camel","type":1,"isActive":false},{"label":"Daten laden","href":"Load-Data","type":2,"isActive":false},{"label":"1. Angepasste RAG","href":"1-Customized-RAG","type":2,"isActive":false},{"label":"2. Auto RAG","href":"2-Auto-RAG","type":2,"isActive":false},{"label":"3. Einzelagent mit Auto RAG","href":"3-Single-Agent-with-Auto-RAG","type":2,"isActive":false},{"label":"4. 
Rollenspiele mit Auto RAG","href":"4-Role-playing-with-Auto-RAG","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/integrations/integrate_with_camel.md b/localization/v2.4.x/site/de/integrations/integrate_with_camel.md index 5ddd9a8bc..6c702dc3f 100644 --- a/localization/v2.4.x/site/de/integrations/integrate_with_camel.md +++ b/localization/v2.4.x/site/de/integrations/integrate_with_camel.md @@ -2,8 +2,8 @@ id: integrate_with_camel.md summary: >- Dieser Leitfaden zeigt, wie man ein Open-Source-Einbettungsmodell und ein - Großsprachenmodell auf BentoCloud mit einer Milvus-Vektordatenbank verwendet, - um eine Retrieval Augmented Generation (RAG)-Anwendung zu erstellen. + Großsprachenmodell auf BentoCloud mit der Milvus-Vektordatenbank verwendet, um + eine Retrieval Augmented Generation (RAG)-Anwendung zu erstellen. title: Retrieval-Augmented Generation (RAG) mit Milvus und BentoML ---

        Retrieval-Augmented Generation (RAG) with Milvus and CAMEL

        Open In Colab | GitHub Repository

        This guide demonstrates how to build a Retrieval-Augmented Generation (RAG) system using CAMEL and Milvus.

        The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus and then uses a generative model to generate new text based on the retrieved documents.

        CAMEL is a multi-agent framework. Milvus is the world's most advanced open-source vector database, built for embedding similarity search and AI applications.
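
        For orientation, here is the core retrieval flow condensed from the code this guide walks through (it assumes OPENAI_API_KEY is set and the CAMEL paper PDF has been downloaded to local_data/):

        from camel.embeddings import OpenAIEmbedding
        from camel.retrievers import VectorRetriever
        from camel.storages import MilvusStorage

        # Embedding model used for both documents and queries (needs OPENAI_API_KEY)
        embedding_instance = OpenAIEmbedding()

        # Milvus Lite stores vectors in a local file; a server URI and token work too
        storage_instance = MilvusStorage(
            vector_dim=embedding_instance.get_output_dim(),
            url_and_api_key=("./milvus_demo.db", ""),
            collection_name="camel_paper",
        )

        # Chunk, embed, and index the source, then answer a query against it
        vector_retriever = VectorRetriever(
            embedding_model=embedding_instance, storage=storage_instance
        )
        vector_retriever.process(content_input_path="local_data/camel paper.pdf")
        print(vector_retriever.query(query="What is CAMEL?", top_k=1))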

        @@ -52,7 +53,7 @@ title: Retrieval-Augmented Generation (RAG) mit Milvus und BentoML
        $ pip install -U "camel-ai[all]" pymilvus
         
        If you are using Google Colab, you may need to restart the runtime to activate the dependencies you just installed (click the "Runtime" menu at the top of the screen and select "Restart session" from the dropdown menu).

        import os
         import requests
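         
         # Continuation of this snippet as it appears in the guide: create a local
         # folder and download the CAMEL paper used throughout the tutorial.
         os.makedirs("local_data", exist_ok=True)
         
         url = "https://arxiv.org/pdf/2303.17760.pdf"
         response = requests.get(url)
         with open("local_data/camel paper.pdf", "wb") as file:
             file.write(response.content)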
        diff --git a/localization/v2.4.x/site/de/integrations/integrate_with_dspy.json b/localization/v2.4.x/site/de/integrations/integrate_with_dspy.json
        index 1bb64a72e..0517e1825 100644
        --- a/localization/v2.4.x/site/de/integrations/integrate_with_dspy.json
        +++ b/localization/v2.4.x/site/de/integrations/integrate_with_dspy.json
        @@ -1 +1 @@
        -{"codeList":["$ pip install \"dspy-ai[milvus]\"\n$ pip install -U pymilvus\n","from dspy.datasets import HotPotQA\n\n# Load the dataset.\ndataset = HotPotQA(\n    train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0\n)\n\n# Tell DSPy that the 'question' field is the input. Any other fields are labels and/or metadata.\ntrainset = [x.with_inputs(\"question\") for x in dataset.train]\ndevset = [x.with_inputs(\"question\") for x in dataset.dev]\n","import requests\nimport os\n\nos.environ[\"OPENAI_API_KEY\"] = \"\"\nMILVUS_URI = \"example.db\"\nMILVUS_TOKEN = \"\"\n\nfrom pymilvus import MilvusClient, DataType, Collection\nfrom dspy.retrieve.milvus_rm import openai_embedding_function\n\nclient = MilvusClient(uri=MILVUS_URI, token=MILVUS_TOKEN)\n\nif \"dspy_example\" not in client.list_collections():\n    client.create_collection(\n        collection_name=\"dspy_example\",\n        overwrite=True,\n        dimension=1536,\n        primary_field_name=\"id\",\n        vector_field_name=\"embedding\",\n        id_type=\"int\",\n        metric_type=\"IP\",\n        max_length=65535,\n        enable_dynamic=True,\n    )\ntext = requests.get(\n    \"https://raw.githubusercontent.com/wxywb/dspy_dataset_sample/master/sample_data.txt\"\n).text\n\nfor idx, passage in enumerate(text.split(\"\\n\")):\n    if len(passage) == 0:\n        continue\n    client.insert(\n        collection_name=\"dspy_example\",\n        data=[\n            {\n                \"id\": idx,\n                \"embedding\": openai_embedding_function(passage)[0],\n                \"text\": passage,\n            }\n        ],\n    )\n","from dspy.retrieve.milvus_rm import MilvusRM\nimport dspy\n\nretriever_model = MilvusRM(\n    collection_name=\"dspy_example\",\n    uri=MILVUS_URI,\n    token=MILVUS_TOKEN,  # ignore this if no token is required for Milvus connection\n    embedding_function=openai_embedding_function,\n)\nturbo = dspy.OpenAI(model=\"gpt-3.5-turbo\")\ndspy.settings.configure(lm=turbo)\n","class GenerateAnswer(dspy.Signature):\n    \"\"\"Answer questions with short factoid answers.\"\"\"\n\n    context = dspy.InputField(desc=\"may contain relevant facts\")\n    question = dspy.InputField()\n    answer = dspy.OutputField(desc=\"often between 1 and 5 words\")\n","class RAG(dspy.Module):\n    def __init__(self, rm):\n        super().__init__()\n        self.retrieve = rm\n\n        # This signature indicates the task imposed on the COT module.\n        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)\n\n    def forward(self, question):\n        # Use milvus_rm to retrieve context for the question.\n        context = self.retrieve(question).passages\n        # COT module takes \"context, query\" and output \"answer\".\n        prediction = self.generate_answer(context=context, question=question)\n        return dspy.Prediction(\n            context=[item.long_text for item in context], answer=prediction.answer\n        )\n","rag = RAG(retriever_model)\nprint(rag(\"who write At My Window\").answer)\n","from dspy.evaluate.evaluate import Evaluate\nfrom dspy.datasets import HotPotQA\n\nevaluate_on_hotpotqa = Evaluate(\n    devset=devset, num_threads=1, display_progress=False, display_table=5\n)\n\nmetric = dspy.evaluate.answer_exact_match\nscore = evaluate_on_hotpotqa(rag, metric=metric)\nprint(\"rag:\", score)\n","from dspy.teleprompt import BootstrapFewShot\n\n# Validation logic: check that the predicted answer is correct.# Also check that the retrieved context does contain that 
answer.\n\n\ndef validate_context_and_answer(example, pred, trace=None):\n    answer_EM = dspy.evaluate.answer_exact_match(example, pred)\n    answer_PM = dspy.evaluate.answer_passage_match(example, pred)\n    return answer_EM and answer_PM\n\n\n# Set up a basic teleprompter, which will compile our RAG program.\nteleprompter = BootstrapFewShot(metric=validate_context_and_answer)\n\n# Compile!\ncompiled_rag = teleprompter.compile(rag, trainset=trainset)\n\n# Now compiled_rag is optimized and ready to answer your new question!\n# Now, let’s evaluate the compiled RAG program.\nscore = evaluate_on_hotpotqa(compiled_rag, metric=metric)\nprint(score)\nprint(\"compile_rag:\", score)\n"],"headingContent":"","anchorList":[{"label":"Integrieren Sie Milvus mit DSPy","href":"Integrate-Milvus-with-DSPy","type":1,"isActive":false},{"label":"Was ist DSPy?","href":"What-is-DSPy","type":2,"isActive":false},{"label":"Vorteile der Verwendung von DSPy","href":"Benefits-of-using-DSPy","type":2,"isActive":false},{"label":"Module","href":"Modules","type":2,"isActive":false},{"label":"Warum Milvus in DSPy","href":"Why-Milvus-in-DSPy","type":2,"isActive":false},{"label":"Beispiele","href":"Examples","type":2,"isActive":false},{"label":"Zusammenfassung","href":"Summary","type":2,"isActive":false}]}
        \ No newline at end of file
        +{"codeList":["$ pip install \"dspy-ai[milvus]\"\n$ pip install -U pymilvus\n","from dspy.datasets import HotPotQA\n\n# Load the dataset.\ndataset = HotPotQA(\n    train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0\n)\n\n# Tell DSPy that the 'question' field is the input. Any other fields are labels and/or metadata.\ntrainset = [x.with_inputs(\"question\") for x in dataset.train]\ndevset = [x.with_inputs(\"question\") for x in dataset.dev]\n","import requests\nimport os\n\nos.environ[\"OPENAI_API_KEY\"] = \"\"\nMILVUS_URI = \"example.db\"\nMILVUS_TOKEN = \"\"\n\nfrom pymilvus import MilvusClient, DataType, Collection\nfrom dspy.retrieve.milvus_rm import openai_embedding_function\n\nclient = MilvusClient(uri=MILVUS_URI, token=MILVUS_TOKEN)\n\nif \"dspy_example\" not in client.list_collections():\n    client.create_collection(\n        collection_name=\"dspy_example\",\n        overwrite=True,\n        dimension=1536,\n        primary_field_name=\"id\",\n        vector_field_name=\"embedding\",\n        id_type=\"int\",\n        metric_type=\"IP\",\n        max_length=65535,\n        enable_dynamic=True,\n    )\ntext = requests.get(\n    \"https://raw.githubusercontent.com/wxywb/dspy_dataset_sample/master/sample_data.txt\"\n).text\n\nfor idx, passage in enumerate(text.split(\"\\n\")):\n    if len(passage) == 0:\n        continue\n    client.insert(\n        collection_name=\"dspy_example\",\n        data=[\n            {\n                \"id\": idx,\n                \"embedding\": openai_embedding_function(passage)[0],\n                \"text\": passage,\n            }\n        ],\n    )\n","from dspy.retrieve.milvus_rm import MilvusRM\nimport dspy\n\nretriever_model = MilvusRM(\n    collection_name=\"dspy_example\",\n    uri=MILVUS_URI,\n    token=MILVUS_TOKEN,  # ignore this if no token is required for Milvus connection\n    embedding_function=openai_embedding_function,\n)\nturbo = dspy.OpenAI(model=\"gpt-3.5-turbo\")\ndspy.settings.configure(lm=turbo)\n","class GenerateAnswer(dspy.Signature):\n    \"\"\"Answer questions with short factoid answers.\"\"\"\n\n    context = dspy.InputField(desc=\"may contain relevant facts\")\n    question = dspy.InputField()\n    answer = dspy.OutputField(desc=\"often between 1 and 5 words\")\n","class RAG(dspy.Module):\n    def __init__(self, rm):\n        super().__init__()\n        self.retrieve = rm\n\n        # This signature indicates the task imposed on the COT module.\n        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)\n\n    def forward(self, question):\n        # Use milvus_rm to retrieve context for the question.\n        context = self.retrieve(question).passages\n        # COT module takes \"context, query\" and output \"answer\".\n        prediction = self.generate_answer(context=context, question=question)\n        return dspy.Prediction(\n            context=[item.long_text for item in context], answer=prediction.answer\n        )\n","rag = RAG(retriever_model)\nprint(rag(\"who write At My Window\").answer)\n","from dspy.evaluate.evaluate import Evaluate\nfrom dspy.datasets import HotPotQA\n\nevaluate_on_hotpotqa = Evaluate(\n    devset=devset, num_threads=1, display_progress=False, display_table=5\n)\n\nmetric = dspy.evaluate.answer_exact_match\nscore = evaluate_on_hotpotqa(rag, metric=metric)\nprint(\"rag:\", score)\n","from dspy.teleprompt import BootstrapFewShot\n\n# Validation logic: check that the predicted answer is correct.# Also check that the retrieved context does contain that 
answer.\n\n\ndef validate_context_and_answer(example, pred, trace=None):\n    answer_EM = dspy.evaluate.answer_exact_match(example, pred)\n    answer_PM = dspy.evaluate.answer_passage_match(example, pred)\n    return answer_EM and answer_PM\n\n\n# Set up a basic teleprompter, which will compile our RAG program.\nteleprompter = BootstrapFewShot(metric=validate_context_and_answer)\n\n# Compile!\ncompiled_rag = teleprompter.compile(rag, trainset=trainset)\n\n# Now compiled_rag is optimized and ready to answer your new question!\n# Now, let’s evaluate the compiled RAG program.\nscore = evaluate_on_hotpotqa(compiled_rag, metric=metric)\nprint(score)\nprint(\"compile_rag:\", score)\n"],"headingContent":"Integrate Milvus with DSPy","anchorList":[{"label":"Integrieren Sie Milvus mit DSPy","href":"Integrate-Milvus-with-DSPy","type":1,"isActive":false},{"label":"Was ist DSPy?","href":"What-is-DSPy","type":2,"isActive":false},{"label":"Vorteile der Verwendung von DSPy","href":"Benefits-of-using-DSPy","type":2,"isActive":false},{"label":"Module","href":"Modules","type":2,"isActive":false},{"label":"Warum Milvus in DSPy","href":"Why-Milvus-in-DSPy","type":2,"isActive":false},{"label":"Beispiele","href":"Examples","type":2,"isActive":false},{"label":"Zusammenfassung","href":"Summary","type":2,"isActive":false}]}
        \ No newline at end of file
        diff --git a/localization/v2.4.x/site/de/integrations/integrate_with_dspy.md b/localization/v2.4.x/site/de/integrations/integrate_with_dspy.md
        index d85d56515..7eaef7449 100644
        --- a/localization/v2.4.x/site/de/integrations/integrate_with_dspy.md
        +++ b/localization/v2.4.x/site/de/integrations/integrate_with_dspy.md
        @@ -20,7 +20,8 @@ title: Integrieren Sie Milvus mit DSPy
                   d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
                 >
               
        Open In Colab | GitHub Repository

        What is DSPy?

        • Programming approach: DSPy provides a systematic programming approach to developing LM pipelines by abstracting pipelines as text-transformation graphs rather than merely instructing the LLMs. Its declarative modules enable structured design and optimization, replacing the trial-and-error of traditional prompt templates.
        • Performance improvement: DSPy demonstrates significant performance gains over existing methods. In case studies it outperforms standard prompting and expert-created demonstrations, showing its versatility and effectiveness even when compiled down to smaller LM models.
        • Modularized abstraction: DSPy effectively abstracts away the intricate aspects of LM pipeline development, such as decomposition, fine-tuning, and model selection. With DSPy, a concise program can be seamlessly translated into instructions for various models such as GPT-4, Llama2-13b, or T5-base, streamlining development and improving performance.
        @@ -77,7 +78,7 @@ title: Integrieren Sie Milvus mit DSPy

        DSPy Modules

        • Signature: Signatures in DSPy serve as declarative specifications that outline the input/output behavior of modules, guiding the language model in task execution.
        • Module: DSPy modules serve as fundamental building blocks for programs that use language models (LMs). They abstract various prompting techniques, such as Chain of Thought or ReAct, and are adaptable to handle any DSPy signature. With learnable parameters and the ability to process inputs and produce outputs, these modules can be combined into larger programs, taking inspiration from NN modules in PyTorch while being tailored to LM applications.
        • Optimizer: Optimizers in DSPy fine-tune the parameters of DSPy programs, such as prompts and LLM weights, to maximize specified metrics like accuracy and to improve program efficiency.
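
        The example below, condensed from the code used later in this guide, shows how a signature and a module fit together:

        import dspy

        class GenerateAnswer(dspy.Signature):
            """Answer questions with short factoid answers."""

            context = dspy.InputField(desc="may contain relevant facts")
            question = dspy.InputField()
            answer = dspy.OutputField(desc="often between 1 and 5 words")

        # A Chain-of-Thought module built from the signature: it takes
        # "context, question" and produces "answer".
        generate_answer = dspy.ChainOfThought(GenerateAnswer)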

        Why Milvus in DSPy

        The Ragas score has risen from its previous value of 50.0 to 52.0, indicating an improvement in answer quality.

        Summary

        Through its programmable interface, which enables algorithmic and automated optimization of model prompts and weights, DSPy represents a leap forward in how we interact with language models. Using DSPy for the RAG implementation makes adapting to different language models or datasets a breeze, drastically reducing the need for tedious manual intervention.

        diff --git a/localization/v2.4.x/site/de/integrations/integrate_with_haystack.json b/localization/v2.4.x/site/de/integrations/integrate_with_haystack.json index c3d4095a1..8d8213544 100644 --- a/localization/v2.4.x/site/de/integrations/integrate_with_haystack.json +++ b/localization/v2.4.x/site/de/integrations/integrate_with_haystack.json @@ -1 +1 @@ -{"codeList":["! pip install --upgrade --quiet pymilvus milvus-haystack markdown-it-py mdit_plain\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","import os\nimport urllib.request\n\nurl = \"https://www.gutenberg.org/cache/epub/7785/pg7785.txt\"\nfile_path = \"./davinci.txt\"\n\nif not os.path.exists(file_path):\n urllib.request.urlretrieve(url, file_path)\n","from haystack import Pipeline\nfrom haystack.components.converters import MarkdownToDocument\nfrom haystack.components.embedders import OpenAIDocumentEmbedder, OpenAITextEmbedder\nfrom haystack.components.preprocessors import DocumentSplitter\nfrom haystack.components.writers import DocumentWriter\nfrom haystack.utils import Secret\n\nfrom milvus_haystack import MilvusDocumentStore\nfrom milvus_haystack.milvus_embedding_retriever import MilvusEmbeddingRetriever\n\n\ndocument_store = MilvusDocumentStore(\n connection_args={\"uri\": \"./milvus.db\"},\n # connection_args={\"uri\": \"http://localhost:19530\"},\n # connection_args={\"uri\": YOUR_ZILLIZ_CLOUD_URI, \"token\": Secret.from_env_var(\"ZILLIZ_CLOUD_API_KEY\")},\n drop_old=True,\n)\n","indexing_pipeline = Pipeline()\nindexing_pipeline.add_component(\"converter\", MarkdownToDocument())\nindexing_pipeline.add_component(\n \"splitter\", DocumentSplitter(split_by=\"sentence\", split_length=2)\n)\nindexing_pipeline.add_component(\"embedder\", OpenAIDocumentEmbedder())\nindexing_pipeline.add_component(\"writer\", DocumentWriter(document_store))\nindexing_pipeline.connect(\"converter\", \"splitter\")\nindexing_pipeline.connect(\"splitter\", \"embedder\")\nindexing_pipeline.connect(\"embedder\", \"writer\")\nindexing_pipeline.run({\"converter\": {\"sources\": [file_path]}})\n\nprint(\"Number of documents:\", document_store.count_documents())\n","question = 'Where is the painting \"Warrior\" currently stored?'\n\nretrieval_pipeline = Pipeline()\nretrieval_pipeline.add_component(\"embedder\", OpenAITextEmbedder())\nretrieval_pipeline.add_component(\n \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nretrieval_pipeline.connect(\"embedder\", \"retriever\")\n\nretrieval_results = retrieval_pipeline.run({\"embedder\": {\"text\": question}})\n\nfor doc in retrieval_results[\"retriever\"][\"documents\"]:\n print(doc.content)\n print(\"-\" * 10)\n","from haystack.utils import Secret\nfrom haystack.components.builders import PromptBuilder\nfrom haystack.components.generators import OpenAIGenerator\n\nprompt_template = \"\"\"Answer the following query based on the provided context. 
If the context does\n not include an answer, reply with 'I don't know'.\\n\n Query: {{query}}\n Documents:\n {% for doc in documents %}\n {{ doc.content }}\n {% endfor %}\n Answer:\n \"\"\"\n\nrag_pipeline = Pipeline()\nrag_pipeline.add_component(\"text_embedder\", OpenAITextEmbedder())\nrag_pipeline.add_component(\n \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nrag_pipeline.add_component(\"prompt_builder\", PromptBuilder(template=prompt_template))\nrag_pipeline.add_component(\n \"generator\",\n OpenAIGenerator(\n api_key=Secret.from_token(os.getenv(\"OPENAI_API_KEY\")),\n generation_kwargs={\"temperature\": 0},\n ),\n)\nrag_pipeline.connect(\"text_embedder.embedding\", \"retriever.query_embedding\")\nrag_pipeline.connect(\"retriever.documents\", \"prompt_builder.documents\")\nrag_pipeline.connect(\"prompt_builder\", \"generator\")\n\nresults = rag_pipeline.run(\n {\n \"text_embedder\": {\"text\": question},\n \"prompt_builder\": {\"query\": question},\n }\n)\nprint(\"RAG answer:\", results[\"generator\"][\"replies\"][0])\n"],"headingContent":"","anchorList":[{"label":"Retrieval-erweiterte Generierung (RAG) mit Milvus und Haystack","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Haystack","type":1,"isActive":false},{"label":"Voraussetzungen","href":"Prerequisites","type":2,"isActive":false},{"label":"Bereiten Sie die Daten vor","href":"Prepare-the-data","type":2,"isActive":false},{"label":"Erstellen Sie die Indizierungspipeline","href":"Create-the-indexing-Pipeline","type":2,"isActive":false},{"label":"Erstellen Sie die Abruf-Pipeline","href":"Create-the-retrieval-pipeline","type":2,"isActive":false},{"label":"Erstellen Sie die RAG-Pipeline","href":"Create-the-RAG-pipeline","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["! 
pip install --upgrade --quiet pymilvus milvus-haystack markdown-it-py mdit_plain\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","import os\nimport urllib.request\n\nurl = \"https://www.gutenberg.org/cache/epub/7785/pg7785.txt\"\nfile_path = \"./davinci.txt\"\n\nif not os.path.exists(file_path):\n urllib.request.urlretrieve(url, file_path)\n","from haystack import Pipeline\nfrom haystack.components.converters import MarkdownToDocument\nfrom haystack.components.embedders import OpenAIDocumentEmbedder, OpenAITextEmbedder\nfrom haystack.components.preprocessors import DocumentSplitter\nfrom haystack.components.writers import DocumentWriter\nfrom haystack.utils import Secret\n\nfrom milvus_haystack import MilvusDocumentStore\nfrom milvus_haystack.milvus_embedding_retriever import MilvusEmbeddingRetriever\n\n\ndocument_store = MilvusDocumentStore(\n connection_args={\"uri\": \"./milvus.db\"},\n # connection_args={\"uri\": \"http://localhost:19530\"},\n # connection_args={\"uri\": YOUR_ZILLIZ_CLOUD_URI, \"token\": Secret.from_env_var(\"ZILLIZ_CLOUD_API_KEY\")},\n drop_old=True,\n)\n","indexing_pipeline = Pipeline()\nindexing_pipeline.add_component(\"converter\", MarkdownToDocument())\nindexing_pipeline.add_component(\n \"splitter\", DocumentSplitter(split_by=\"sentence\", split_length=2)\n)\nindexing_pipeline.add_component(\"embedder\", OpenAIDocumentEmbedder())\nindexing_pipeline.add_component(\"writer\", DocumentWriter(document_store))\nindexing_pipeline.connect(\"converter\", \"splitter\")\nindexing_pipeline.connect(\"splitter\", \"embedder\")\nindexing_pipeline.connect(\"embedder\", \"writer\")\nindexing_pipeline.run({\"converter\": {\"sources\": [file_path]}})\n\nprint(\"Number of documents:\", document_store.count_documents())\n","question = 'Where is the painting \"Warrior\" currently stored?'\n\nretrieval_pipeline = Pipeline()\nretrieval_pipeline.add_component(\"embedder\", OpenAITextEmbedder())\nretrieval_pipeline.add_component(\n \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nretrieval_pipeline.connect(\"embedder\", \"retriever\")\n\nretrieval_results = retrieval_pipeline.run({\"embedder\": {\"text\": question}})\n\nfor doc in retrieval_results[\"retriever\"][\"documents\"]:\n print(doc.content)\n print(\"-\" * 10)\n","from haystack.utils import Secret\nfrom haystack.components.builders import PromptBuilder\nfrom haystack.components.generators import OpenAIGenerator\n\nprompt_template = \"\"\"Answer the following query based on the provided context. 
If the context does\n not include an answer, reply with 'I don't know'.\\n\n Query: {{query}}\n Documents:\n {% for doc in documents %}\n {{ doc.content }}\n {% endfor %}\n Answer:\n \"\"\"\n\nrag_pipeline = Pipeline()\nrag_pipeline.add_component(\"text_embedder\", OpenAITextEmbedder())\nrag_pipeline.add_component(\n \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nrag_pipeline.add_component(\"prompt_builder\", PromptBuilder(template=prompt_template))\nrag_pipeline.add_component(\n \"generator\",\n OpenAIGenerator(\n api_key=Secret.from_token(os.getenv(\"OPENAI_API_KEY\")),\n generation_kwargs={\"temperature\": 0},\n ),\n)\nrag_pipeline.connect(\"text_embedder.embedding\", \"retriever.query_embedding\")\nrag_pipeline.connect(\"retriever.documents\", \"prompt_builder.documents\")\nrag_pipeline.connect(\"prompt_builder\", \"generator\")\n\nresults = rag_pipeline.run(\n {\n \"text_embedder\": {\"text\": question},\n \"prompt_builder\": {\"query\": question},\n }\n)\nprint(\"RAG answer:\", results[\"generator\"][\"replies\"][0])\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and Haystack","anchorList":[{"label":"Retrieval-erweiterte Generierung (RAG) mit Milvus und Haystack","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Haystack","type":1,"isActive":false},{"label":"Voraussetzungen","href":"Prerequisites","type":2,"isActive":false},{"label":"Bereiten Sie die Daten vor","href":"Prepare-the-data","type":2,"isActive":false},{"label":"Erstellen Sie die Indizierungspipeline","href":"Create-the-indexing-Pipeline","type":2,"isActive":false},{"label":"Erstellen Sie die Abruf-Pipeline","href":"Create-the-retrieval-pipeline","type":2,"isActive":false},{"label":"Erstellen Sie die RAG-Pipeline","href":"Create-the-RAG-pipeline","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/integrations/integrate_with_haystack.md b/localization/v2.4.x/site/de/integrations/integrate_with_haystack.md index 58093d0bb..d01c8af1f 100644 --- a/localization/v2.4.x/site/de/integrations/integrate_with_haystack.md +++ b/localization/v2.4.x/site/de/integrations/integrate_with_haystack.md @@ -3,7 +3,7 @@ id: integrate_with_haystack.md summary: >- Dieser Leitfaden zeigt, wie man mit Haystack und Milvus ein Retrieval-Augmented Generation (RAG) System aufbaut. -title: Retrieval-Augmented Generation (RAG) mit Milvus und Haystack +title: Retrieval-erweiterte Generierung (RAG) mit Milvus und Haystack ---

        Retrieval-Augmented Generation (RAG) with Milvus and Haystack

        Open In Colab | GitHub Repository

        This guide demonstrates how to build a Retrieval-Augmented Generation (RAG) system using Haystack and Milvus.

        The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus and then uses a generative model to generate new text based on the retrieved documents.

        Haystack is deepset's open-source Python framework for building custom applications with large language models (LLMs). Milvus is the world's most advanced open-source vector database, built for embedding similarity search and AI applications.
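
        As a preview of what this guide builds, here is the indexing half condensed from the code used later (Milvus Lite keeps everything in a local file, and OPENAI_API_KEY must be set for the embedder):

        from haystack import Pipeline
        from haystack.components.converters import MarkdownToDocument
        from haystack.components.embedders import OpenAIDocumentEmbedder
        from haystack.components.preprocessors import DocumentSplitter
        from haystack.components.writers import DocumentWriter
        from milvus_haystack import MilvusDocumentStore

        document_store = MilvusDocumentStore(
            connection_args={"uri": "./milvus.db"},  # or a Milvus server URI
            drop_old=True,
        )

        # Convert -> split -> embed -> write into Milvus
        indexing_pipeline = Pipeline()
        indexing_pipeline.add_component("converter", MarkdownToDocument())
        indexing_pipeline.add_component(
            "splitter", DocumentSplitter(split_by="sentence", split_length=2)
        )
        indexing_pipeline.add_component("embedder", OpenAIDocumentEmbedder())
        indexing_pipeline.add_component("writer", DocumentWriter(document_store))
        indexing_pipeline.connect("converter", "splitter")
        indexing_pipeline.connect("splitter", "embedder")
        indexing_pipeline.connect("embedder", "writer")
        indexing_pipeline.run({"converter": {"sources": ["./davinci.txt"]}})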

        diff --git a/localization/v2.4.x/site/de/integrations/integrate_with_hugging-face.json b/localization/v2.4.x/site/de/integrations/integrate_with_hugging-face.json index 0006bb9a4..8f83f6d7d 100644 --- a/localization/v2.4.x/site/de/integrations/integrate_with_hugging-face.json +++ b/localization/v2.4.x/site/de/integrations/integrate_with_hugging-face.json @@ -1 +1 @@ -{"codeList":["$ pip install --upgrade pymilvus transformers datasets torch\n","from datasets import load_dataset\n\n\nDATASET = \"squad\" # Name of dataset from HuggingFace Datasets\nINSERT_RATIO = 0.001 # Ratio of example dataset to be inserted\n\ndata = load_dataset(DATASET, split=\"validation\")\n# Generates a fixed subset. To generate a random subset, remove the seed.\ndata = data.train_test_split(test_size=INSERT_RATIO, seed=42)[\"test\"]\n# Clean up the data structure in the dataset.\ndata = data.map(\n lambda val: {\"answer\": val[\"answers\"][\"text\"][0]},\n remove_columns=[\"id\", \"answers\", \"context\"],\n)\n\n# View summary of example data\nprint(data)\n","from transformers import AutoTokenizer, AutoModel\nimport torch\n\nMODEL = (\n \"sentence-transformers/all-MiniLM-L6-v2\" # Name of model from HuggingFace Models\n)\nINFERENCE_BATCH_SIZE = 64 # Batch size of model inference\n\n# Load tokenizer & model from HuggingFace Hub\ntokenizer = AutoTokenizer.from_pretrained(MODEL)\nmodel = AutoModel.from_pretrained(MODEL)\n\n\ndef encode_text(batch):\n # Tokenize sentences\n encoded_input = tokenizer(\n batch[\"question\"], padding=True, truncation=True, return_tensors=\"pt\"\n )\n\n # Compute token embeddings\n with torch.no_grad():\n model_output = model(**encoded_input)\n\n # Perform pooling\n token_embeddings = model_output[0]\n attention_mask = encoded_input[\"attention_mask\"]\n input_mask_expanded = (\n attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n )\n sentence_embeddings = torch.sum(\n token_embeddings * input_mask_expanded, 1\n ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n\n # Normalize embeddings\n batch[\"question_embedding\"] = torch.nn.functional.normalize(\n sentence_embeddings, p=2, dim=1\n )\n return batch\n\n\ndata = data.map(encode_text, batched=True, batch_size=INFERENCE_BATCH_SIZE)\ndata_list = data.to_list()\n","from pymilvus import MilvusClient\n\n\nMILVUS_URI = \"./huggingface_milvus_test.db\" # Connection URI\nCOLLECTION_NAME = \"huggingface_test\" # Collection name\nDIMENSION = 384 # Embedding dimension depending on model\n\nmilvus_client = MilvusClient(MILVUS_URI)\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n collection_name=COLLECTION_NAME,\n dimension=DIMENSION,\n auto_id=True, # Enable auto id\n enable_dynamic_field=True, # Enable dynamic fields\n vector_field_name=\"question_embedding\", # Map vector field name and embedding column in dataset\n consistency_level=\"Strong\", # To enable search with latest data\n)\n","milvus_client.insert(collection_name=COLLECTION_NAME, data=data_list)\n","questions = {\n \"question\": [\n \"What is LGM?\",\n \"When did Massachusetts first mandate that children be educated in schools?\",\n ]\n}\n\n# Generate question embeddings\nquestion_embeddings = [v.tolist() for v in encode_text(questions)[\"question_embedding\"]]\n\n# Search across Milvus\nsearch_results = milvus_client.search(\n collection_name=COLLECTION_NAME,\n data=question_embeddings,\n limit=3, # How many search results to output\n 
output_fields=[\"answer\", \"question\"], # Include these fields in search results\n)\n\n# Print out results\nfor q, res in zip(questions[\"question\"], search_results):\n print(\"Question:\", q)\n for r in res:\n print(\n {\n \"answer\": r[\"entity\"][\"answer\"],\n \"score\": r[\"distance\"],\n \"original question\": r[\"entity\"][\"question\"],\n }\n )\n print(\"\\n\")\n"],"headingContent":"","anchorList":[{"label":"Fragebeantwortung mit Milvus und Hugging Face","href":"Question-Answering-Using-Milvus-and-Hugging-Face","type":1,"isActive":false},{"label":"Bevor Sie beginnen","href":"Before-you-begin","type":2,"isActive":false},{"label":"Daten vorbereiten","href":"Prepare-data","type":2,"isActive":false},{"label":"Daten einfügen","href":"Insert-data","type":2,"isActive":false},{"label":"Fragen stellen","href":"Ask-questions","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install --upgrade pymilvus transformers datasets torch\n","from datasets import load_dataset\n\n\nDATASET = \"squad\" # Name of dataset from HuggingFace Datasets\nINSERT_RATIO = 0.001 # Ratio of example dataset to be inserted\n\ndata = load_dataset(DATASET, split=\"validation\")\n# Generates a fixed subset. To generate a random subset, remove the seed.\ndata = data.train_test_split(test_size=INSERT_RATIO, seed=42)[\"test\"]\n# Clean up the data structure in the dataset.\ndata = data.map(\n lambda val: {\"answer\": val[\"answers\"][\"text\"][0]},\n remove_columns=[\"id\", \"answers\", \"context\"],\n)\n\n# View summary of example data\nprint(data)\n","from transformers import AutoTokenizer, AutoModel\nimport torch\n\nMODEL = (\n \"sentence-transformers/all-MiniLM-L6-v2\" # Name of model from HuggingFace Models\n)\nINFERENCE_BATCH_SIZE = 64 # Batch size of model inference\n\n# Load tokenizer & model from HuggingFace Hub\ntokenizer = AutoTokenizer.from_pretrained(MODEL)\nmodel = AutoModel.from_pretrained(MODEL)\n\n\ndef encode_text(batch):\n # Tokenize sentences\n encoded_input = tokenizer(\n batch[\"question\"], padding=True, truncation=True, return_tensors=\"pt\"\n )\n\n # Compute token embeddings\n with torch.no_grad():\n model_output = model(**encoded_input)\n\n # Perform pooling\n token_embeddings = model_output[0]\n attention_mask = encoded_input[\"attention_mask\"]\n input_mask_expanded = (\n attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n )\n sentence_embeddings = torch.sum(\n token_embeddings * input_mask_expanded, 1\n ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n\n # Normalize embeddings\n batch[\"question_embedding\"] = torch.nn.functional.normalize(\n sentence_embeddings, p=2, dim=1\n )\n return batch\n\n\ndata = data.map(encode_text, batched=True, batch_size=INFERENCE_BATCH_SIZE)\ndata_list = data.to_list()\n","from pymilvus import MilvusClient\n\n\nMILVUS_URI = \"./huggingface_milvus_test.db\" # Connection URI\nCOLLECTION_NAME = \"huggingface_test\" # Collection name\nDIMENSION = 384 # Embedding dimension depending on model\n\nmilvus_client = MilvusClient(MILVUS_URI)\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n collection_name=COLLECTION_NAME,\n dimension=DIMENSION,\n auto_id=True, # Enable auto id\n enable_dynamic_field=True, # Enable dynamic fields\n vector_field_name=\"question_embedding\", # Map vector field name and embedding column in dataset\n consistency_level=\"Strong\", # To enable search with latest 
data\n)\n","milvus_client.insert(collection_name=COLLECTION_NAME, data=data_list)\n","questions = {\n \"question\": [\n \"What is LGM?\",\n \"When did Massachusetts first mandate that children be educated in schools?\",\n ]\n}\n\n# Generate question embeddings\nquestion_embeddings = [v.tolist() for v in encode_text(questions)[\"question_embedding\"]]\n\n# Search across Milvus\nsearch_results = milvus_client.search(\n collection_name=COLLECTION_NAME,\n data=question_embeddings,\n limit=3, # How many search results to output\n output_fields=[\"answer\", \"question\"], # Include these fields in search results\n)\n\n# Print out results\nfor q, res in zip(questions[\"question\"], search_results):\n print(\"Question:\", q)\n for r in res:\n print(\n {\n \"answer\": r[\"entity\"][\"answer\"],\n \"score\": r[\"distance\"],\n \"original question\": r[\"entity\"][\"question\"],\n }\n )\n print(\"\\n\")\n"],"headingContent":"Question Answering Using Milvus and Hugging Face","anchorList":[{"label":"Fragebeantwortung mit Milvus und Hugging Face","href":"Question-Answering-Using-Milvus-and-Hugging-Face","type":1,"isActive":false},{"label":"Bevor Sie beginnen","href":"Before-you-begin","type":2,"isActive":false},{"label":"Daten vorbereiten","href":"Prepare-data","type":2,"isActive":false},{"label":"Daten einfügen","href":"Insert-data","type":2,"isActive":false},{"label":"Fragen stellen","href":"Ask-questions","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/integrations/integrate_with_hugging-face.md b/localization/v2.4.x/site/de/integrations/integrate_with_hugging-face.md index c865198c1..ceabd691a 100644 --- a/localization/v2.4.x/site/de/integrations/integrate_with_hugging-face.md +++ b/localization/v2.4.x/site/de/integrations/integrate_with_hugging-face.md @@ -5,7 +5,7 @@ summary: >- das Hugging Face als Datenlader und Einbettungsgenerator für die Datenverarbeitung und Milvus als Vektordatenbank für die semantische Suche verwendet. -title: Beantwortung von Fragen mit Milvus und Hugging Face +title: Fragebeantwortung mit Milvus und Hugging Face ---

        Question Answering Using Milvus and Hugging Face

        Open In Colab | GitHub Repository

        A question answering system based on semantic search works by finding the most similar question in a dataset of question-answer pairs for a given query question. Once the most similar question is identified, the corresponding answer from the dataset is returned as the answer to the query. This approach relies on semantic similarity measures to determine the similarity between questions and to retrieve relevant answers.

        This tutorial shows how to build a question answering system that uses Hugging Face as the data loader and embedding generator for data processing, and Milvus as the vector database for semantic search.
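
        Conceptually the whole system is: embed the query, search the collection of question embeddings, and return the stored answer. Here is a minimal self-contained sketch of that loop, using pymilvus's built-in DefaultEmbeddingFunction and a hypothetical toy QA pair instead of the Hugging Face model and SQuAD data configured later:

        from pymilvus import MilvusClient, model

        ef = model.DefaultEmbeddingFunction()    # small local embedding model
        client = MilvusClient("./qa_sketch.db")  # Milvus Lite, local file

        client.create_collection(
            collection_name="qa_sketch",
            dimension=ef.dim,
            auto_id=True,
            enable_dynamic_field=True,
        )

        # Index one toy question-answer pair (hypothetical data)
        question, answer = "What is LGM?", "Last Glacial Maximum"
        client.insert(
            collection_name="qa_sketch",
            data=[{"vector": ef.encode_documents([question])[0],
                   "question": question, "answer": answer}],
        )

        # Answer a new query by finding the most similar stored question
        res = client.search(
            collection_name="qa_sketch",
            data=ef.encode_queries(["Define LGM"]),
            limit=1,
            output_fields=["answer", "question"],
        )
        print(res[0][0]["entity"]["answer"])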

        Before you begin

        Open In Colab | GitHub Repository

        This guide demonstrates how to use Jina AI embeddings and Milvus to perform similarity search and retrieval tasks.

        Who is Jina AI?

        Founded in 2020 in Berlin, Jina AI is a pioneering AI company focused on revolutionizing the future of artificial intelligence through its search foundation. Specializing in multimodal AI, Jina AI aims to empower businesses and developers to harness the power of multimodal data for value creation and cost savings through its integrated suite of components, including embeddings, rerankers, prompt ops, and core infrastructure. Jina AI's cutting-edge embeddings boast top-tier performance, featuring a model with an 8192-token length that is ideal for comprehensive data representation. With multilingual support and seamless integration with leading platforms such as OpenAI, these embeddings facilitate cross-lingual applications.

        Milvus and Jina AI's Embedding

        Storing and searching these embeddings efficiently requires specialized infrastructure designed for the purpose. Milvus is a widely known, advanced open-source vector database capable of handling large volumes of vector data. Milvus enables fast and accurate vector (embedding) search across a wide variety of metrics. Its scalability allows seamless handling of massive volumes of image data, ensuring high-performance search even as datasets grow.

        Examples

        Jina AI's core embedding model excels at detailed text understanding, making it ideal for semantic search, content classification, advanced sentiment analysis, text summarization, and personalized recommendation systems.

        from pymilvus.model.dense import JinaEmbeddingFunction
         
         jina_api_key = "<YOUR_JINA_API_KEY>"
        -ef = JinaEmbeddingFunction("jina-embeddings-v2-base-en", jina_api_key)
        +ef = JinaEmbeddingFunction(
        +    "jina-embeddings-v3", 
        +    jina_api_key,
        +    task="retrieval.passage",
        +    dimensions=1024
        +)
         
         query = "what is information retrieval?"
         doc = "Information retrieval is the process of finding relevant information from a large collection of data or documents."
         
        -qvecs = ef.encode_queries([query])
        -dvecs = ef.encode_documents([doc])
        +qvecs = ef.encode_queries([query])  # This method uses `retrieval.query` as the task
        +dvecs = ef.encode_documents([doc])  # This method uses `retrieval.passage` as the task
         

        Bilingual Embeddings

        With the powerful vector embedding function, we can combine the embeddings obtained from Jina AI models with the Milvus Lite vector database to perform semantic search.

        from pymilvus.model.dense import JinaEmbeddingFunction
         from pymilvus import MilvusClient
         
         jina_api_key = "<YOUR_JINA_API_KEY>"
        -ef = JinaEmbeddingFunction("jina-embeddings-v2-base-en", jina_api_key)
        -DIMENSION = 768  # size of jina-embeddings-v2-base-en
        +DIMENSION = 1024  # `jina-embeddings-v3` supports flexible embedding sizes (32, 64, 128, 256, 512, 768, 1024), allowing for truncating embeddings to fit your application. 
        +ef = JinaEmbeddingFunction(
        +    "jina-embeddings-v3", 
        +    jina_api_key,
        +    task="retrieval.passage",
        +    dimensions=DIMENSION,
        +)
        +
         
         doc = [
             "In 1950, Alan Turing published his seminal paper, 'Computing Machinery and Intelligence,' proposing the Turing Test as a criterion of intelligence, a foundational concept in the philosophy and development of artificial intelligence.",
        @@ -215,7 +227,7 @@ doc = [
             "The invention of the Logic Theorist by Allen Newell, Herbert A. Simon, and Cliff Shaw in 1955 marked the creation of the first true AI program, which was capable of solving logic problems, akin to proving mathematical theorems.",
         ]
         
        -dvecs = ef.encode_documents(doc)
        +dvecs = ef.encode_documents(doc) # This method uses `retrieval.passage` as the task
         
         data = [
             {"id": i, "vector": dvecs[i], "text": doc[i], "subject": "history"}
        @@ -242,7 +254,7 @@ res = milvus_client.insert(collection_name=COLLECTION_NAME, data=data)
         

      With all the data in the Milvus vector database, we can now perform a semantic search by generating a vector embedding for the query and running a vector search.

      queries = "What event in 1956 marked the official birth of artificial intelligence as a discipline?"
      -qvecs = ef.encode_queries([queries])
      +qvecs = ef.encode_queries([queries]) # This method uses `retrieval.query` as the task
       
       res = milvus_client.search(
           collection_name=COLLECTION_NAME,  # target collection
      diff --git a/localization/v2.4.x/site/de/integrations/integrate_with_llamaindex.json b/localization/v2.4.x/site/de/integrations/integrate_with_llamaindex.json
      index 4760c7286..7bfd0617d 100644
      --- a/localization/v2.4.x/site/de/integrations/integrate_with_llamaindex.json
      +++ b/localization/v2.4.x/site/de/integrations/integrate_with_llamaindex.json
      @@ -1 +1 @@
      -{"codeList":["$ pip install pymilvus>=2.4.2\n","$ pip install llama-index-vector-stores-milvus\n","$ pip install llama-index\n","import openai\n\nopenai.api_key = \"sk-***********\"\n","! mkdir -p 'data/'\n! wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham_essay.txt'\n! wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/10k/uber_2021.pdf' -O 'data/uber_2021.pdf'\n","from llama_index.core import SimpleDirectoryReader\n\n# load documents\ndocuments = SimpleDirectoryReader(\n    input_files=[\"./data/paul_graham_essay.txt\"]\n).load_data()\n\nprint(\"Document ID:\", documents[0].doc_id)\n","# Create an index over the documents\nfrom llama_index.core import VectorStoreIndex, StorageContext\nfrom llama_index.vector_stores.milvus import MilvusVectorStore\n\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", dim=1536, overwrite=True)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(documents, storage_context=storage_context)\n","query_engine = index.as_query_engine()\nres = query_engine.query(\"What did the author learn?\")\nprint(res)\n","res = query_engine.query(\"What challenges did the disease pose for the author?\")\nprint(res)\n","from llama_index.core import Document\n\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", dim=1536, overwrite=True)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(\n    [Document(text=\"The number that is being searched for is ten.\")],\n    storage_context,\n)\nquery_engine = index.as_query_engine()\nres = query_engine.query(\"Who is the author?\")\nprint(res)\n","del index, vector_store, storage_context, query_engine\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", overwrite=False)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(documents, storage_context=storage_context)\nquery_engine = index.as_query_engine()\nres = query_engine.query(\"What is the number?\")\nprint(res)\n","res = query_engine.query(\"Who is the author?\")\nprint(res)\n","from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters\n\n# Load all the two documents loaded before\ndocuments_all = SimpleDirectoryReader(\"./data/\").load_data()\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", dim=1536, overwrite=True)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(documents_all, storage_context)\n","filters = MetadataFilters(\n    filters=[ExactMatchFilter(key=\"file_name\", value=\"uber_2021.pdf\")]\n)\nquery_engine = index.as_query_engine(filters=filters)\nres = query_engine.query(\"What challenges did the disease pose for the author?\")\n\nprint(res)\n","filters = MetadataFilters(\n    filters=[ExactMatchFilter(key=\"file_name\", value=\"paul_graham_essay.txt\")]\n)\nquery_engine = index.as_query_engine(filters=filters)\nres = query_engine.query(\"What challenges did the disease pose for the author?\")\n\nprint(res)\n"],"headingContent":"","anchorList":[{"label":"Retrieval-erweiterte Generierung (RAG) mit Milvus und LlamaIndex","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-LlamaIndex","type":1,"isActive":false},{"label":"Bevor Sie beginnen","href":"Before-you-begin","type":2,"isActive":false},{"label":"Erste 
Schritte","href":"Getting-Started","type":2,"isActive":false},{"label":"Filtern von Metadaten","href":"Metadata-filtering","type":2,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":["$ pip install pymilvus>=2.4.2\n","$ pip install llama-index-vector-stores-milvus\n","$ pip install llama-index\n","import openai\n\nopenai.api_key = \"sk-***********\"\n","! mkdir -p 'data/'\n! wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham_essay.txt'\n! wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/10k/uber_2021.pdf' -O 'data/uber_2021.pdf'\n","from llama_index.core import SimpleDirectoryReader\n\n# load documents\ndocuments = SimpleDirectoryReader(\n    input_files=[\"./data/paul_graham_essay.txt\"]\n).load_data()\n\nprint(\"Document ID:\", documents[0].doc_id)\n","# Create an index over the documents\nfrom llama_index.core import VectorStoreIndex, StorageContext\nfrom llama_index.vector_stores.milvus import MilvusVectorStore\n\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", dim=1536, overwrite=True)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(documents, storage_context=storage_context)\n","query_engine = index.as_query_engine()\nres = query_engine.query(\"What did the author learn?\")\nprint(res)\n","res = query_engine.query(\"What challenges did the disease pose for the author?\")\nprint(res)\n","from llama_index.core import Document\n\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", dim=1536, overwrite=True)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(\n    [Document(text=\"The number that is being searched for is ten.\")],\n    storage_context,\n)\nquery_engine = index.as_query_engine()\nres = query_engine.query(\"Who is the author?\")\nprint(res)\n","del index, vector_store, storage_context, query_engine\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", overwrite=False)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(documents, storage_context=storage_context)\nquery_engine = index.as_query_engine()\nres = query_engine.query(\"What is the number?\")\nprint(res)\n","res = query_engine.query(\"Who is the author?\")\nprint(res)\n","from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters\n\n# Load all the two documents loaded before\ndocuments_all = SimpleDirectoryReader(\"./data/\").load_data()\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", dim=1536, overwrite=True)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(documents_all, storage_context)\n","filters = MetadataFilters(\n    filters=[ExactMatchFilter(key=\"file_name\", value=\"uber_2021.pdf\")]\n)\nquery_engine = index.as_query_engine(filters=filters)\nres = query_engine.query(\"What challenges did the disease pose for the author?\")\n\nprint(res)\n","filters = MetadataFilters(\n    filters=[ExactMatchFilter(key=\"file_name\", value=\"paul_graham_essay.txt\")]\n)\nquery_engine = index.as_query_engine(filters=filters)\nres = query_engine.query(\"What challenges did the disease pose for the author?\")\n\nprint(res)\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and LlamaIndex","anchorList":[{"label":"Retrieval-erweiterte Generierung (RAG) mit Milvus und LlamaIndex","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-LlamaIndex","type":1,"isActive":false},{"label":"Bevor Sie 
beginnen","href":"Before-you-begin","type":2,"isActive":false},{"label":"Erste Schritte","href":"Getting-Started","type":2,"isActive":false},{"label":"Filtern von Metadaten","href":"Metadata-filtering","type":2,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/de/integrations/integrate_with_llamaindex.md b/localization/v2.4.x/site/de/integrations/integrate_with_llamaindex.md
      index b8c9c2db3..21c77a928 100644
      --- a/localization/v2.4.x/site/de/integrations/integrate_with_llamaindex.md
      +++ b/localization/v2.4.x/site/de/integrations/integrate_with_llamaindex.md
      @@ -3,7 +3,7 @@ id: integrate_with_llamaindex.md
       summary: >-
         Dieser Leitfaden zeigt, wie man ein Retrieval-Augmented Generation (RAG)
         System mit LlamaIndex und Milvus aufbaut.
      -title: Retrieval-Augmented Generation (RAG) mit Milvus und LlamaIndex
      +title: Retrieval-erweiterte Generierung (RAG) mit Milvus und LlamaIndex
       ---
       

      Retrieval-Augmented Generation (RAG) with Milvus and LlamaIndex

      Open In Colab | GitHub Repository

      This guide demonstrates how to build a Retrieval-Augmented Generation (RAG) system using LlamaIndex and Milvus.

      The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus and then uses a generative model to generate new text based on the retrieved documents.

      LlamaIndex is a simple, flexible data framework for connecting custom data sources to large language models (LLMs). Milvus is the world's most advanced open-source vector database, built for embedding similarity search and AI applications.

      In this notebook we show a quick demo of using the MilvusVectorStore.
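
      Condensed from the code used later in the guide, the core MilvusVectorStore pattern looks like this:

      from llama_index.core import SimpleDirectoryReader, StorageContext, VectorStoreIndex
      from llama_index.vector_stores.milvus import MilvusVectorStore

      # Load documents and index them into a local Milvus Lite file
      documents = SimpleDirectoryReader(
          input_files=["./data/paul_graham_essay.txt"]
      ).load_data()
      vector_store = MilvusVectorStore(uri="./milvus_demo.db", dim=1536, overwrite=True)
      storage_context = StorageContext.from_defaults(vector_store=vector_store)
      index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)

      # Ask questions over the indexed documents
      query_engine = index.as_query_engine()
      print(query_engine.query("What did the author learn?"))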

      Before you begin

      Open In Colab | GitHub Repository

      Dieser Leitfaden zeigt, wie die Einbettungs-API von OpenAI mit der Vektordatenbank Milvus verwendet werden kann, um eine semantische Suche im Text durchzuführen.
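Eine unverbindliche Minimal-Skizze dieses Ablaufs (Modellname, Datei- und Sammlungsname sind hier reine Annahmen dieser Skizze):

from openai import OpenAI
from pymilvus import MilvusClient

openai_client = OpenAI()  # reads OPENAI_API_KEY from the environment
docs = ["Artificial intelligence was founded as an academic discipline in 1956."]

# Embed the texts; the model name is an assumption for this sketch
resp = openai_client.embeddings.create(input=docs, model="text-embedding-3-small")
data = [{"id": i, "vector": d.embedding, "text": docs[i]} for i, d in enumerate(resp.data)]

# Store the vectors in a local Milvus Lite file and run a semantic search
client = MilvusClient(uri="./milvus_openai_demo.db")
client.create_collection(collection_name="demo_collection", dimension=len(data[0]["vector"]))
client.insert(collection_name="demo_collection", data=data)

query = openai_client.embeddings.create(input=["When was AI founded?"], model="text-embedding-3-small")
print(client.search(collection_name="demo_collection", data=[query.data[0].embedding], limit=1, output_fields=["text"]))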

      Erste Schritte

-Open In Colab
+Open In Colab GitHub Repository

      Dieser Leitfaden zeigt, wie man Ragas verwendet, um eine auf Milvus aufbauende Retrieval-Augmented Generation (RAG) Pipeline zu evaluieren.

      Das RAG-System kombiniert ein Retrievalsystem mit einem generativen Modell, um neuen Text auf der Grundlage einer vorgegebenen Aufforderung zu generieren. Das System ruft zunächst relevante Dokumente aus einem Korpus mit Milvus ab und verwendet dann ein generatives Modell, um neuen Text auf der Grundlage der abgerufenen Dokumente zu erzeugen.

      Ragas ist ein Framework, das Sie bei der Bewertung Ihrer RAG-Pipelines unterstützt. Es gibt bereits Tools und Frameworks, die Ihnen bei der Erstellung dieser Pipelines helfen, aber die Bewertung und Quantifizierung der Leistung Ihrer Pipelines kann schwierig sein. An dieser Stelle kommt Ragas (RAG Assessment) ins Spiel.
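Eine kleine, unverbindliche Skizze, wie eine solche Bewertung mit Ragas aussehen kann (Spaltennamen und Metriken folgen der Ragas-Dokumentation; Version und Details können abweichen, und zur Laufzeit wird ein LLM-API-Schlüssel benötigt):

from datasets import Dataset
from ragas import evaluate
from ragas.metrics import answer_relevancy, faithfulness

# One evaluation sample: question, retrieved contexts (e.g. from Milvus), generated answer
ds = Dataset.from_dict({
    "question": ["What is Milvus?"],
    "contexts": [["Milvus is an open-source vector database built for AI applications."]],
    "answer": ["Milvus is an open-source vector database."],
})

# Scores the pipeline output, e.g. how faithful the answer is to the retrieved contexts
print(evaluate(ds, metrics=[faithfulness, answer_relevancy]))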

      diff --git a/localization/v2.4.x/site/de/integrations/integrate_with_vanna.json b/localization/v2.4.x/site/de/integrations/integrate_with_vanna.json index 8f0e627b3..d883a27bd 100644 --- a/localization/v2.4.x/site/de/integrations/integrate_with_vanna.json +++ b/localization/v2.4.x/site/de/integrations/integrate_with_vanna.json @@ -1 +1 @@ -{"codeList":["$ pip install \"vanna[milvus,openai]\"\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","from pymilvus import MilvusClient, model\nfrom vanna.milvus import Milvus_VectorStore\nfrom vanna.openai import OpenAI_Chat\n\n\nclass VannaMilvus(Milvus_VectorStore, OpenAI_Chat):\n def __init__(self, config=None):\n Milvus_VectorStore.__init__(self, config=config)\n OpenAI_Chat.__init__(self, config=config)\n","milvus_uri = \"./milvus_vanna.db\"\n\nmilvus_client = MilvusClient(uri=milvus_uri)\n\nvn_milvus = VannaMilvus(\n config={\n \"api_key\": os.getenv(\"OPENAI_API_KEY\"),\n \"model\": \"gpt-3.5-turbo\",\n \"milvus_client\": milvus_client,\n \"embedding_function\": model.DefaultEmbeddingFunction(),\n \"n_results\": 2, # The number of results to return from Milvus semantic search.\n }\n)\n","import sqlite3\n\nsqlite_path = \"./my-database.sqlite\"\nsql_connect = sqlite3.connect(sqlite_path)\nc = sql_connect.cursor()\n\ninit_sqls = \"\"\"\nCREATE TABLE IF NOT EXISTS Customer (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Name TEXT NOT NULL,\n Company TEXT NOT NULL,\n City TEXT NOT NULL,\n Phone TEXT NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS Company (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Name TEXT NOT NULL,\n Industry TEXT NOT NULL,\n Location TEXT NOT NULL,\n EmployeeCount INTEGER NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS User (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Username TEXT NOT NULL UNIQUE,\n Email TEXT NOT NULL UNIQUE\n);\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('John Doe', 'ABC Corp', 'New York', '123-456-7890');\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('Jane Smith', 'XYZ Inc', 'Los Angeles', '098-765-4321');\n\nINSERT INTO Company (Name, Industry, Location, EmployeeCount)\nVALUES ('ABC Corp', 'cutting-edge technology', 'New York', 100);\n\nINSERT INTO User (Username, Email)\nVALUES ('johndoe123', 'johndoe123@example.com');\n\"\"\"\n\nfor sql in init_sqls.split(\";\"):\n c.execute(sql)\n\nsql_connect.commit()\n\n# Connect to the SQLite database\nvn_milvus.connect_to_sqlite(sqlite_path)\n","# If there exists training data, we should remove it before training.\nexisting_training_data = vn_milvus.get_training_data()\nif len(existing_training_data) > 0:\n for _, training_data in existing_training_data.iterrows():\n vn_milvus.remove_training_data(training_data[\"id\"])\n\n# Get the DDL of the SQLite database\ndf_ddl = vn_milvus.run_sql(\"SELECT type, sql FROM sqlite_master WHERE sql is not null\")\n\n# Train the model on the DDL data\nfor ddl in df_ddl[\"sql\"].to_list():\n vn_milvus.train(ddl=ddl)\n","# Add documentation about your business terminology or definitions.\nvn_milvus.train(\n documentation=\"ABC Corp specializes in cutting-edge technology solutions and innovation.\"\n)\nvn_milvus.train(\n documentation=\"XYZ Inc is a global leader in manufacturing and supply chain management.\"\n)\n\n# You can also add SQL queries to your training data.\nvn_milvus.train(sql=\"SELECT * FROM Customer WHERE Name = 'John Doe'\")\n","training_data = vn_milvus.get_training_data()\ntraining_data\n","sql = vn_milvus.generate_sql(\"what is the phone number of John 
Doe?\")\nvn_milvus.run_sql(sql)\n","sql = vn_milvus.generate_sql(\"which customer works for a manufacturing corporation?\")\nvn_milvus.run_sql(sql)\n","sql_connect.close()\nmilvus_client.close()\n\nos.remove(sqlite_path)\nif os.path.exists(milvus_uri):\n os.remove(milvus_uri)\n"],"headingContent":"","anchorList":[{"label":"SQL schreiben mit Vanna und Milvus","href":"Write-SQL-with-Vanna-and-Milvus","type":1,"isActive":false},{"label":"Voraussetzungen","href":"Prerequisites","type":2,"isActive":false},{"label":"Vorbereitung der Daten","href":"Data-preparation","type":2,"isActive":false},{"label":"Trainieren mit Daten","href":"Train-with-data","type":2,"isActive":false},{"label":"SQLs generieren und ausführen","href":"Generate-SQLs-and-execute-them","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install \"vanna[milvus,openai]\"\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","from pymilvus import MilvusClient, model\nfrom vanna.milvus import Milvus_VectorStore\nfrom vanna.openai import OpenAI_Chat\n\n\nclass VannaMilvus(Milvus_VectorStore, OpenAI_Chat):\n def __init__(self, config=None):\n Milvus_VectorStore.__init__(self, config=config)\n OpenAI_Chat.__init__(self, config=config)\n","milvus_uri = \"./milvus_vanna.db\"\n\nmilvus_client = MilvusClient(uri=milvus_uri)\n\nvn_milvus = VannaMilvus(\n config={\n \"api_key\": os.getenv(\"OPENAI_API_KEY\"),\n \"model\": \"gpt-3.5-turbo\",\n \"milvus_client\": milvus_client,\n \"embedding_function\": model.DefaultEmbeddingFunction(),\n \"n_results\": 2, # The number of results to return from Milvus semantic search.\n }\n)\n","import sqlite3\n\nsqlite_path = \"./my-database.sqlite\"\nsql_connect = sqlite3.connect(sqlite_path)\nc = sql_connect.cursor()\n\ninit_sqls = \"\"\"\nCREATE TABLE IF NOT EXISTS Customer (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Name TEXT NOT NULL,\n Company TEXT NOT NULL,\n City TEXT NOT NULL,\n Phone TEXT NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS Company (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Name TEXT NOT NULL,\n Industry TEXT NOT NULL,\n Location TEXT NOT NULL,\n EmployeeCount INTEGER NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS User (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Username TEXT NOT NULL UNIQUE,\n Email TEXT NOT NULL UNIQUE\n);\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('John Doe', 'ABC Corp', 'New York', '123-456-7890');\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('Jane Smith', 'XYZ Inc', 'Los Angeles', '098-765-4321');\n\nINSERT INTO Company (Name, Industry, Location, EmployeeCount)\nVALUES ('ABC Corp', 'cutting-edge technology', 'New York', 100);\n\nINSERT INTO User (Username, Email)\nVALUES ('johndoe123', 'johndoe123@example.com');\n\"\"\"\n\nfor sql in init_sqls.split(\";\"):\n c.execute(sql)\n\nsql_connect.commit()\n\n# Connect to the SQLite database\nvn_milvus.connect_to_sqlite(sqlite_path)\n","# If there exists training data, we should remove it before training.\nexisting_training_data = vn_milvus.get_training_data()\nif len(existing_training_data) > 0:\n for _, training_data in existing_training_data.iterrows():\n vn_milvus.remove_training_data(training_data[\"id\"])\n\n# Get the DDL of the SQLite database\ndf_ddl = vn_milvus.run_sql(\"SELECT type, sql FROM sqlite_master WHERE sql is not null\")\n\n# Train the model on the DDL data\nfor ddl in df_ddl[\"sql\"].to_list():\n vn_milvus.train(ddl=ddl)\n","# Add documentation about your business terminology or definitions.\nvn_milvus.train(\n documentation=\"ABC 
Corp specializes in cutting-edge technology solutions and innovation.\"\n)\nvn_milvus.train(\n documentation=\"XYZ Inc is a global leader in manufacturing and supply chain management.\"\n)\n\n# You can also add SQL queries to your training data.\nvn_milvus.train(sql=\"SELECT * FROM Customer WHERE Name = 'John Doe'\")\n","training_data = vn_milvus.get_training_data()\ntraining_data\n","sql = vn_milvus.generate_sql(\"what is the phone number of John Doe?\")\nvn_milvus.run_sql(sql)\n","sql = vn_milvus.generate_sql(\"which customer works for a manufacturing corporation?\")\nvn_milvus.run_sql(sql)\n","sql_connect.close()\nmilvus_client.close()\n\nos.remove(sqlite_path)\nif os.path.exists(milvus_uri):\n os.remove(milvus_uri)\n"],"headingContent":"Write SQL with Vanna and Milvus","anchorList":[{"label":"SQL schreiben mit Vanna und Milvus","href":"Write-SQL-with-Vanna-and-Milvus","type":1,"isActive":false},{"label":"Voraussetzungen","href":"Prerequisites","type":2,"isActive":false},{"label":"Vorbereitung der Daten","href":"Data-preparation","type":2,"isActive":false},{"label":"Trainieren mit Daten","href":"Train-with-data","type":2,"isActive":false},{"label":"SQLs generieren und ausführen","href":"Generate-SQLs-and-execute-them","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/integrations/integrate_with_vanna.md b/localization/v2.4.x/site/de/integrations/integrate_with_vanna.md index 5a4adf13a..b3de6bf80 100644 --- a/localization/v2.4.x/site/de/integrations/integrate_with_vanna.md +++ b/localization/v2.4.x/site/de/integrations/integrate_with_vanna.md @@ -20,8 +20,10 @@ title: SQL schreiben mit Vanna und Milvus d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

-Vanna ist ein Open-Source-Python-RAG-Framework (Retrieval-Augmented Generation) für die SQL-Generierung und verwandte Funktionen. Milvus ist die fortschrittlichste Open-Source-Vektordatenbank der Welt, die für die Einbettung von Ähnlichkeitssuche und KI-Anwendungen entwickelt wurde.
-Vanna funktioniert in zwei einfachen Schritten: Trainieren Sie ein RAG-"Modell" auf Ihren Daten, und stellen Sie dann Fragen, die SQL-Abfragen zurückgeben, die auf Ihrer Datenbank ausgeführt werden können. Dieser Leitfaden zeigt Ihnen, wie Sie mit Vanna SQL-Abfragen auf der Grundlage Ihrer in einer Datenbank gespeicherten Daten erstellen und ausführen können.
+Open In Colab GitHub Repository
+Vanna ist ein Open-Source-Python-RAG-Framework (Retrieval-Augmented Generation) für die SQL-Generierung und verwandte Funktionen. Milvus ist die fortschrittlichste Open-Source-Vektordatenbank der Welt, die für die Einbettung von Ähnlichkeitssuche und KI-Anwendungen entwickelt wurde.
+Vanna funktioniert in zwei einfachen Schritten: Trainieren Sie ein RAG-"Modell" auf Ihren Daten, und stellen Sie dann Fragen, die SQL-Abfragen zurückgeben, die in Ihrer Datenbank ausgeführt werden können. Dieser Leitfaden zeigt Ihnen, wie Sie mit Vanna SQL-Abfragen auf der Grundlage Ihrer in einer Datenbank gespeicherten Daten erstellen und ausführen können.
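Die beiden Schritte lassen sich mit einem wie im Begleitcode initialisierten VannaMilvus-Objekt (hier vn_milvus) etwa so skizzieren; die konkreten Trainingsdaten sind nur Platzhalter:

# Step 1: train the RAG "model" on your data (DDL, documentation, example SQL)
vn_milvus.train(ddl="CREATE TABLE Customer (ID INTEGER PRIMARY KEY, Name TEXT, Phone TEXT)")
vn_milvus.train(documentation="ABC Corp specializes in cutting-edge technology solutions.")
vn_milvus.train(sql="SELECT * FROM Customer WHERE Name = 'John Doe'")

# Step 2: ask a question in natural language and run the generated SQL
sql = vn_milvus.generate_sql("what is the phone number of John Doe?")
vn_milvus.run_sql(sql)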

      Voraussetzungen

-Wir können das Modell mit den DDL-Daten der SQLite-Datenbank trainieren. Wir holen uns die DDL-Daten und füttern damit die Funktion train.
+Wir können das Modell mit den DDL-Daten der SQLite-Datenbank trainieren. Wir erhalten die DDL-Daten und füttern damit die Funktion train.

      # If there exists training data, we should remove it before training.
       existing_training_data = vn_milvus.get_training_data()
       if len(existing_training_data) > 0:
      diff --git a/localization/v2.4.x/site/de/integrations/integrate_with_voyageai.json b/localization/v2.4.x/site/de/integrations/integrate_with_voyageai.json
      index 8afd2a266..4b78f56a7 100644
      --- a/localization/v2.4.x/site/de/integrations/integrate_with_voyageai.json
      +++ b/localization/v2.4.x/site/de/integrations/integrate_with_voyageai.json
      @@ -1 +1 @@
      -{"codeList":["$ pip install --upgrade voyageai pymilvus\n","import voyageai\nfrom pymilvus import MilvusClient\n\nMODEL_NAME = \"voyage-law-2\"  # Which model to use, please check https://docs.voyageai.com/docs/embeddings for available models\nDIMENSION = 1024  # Dimension of vector embedding\n\n# Connect to VoyageAI with API Key.\nvoyage_client = voyageai.Client(api_key=\"\")\n\ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = voyage_client.embed(texts=docs, model=MODEL_NAME, truncation=False).embeddings\n\n# Prepare data to be stored in Milvus vector database.\n# We can store the id, vector representation, raw text and labels such as \"subject\" in this case in Milvus.\ndata = [\n    {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n    for i in range(len(docs))\n]\n\n\n# Connect to Milvus, all data is stored in a local file named \"milvus_voyage_demo.db\"\n# in current directory. You can also connect to a remote Milvus server following this\n# instruction: https://milvus.io/docs/install_standalone-docker.md.\nmilvus_client = MilvusClient(uri=\"milvus_voyage_demo.db\")\nCOLLECTION_NAME = \"demo_collection\"  # Milvus collection name\n# Create a collection to store the vectors and text.\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\n# Insert all data into Milvus vector database.\nres = milvus_client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res[\"insert_count\"])\n","queries = [\"When was artificial intelligence founded?\"]\n\nquery_vectors = voyage_client.embed(\n    texts=queries, model=MODEL_NAME, truncation=False\n).embeddings\n\nres = milvus_client.search(\n    collection_name=COLLECTION_NAME,  # target collection\n    data=query_vectors,  # query vectors\n    limit=2,  # number of returned entities\n    output_fields=[\"text\", \"subject\"],  # specifies fields to be returned\n)\n\nfor q in queries:\n    print(\"Query:\", q)\n    for result in res:\n        print(result)\n    print(\"\\n\")\n"],"headingContent":"","anchorList":[{"label":"Semantische Suche mit Milvus und VoyageAI","href":"Semantic-Search-with-Milvus-and-VoyageAI","type":1,"isActive":false},{"label":"Erste Schritte","href":"Getting-started","type":2,"isActive":false},{"label":"Durchsuchen von Buchtiteln mit VoyageAI & Milvus","href":"Searching-book-titles-with-VoyageAI--Milvus","type":2,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":["$ pip install --upgrade voyageai pymilvus\n","import voyageai\nfrom pymilvus import MilvusClient\n\nMODEL_NAME = \"voyage-law-2\"  # Which model to use, please check https://docs.voyageai.com/docs/embeddings for available models\nDIMENSION = 1024  # Dimension of vector embedding\n\n# Connect to VoyageAI with API Key.\nvoyage_client = voyageai.Client(api_key=\"\")\n\ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = voyage_client.embed(texts=docs, model=MODEL_NAME, truncation=False).embeddings\n\n# Prepare data to be stored in Milvus vector database.\n# We can store the id, vector representation, raw text and labels such as \"subject\" in this case in Milvus.\ndata = [\n    {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n    for i in range(len(docs))\n]\n\n\n# Connect to Milvus, all data is stored in a local file named \"milvus_voyage_demo.db\"\n# in current directory. You can also connect to a remote Milvus server following this\n# instruction: https://milvus.io/docs/install_standalone-docker.md.\nmilvus_client = MilvusClient(uri=\"milvus_voyage_demo.db\")\nCOLLECTION_NAME = \"demo_collection\"  # Milvus collection name\n# Create a collection to store the vectors and text.\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\n# Insert all data into Milvus vector database.\nres = milvus_client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res[\"insert_count\"])\n","queries = [\"When was artificial intelligence founded?\"]\n\nquery_vectors = voyage_client.embed(\n    texts=queries, model=MODEL_NAME, truncation=False\n).embeddings\n\nres = milvus_client.search(\n    collection_name=COLLECTION_NAME,  # target collection\n    data=query_vectors,  # query vectors\n    limit=2,  # number of returned entities\n    output_fields=[\"text\", \"subject\"],  # specifies fields to be returned\n)\n\nfor q in queries:\n    print(\"Query:\", q)\n    for result in res:\n        print(result)\n    print(\"\\n\")\n"],"headingContent":"Semantic Search with Milvus and VoyageAI","anchorList":[{"label":"Semantische Suche mit Milvus und VoyageAI","href":"Semantic-Search-with-Milvus-and-VoyageAI","type":1,"isActive":false},{"label":"Erste Schritte","href":"Getting-started","type":2,"isActive":false},{"label":"Durchsuchen von Buchtiteln mit VoyageAI & Milvus","href":"Searching-book-titles-with-VoyageAI--Milvus","type":2,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/de/integrations/integrate_with_voyageai.md b/localization/v2.4.x/site/de/integrations/integrate_with_voyageai.md
      index aaae31e28..970e52430 100644
      --- a/localization/v2.4.x/site/de/integrations/integrate_with_voyageai.md
      +++ b/localization/v2.4.x/site/de/integrations/integrate_with_voyageai.md
      @@ -20,7 +20,8 @@ summary: >-
                 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
               >
             
-Open In Colab
+Open In Colab GitHub Repository

      Dieser Leitfaden zeigt, wie die Einbettungs-API von VoyageAI mit der Vektordatenbank Milvus verwendet werden kann, um eine semantische Suche im Text durchzuführen.
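Der Kern des Leitfadens in einer minimalen Skizze, entnommen aus dem Begleitcode oben (der API-Schlüssel ist einzusetzen; die Sammlung demo_collection wird dort zuvor befüllt):

import voyageai
from pymilvus import MilvusClient

voyage_client = voyageai.Client(api_key="<IHR_API_SCHLUESSEL>")
milvus_client = MilvusClient(uri="milvus_voyage_demo.db")

# Embed the query with the same model that produced the stored document vectors
query_vectors = voyage_client.embed(
    texts=["When was artificial intelligence founded?"],
    model="voyage-law-2",
    truncation=False,
).embeddings

# Let Milvus return the two most similar stored texts
res = milvus_client.search(
    collection_name="demo_collection",
    data=query_vectors,
    limit=2,
    output_fields=["text", "subject"],
)
print(res)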

      Erste Schritte

      Auf dieser Seite finden Sie eine Liste von Tutorials, die Ihnen die Interaktion mit Milvus und Tools von Drittanbietern erleichtern.

@@ -36,7 +36,7 @@ title: Übersicht über Integrationen
@@ -53,5 +53,13 @@ title: Übersicht über Integrationen

-Lehrgang | Anwendungsfall | Partner oder Stacks
+Lernprogramm | Anwendungsfall | Partner oder Stacks
RAG mit Milvus und LlamaIndex | RAG | Milvus, LlamaIndex
Filmsuche mit Milvus und SentenceTransformers | Semantische Suche | Milvus, Sentence Transformers
Verwendung von Milvus als Vektorspeicher in LangChain | Semantische Suche | Milvus, LangChain
RAG mit Milvus und Haystack | RAG | Milvus, Haystack
-Vision Searches mit Milvus und FiftyOne durchführen | Semantische Suche | Milvus, FiftyOne
+Vision Searches mit Milvus und FiftyOne durchführen | Semantische Suche | Milvus, FiftyOne
Semantische Suche mit Milvus und VoyageAI | Semantische Suche | Milvus, VoyageAI
RAG mit Milvus und BentoML | RAG | Milvus, BentoML
RAG mit Milvus und DSPy | RAG | Milvus, DSPy
Erweiterte Videosuche: Nutzung von Twelve Labs und Milvus für die semantische Suche | Semantische Suche | Milvus, Twelve Labs
Aufbau von RAG mit Milvus, vLLM und Llama 3.1 | RAG | Milvus, vLLM, LlamaIndex
Multiagentensysteme mit Mistral AI, Milvus und Llama-Agenten | Agent | Milvus, Mistral AI, LlamaIndex
Kafka mit Milvus verbinden | Datenquellen | Milvus, Kafka
Kotaemon RAG mit Milvus | RAG | Milvus, Kotaemon
Retrieval-erweiterte Generierung: Crawlen von Websites mit Apify und Speichern der Daten in Milvus zur Beantwortung von Fragen | Datenquellen | Milvus, Apify
Auswertung mit DeepEval | Bewertung & Beobachtbarkeit | Milvus, DeepEval
Auswertung mit Arize Phoenix | Bewertung & Beobachtbarkeit | Milvus, Arize Phoenix
Einsatz von Dify mit Milvus | Einrichtung | Milvus, Dify
Aufbau eines RAG-Systems unter Verwendung von Langflow mit Milvus | Orchestrierung | Milvus, Langflow
RAG auf der Arm-Architektur aufbauen | RAG | Milvus, Arm
      diff --git a/localization/v2.4.x/site/de/integrations/langchain/integrate_with_langchain.json b/localization/v2.4.x/site/de/integrations/langchain/integrate_with_langchain.json index 78bc42610..5f4098e83 100644 --- a/localization/v2.4.x/site/de/integrations/langchain/integrate_with_langchain.json +++ b/localization/v2.4.x/site/de/integrations/langchain/integrate_with_langchain.json @@ -1 +1 @@ -{"codeList":["$ pip install --upgrade --quiet langchain langchain-core langchain-community langchain-text-splitters langchain-milvus langchain-openai bs4\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","import bs4\nfrom langchain_community.document_loaders import WebBaseLoader\nfrom langchain_text_splitters import RecursiveCharacterTextSplitter\n\n# Create a WebBaseLoader instance to load documents from web sources\nloader = WebBaseLoader(\n web_paths=(\n \"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n \"https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/\",\n ),\n bs_kwargs=dict(\n parse_only=bs4.SoupStrainer(\n class_=(\"post-content\", \"post-title\", \"post-header\")\n )\n ),\n)\n# Load documents from web sources using the loader\ndocuments = loader.load()\n# Initialize a RecursiveCharacterTextSplitter for splitting text into chunks\ntext_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=200)\n\n# Split the documents into chunks using the text_splitter\ndocs = text_splitter.split_documents(documents)\n\n# Let's take a look at the first document\ndocs[1]\n","from langchain_milvus import Milvus, Zilliz\nfrom langchain_openai import OpenAIEmbeddings\n\nembeddings = OpenAIEmbeddings()\n\nvectorstore = Milvus.from_documents( # or Zilliz.from_documents\n documents=docs,\n embedding=embeddings,\n connection_args={\n \"uri\": \"./milvus_demo.db\",\n },\n drop_old=True, # Drop the old Milvus collection if it exists\n)\n","query = \"What is self-reflection of an AI Agent?\"\nvectorstore.similarity_search(query, k=1)\n","from langchain_core.runnables import RunnablePassthrough\nfrom langchain_core.prompts import PromptTemplate\nfrom langchain_core.output_parsers import StrOutputParser\nfrom langchain_openai import ChatOpenAI\n\n# Initialize the OpenAI language model for response generation\nllm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n\n# Define the prompt template for generating AI responses\nPROMPT_TEMPLATE = \"\"\"\nHuman: You are an AI assistant, and provides answers to questions by using fact based and statistical information when possible.\nUse the following pieces of information to provide a concise answer to the question enclosed in tags.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\n\n{context}\n\n\n\n{question}\n\n\nThe response should be specific and use statistics or numbers when possible.\n\nAssistant:\"\"\"\n\n# Create a PromptTemplate instance with the defined template and input variables\nprompt = PromptTemplate(\n template=PROMPT_TEMPLATE, input_variables=[\"context\", \"question\"]\n)\n# Convert the vector store to a retriever\nretriever = vectorstore.as_retriever()\n\n\n# Define a function to format the retrieved documents\ndef format_docs(docs):\n return \"\\n\\n\".join(doc.page_content for doc in docs)\n","# Define the RAG (Retrieval-Augmented Generation) chain for AI response generation\nrag_chain = (\n {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n | prompt\n | llm\n | StrOutputParser()\n)\n\n# 
rag_chain.get_graph().print_ascii()\n\n# Invoke the RAG chain with a specific question and retrieve the response\nres = rag_chain.invoke(query)\nres\n","vectorstore.similarity_search(\n \"What is CoT?\",\n k=1,\n expr=\"source == 'https://lilianweng.github.io/posts/2023-06-23-agent/'\",\n)\n\n# The same as:\n# vectorstore.as_retriever(search_kwargs=dict(\n# k=1,\n# expr=\"source == 'https://lilianweng.github.io/posts/2023-06-23-agent/'\",\n# )).invoke(\"What is CoT?\")\n","from langchain_core.runnables import ConfigurableField\n\n# Define a new retriever with a configurable field for search_kwargs\nretriever2 = vectorstore.as_retriever().configurable_fields(\n search_kwargs=ConfigurableField(\n id=\"retriever_search_kwargs\",\n )\n)\n\n# Invoke the retriever with a specific search_kwargs which filter the documents by source\nretriever2.with_config(\n configurable={\n \"retriever_search_kwargs\": dict(\n expr=\"source == 'https://lilianweng.github.io/posts/2023-06-23-agent/'\",\n k=1,\n )\n }\n).invoke(query)\n","# Define a new RAG chain with this dynamically configurable retriever\nrag_chain2 = (\n {\"context\": retriever2 | format_docs, \"question\": RunnablePassthrough()}\n | prompt\n | llm\n | StrOutputParser()\n)\n","# Invoke this RAG chain with a specific question and config\nrag_chain2.with_config(\n configurable={\n \"retriever_search_kwargs\": dict(\n expr=\"source == 'https://lilianweng.github.io/posts/2023-06-23-agent/'\",\n )\n }\n).invoke(query)\n","rag_chain2.with_config(\n configurable={\n \"retriever_search_kwargs\": dict(\n expr=\"source == 'https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/'\",\n )\n }\n).invoke(query)\n"],"headingContent":"","anchorList":[{"label":"Retrieval-erweiterte Generierung (RAG) mit Milvus und LangChain","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-LangChain","type":1,"isActive":false},{"label":"Voraussetzungen","href":"Prerequisites","type":2,"isActive":false},{"label":"Bereiten Sie die Daten vor","href":"Prepare-the-data","type":2,"isActive":false},{"label":"RAG-Kette mit Milvus-Vektorspeicher aufbauen","href":"Build-RAG-chain-with-Milvus-Vector-Store","type":2,"isActive":false},{"label":"Filtern von Metadaten","href":"Metadata-filtering","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install --upgrade --quiet langchain langchain-core langchain-community langchain-text-splitters langchain-milvus langchain-openai bs4\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","import bs4\nfrom langchain_community.document_loaders import WebBaseLoader\nfrom langchain_text_splitters import RecursiveCharacterTextSplitter\n\n# Create a WebBaseLoader instance to load documents from web sources\nloader = WebBaseLoader(\n web_paths=(\n \"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n \"https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/\",\n ),\n bs_kwargs=dict(\n parse_only=bs4.SoupStrainer(\n class_=(\"post-content\", \"post-title\", \"post-header\")\n )\n ),\n)\n# Load documents from web sources using the loader\ndocuments = loader.load()\n# Initialize a RecursiveCharacterTextSplitter for splitting text into chunks\ntext_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=200)\n\n# Split the documents into chunks using the text_splitter\ndocs = text_splitter.split_documents(documents)\n\n# Let's take a look at the first document\ndocs[1]\n","from langchain_milvus import Milvus, Zilliz\nfrom langchain_openai import 
OpenAIEmbeddings\n\nembeddings = OpenAIEmbeddings()\n\nvectorstore = Milvus.from_documents( # or Zilliz.from_documents\n documents=docs,\n embedding=embeddings,\n connection_args={\n \"uri\": \"./milvus_demo.db\",\n },\n drop_old=True, # Drop the old Milvus collection if it exists\n)\n","query = \"What is self-reflection of an AI Agent?\"\nvectorstore.similarity_search(query, k=1)\n","from langchain_core.runnables import RunnablePassthrough\nfrom langchain_core.prompts import PromptTemplate\nfrom langchain_core.output_parsers import StrOutputParser\nfrom langchain_openai import ChatOpenAI\n\n# Initialize the OpenAI language model for response generation\nllm = ChatOpenAI(model_name=\"gpt-3.5-turbo\", temperature=0)\n\n# Define the prompt template for generating AI responses\nPROMPT_TEMPLATE = \"\"\"\nHuman: You are an AI assistant, and provides answers to questions by using fact based and statistical information when possible.\nUse the following pieces of information to provide a concise answer to the question enclosed in tags.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\n\n{context}\n\n\n\n{question}\n\n\nThe response should be specific and use statistics or numbers when possible.\n\nAssistant:\"\"\"\n\n# Create a PromptTemplate instance with the defined template and input variables\nprompt = PromptTemplate(\n template=PROMPT_TEMPLATE, input_variables=[\"context\", \"question\"]\n)\n# Convert the vector store to a retriever\nretriever = vectorstore.as_retriever()\n\n\n# Define a function to format the retrieved documents\ndef format_docs(docs):\n return \"\\n\\n\".join(doc.page_content for doc in docs)\n","# Define the RAG (Retrieval-Augmented Generation) chain for AI response generation\nrag_chain = (\n {\"context\": retriever | format_docs, \"question\": RunnablePassthrough()}\n | prompt\n | llm\n | StrOutputParser()\n)\n\n# rag_chain.get_graph().print_ascii()\n\n# Invoke the RAG chain with a specific question and retrieve the response\nres = rag_chain.invoke(query)\nres\n","vectorstore.similarity_search(\n \"What is CoT?\",\n k=1,\n expr=\"source == 'https://lilianweng.github.io/posts/2023-06-23-agent/'\",\n)\n\n# The same as:\n# vectorstore.as_retriever(search_kwargs=dict(\n# k=1,\n# expr=\"source == 'https://lilianweng.github.io/posts/2023-06-23-agent/'\",\n# )).invoke(\"What is CoT?\")\n","from langchain_core.runnables import ConfigurableField\n\n# Define a new retriever with a configurable field for search_kwargs\nretriever2 = vectorstore.as_retriever().configurable_fields(\n search_kwargs=ConfigurableField(\n id=\"retriever_search_kwargs\",\n )\n)\n\n# Invoke the retriever with a specific search_kwargs which filter the documents by source\nretriever2.with_config(\n configurable={\n \"retriever_search_kwargs\": dict(\n expr=\"source == 'https://lilianweng.github.io/posts/2023-06-23-agent/'\",\n k=1,\n )\n }\n).invoke(query)\n","# Define a new RAG chain with this dynamically configurable retriever\nrag_chain2 = (\n {\"context\": retriever2 | format_docs, \"question\": RunnablePassthrough()}\n | prompt\n | llm\n | StrOutputParser()\n)\n","# Invoke this RAG chain with a specific question and config\nrag_chain2.with_config(\n configurable={\n \"retriever_search_kwargs\": dict(\n expr=\"source == 'https://lilianweng.github.io/posts/2023-06-23-agent/'\",\n )\n }\n).invoke(query)\n","rag_chain2.with_config(\n configurable={\n \"retriever_search_kwargs\": dict(\n expr=\"source == 
'https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/'\",\n )\n }\n).invoke(query)\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and LangChain","anchorList":[{"label":"Retrieval-erweiterte Generierung (RAG) mit Milvus und LangChain","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-LangChain","type":1,"isActive":false},{"label":"Voraussetzungen","href":"Prerequisites","type":2,"isActive":false},{"label":"Bereiten Sie die Daten vor","href":"Prepare-the-data","type":2,"isActive":false},{"label":"RAG-Kette mit Milvus-Vektorspeicher aufbauen","href":"Build-RAG-chain-with-Milvus-Vector-Store","type":2,"isActive":false},{"label":"Filtern von Metadaten","href":"Metadata-filtering","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/integrations/langchain/integrate_with_langchain.md b/localization/v2.4.x/site/de/integrations/langchain/integrate_with_langchain.md index 3e902618f..b158d92b4 100644 --- a/localization/v2.4.x/site/de/integrations/langchain/integrate_with_langchain.md +++ b/localization/v2.4.x/site/de/integrations/langchain/integrate_with_langchain.md @@ -3,7 +3,7 @@ id: integrate_with_langchain.md summary: >- Dieser Leitfaden zeigt, wie man ein Retrieval-Augmented Generation (RAG) System mit LangChain und Milvus aufbaut. -title: Retrieval-Augmented Generation (RAG) mit Milvus und LangChain +title: Retrieval-erweiterte Generierung (RAG) mit Milvus und LangChain ---

      Retrieval-erweiterte Generierung (RAG) mit Milvus und LangChain

-Open In Colab
+Open In Colab GitHub Repository

      Diese Anleitung zeigt, wie man ein Retrieval-Augmented Generation (RAG) System mit LangChain und Milvus aufbaut.

      Das RAG-System kombiniert ein Retrieval-System mit einem generativen Modell, um neuen Text auf der Grundlage einer vorgegebenen Aufforderung zu generieren. Das System ruft zunächst relevante Dokumente aus einem Korpus mit Milvus ab und verwendet dann ein generatives Modell, um neuen Text auf der Grundlage der abgerufenen Dokumente zu erzeugen.

      LangChain ist ein Framework für die Entwicklung von Anwendungen, die auf großen Sprachmodellen (LLMs) basieren. Milvus ist die weltweit fortschrittlichste Open-Source-Vektordatenbank, die für die Einbettung von Ähnlichkeitssuche und KI-Anwendungen entwickelt wurde.
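Das Zusammenspiel zeigt, stark verkürzt, die folgende Skizze aus dem Begleitcode dieser Anleitung (docs sind die zuvor aufgeteilten Dokumente; ein OpenAI-API-Schlüssel wird vorausgesetzt):

from langchain_milvus import Milvus
from langchain_openai import OpenAIEmbeddings

# Build a Milvus-backed vector store from the previously split documents ("docs")
vectorstore = Milvus.from_documents(
    documents=docs,
    embedding=OpenAIEmbeddings(),
    connection_args={"uri": "./milvus_demo.db"},
    drop_old=True,  # drop an existing collection with the same name
)

# Retrieve the single most similar chunk for a question
print(vectorstore.similarity_search("What is self-reflection of an AI Agent?", k=1))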

      @@ -45,7 +46,7 @@ title: Retrieval-Augmented Generation (RAG) mit Milvus und LangChain

      Wenn Sie Google Colab verwenden, müssen Sie möglicherweise die Runtime neu starten, um die gerade installierten Abhängigkeiten zu aktivieren. (Klicken Sie auf das Menü "Runtime" am oberen Rand des Bildschirms und wählen Sie "Restart session" aus dem Dropdown-Menü).

-Wir werden die Modelle von OpenAI verwenden. Sie sollten den api-Schlüssel OPENAI_API_KEY als Umgebungsvariable vorbereiten.
+Wir werden die Modelle von OpenAI verwenden. Sie sollten den API-Schlüssel OPENAI_API_KEY als Umgebungsvariable vorbereiten.

      import os
       
       os.environ["OPENAI_API_KEY"] = "sk-***********"
      diff --git a/localization/v2.4.x/site/de/reference/architecture/architecture_overview.json b/localization/v2.4.x/site/de/reference/architecture/architecture_overview.json
      index 03650f779..7847be060 100644
      --- a/localization/v2.4.x/site/de/reference/architecture/architecture_overview.json
      +++ b/localization/v2.4.x/site/de/reference/architecture/architecture_overview.json
      @@ -1 +1 @@
      -{"codeList":[],"headingContent":"","anchorList":[{"label":"Milvus Architektur Überblick","href":"Milvus-Architecture-Overview","type":1,"isActive":false},{"label":"Was kommt als Nächstes?","href":"Whats-next","type":2,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":[],"headingContent":"Milvus Architecture Overview","anchorList":[{"label":"Milvus Architektur Überblick","href":"Milvus-Architecture-Overview","type":1,"isActive":false},{"label":"Was kommt als Nächstes?","href":"Whats-next","type":2,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/de/reference/architecture/architecture_overview.md b/localization/v2.4.x/site/de/reference/architecture/architecture_overview.md
      index e33f66e5c..f3e5c8536 100644
      --- a/localization/v2.4.x/site/de/reference/architecture/architecture_overview.md
      +++ b/localization/v2.4.x/site/de/reference/architecture/architecture_overview.md
      @@ -4,7 +4,7 @@ summary: >-
         Milvus bietet eine schnelle, zuverlässige und stabile Vektordatenbank, die
         speziell für die Ähnlichkeitssuche und künstliche Intelligenz entwickelt
         wurde.
      -title: Milvus Architektur Übersicht
      +title: Milvus Architektur Überblick
       ---
       

      Milvus Architektur Überblick

      Milvus basiert auf beliebten Vektorsuchbibliotheken wie Faiss, HNSW, DiskANN, SCANN und anderen und wurde für die Ähnlichkeitssuche in dichten Vektordatensätzen mit Millionen, Milliarden oder sogar Billionen von Vektoren entwickelt. Bevor Sie fortfahren, sollten Sie sich mit den Grundprinzipien des Embedding Retrieval vertraut machen.

-Milvus unterstützt auch Daten-Sharding, Streaming Data Ingestion, dynamische Schemata, die Kombination von Vektor- und Skalardaten, Multi-Vektor- und Hybrid-Suche, Sparse Vector und viele andere erweiterte Funktionen. Die Plattform bietet Leistung nach Bedarf und kann für jedes Einbettungsszenario optimiert werden. Wir empfehlen die Bereitstellung von Milvus mit Kubernetes für optimale Verfügbarkeit und Elastizität.
+Milvus unterstützt auch Daten-Sharding, Streaming Data Ingestion, dynamische Schemata, die Kombination von Vektor- und Skalardaten bei der Suche, Multi-Vektor- und Hybrid-Suche, Sparse Vector und viele andere erweiterte Funktionen. Die Plattform bietet Leistung nach Bedarf und kann für jedes Einbettungsszenario optimiert werden. Wir empfehlen die Bereitstellung von Milvus mit Kubernetes für optimale Verfügbarkeit und Elastizität.

      Milvus verwendet eine Shared-Storage-Architektur mit Disaggregation von Speicher und Rechenleistung und horizontaler Skalierbarkeit für seine Rechenknoten. Nach dem Prinzip der Disaggregation von Daten- und Steuerungsebene besteht Milvus aus vier Schichten: Zugriffsschicht, Koordinatordienst, Arbeitsknoten und Speicher. Diese Schichten sind voneinander unabhängig, wenn es um Skalierung oder Notfallwiederherstellung geht.

Architecture_diagram (Architektur-Diagramm)

Gemäß der Abbildung können die Schnittstellen in die folgenden Kategorien eingeteilt werden:

• DDL / DCL: createCollection / createPartition / dropCollection / dropPartition / hasCollection / hasPartition
• DML / Produce: insert / delete / upsert
• DQL: search / query
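Wie sich diese Kategorien in der Praxis anfühlen, deutet die folgende kleine pymilvus-Skizze an (Sammlungsname und Daten sind frei gewählte Annahmen):

from pymilvus import MilvusClient

client = MilvusClient(uri="./milvus_demo.db")

# DDL: create a collection (schema management)
client.create_collection(collection_name="arch_demo", dimension=4)

# DML / Produce: insert an entity
client.insert(collection_name="arch_demo", data=[{"id": 0, "vector": [0.1, 0.2, 0.3, 0.4]}])

# DQL: search by vector similarity, query by scalar filter
print(client.search(collection_name="arch_demo", data=[[0.1, 0.2, 0.3, 0.4]], limit=1))
print(client.query(collection_name="arch_demo", filter="id >= 0"))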

      Was kommt als Nächstes?

      diff --git a/localization/v2.4.x/site/de/reference/disk_index.json b/localization/v2.4.x/site/de/reference/disk_index.json index 48052dfad..fe86aebfd 100644 --- a/localization/v2.4.x/site/de/reference/disk_index.json +++ b/localization/v2.4.x/site/de/reference/disk_index.json @@ -1 +1 @@ -{"codeList":["...\nDiskIndex:\n MaxDegree: 56\n SearchListSize: 100\n PQCodeBugetGBRatio: 0.125\n SearchCacheBudgetGBRatio: 0.125\n BeamWidthRatio: 4.0\n...\n"],"headingContent":"","anchorList":[{"label":"On-Disk-Index","href":"On-disk-Index","type":1,"isActive":false},{"label":"Voraussetzungen","href":"Prerequisites","type":2,"isActive":false},{"label":"Begrenzungen","href":"Limits","type":2,"isActive":false},{"label":"Index- und Sucheinstellungen","href":"Index-and-search-settings","type":2,"isActive":false},{"label":"DiskANN-bezogene Milvus-Konfigurationen","href":"DiskANN-related-Milvus-configurations","type":2,"isActive":false},{"label":"Fehlersuche","href":"Troubleshooting","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["...\nDiskIndex:\n MaxDegree: 56\n SearchListSize: 100\n PQCodeBugetGBRatio: 0.125\n SearchCacheBudgetGBRatio: 0.125\n BeamWidthRatio: 4.0\n...\n"],"headingContent":"On-disk Index","anchorList":[{"label":"On-Disk-Index","href":"On-disk-Index","type":1,"isActive":false},{"label":"Voraussetzungen","href":"Prerequisites","type":2,"isActive":false},{"label":"Begrenzungen","href":"Limits","type":2,"isActive":false},{"label":"Index- und Sucheinstellungen","href":"Index-and-search-settings","type":2,"isActive":false},{"label":"DiskANN-bezogene Milvus-Konfigurationen","href":"DiskANN-related-Milvus-configurations","type":2,"isActive":false},{"label":"Fehlersuche","href":"Troubleshooting","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/reference/disk_index.md b/localization/v2.4.x/site/de/reference/disk_index.md index dec8d2bc8..f242181df 100644 --- a/localization/v2.4.x/site/de/reference/disk_index.md +++ b/localization/v2.4.x/site/de/reference/disk_index.md @@ -2,7 +2,7 @@ id: disk_index.md related_key: disk_index summary: Plattenindex-Mechanismus in Milvus. -title: Index auf der Festplatte +title: On-Disk-Index ---

      On-Disk-Index

      Um DiskANN zu verwenden, müssen Sie sicherstellen, dass Sie

• Verwenden Sie nur Float-Vektoren mit mindestens 1 Dimension in Ihren Daten.
-• Verwenden Sie nur den Euklidischen Abstand (L2) oder das Innere Produkt (IP), um den Abstand zwischen Vektoren zu messen.
+• Verwenden Sie nur Euklidische Distanz (L2), Inneres Produkt (IP) oder COSINE, um den Abstand zwischen Vektoren zu messen.
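Eine mögliche Index-Konfiguration als unverbindliche Skizze (Feld- und Sammlungsname sind Annahmen; DiskANN setzt eine bestehende Sammlung auf einem Milvus-Server voraus, nicht Milvus Lite):

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")

# Build a DISKANN index on the vector field of an existing collection
index_params = client.prepare_index_params()
index_params.add_index(
    field_name="vector",
    index_type="DISKANN",
    metric_type="COSINE",  # alternatively L2 or IP, as listed above
)
client.create_index(collection_name="demo_collection", index_params=index_params)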

      Index- und Sucheinstellungen

      In-Memory-Replikate sind als Replikatgruppen organisiert. Jede Replikatgruppe enthält Shard-Replikate. Jedes Shard-Replikat hat ein Streaming-Replikat und ein historisches Replikat, die den wachsenden und versiegelten Segmenten im Shard entsprechen (d. h. DML-Kanal).

-An illustration of how in-memory replica works
+An illustration of how in-memory replica works (Ein Beispiel für die Funktionsweise der In-Memory-Replikation)

      Replikatgruppe

      Eine Replikatgruppe besteht aus mehreren Abfrageknoten, die für die Verarbeitung historischer Daten und Replikate zuständig sind.

      Shard-Replikat

      Ein Shard-Replikat besteht aus einem Streaming-Replikat und einem historischen Replikat, die beide zu demselben Shard gehören. Die Anzahl der Shard-Replikate in einer Replikatgruppe wird durch die Anzahl der Shards in einer bestimmten Sammlung bestimmt.
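Zur Einordnung eine kleine Skizze, wie In-Memory-Replikate beim Laden einer Sammlung angefordert werden (Sammlungsname und Serveradresse sind Annahmen dieser Skizze):

from pymilvus import Collection, connections

connections.connect(uri="http://localhost:19530")

# Request two in-memory replicas; each replica group holds one shard replica per shard
collection = Collection("demo_collection")
collection.load(replica_number=2)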

      @@ -85,7 +85,7 @@ title: In-Memory-Replik

      Balance

      Ein neues Segment, das geladen werden muss, wird auf mehrere verschiedene Abfrageknoten verteilt. Eine Suchanfrage kann bearbeitet werden, sobald mindestens ein Replikat erfolgreich geladen wurde.

      Cache

      Der Proxy unterhält einen Cache, der die Segmente den Abfrageknoten zuordnet, und aktualisiert ihn in regelmäßigen Abständen. Wenn der Proxy eine Anfrage erhält, holt Milvus alle versiegelten Segmente, die durchsucht werden müssen, aus dem Cache und versucht, sie gleichmäßig den Abfrageknoten zuzuordnen.

      Für wachsende Segmente unterhält der Proxy auch einen Channel-to-Query-Node-Cache und sendet Anfragen an die entsprechenden Query-Nodes.

-Ausfallsicherung
-Die Caches des Proxys sind nicht immer auf dem neuesten Stand. Einige Segmente oder Kanäle können zu anderen Abfrageknoten verschoben worden sein, wenn eine Anfrage eingeht. In diesem Fall erhält der Proxy eine Fehlerantwort, aktualisiert den Cache und versucht, sie einem anderen Abfrageknoten zuzuweisen.
+Ausfallsicherung
+Die Caches des Proxys sind nicht immer auf dem neuesten Stand. Einige Segmente oder Kanäle können zu anderen Abfrageknoten verschoben worden sein, wenn eine Anfrage eingeht. In diesem Fall erhält der Proxy eine Fehlerantwort, aktualisiert den Cache und versucht, sie einem anderen Abfrageknoten zuzuordnen.

      Ein Segment wird ignoriert, wenn der Proxy es auch nach der Aktualisierung des Caches nicht finden kann. Dies kann der Fall sein, wenn das Segment verdichtet wurde.

      Wenn der Cache nicht genau ist, kann der Proxy einige Segmente übersehen. Abfrageknoten mit DML-Kanälen (wachsende Segmente) geben Suchantworten zusammen mit einer Liste zuverlässiger Segmente zurück, mit denen der Proxy vergleichen und den Cache aktualisieren kann.

-Erweiterung
-Der Proxy kann Suchanfragen nicht vollständig gleichmäßig auf die Abfrageknoten verteilen, und die Abfrageknoten können über unterschiedliche Ressourcen zur Bedienung von Suchanfragen verfügen. Um eine langschwänzige Verteilung der Ressourcen zu vermeiden, weist der Proxy aktive Segmente auf anderen Abfrageknoten einem untätigen Abfrageknoten zu, der ebenfalls über diese Segmente verfügt.
+Erweiterung
+Der Proxy kann Suchanfragen nicht vollständig gleichmäßig auf die Abfrageknoten verteilen, und die Abfrageknoten verfügen möglicherweise über unterschiedliche Ressourcen zur Bedienung von Suchanfragen. Um eine langschwänzige Verteilung der Ressourcen zu vermeiden, weist der Proxy aktive Segmente auf anderen Abfrageknoten einem untätigen Abfrageknoten zu, der ebenfalls über diese Segmente verfügt.

      diff --git a/localization/v2.4.x/site/de/release_notes.json b/localization/v2.4.x/site/de/release_notes.json index 46daa013d..a3406b4d6 100644 --- a/localization/v2.4.x/site/de/release_notes.json +++ b/localization/v2.4.x/site/de/release_notes.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"Release Notes","anchorList":[{"label":"Hinweise zur Veröffentlichung","href":"Release-Notes","type":1,"isActive":false},{"label":"v2.4.11","href":"v2411","type":2,"isActive":false},{"label":"v2.4.10","href":"v2410","type":2,"isActive":false},{"label":"v2.4.9","href":"v249","type":2,"isActive":false},{"label":"v2.4.8","href":"v248","type":2,"isActive":false},{"label":"v2.4.6","href":"v246","type":2,"isActive":false},{"label":"v2.4.5","href":"v245","type":2,"isActive":false},{"label":"v2.4.4","href":"v244","type":2,"isActive":false},{"label":"v2.4.3","href":"v243","type":2,"isActive":false},{"label":"v2.4.1","href":"v241","type":2,"isActive":false},{"label":"v2.4.0","href":"v240","type":2,"isActive":false},{"label":"v2.4.0-rc.1","href":"v240-rc1","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Release Notes","anchorList":[{"label":"Hinweise zur Veröffentlichung","href":"Release-Notes","type":1,"isActive":false},{"label":"v2.4.13-Hotfix","href":"v2413-hotfix","type":2,"isActive":false},{"label":"[Veraltet] v2.4.13","href":"Deprecated-v2413","type":2,"isActive":false},{"label":"v2.4.12","href":"v2412","type":2,"isActive":false},{"label":"v2.4.11","href":"v2411","type":2,"isActive":false},{"label":"v2.4.10","href":"v2410","type":2,"isActive":false},{"label":"v2.4.9","href":"v249","type":2,"isActive":false},{"label":"v2.4.8","href":"v248","type":2,"isActive":false},{"label":"v2.4.6","href":"v246","type":2,"isActive":false},{"label":"v2.4.5","href":"v245","type":2,"isActive":false},{"label":"v2.4.4","href":"v244","type":2,"isActive":false},{"label":"v2.4.3","href":"v243","type":2,"isActive":false},{"label":"v2.4.1","href":"v241","type":2,"isActive":false},{"label":"v2.4.0","href":"v240","type":2,"isActive":false},{"label":"v2.4.0-rc.1","href":"v240-rc1","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/release_notes.md b/localization/v2.4.x/site/de/release_notes.md index a53a31235..67872c85d 100644 --- a/localization/v2.4.x/site/de/release_notes.md +++ b/localization/v2.4.x/site/de/release_notes.md @@ -18,7 +18,164 @@ title: Hinweise zur Veröffentlichung d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

-Finden Sie heraus, was es Neues in Milvus gibt! Auf dieser Seite werden neue Funktionen, Verbesserungen, bekannte Probleme und Fehlerbehebungen in jeder Version zusammengefasst. Sie können die Versionshinweise für jede Version nach v2.4.0 in diesem Abschnitt finden. Wir empfehlen Ihnen, diese Seite regelmäßig zu besuchen, um sich über Updates zu informieren.
+Finden Sie heraus, was es Neues in Milvus gibt! Auf dieser Seite werden neue Funktionen, Verbesserungen, bekannte Probleme und Fehlerbehebungen in jeder Version zusammengefasst. In diesem Abschnitt finden Sie die Versionshinweise für jede veröffentlichte Version nach v2.4.0. Wir empfehlen Ihnen, diese Seite regelmäßig zu besuchen, um sich über Updates zu informieren.

v2.4.13-Hotfix

Veröffentlichungsdatum: 17. Oktober 2024

Milvus-Version | Python SDK-Version | Java SDK-Version | Node.js SDK-Version
2.4.13-Hotfix | 2.4.8 | 2.4.5 | 2.4.9

Milvus v2.4.13-hotfix behebt ein kritisches Problem in v2.4.13, bei dem Milvus nach einem Neustart möglicherweise keine Sammlungsinformationen abrufen kann, wenn alle MetaKV-Snapshots von der Garbage Collection entfernt wurden (#36933). Benutzern, die derzeit v2.4.13 verwenden, wird empfohlen, so schnell wie möglich auf v2.4.13-Hotfix zu aktualisieren, um mögliche Störungen zu vermeiden.

Kritische Korrekturen

• Originalschlüssel laden, wenn der Zeitstempel MaxTimestamp ist (#36935)

[Veraltet] v2.4.13

Veröffentlichungsdatum: 12. Oktober 2024

Milvus-Version | Python SDK-Version | Java SDK-Version | Node.js SDK-Version
2.4.13 | 2.4.8 | 2.4.5 | 2.4.9

Milvus 2.4.13 führt das dynamische Laden von Replikaten ein, das es Benutzern ermöglicht, die Anzahl der Replikate einer Sammlung anzupassen, ohne die Sammlung freigeben und neu laden zu müssen. Diese Version behebt außerdem mehrere kritische Fehler im Zusammenhang mit dem Massenimport, dem Parsen von Ausdrücken, dem Lastausgleich und der Wiederherstellung nach Ausfällen. Darüber hinaus wurden die MMAP-Ressourcennutzung und die Importleistung erheblich verbessert, wodurch die Effizienz des Systems insgesamt gesteigert wurde. Wir empfehlen dringend ein Upgrade auf diese Version, um die Leistung und Stabilität zu verbessern.

Funktionen

• Dynamische Replika-Anpassung für geladene Sammlungen (#36417)
• Sparse-Vector-MMAP in wachsenden Segmenttypen (#36565)

Fehlerbehebungen

• Ein Problem mit der Flush-Leistung wurde behoben (#36741)
• Fehler mit JSON-Ausdrücken in "[]" behoben (#36722)
• Nachbarn entfernt, wenn das Kompaktierungsziel nicht indiziert ist (#36694)
• Verbesserte Leistung für Rocksmq, wenn der Kanal voll ist (#36618)
• Es wurde ein Problem behoben, bei dem Fehler während des Entpinnens nicht verzögert wurden (#36665)
• Ein Speicherleck für importierte Segmente im Segmentmanager wurde behoben (#36631)
• Unnötige Gesundheitsprüfungen für Abfrageknoten im Proxy wurden übersprungen (#36553)
• Ein Überlaufproblem mit Termausdrücken wurde behoben (#36534)
• Aufzeichnung der Knoten-ID vor der Zuweisung von Aufgaben, um Fehlzuweisungen zu verhindern (#36493)
• Data-Race-Probleme in der Clustering-Kompaktierung behoben (#36499)
• Überprüfung der maximalen Länge von String-Arrays nach dem Typabgleich hinzugefügt (#36497)
• Behebung von Race Conditions im Mix- oder Standalone-Modus (#36459)
• Segment-Ungleichgewicht nach wiederholten Lade- und Freigabeoperationen behoben (#36543)
• Randfall behoben, bei dem Segmente nicht von einem stoppenden Knoten verschoben werden konnten (#36475)
• Segmentinformationen werden nun korrekt aktualisiert, auch wenn einige Segmente fehlen (#36729)
• Verhindert, dass etcd-Transaktionen das maximale Limit im Snapshot KV überschreiten (#36773)

Verbesserungen

• Verbesserte Schätzung der MMAP-Ressourcen:
  • Verbesserter MMAP-bezogener Code in column.h (#36521)
  • Verfeinerte Ressourcenabschätzung beim Laden von Collections (#36728)
• Leistungsverbesserungen:
  • Verbesserte Effizienz beim Parsen von Ausdrücken durch Konvertierung von Unicode nach ASCII (#36676)
  • Parallele Produktion von Nachrichten für mehrere Topics ermöglicht (#36462)
  • Reduzierter CPU-Overhead bei der Berechnung der Indexdateigröße (#36580)
  • Abrufen des Nachrichtentyps aus dem Header, um Unmarshalling zu minimieren (#36454)
  • Optimierte Workload-basierte Replika-Auswahlrichtlinie (#36384)
• Aufteilung von Lösch-Task-Nachrichten, um die maximale Nachrichtengröße einzuhalten (#36574)
• Neue RESTful-URL zur Beschreibung von Importaufträgen hinzugefügt (#36754)
• Optimierte Import-Planung und Hinzufügen einer Zeitkosten-Metrik (#36684)
• Balance-Report-Protokoll für den Query-Coordinator-Balancer hinzugefügt (#36749)
• Umstellung auf die Verwendung einer gemeinsamen GC-Konfiguration (#36670)
• Streaming-Forward-Policy-Schalter für den Delegator hinzugefügt (#36712)
• Manuelle Verdichtung für Sammlungen ohne Indizes aktiviert (#36581)
• Lastverteilung auf Abfrageknoten mit unterschiedlichen Speicherkapazitäten aktiviert (#36625)
• Vereinheitlichte Groß-/Kleinschreibung für eingehende Labels mit metrics.label (#36616)
• Übertragungskanal-/Segment-Operationen wurden idempotent gemacht (#36552)
• Metriken zur Überwachung des Importdurchsatzes und der Anzahl importierter Zeilen hinzugefügt (#36588)
• Erstellung mehrerer Timer-Objekte in Zielen verhindert (#36573)
• Aktualisierte Ausdrucksversion und formatierte HTTP-Antwort für Ausdrücke (#36467)
• Verbesserte Garbage Collection im Snapshot KV (#36793)
• Unterstützung für die Ausführung von Methoden mit Kontextparametern hinzugefügt (#36798)

v2.4.12

Veröffentlichungsdatum: 26. September 2024

Milvus-Version | Python SDK-Version | Java SDK-Version | Node.js SDK-Version
2.4.12 | 2.4.7 | 2.4.4 | 2.4.9

Milvus 2.4.12 enthält wichtige Verbesserungen und kritische Fehlerbehebungen. Diese Version behebt Probleme mit der Datenduplizierung und verbessert die Geschwindigkeit der Wiederherstellung nach Ausfällen, insbesondere bei der Verarbeitung einer großen Anzahl von Löschungen. Es gibt jedoch nach wie vor ein bekanntes Problem, bei dem die Wiederherstellung beim Löschen großer Datenmengen langsam sein kann. Wir arbeiten aktiv an der Behebung dieses Problems.

Verbesserungen

• Graceful Stop für den Flowgraph Manager implementiert (#36358)
• Indexprüfungen für nicht geladene Vektorfelder deaktiviert (#36280)
• Nicht getroffene Löschdatensätze werden während des Delta-Ladens herausgefiltert (#36272)
• Verbesserte Fehlerbehandlung für std::stoi-Ausnahmen (#36296)
• Schlüsselwörter als Feldnamen oder dynamische Feldnamen unzulässig gemacht (#36108)
• Metriken für Löscheinträge in L0-Segmenten hinzugefügt (#36227)
• L0-Weiterleitungsrichtlinie implementiert, um Fernladen zu unterstützen (#36208)
• ANN-Feldladeprüfung im Proxy hinzugefügt (#36194)
• Unterstützung für leere Sparse-Zeilen aktiviert (#36061)
• Eine Sicherheitslücke wurde behoben (#36156)
• Statistik-Handler für Metriken zur Anfrage-/Antwortgröße implementiert (#36118)
• Korrigierte Größenabschätzung für kodierte Array-Daten (#36379)

Fehlerbehebungen

• Metrik-Typ-Fehler für Sammlungen mit zwei Vektorfeldern behoben (#36473)
• Probleme mit langen Puffern behoben, die zu Empfangsfehlern in der Nachrichtenwarteschlange führten (#36425)
• Korrekte "Compact-to-Segments"-Rückgabe nach Split-Unterstützung implementiert (#36429)
• Data-Race-Probleme mit der Goroutine zur Knoten-ID-Prüfung behoben (#36377)
• Elementtyp-Prüfung entfernt (#36324)
• Probleme mit gleichzeitigem Zugriff auf wachsende und versiegelte Segmente behoben (#36288)
• Zustandsabhängige Future-Sperre implementiert (#36333)
• Korrigierte Offset-Verwendung in HybridSearch (#36287, #36253)
• Lecks von schmutzigen Segmenten/Kanälen auf dem QueryNode behoben (#36259)
• Korrigierte Behandlung von Primärschlüssel-Duplikaten (#36274)
• Erzwungene Metrik-Typ-Einstellung in Suchanfragen (#36279)
• Löschproblem bei der Metrik stored_index_files_size behoben (#36161)
• Korrigiertes Verhalten der Lese- und Schreibberechtigungsgruppe für globalen API-Zugriff (#36145)

      v2.4.11

Release date: September 11, 2024

      @@ -49,7 +206,7 @@ title: Hinweise zur Veröffentlichung
• Implemented quota logic for the delete buffer (#35997)

Bug fixes

• Fixed trie index range operations for GreaterThan and GreaterThanEqual comparisons (#36126)
• Corrected marisa_label_order usage in trie index construction (#36060)
• Improved value checking for trie.predictive_search (#35999)
• Enabled support for binary arithmetic expressions on inverted indexes (#36097)
@@ -112,7 +269,7 @@ title: Hinweise zur Veröffentlichung

| Milvus version | Python SDK version | Java SDK version | Node.js SDK version |
| --- | --- | --- | --- |
| 2.4.10 | 2.4.6 | 2.4.3 | 2.4.6 |

Milvus 2.4.10 brings significant improvements in functionality and stability. Key features include support for upsert operations on AutoID-enabled collections, the ability to load collections partially, and various memory-mapped (MMAP) configurations to optimize memory usage. This release also fixes several bugs that led to panics, core dumps, and resource leaks. We recommend upgrading to take full advantage of these improvements.

Features

• Upsert with auto ID: support for upsert operations with automatic ID generation (#34633)
• Partial field loading of collections [beta preview]: allows loading specific fields of a collection (#35696)

@@ -135,7 +292,7 @@ title: Hinweise zur Veröffentlichung

• Ensured correct channel watching during upgrades from 2.2 to 2.4 (#35695)
• Repaired the DataNode unwatched-channel release function (#35657)
• Corrected the partition count in RootCoord metadata (#35601)
• Fixed issues with dynamic configuration updates for certain parameters (#35637)

Improvements

Performance

• Optimized querying of dynamic fields (#35602)

@@ -180,7 +337,7 @@ title: Hinweise zur Veröffentlichung

| Milvus version | Python SDK version | Java SDK version | Node.js SDK version |
| --- | --- | --- | --- |
| 2.4.9 | 2.4.5 | 2.4.3 | 2.4.4 |

Milvus v2.4.9 fixes a critical issue that could return fewer results than the limit (topk) in some cases, and includes several important improvements to enhance the platform's performance and usability.

Critical bug fixes

• Excluded the L0 segment from the readable snapshot (#35510).

@@ -217,7 +374,7 @@ title: Hinweise zur Veröffentlichung

| Milvus version | Python SDK version | Java SDK version | Node.js SDK version |
| --- | --- | --- | --- |
| 2.4.8 | 2.4.5 | 2.4.3 | 2.4.4 |

Milvus 2.4.8 introduced several significant improvements to system performance and stability. The most notable feature is clustering compaction, a mechanism that increases search and query efficiency by redistributing data in large collections based on a designated clustering key, reducing the amount of data scanned. Compaction was also decoupled from the shard DataNode, so each DataNode can perform compaction independently, improving fault tolerance, stability, performance, and scalability. In addition, the interface between the Go and C++ components was reworked to use asynchronous CGO calls, fixing issues such as session timeouts, and application dependencies were updated to close known security vulnerabilities. This release also ships numerous performance optimizations and critical bug fixes.

Features

• Implemented clustering compaction, which allows data to be redistributed based on a designated clustering key to improve query efficiency (#34326), (#34363).
        @@ -268,12 +425,12 @@ title: Hinweise zur Veröffentlichung

Changes

• For open-source users, this release changes the metric types in AutoIndex for FloatVector and BinaryVector to Cosine and Hamming, respectively.

• Pinned versions of third-party dependencies:

• This release introduces pinned versions for certain third-party libraries, significantly improving Milvus's software supply-chain management.
• By isolating the project from upstream changes, daily builds are protected from potential disruptions.
• The update ensures stability by hosting only validated third-party C++ packages on JFrog Cloud and by using Conan Recipe Revisions (RREV).
• This approach mitigates the risk of breakage caused by updates in ConanCenter.
• Developers using Ubuntu 22.04 will benefit from these changes immediately; developers on other operating systems, however, may need to upgrade their glibc version to avoid compatibility issues.
@@ -341,7 +498,7 @@ title: Hinweise zur Veröffentlichung

| Milvus version | Python SDK version | Java SDK version | Node.js SDK version |
| --- | --- | --- | --- |
| 2.4.6 | 2.4.4 | 2.4.2 | 2.4.4 |

Milvus v2.4.6 is a bug-fix release that addresses critical issues such as panics, memory leaks, and data loss during deletes. It also introduces several optimizations, including improvements to monitoring metrics, an upgrade of the Go version to 1.21, and improved usability of RESTful count(*) queries.

Improvements

• Improved the usability of RESTful API queries (#34444).
• Upgraded the Go version from 1.20 to 1.21 (#33940).

@@ -351,7 +508,7 @@ title: Hinweise zur Veröffentlichung

Bug fixes

• Fixed an issue where the GetReplicas API returned a nil status (#34019).
• Fixed an issue where queries could return deleted records (#34502).
• Fixed an issue where IndexNode got stuck during stopping due to incorrect lifetime control (#34559).
• Fixed a memory leak of primary-key oracle objects when a worker goes offline (#34020).
• Fixed ChannelManagerImplV2 to notify the correct node, fixing parameter-capture issues in loop closures (#34004).
• Fixed a read/write data race in ImportTask segmentsInfo by implementing a deep copy (#34126).

@@ -366,7 +523,7 @@ title: Hinweise zur Veröffentlichung

• Fixed an issue with missing sealed segments during L0 compaction (#34566).
• Fixed dirty data in the channel-cp meta generated after garbage collection (#34609).
• Corrected the metrics where database_num was 0 after restarting RootCoord (#34010).
• Fixed a memory leak in the SegmentManager in DataCoord by removing flushed segments generated by imports (#34652).
• Ensured that compressBinlog fills the logID of binlogs after a DataCoord restart to ensure correct reloading from KV (#34064).

        v2.4.5

      v2.4.4

Open In Colab | GitHub Repository

In this tutorial, we show you how to build a RAG (Retrieval-Augmented Generation) pipeline with Milvus.

The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

Preparation

Open In Colab | GitHub Repository

The widespread adoption of large language models highlights the importance of improving the accuracy and relevance of their responses. Retrieval-Augmented Generation (RAG) augments models with external knowledge bases, providing more contextual information and mitigating problems such as hallucinations and insufficient knowledge. Relying solely on simple RAG paradigms has its limits, however, especially for complex entity relationships and multi-hop questions, where the model often struggles to provide accurate answers.

Introducing knowledge graphs (KGs) into the RAG system offers a new solution. KGs represent entities and their relationships in a structured way, provide more precise retrieval information, and help RAG better handle complex question-answering tasks. KG-RAG is still in its early stages, and there is no consensus on how to effectively retrieve entities and relationships from KGs, or on how to integrate vector similarity search with graph structures.

In this notebook, we introduce a simple yet powerful approach to significantly improve performance in this scenario. It is a simple RAG paradigm with multi-way retrieval followed by reranking, yet it implements Graph RAG logically and achieves state-of-the-art performance on multi-hop questions. Let's look at how it is implemented.

      @@ -78,7 +79,7 @@ embedding_model = OpenAIEmbeddings(model=

For the args in MilvusClient:

• Setting uri as a local file, e.g. ./milvus.db, is the most convenient method, as it automatically uses Milvus Lite to store all data in that file (see the sketch after this list).
• If you have large-scale data, you can set up a more performant Milvus server on Docker or Kubernetes. In that setup, please use the server uri, e.g. http://localhost:19530, as the uri.
• If you want to use Zilliz Cloud, the fully managed cloud service for Milvus, adjust the uri and token so they correspond to the Public Endpoint and API key in Zilliz Cloud.
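A minimal sketch of the three connection modes described above; the endpoint and token values are placeholders, not real credentials:

```python
from pymilvus import MilvusClient

# Milvus Lite: all data is stored in this local file
client = MilvusClient(uri="./milvus.db")

# Self-hosted Milvus on Docker/Kubernetes (placeholder address)
# client = MilvusClient(uri="http://localhost:19530")

# Zilliz Cloud (placeholder endpoint and API key)
# client = MilvusClient(uri="https://<public-endpoint>", token="<api-key>")
```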
      @@ -163,7 +164,7 @@ embedding_model = OpenAIEmbeddings(model=

We construct the entities and relations as follows:

• The entity is the subject or object in the triplet, so we extract them directly from the triplets.
• Here we construct the concept of a relationship by directly concatenating the subject, predicate, and object, with a space in between.

We also prepare a dict to map entity IDs to relation IDs, and another dict to map relation IDs to passage IDs, for later use (a hedged sketch follows the snippet below).

      entityid_2_relationids = defaultdict(list)
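A hedged sketch of how these two mappings might be populated while scanning the triplets; the toy data, the second dict's name, and the loop structure are illustrative assumptions, not the tutorial's exact code:

```python
from collections import defaultdict

# Toy data; the tutorial derives these triplets from the corpus.
triplets = [("Johann Bernoulli", "taught", "Leonhard Euler")]

entities, relations = [], []
entityid_2_relationids = defaultdict(list)
relationid_2_passageids = defaultdict(list)  # assumed name for the second mapping

for passage_id, (subj, pred, obj) in enumerate(triplets):
    relation = f"{subj} {pred} {obj}"  # subject, predicate, object joined by spaces
    if relation not in relations:
        relations.append(relation)
    relation_id = relations.index(relation)
    for ent in (subj, obj):
        if ent not in entities:
            entities.append(ent)
        entity_id = entities.index(ent)
        if relation_id not in entityid_2_relationids[entity_id]:
            entityid_2_relationids[entity_id].append(relation_id)
    relationid_2_passageids[relation_id].append(passage_id)
```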
      @@ -271,7 +272,7 @@ Inserting: 100%|█████████████████████
                 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
               >
             
      -    

Similarity retrieval

We retrieve the top-K similar entities and relations from Milvus based on the input query.

When retrieving entities, we should first extract the query entities from the query text using a dedicated method such as NER (named-entity recognition). For simplicity, we prepare the NER results here; if you change the query to a custom question of your own, you have to change its corresponding NER list as well. In practice, you can use any other model or approach to extract the entities from the query (a small illustration follows the query below).

      query = "What contribution did the son of Euler's teacher make?"
       
      @@ -371,7 +372,7 @@ relation_candidate_texts = [
       ]
       

By expanding the subgraph, we obtained the candidate relationships, which will be reranked by the LLM in the next step.

LLM reranking

In this stage, we employ the powerful self-reflection mechanism of the LLM to further filter and refine the candidate relationships. We use a one-shot prompt that includes the query and the candidate set of relationships, and we instruct the LLM to select potential relationships that could help answer the query. Given that some queries may be complex, we adopt the chain-of-thought approach, allowing the LLM to articulate its thought process in its response. We specify that the LLM's response is in JSON format for convenient parsing.

      query_prompt_one_shot_input = """I will provide you with a list of relationship descriptions. Your task is to select 3 relationships that may be useful to answer the given question. Please return a JSON object containing your thought process and a list of the selected relationships in order of their relevance.
       
       Question:
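Since the response is requested as JSON, extracting the selected relationships is a one-liner. This sketch assumes a response schema with a thought process and a list of selected relationships; the key names are illustrative assumptions, not the tutorial's exact schema:

```python
import json

# Placeholder LLM reply; the key names are assumptions for illustration.
llm_reply = """{
  "thought_process": "The question concerns the son of Euler's teacher...",
  "useful_relationships": ["Johann Bernoulli taught Leonhard Euler"]
}"""

selected = json.loads(llm_reply)["useful_relationships"]
print(selected)
```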
      diff --git a/localization/v2.4.x/site/de/tutorials/hybrid_search_with_milvus.json b/localization/v2.4.x/site/de/tutorials/hybrid_search_with_milvus.json
      index 0da31e4e1..2b7bacd9a 100644
      --- a/localization/v2.4.x/site/de/tutorials/hybrid_search_with_milvus.json
      +++ b/localization/v2.4.x/site/de/tutorials/hybrid_search_with_milvus.json
      @@ -1 +1 @@
      -{"codeList":["$ pip install --upgrade pymilvus \"pymilvus[model]\"\n","# Run this cell to download the dataset\n$ wget http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv\n","import pandas as pd\n\nfile_path = \"quora_duplicate_questions.tsv\"\ndf = pd.read_csv(file_path, sep=\"\\t\")\nquestions = set()\nfor _, row in df.iterrows():\n    obj = row.to_dict()\n    questions.add(obj[\"question1\"][:512])\n    questions.add(obj[\"question2\"][:512])\n    if len(questions) > 500:  # Skip this if you want to use the full dataset\n        break\n\ndocs = list(questions)\n\n# example question\nprint(docs[0])\n","from milvus_model.hybrid import BGEM3EmbeddingFunction\n\nef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\ndense_dim = ef.dim[\"dense\"]\n\n# Generate embeddings using BGE-M3 model\ndocs_embeddings = ef(docs)\n","from pymilvus import (\n    connections,\n    utility,\n    FieldSchema,\n    CollectionSchema,\n    DataType,\n    Collection,\n)\n\n# Connect to Milvus given URI\nconnections.connect(uri=\"./milvus.db\")\n\n# Specify the data schema for the new Collection\nfields = [\n    # Use auto generated id as primary key\n    FieldSchema(\n        name=\"pk\", dtype=DataType.VARCHAR, is_primary=True, auto_id=True, max_length=100\n    ),\n    # Store the original text to retrieve based on semantically distance\n    FieldSchema(name=\"text\", dtype=DataType.VARCHAR, max_length=512),\n    # Milvus now supports both sparse and dense vectors,\n    # we can store each in a separate field to conduct hybrid search on both vectors\n    FieldSchema(name=\"sparse_vector\", dtype=DataType.SPARSE_FLOAT_VECTOR),\n    FieldSchema(name=\"dense_vector\", dtype=DataType.FLOAT_VECTOR, dim=dense_dim),\n]\nschema = CollectionSchema(fields)\n\n# Create collection (drop the old one if exists)\ncol_name = \"hybrid_demo\"\nif utility.has_collection(col_name):\n    Collection(col_name).drop()\ncol = Collection(col_name, schema, consistency_level=\"Strong\")\n\n# To make vector search efficient, we need to create indices for the vector fields\nsparse_index = {\"index_type\": \"SPARSE_INVERTED_INDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"sparse_vector\", sparse_index)\ndense_index = {\"index_type\": \"AUTOINDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"dense_vector\", dense_index)\ncol.load()\n","# For efficiency, we insert 50 records in each small batch\nfor i in range(0, len(docs), 50):\n    batched_entities = [\n        docs[i : i + 50],\n        docs_embeddings[\"sparse\"][i : i + 50],\n        docs_embeddings[\"dense\"][i : i + 50],\n    ]\n    col.insert(batched_entities)\nprint(\"Number of entities inserted:\", col.num_entities)\n","# Enter your search query\nquery = input(\"Enter your search query: \")\nprint(query)\n\n# Generate embeddings for the query\nquery_embeddings = ef([query])\n# print(query_embeddings)\n","from pymilvus import (\n    AnnSearchRequest,\n    WeightedRanker,\n)\n\n\ndef dense_search(col, query_dense_embedding, limit=10):\n    search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    res = col.search(\n        [query_dense_embedding],\n        anns_field=\"dense_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef sparse_search(col, query_sparse_embedding, limit=10):\n    search_params = {\n        \"metric_type\": \"IP\",\n        \"params\": {},\n    }\n    res = col.search(\n        [query_sparse_embedding],\n        
anns_field=\"sparse_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef hybrid_search(\n    col,\n    query_dense_embedding,\n    query_sparse_embedding,\n    sparse_weight=1.0,\n    dense_weight=1.0,\n    limit=10,\n):\n    dense_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    dense_req = AnnSearchRequest(\n        [query_dense_embedding], \"dense_vector\", dense_search_params, limit=limit\n    )\n    sparse_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    sparse_req = AnnSearchRequest(\n        [query_sparse_embedding], \"sparse_vector\", sparse_search_params, limit=limit\n    )\n    rerank = WeightedRanker(sparse_weight, dense_weight)\n    res = col.hybrid_search(\n        [sparse_req, dense_req], rerank=rerank, limit=limit, output_fields=[\"text\"]\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n","dense_results = dense_search(col, query_embeddings[\"dense\"][0])\nsparse_results = sparse_search(col, query_embeddings[\"sparse\"][0])\nhybrid_results = hybrid_search(\n    col,\n    query_embeddings[\"dense\"][0],\n    query_embeddings[\"sparse\"][0],\n    sparse_weight=0.7,\n    dense_weight=1.0,\n)\n","def doc_text_formatting(ef, query, docs):\n    tokenizer = ef.model.tokenizer\n    query_tokens_ids = tokenizer.encode(query, return_offsets_mapping=True)\n    query_tokens = tokenizer.convert_ids_to_tokens(query_tokens_ids)\n    formatted_texts = []\n\n    for doc in docs:\n        ldx = 0\n        landmarks = []\n        encoding = tokenizer.encode_plus(doc, return_offsets_mapping=True)\n        tokens = tokenizer.convert_ids_to_tokens(encoding[\"input_ids\"])[1:-1]\n        offsets = encoding[\"offset_mapping\"][1:-1]\n        for token, (start, end) in zip(tokens, offsets):\n            if token in query_tokens:\n                if len(landmarks) != 0 and start == landmarks[-1]:\n                    landmarks[-1] = end\n                else:\n                    landmarks.append(start)\n                    landmarks.append(end)\n        close = False\n        formatted_text = \"\"\n        for i, c in enumerate(doc):\n            if ldx == len(landmarks):\n                pass\n            elif i == landmarks[ldx]:\n                if close:\n                    formatted_text += \"\"\n                else:\n                    formatted_text += \"\"\n                close = not close\n                ldx = ldx + 1\n            formatted_text += c\n        if close is True:\n            formatted_text += \"\"\n        formatted_texts.append(formatted_text)\n    return formatted_texts\n","from IPython.display import Markdown, display\n\n# Dense search results\ndisplay(Markdown(\"**Dense Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, dense_results)\nfor result in dense_results:\n    display(Markdown(result))\n\n# Sparse search results\ndisplay(Markdown(\"\\n**Sparse Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, sparse_results)\nfor result in formatted_results:\n    display(Markdown(result))\n\n# Hybrid search results\ndisplay(Markdown(\"\\n**Hybrid Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, hybrid_results)\nfor result in formatted_results:\n    display(Markdown(result))\n"],"headingContent":"","anchorList":[{"label":"Hybride Suche mit Milvus","href":"Hybrid-Search-with-Milvus","type":1,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":["$ pip install --upgrade pymilvus \"pymilvus[model]\"\n","# Run this cell to download the dataset\n$ wget http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv\n","import pandas as pd\n\nfile_path = \"quora_duplicate_questions.tsv\"\ndf = pd.read_csv(file_path, sep=\"\\t\")\nquestions = set()\nfor _, row in df.iterrows():\n    obj = row.to_dict()\n    questions.add(obj[\"question1\"][:512])\n    questions.add(obj[\"question2\"][:512])\n    if len(questions) > 500:  # Skip this if you want to use the full dataset\n        break\n\ndocs = list(questions)\n\n# example question\nprint(docs[0])\n","from milvus_model.hybrid import BGEM3EmbeddingFunction\n\nef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\ndense_dim = ef.dim[\"dense\"]\n\n# Generate embeddings using BGE-M3 model\ndocs_embeddings = ef(docs)\n","from pymilvus import (\n    connections,\n    utility,\n    FieldSchema,\n    CollectionSchema,\n    DataType,\n    Collection,\n)\n\n# Connect to Milvus given URI\nconnections.connect(uri=\"./milvus.db\")\n\n# Specify the data schema for the new Collection\nfields = [\n    # Use auto generated id as primary key\n    FieldSchema(\n        name=\"pk\", dtype=DataType.VARCHAR, is_primary=True, auto_id=True, max_length=100\n    ),\n    # Store the original text to retrieve based on semantically distance\n    FieldSchema(name=\"text\", dtype=DataType.VARCHAR, max_length=512),\n    # Milvus now supports both sparse and dense vectors,\n    # we can store each in a separate field to conduct hybrid search on both vectors\n    FieldSchema(name=\"sparse_vector\", dtype=DataType.SPARSE_FLOAT_VECTOR),\n    FieldSchema(name=\"dense_vector\", dtype=DataType.FLOAT_VECTOR, dim=dense_dim),\n]\nschema = CollectionSchema(fields)\n\n# Create collection (drop the old one if exists)\ncol_name = \"hybrid_demo\"\nif utility.has_collection(col_name):\n    Collection(col_name).drop()\ncol = Collection(col_name, schema, consistency_level=\"Strong\")\n\n# To make vector search efficient, we need to create indices for the vector fields\nsparse_index = {\"index_type\": \"SPARSE_INVERTED_INDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"sparse_vector\", sparse_index)\ndense_index = {\"index_type\": \"AUTOINDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"dense_vector\", dense_index)\ncol.load()\n","# For efficiency, we insert 50 records in each small batch\nfor i in range(0, len(docs), 50):\n    batched_entities = [\n        docs[i : i + 50],\n        docs_embeddings[\"sparse\"][i : i + 50],\n        docs_embeddings[\"dense\"][i : i + 50],\n    ]\n    col.insert(batched_entities)\nprint(\"Number of entities inserted:\", col.num_entities)\n","# Enter your search query\nquery = input(\"Enter your search query: \")\nprint(query)\n\n# Generate embeddings for the query\nquery_embeddings = ef([query])\n# print(query_embeddings)\n","from pymilvus import (\n    AnnSearchRequest,\n    WeightedRanker,\n)\n\n\ndef dense_search(col, query_dense_embedding, limit=10):\n    search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    res = col.search(\n        [query_dense_embedding],\n        anns_field=\"dense_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef sparse_search(col, query_sparse_embedding, limit=10):\n    search_params = {\n        \"metric_type\": \"IP\",\n        \"params\": {},\n    }\n    res = col.search(\n        [query_sparse_embedding],\n        
anns_field=\"sparse_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef hybrid_search(\n    col,\n    query_dense_embedding,\n    query_sparse_embedding,\n    sparse_weight=1.0,\n    dense_weight=1.0,\n    limit=10,\n):\n    dense_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    dense_req = AnnSearchRequest(\n        [query_dense_embedding], \"dense_vector\", dense_search_params, limit=limit\n    )\n    sparse_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    sparse_req = AnnSearchRequest(\n        [query_sparse_embedding], \"sparse_vector\", sparse_search_params, limit=limit\n    )\n    rerank = WeightedRanker(sparse_weight, dense_weight)\n    res = col.hybrid_search(\n        [sparse_req, dense_req], rerank=rerank, limit=limit, output_fields=[\"text\"]\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n","dense_results = dense_search(col, query_embeddings[\"dense\"][0])\nsparse_results = sparse_search(col, query_embeddings[\"sparse\"]._getrow(0))\nhybrid_results = hybrid_search(\n    col,\n    query_embeddings[\"dense\"][0],\n    query_embeddings[\"sparse\"]._getrow(0),\n    sparse_weight=0.7,\n    dense_weight=1.0,\n)\n","def doc_text_formatting(ef, query, docs):\n    tokenizer = ef.model.tokenizer\n    query_tokens_ids = tokenizer.encode(query, return_offsets_mapping=True)\n    query_tokens = tokenizer.convert_ids_to_tokens(query_tokens_ids)\n    formatted_texts = []\n\n    for doc in docs:\n        ldx = 0\n        landmarks = []\n        encoding = tokenizer.encode_plus(doc, return_offsets_mapping=True)\n        tokens = tokenizer.convert_ids_to_tokens(encoding[\"input_ids\"])[1:-1]\n        offsets = encoding[\"offset_mapping\"][1:-1]\n        for token, (start, end) in zip(tokens, offsets):\n            if token in query_tokens:\n                if len(landmarks) != 0 and start == landmarks[-1]:\n                    landmarks[-1] = end\n                else:\n                    landmarks.append(start)\n                    landmarks.append(end)\n        close = False\n        formatted_text = \"\"\n        for i, c in enumerate(doc):\n            if ldx == len(landmarks):\n                pass\n            elif i == landmarks[ldx]:\n                if close:\n                    formatted_text += \"\"\n                else:\n                    formatted_text += \"\"\n                close = not close\n                ldx = ldx + 1\n            formatted_text += c\n        if close is True:\n            formatted_text += \"\"\n        formatted_texts.append(formatted_text)\n    return formatted_texts\n","from IPython.display import Markdown, display\n\n# Dense search results\ndisplay(Markdown(\"**Dense Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, dense_results)\nfor result in dense_results:\n    display(Markdown(result))\n\n# Sparse search results\ndisplay(Markdown(\"\\n**Sparse Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, sparse_results)\nfor result in formatted_results:\n    display(Markdown(result))\n\n# Hybrid search results\ndisplay(Markdown(\"\\n**Hybrid Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, hybrid_results)\nfor result in formatted_results:\n    display(Markdown(result))\n"],"headingContent":"Hybrid Search with Milvus","anchorList":[{"label":"Hybride Suche mit Milvus","href":"Hybrid-Search-with-Milvus","type":1,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/de/tutorials/hybrid_search_with_milvus.md b/localization/v2.4.x/site/de/tutorials/hybrid_search_with_milvus.md
      index 958232e39..fa07e98ca 100644
      --- a/localization/v2.4.x/site/de/tutorials/hybrid_search_with_milvus.md
      +++ b/localization/v2.4.x/site/de/tutorials/hybrid_search_with_milvus.md
      @@ -18,16 +18,17 @@ title: Hybride Suche mit Milvus
                 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
               >
             
      -    

Open In Colab | GitHub Repository

In this tutorial, we demonstrate how to conduct hybrid search with Milvus and the BGE-M3 model. The BGE-M3 model can convert text into dense and sparse vectors. Milvus supports storing both types of vectors in one collection, enabling hybrid search that improves result relevance.

Milvus supports dense, sparse, and hybrid retrieval methods:

• Dense retrieval: leverages semantic context to understand the meaning behind queries.
• Sparse retrieval: emphasizes keyword matching to find results based on specific terms, equivalent to full-text search.
• Hybrid retrieval: combines both dense and sparse approaches, capturing the full context and specific keywords for comprehensive search results.
By integrating these methods, Milvus hybrid search balances semantic and lexical similarities, improving the overall relevance of search outcomes. This notebook walks through the process of setting up and using these search strategies, highlighting their effectiveness in various search scenarios.

Dependencies and environment

      $ pip install --upgrade pymilvus "pymilvus[model]"
       

Download the dataset

To demonstrate search, we need a corpus of documents. We use the Quora Duplicate Questions dataset and place it in the local directory; the download command from the notebook is reproduced below.
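This is the same download command that appears in the notebook's code cells:

```shell
# Run this cell to download the dataset
$ wget http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv
```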

      @@ -201,11 +202,11 @@ def dense_search(col,

Let's run three different searches with the functions defined above:

      dense_results = dense_search(col, query_embeddings["dense"][0])
      -sparse_results = sparse_search(col, query_embeddings["sparse"][0])
      +sparse_results = sparse_search(col, query_embeddings["sparse"]._getrow(0))
       hybrid_results = hybrid_search(
           col,
           query_embeddings["dense"][0],
      -    query_embeddings["sparse"][0],
      +    query_embeddings["sparse"]._getrow(0),
           sparse_weight=0.7,
           dense_weight=1.0,
       )
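The switch from query_embeddings["sparse"][0] to _getrow(0) in this diff reflects that the sparse embeddings are a SciPy sparse matrix, where a dedicated row accessor is the reliable way to pull out one row. A minimal illustration with SciPy's public getrow (toy data, not actual BGE-M3 output):

```python
from scipy.sparse import csr_matrix

# Toy sparse embeddings: two rows (documents), three dimensions.
emb = csr_matrix([[0.0, 0.3, 0.0],
                  [0.5, 0.0, 0.2]])

row0 = emb.getrow(0)   # public counterpart of the _getrow(0) used above
print(row0.toarray())  # [[0.  0.3 0. ]]
```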
      @@ -282,17 +283,17 @@ formatted_results = doc_text_formatting(ef, query, hybrid_results)
       

How do we prepare for UPSC?

Sparse search results:

What is Java programming? How to learn the Java programming language?

What is the best way to start learning robotics?

What is the alternative to machine learning?

How do I create a new terminal and a new shell in Linux using C programming?

How do I create a new shell in a new terminal using C programming (Linux terminal)?

Which business is better to start in Hyderabad?

Which business is a good one to start in Hyderabad?

What is the best way to get started with robotics? Which is the best development board I can start working on?

What math does a complete newbie need to understand algorithms for computer programming? Which books on algorithms are suitable for a complete beginner?

How can you make life fit you, and how can you prevent life from abusing you mentally and emotionally?

Hybrid search results:

What is the best way to get started with robotics? Which is the best development board I can start working on?

What is Java programming? How to learn the Java programming language?

What is the best way to start learning robotics?

How do you prepare for UPSC?

      diff --git a/localization/v2.4.x/site/de/tutorials/image_similarity_search.json b/localization/v2.4.x/site/de/tutorials/image_similarity_search.json index aac522622..75411be1e 100644 --- a/localization/v2.4.x/site/de/tutorials/image_similarity_search.json +++ b/localization/v2.4.x/site/de/tutorials/image_similarity_search.json @@ -1 +1 @@ -{"codeList":["!wget https://github.com/milvus-io/pymilvus-assets/releases/download/imagedata/reverse_image_search.zip\n!unzip -q -o reverse_image_search.zip\n","$ pip install pymilvus --upgrade\n$ pip install timm\n","import torch\nfrom PIL import Image\nimport timm\nfrom sklearn.preprocessing import normalize\nfrom timm.data import resolve_data_config\nfrom timm.data.transforms_factory import create_transform\n\n\nclass FeatureExtractor:\n def __init__(self, modelname):\n # Load the pre-trained model\n self.model = timm.create_model(\n modelname, pretrained=True, num_classes=0, global_pool=\"avg\"\n )\n self.model.eval()\n\n # Get the input size required by the model\n self.input_size = self.model.default_cfg[\"input_size\"]\n\n config = resolve_data_config({}, model=modelname)\n # Get the preprocessing function provided by TIMM for the model\n self.preprocess = create_transform(**config)\n\n def __call__(self, imagepath):\n # Preprocess the input image\n input_image = Image.open(imagepath).convert(\"RGB\") # Convert to RGB if needed\n input_image = self.preprocess(input_image)\n\n # Convert the image to a PyTorch tensor and add a batch dimension\n input_tensor = input_image.unsqueeze(0)\n\n # Perform inference\n with torch.no_grad():\n output = self.model(input_tensor)\n\n # Extract the feature vector\n feature_vector = output.squeeze().numpy()\n\n return normalize(feature_vector.reshape(1, -1), norm=\"l2\").flatten()\n","from pymilvus import MilvusClient\n\n# Set up a Milvus client\nclient = MilvusClient(uri=\"example.db\")\n# Create a collection in quick setup mode\nif client.has_collection(collection_name=\"image_embeddings\"):\n client.drop_collection(collection_name=\"image_embeddings\")\nclient.create_collection(\n collection_name=\"image_embeddings\",\n vector_field_name=\"vector\",\n dimension=512,\n auto_id=True,\n enable_dynamic_field=True,\n metric_type=\"COSINE\",\n)\n","import os\n\nextractor = FeatureExtractor(\"resnet34\")\n\nroot = \"./train\"\ninsert = True\nif insert is True:\n for dirpath, foldername, filenames in os.walk(root):\n for filename in filenames:\n if filename.endswith(\".JPEG\"):\n filepath = dirpath + \"/\" + filename\n image_embedding = extractor(filepath)\n client.insert(\n \"image_embeddings\",\n {\"vector\": image_embedding, \"filename\": filepath},\n )\n","from IPython.display import display\n\nquery_image = \"./test/Afghan_hound/n02088094_4261.JPEG\"\n\nresults = client.search(\n \"image_embeddings\",\n data=[extractor(query_image)],\n output_fields=[\"filename\"],\n search_params={\"metric_type\": \"COSINE\"},\n)\nimages = []\nfor result in results:\n for hit in result[:10]:\n filename = hit[\"entity\"][\"filename\"]\n img = Image.open(filename)\n img = img.resize((150, 150))\n images.append(img)\n\nwidth = 150 * 5\nheight = 150 * 2\nconcatenated_image = Image.new(\"RGB\", (width, height))\n\nfor idx, img in enumerate(images):\n x = idx % 5\n y = idx // 5\n concatenated_image.paste(img, (x * 150, y * 150))\ndisplay(\"query\")\ndisplay(Image.open(query_image).resize((150, 150)))\ndisplay(\"results\")\ndisplay(concatenated_image)\n"],"headingContent":"","anchorList":[{"label":"Bildsuche mit 
Milvus","href":"Image-Search-with-Milvus","type":1,"isActive":false},{"label":"Vorbereitung des Datensatzes","href":"Dataset-Preparation","type":2,"isActive":false},{"label":"Vorraussetzungen","href":"Prequisites","type":2,"isActive":false},{"label":"Definieren Sie den Feature Extractor","href":"Define-the-Feature-Extractor","type":2,"isActive":false},{"label":"Erstellen einer Milvus-Sammlung","href":"Create-a-Milvus-Collection","type":2,"isActive":false},{"label":"Einfügen der Einbettungen in Milvus","href":"Insert-the-Embeddings-to-Milvus","type":2,"isActive":false},{"label":"Schnelles Einsetzen","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["!wget https://github.com/milvus-io/pymilvus-assets/releases/download/imagedata/reverse_image_search.zip\n!unzip -q -o reverse_image_search.zip\n","$ pip install pymilvus --upgrade\n$ pip install timm\n","import torch\nfrom PIL import Image\nimport timm\nfrom sklearn.preprocessing import normalize\nfrom timm.data import resolve_data_config\nfrom timm.data.transforms_factory import create_transform\n\n\nclass FeatureExtractor:\n def __init__(self, modelname):\n # Load the pre-trained model\n self.model = timm.create_model(\n modelname, pretrained=True, num_classes=0, global_pool=\"avg\"\n )\n self.model.eval()\n\n # Get the input size required by the model\n self.input_size = self.model.default_cfg[\"input_size\"]\n\n config = resolve_data_config({}, model=modelname)\n # Get the preprocessing function provided by TIMM for the model\n self.preprocess = create_transform(**config)\n\n def __call__(self, imagepath):\n # Preprocess the input image\n input_image = Image.open(imagepath).convert(\"RGB\") # Convert to RGB if needed\n input_image = self.preprocess(input_image)\n\n # Convert the image to a PyTorch tensor and add a batch dimension\n input_tensor = input_image.unsqueeze(0)\n\n # Perform inference\n with torch.no_grad():\n output = self.model(input_tensor)\n\n # Extract the feature vector\n feature_vector = output.squeeze().numpy()\n\n return normalize(feature_vector.reshape(1, -1), norm=\"l2\").flatten()\n","from pymilvus import MilvusClient\n\n# Set up a Milvus client\nclient = MilvusClient(uri=\"example.db\")\n# Create a collection in quick setup mode\nif client.has_collection(collection_name=\"image_embeddings\"):\n client.drop_collection(collection_name=\"image_embeddings\")\nclient.create_collection(\n collection_name=\"image_embeddings\",\n vector_field_name=\"vector\",\n dimension=512,\n auto_id=True,\n enable_dynamic_field=True,\n metric_type=\"COSINE\",\n)\n","import os\n\nextractor = FeatureExtractor(\"resnet34\")\n\nroot = \"./train\"\ninsert = True\nif insert is True:\n for dirpath, foldername, filenames in os.walk(root):\n for filename in filenames:\n if filename.endswith(\".JPEG\"):\n filepath = dirpath + \"/\" + filename\n image_embedding = extractor(filepath)\n client.insert(\n \"image_embeddings\",\n {\"vector\": image_embedding, \"filename\": filepath},\n )\n","from IPython.display import display\n\nquery_image = \"./test/Afghan_hound/n02088094_4261.JPEG\"\n\nresults = client.search(\n \"image_embeddings\",\n data=[extractor(query_image)],\n output_fields=[\"filename\"],\n search_params={\"metric_type\": \"COSINE\"},\n)\nimages = []\nfor result in results:\n for hit in result[:10]:\n filename = hit[\"entity\"][\"filename\"]\n img = Image.open(filename)\n img = img.resize((150, 150))\n images.append(img)\n\nwidth = 150 * 5\nheight = 150 * 2\nconcatenated_image = Image.new(\"RGB\", 
(width, height))\n\nfor idx, img in enumerate(images):\n x = idx % 5\n y = idx // 5\n concatenated_image.paste(img, (x * 150, y * 150))\ndisplay(\"query\")\ndisplay(Image.open(query_image).resize((150, 150)))\ndisplay(\"results\")\ndisplay(concatenated_image)\n"],"headingContent":"Image Search with Milvus","anchorList":[{"label":"Bildsuche mit Milvus","href":"Image-Search-with-Milvus","type":1,"isActive":false},{"label":"Vorbereitung des Datensatzes","href":"Dataset-Preparation","type":2,"isActive":false},{"label":"Vorraussetzungen","href":"Prequisites","type":2,"isActive":false},{"label":"Definieren Sie den Feature Extractor","href":"Define-the-Feature-Extractor","type":2,"isActive":false},{"label":"Erstellen einer Milvus-Sammlung","href":"Create-a-Milvus-Collection","type":2,"isActive":false},{"label":"Einfügen der Einbettungen in Milvus","href":"Insert-the-Embeddings-to-Milvus","type":2,"isActive":false},{"label":"Schnelles Einsetzen","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/tutorials/image_similarity_search.md b/localization/v2.4.x/site/de/tutorials/image_similarity_search.md index 4f27ab61e..1429c9925 100644 --- a/localization/v2.4.x/site/de/tutorials/image_similarity_search.md +++ b/localization/v2.4.x/site/de/tutorials/image_similarity_search.md @@ -18,7 +18,8 @@ title: Bildsuche mit Milvus d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

Open In Colab | GitHub Repository

In this notebook, we show you how to use Milvus to search for similar images in a dataset. We use a subset of the ImageNet dataset and then search with an image of an Afghan hound to demonstrate this.

Dataset preparation

Open In Colab | GitHub Repository

This tutorial showcases multimodal RAG powered by Milvus, the visualized BGE model, and GPT-4o. With this system, users can upload an image and edit text instructions, which are processed by BGE's composed-image model to retrieve candidate images. GPT-4o then acts as a reranker, selecting the most suitable image and providing the rationale behind the choice. This powerful combination enables a seamless and intuitive image-search experience, using Milvus for efficient retrieval, the BGE model for precise image processing and matching, and GPT-4o for advanced reranking.

Preparation

    from pymilvus import MilvusClient
    @@ -199,7 +200,7 @@ retrieved_images = [hit.get("entity")
     
    ['./images_folder/images/518Gj1WQ-RL._AC_.jpg', './images_folder/images/41n00AOfWhL._AC_.jpg', './images_folder/images/51Wqge9HySL._AC_.jpg', './images_folder/images/51R2SZiywnL._AC_.jpg', './images_folder/images/516PebbMAcL._AC_.jpg', './images_folder/images/51RrgfYKUfL._AC_.jpg', './images_folder/images/515DzQVKKwL._AC_.jpg', './images_folder/images/51BsgVw6RhL._AC_.jpg', './images_folder/images/51INtcXu9FL._AC_.jpg']
     
Rerank with GPT-4o

We will use an LLM service to rank the images and generate an explanation for the best result based on the user query and the retrieved results.

1. Create a panoramic view

    import numpy as np
     import cv2
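A hedged sketch of what "creating a panoramic view" might look like: tiling the retrieved images into one grid image so GPT-4o can rank them in a single view. The helper name, tile layout, and sizes are illustrative assumptions, not the tutorial's exact code:

```python
import numpy as np
import cv2

def create_panoramic_view(image_paths, tile=(3, 3), size=(200, 200)):
    """Tile up to tile[0]*tile[1] images onto one white canvas."""
    canvas = np.full((tile[0] * size[1], tile[1] * size[0], 3), 255, dtype=np.uint8)
    for idx, path in enumerate(image_paths[: tile[0] * tile[1]]):
        img = cv2.resize(cv2.imread(path), size)
        row, col = divmod(idx, tile[1])
        canvas[row * size[1] : (row + 1) * size[1],
               col * size[0] : (col + 1) * size[0]] = img
    return canvas
```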
    diff --git a/localization/v2.4.x/site/de/tutorials/tutorials-overview.json b/localization/v2.4.x/site/de/tutorials/tutorials-overview.json
    index 028815bed..c8ab5c20a 100644
    --- a/localization/v2.4.x/site/de/tutorials/tutorials-overview.json
    +++ b/localization/v2.4.x/site/de/tutorials/tutorials-overview.json
    @@ -1 +1 @@
    -{"codeList":[],"headingContent":"","anchorList":[{"label":"Übersicht Tutorials","href":"Tutorials-Overview","type":1,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":[],"headingContent":"Tutorials Overview","anchorList":[{"label":"Übersicht Tutorials","href":"Tutorials-Overview","type":1,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/de/tutorials/tutorials-overview.md b/localization/v2.4.x/site/de/tutorials/tutorials-overview.md
    index c39e93b79..8eb75fe39 100644
    --- a/localization/v2.4.x/site/de/tutorials/tutorials-overview.md
    +++ b/localization/v2.4.x/site/de/tutorials/tutorials-overview.md
    @@ -3,7 +3,7 @@ id: tutorials-overview.md
     summary: >-
       Auf dieser Seite finden Sie eine Liste von Tutorials, die Ihnen helfen, mit
       Milvus zu arbeiten.
    -title: Tutorials Übersicht
    +title: Übersicht Tutorials
     ---
     

Tutorials Overview

The clustering compaction feature in Milvus is highly configurable. You can trigger it manually or set it to be triggered automatically by Milvus at specified intervals. To enable clustering compaction, do the following:

Global configuration

You need to modify your Milvus configuration file as shown below.

    dataCoord:
       compaction:
@@ -99,8 +99,8 @@ common:

| Parameter | Description | Default |
| --- | --- | --- |
| enable | Specifies whether to enable clustering compaction. Set this to true if you need this feature for every collection that has a clustering key. | false |
| autoEnable | Specifies whether to enable automatically triggered compaction. Setting this to true means Milvus compacts collections with a clustering key at the specified intervals. | false |
| triggerInterval | Specifies the interval in milliseconds at which Milvus starts clustering compaction. Valid only when autoEnable is set to true. | - |
| minInterval | Specifies the minimum interval in seconds. Valid only when autoEnable is set to true. Setting it to an integer greater than triggerInterval helps avoid repeated compactions within a short period. | - |
| maxInterval | Specifies the maximum interval in seconds. Valid only when autoEnable is set to true. Once Milvus detects that a collection has not been clustering-compacted for longer than this span, it forces a clustering compaction. | - |
| newDataSizeThreshold | Specifies the upper threshold for triggering a clustering compaction. Valid only when autoEnable is set to true. Once Milvus detects that the data volume in a collection exceeds this value, it initiates a clustering compaction process. | - |
| timeout | Specifies the timeout duration for a clustering compaction. A clustering compaction fails if its execution time exceeds this value. | - |

@@ -138,23 +138,8 @@ common:
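A hedged sketch of what the corresponding milvus.yaml section might look like, assembled from the parameters above; the nesting under dataCoord.compaction.clustering and all interval values are placeholder assumptions to be checked against your Milvus version:

```yaml
dataCoord:
  compaction:
    clustering:
      enable: true              # turn on clustering compaction
      autoEnable: true          # let Milvus trigger it on a schedule
      triggerInterval: 600000   # milliseconds (placeholder value)
      minInterval: 3600         # seconds (placeholder value)
      maxInterval: 259200       # seconds (placeholder value)
      newDataSizeThreshold: 512m  # placeholder threshold
      timeout: 7200             # placeholder timeout

common:
  usePartitionKeyAsClusteringKey: true  # optional; see the best practices below
```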

To apply the above changes to your Milvus cluster, please follow the steps in Configure Milvus with Helm and Configure Milvus with Milvus Operator.

Collection configuration

For clustering compaction in a specific collection, select a scalar field of the collection as the clustering key.

    default_fields = [
         FieldSchema(name="id", dtype=DataType.INT64, is_primary=True),
         FieldSchema(name="key", dtype=DataType.INT64, is_clustering_key=True),
    @@ -283,6 +268,6 @@ coll1.wait_for_compaction_completed(is_clustering=Tru
     
• Enable this feature for collections with large amounts of data. Search performance improves with larger data volumes in a collection; it is a good choice to enable this feature for collections with more than 1 million entities.

• Choose an appropriate clustering key: you can use scalar fields that commonly appear in filter conditions as the clustering key. For a collection that holds data from multiple tenants, use the field that distinguishes one tenant from another as the clustering key.

• Use the partition key as the clustering key. You can set common.usePartitionKeyAsClusteringKey to true if you want to enable this feature for all collections in your Milvus instance, or if you still face performance issues in a large collection with a partition key. This way, you have both a clustering key and a partition key when you choose a scalar field in a collection as the partition key.

Note that this setting does not prevent you from choosing another scalar field as the clustering key. An explicitly designated clustering key always takes precedence.
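As a sketch of that last recommendation, the schema below marks a tenant field as the partition key. It assumes common.usePartitionKeyAsClusteringKey is set to true on the server; the field names and dimension are illustrative.

from pymilvus import FieldSchema, CollectionSchema, DataType

# With common.usePartitionKeyAsClusteringKey enabled server-side, the partition
# key below also serves as the clustering key, unless another field is
# explicitly marked with is_clustering_key=True.
fields = [
    FieldSchema(name="id", dtype=DataType.INT64, is_primary=True),
    FieldSchema(name="tenant_id", dtype=DataType.INT64, is_partition_key=True),
    FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=5),
]
schema = CollectionSchema(fields=fields)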

    diff --git a/localization/v2.4.x/site/de/userGuide/insert-update-delete.json b/localization/v2.4.x/site/de/userGuide/insert-update-delete.json index 33faee381..e9ed59421 100644 --- a/localization/v2.4.x/site/de/userGuide/insert-update-delete.json +++ b/localization/v2.4.x/site/de/userGuide/insert-update-delete.json @@ -1 +1 @@ -{"codeList":["from pymilvus import MilvusClient\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n metric_type=\"IP\"\n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .metricType(\"IP\")\n .build();\n\nclient.createCollection(quickSetupReq);\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n metric_type: \"IP\"\n}); \n","# 3. Insert some data\ndata=[\n {\"id\": 0, \"vector\": [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592], \"color\": \"pink_8682\"},\n {\"id\": 1, \"vector\": [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104], \"color\": \"red_7025\"},\n {\"id\": 2, \"vector\": [0.43742130801983836, -0.5597502546264526, 0.6457887650909682, 0.7894058910881185, 0.20785793220625592], \"color\": \"orange_6781\"},\n {\"id\": 3, \"vector\": [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345], \"color\": \"pink_9298\"},\n {\"id\": 4, \"vector\": [0.4452349528804562, -0.8757026943054742, 0.8220779437047674, 0.46406290649483184, 0.30337481143159106], \"color\": \"red_4794\"},\n {\"id\": 5, \"vector\": [0.985825131989184, -0.8144651566660419, 0.6299267002202009, 0.1206906911183383, -0.1446277761879955], \"color\": \"yellow_4222\"},\n {\"id\": 6, \"vector\": [0.8371977790571115, -0.015764369584852833, -0.31062937026679327, -0.562666951622192, -0.8984947637863987], \"color\": \"red_9392\"},\n {\"id\": 7, \"vector\": [-0.33445148015177995, -0.2567135004164067, 0.8987539745369246, 0.9402995886420709, 0.5378064918413052], \"color\": \"grey_8510\"},\n {\"id\": 8, \"vector\": [0.39524717779832685, 0.4000257286739164, -0.5890507376891594, -0.8650502298996872, -0.6140360785406336], \"color\": \"white_9381\"},\n {\"id\": 9, \"vector\": [0.5718280481994695, 0.24070317428066512, -0.3737913482606834, -0.06726932177492717, -0.6980531615588608], \"color\": \"purple_4976\"}\n]\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 10,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9\n# ]\n# }\n","import java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport 
io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp;\n\n// 3. Insert some data\nList data = Arrays.asList(\n new JSONObject(Map.of(\"id\", 0L, \"vector\", Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f), \"color\", \"pink_8682\")),\n new JSONObject(Map.of(\"id\", 1L, \"vector\", Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f), \"color\", \"red_7025\")),\n new JSONObject(Map.of(\"id\", 2L, \"vector\", Arrays.asList(0.43742130801983836f, -0.5597502546264526f, 0.6457887650909682f, 0.7894058910881185f, 0.20785793220625592f), \"color\", \"orange_6781\")),\n new JSONObject(Map.of(\"id\", 3L, \"vector\", Arrays.asList(0.3172005263489739f, 0.9719044792798428f, -0.36981146090600725f, -0.4860894583077995f, 0.95791889146345f), \"color\", \"pink_9298\")),\n new JSONObject(Map.of(\"id\", 4L, \"vector\", Arrays.asList(0.4452349528804562f, -0.8757026943054742f, 0.8220779437047674f, 0.46406290649483184f, 0.30337481143159106f), \"color\", \"red_4794\")),\n new JSONObject(Map.of(\"id\", 5L, \"vector\", Arrays.asList(0.985825131989184f, -0.8144651566660419f, 0.6299267002202009f, 0.1206906911183383f, -0.1446277761879955f), \"color\", \"yellow_4222\")),\n new JSONObject(Map.of(\"id\", 6L, \"vector\", Arrays.asList(0.8371977790571115f, -0.015764369584852833f, -0.31062937026679327f, -0.562666951622192f, -0.8984947637863987f), \"color\", \"red_9392\")),\n new JSONObject(Map.of(\"id\", 7L, \"vector\", Arrays.asList(-0.33445148015177995f, -0.2567135004164067f, 0.8987539745369246f, 0.9402995886420709f, 0.5378064918413052f), \"color\", \"grey_8510\")),\n new JSONObject(Map.of(\"id\", 8L, \"vector\", Arrays.asList(0.39524717779832685f, 0.4000257286739164f, -0.5890507376891594f, -0.8650502298996872f, -0.6140360785406336f), \"color\", \"white_9381\")),\n new JSONObject(Map.of(\"id\", 9L, \"vector\", Arrays.asList(0.5718280481994695f, 0.24070317428066512f, -0.3737913482606834f, -0.06726932177492717f, -0.6980531615588608f), \"color\", \"purple_4976\"))\n);\n\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 10}\n","// 3. 
Insert some data\n\nvar data = [\n {id: 0, vector: [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592], color: \"pink_8682\"},\n {id: 1, vector: [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104], color: \"red_7025\"},\n {id: 2, vector: [0.43742130801983836, -0.5597502546264526, 0.6457887650909682, 0.7894058910881185, 0.20785793220625592], color: \"orange_6781\"},\n {id: 3, vector: [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345], color: \"pink_9298\"},\n {id: 4, vector: [0.4452349528804562, -0.8757026943054742, 0.8220779437047674, 0.46406290649483184, 0.30337481143159106], color: \"red_4794\"},\n {id: 5, vector: [0.985825131989184, -0.8144651566660419, 0.6299267002202009, 0.1206906911183383, -0.1446277761879955], color: \"yellow_4222\"},\n {id: 6, vector: [0.8371977790571115, -0.015764369584852833, -0.31062937026679327, -0.562666951622192, -0.8984947637863987], color: \"red_9392\"},\n {id: 7, vector: [-0.33445148015177995, -0.2567135004164067, 0.8987539745369246, 0.9402995886420709, 0.5378064918413052], color: \"grey_8510\"},\n {id: 8, vector: [0.39524717779832685, 0.4000257286739164, -0.5890507376891594, -0.8650502298996872, -0.6140360785406336], color: \"white_9381\"},\n {id: 9, vector: [0.5718280481994695, 0.24070317428066512, -0.3737913482606834, -0.06726932177492717, -0.6980531615588608], color: \"purple_4976\"} \n]\n\nvar res = await client.insert({\n collection_name: \"quick_setup\",\n data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 10\n// \n","# 4. Insert some more data into a specific partition\ndata=[\n {\"id\": 10, \"vector\": [-0.5570353903748935, -0.8997887893201304, -0.7123782431855732, -0.6298990746450119, 0.6699215060604258], \"color\": \"red_1202\"},\n {\"id\": 11, \"vector\": [0.6319019033373907, 0.6821488267878275, 0.8552303045704168, 0.36929791364943054, -0.14152860714878068], \"color\": \"blue_4150\"},\n {\"id\": 12, \"vector\": [0.9483947484855766, -0.32294203351925344, 0.9759290319978025, 0.8262982148666174, -0.8351194181285713], \"color\": \"orange_4590\"},\n {\"id\": 13, \"vector\": [-0.5449109892498731, 0.043511240563786524, -0.25105249484790804, -0.012030655265886425, -0.0010987671273892108], \"color\": \"pink_9619\"},\n {\"id\": 14, \"vector\": [0.6603339372951424, -0.10866551787442225, -0.9435597754324891, 0.8230244263466688, -0.7986720938400362], \"color\": \"orange_4863\"},\n {\"id\": 15, \"vector\": [-0.8825129181091456, -0.9204557711667729, -0.935350065513425, 0.5484069690287079, 0.24448151140671204], \"color\": \"orange_7984\"},\n {\"id\": 16, \"vector\": [0.6285586391568163, 0.5389064528263487, -0.3163366239905099, 0.22036279378888013, 0.15077052220816167], \"color\": \"blue_9010\"},\n {\"id\": 17, \"vector\": [-0.20151825016059233, -0.905239387635804, 0.6749305353372479, -0.7324272081377843, -0.33007998971889263], \"color\": \"blue_4521\"},\n {\"id\": 18, \"vector\": [0.2432286610792349, 0.01785636564206139, -0.651356982731391, -0.35848148851027895, -0.7387383128324057], \"color\": \"orange_2529\"},\n {\"id\": 19, \"vector\": [0.055512329053363674, 0.7100266349039421, 0.4956956543575197, 0.24541352586717702, 0.4209030729923515], \"color\": \"red_9437\"}\n]\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data,\n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# 
Output\n#\n# {\n# \"insert_count\": 10,\n# \"ids\": [\n# 10,\n# 11,\n# 12,\n# 13,\n# 14,\n# 15,\n# 16,\n# 17,\n# 18,\n# 19\n# ]\n# }\n","// 4. Insert some more data into a specific partition\ndata = Arrays.asList(\n new JSONObject(Map.of(\"id\", 10L, \"vector\", Arrays.asList(-0.5570353903748935f, -0.8997887893201304f, -0.7123782431855732f, -0.6298990746450119f, 0.6699215060604258f), \"color\", \"red_1202\")),\n new JSONObject(Map.of(\"id\", 11L, \"vector\", Arrays.asList(0.6319019033373907f, 0.6821488267878275f, 0.8552303045704168f, 0.36929791364943054f, -0.14152860714878068f), \"color\", \"blue_4150\")),\n new JSONObject(Map.of(\"id\", 12L, \"vector\", Arrays.asList(0.9483947484855766f, -0.32294203351925344f, 0.9759290319978025f, 0.8262982148666174f, -0.8351194181285713f), \"color\", \"orange_4590\")),\n new JSONObject(Map.of(\"id\", 13L, \"vector\", Arrays.asList(-0.5449109892498731f, 0.043511240563786524f, -0.25105249484790804f, -0.012030655265886425f, -0.0010987671273892108f), \"color\", \"pink_9619\")),\n new JSONObject(Map.of(\"id\", 14L, \"vector\", Arrays.asList(0.6603339372951424f, -0.10866551787442225f, -0.9435597754324891f, 0.8230244263466688f, -0.7986720938400362f), \"color\", \"orange_4863\")),\n new JSONObject(Map.of(\"id\", 15L, \"vector\", Arrays.asList(-0.8825129181091456f, -0.9204557711667729f, -0.935350065513425f, 0.5484069690287079f, 0.24448151140671204f), \"color\", \"orange_7984\")),\n new JSONObject(Map.of(\"id\", 16L, \"vector\", Arrays.asList(0.6285586391568163f, 0.5389064528263487f, -0.3163366239905099f, 0.22036279378888013f, 0.15077052220816167f), \"color\", \"blue_9010\")),\n new JSONObject(Map.of(\"id\", 17L, \"vector\", Arrays.asList(-0.20151825016059233f, -0.905239387635804f, 0.6749305353372479f, -0.7324272081377843f, -0.33007998971889263f), \"color\", \"blue_4521\")),\n new JSONObject(Map.of(\"id\", 18L, \"vector\", Arrays.asList(0.2432286610792349f, 0.01785636564206139f, -0.651356982731391f, -0.35848148851027895f, -0.7387383128324057f), \"color\", \"orange_2529\")),\n new JSONObject(Map.of(\"id\", 19L, \"vector\", Arrays.asList(0.055512329053363674f, 0.7100266349039421f, 0.4956956543575197f, 0.24541352586717702f, 0.4209030729923515f), \"color\", \"red_9437\"))\n);\n\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"partitionA\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 10}\n","// 4. 
Insert some more data into a specific partition\ndata = [\n {id: 10, vector: [-0.5570353903748935, -0.8997887893201304, -0.7123782431855732, -0.6298990746450119, 0.6699215060604258], color: \"red_1202\"},\n {id: 11, vector: [0.6319019033373907, 0.6821488267878275, 0.8552303045704168, 0.36929791364943054, -0.14152860714878068], color: \"blue_4150\"},\n {id: 12, vector: [0.9483947484855766, -0.32294203351925344, 0.9759290319978025, 0.8262982148666174, -0.8351194181285713], color: \"orange_4590\"},\n {id: 13, vector: [-0.5449109892498731, 0.043511240563786524, -0.25105249484790804, -0.012030655265886425, -0.0010987671273892108], color: \"pink_9619\"},\n {id: 14, vector: [0.6603339372951424, -0.10866551787442225, -0.9435597754324891, 0.8230244263466688, -0.7986720938400362], color: \"orange_4863\"},\n {id: 15, vector: [-0.8825129181091456, -0.9204557711667729, -0.935350065513425, 0.5484069690287079, 0.24448151140671204], color: \"orange_7984\"},\n {id: 16, vector: [0.6285586391568163, 0.5389064528263487, -0.3163366239905099, 0.22036279378888013, 0.15077052220816167], color: \"blue_9010\"},\n {id: 17, vector: [-0.20151825016059233, -0.905239387635804, 0.6749305353372479, -0.7324272081377843, -0.33007998971889263], color: \"blue_4521\"},\n {id: 18, vector: [0.2432286610792349, 0.01785636564206139, -0.651356982731391, -0.35848148851027895, -0.7387383128324057], color: \"orange_2529\"},\n {id: 19, vector: [0.055512329053363674, 0.7100266349039421, 0.4956956543575197, 0.24541352586717702, 0.4209030729923515], color: \"red_9437\"}\n]\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: data,\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 10\n// \n","# 5. 
Upsert some data\ndata=[\n {\"id\": 0, \"vector\": [-0.619954382375778, 0.4479436794798608, -0.17493894838751745, -0.4248030059917294, -0.8648452746018911], \"color\": \"black_9898\"},\n {\"id\": 1, \"vector\": [0.4762662251462588, -0.6942502138717026, -0.4490002642657902, -0.628696575798281, 0.9660395877041965], \"color\": \"red_7319\"},\n {\"id\": 2, \"vector\": [-0.8864122635045097, 0.9260170474445351, 0.801326976181461, 0.6383943392381306, 0.7563037341572827], \"color\": \"white_6465\"},\n {\"id\": 3, \"vector\": [0.14594326235891586, -0.3775407299900644, -0.3765479013078812, 0.20612075380355122, 0.4902678929632145], \"color\": \"orange_7580\"},\n {\"id\": 4, \"vector\": [0.4548498669607359, -0.887610217681605, 0.5655081329910452, 0.19220509387904117, 0.016513983433433577], \"color\": \"red_3314\"},\n {\"id\": 5, \"vector\": [0.11755001847051827, -0.7295149788999611, 0.2608115847524266, -0.1719167007897875, 0.7417611743754855], \"color\": \"black_9955\"},\n {\"id\": 6, \"vector\": [0.9363032158314308, 0.030699901477745373, 0.8365910312319647, 0.7823840208444011, 0.2625222076909237], \"color\": \"yellow_2461\"},\n {\"id\": 7, \"vector\": [0.0754823906014721, -0.6390658668265143, 0.5610517334334937, -0.8986261118798251, 0.9372056764266794], \"color\": \"white_5015\"},\n {\"id\": 8, \"vector\": [-0.3038434006935904, 0.1279149203380523, 0.503958664270957, -0.2622661156746988, 0.7407627307791929], \"color\": \"purple_6414\"},\n {\"id\": 9, \"vector\": [-0.7125086947677588, -0.8050968321012257, -0.32608864121785786, 0.3255654958645424, 0.26227968923834233], \"color\": \"brown_7231\"}\n]\n\nres = client.upsert(\n collection_name='quick_setup',\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"upsert_count\": 10\n# }\n","// 5. Upsert some data\ndata = Arrays.asList(\n new JSONObject(Map.of(\"id\", 0L, \"vector\", Arrays.asList(-0.619954382375778f, 0.4479436794798608f, -0.17493894838751745f, -0.4248030059917294f, -0.8648452746018911f), \"color\", \"black_9898\")),\n new JSONObject(Map.of(\"id\", 1L, \"vector\", Arrays.asList(0.4762662251462588f, -0.6942502138717026f, -0.4490002642657902f, -0.628696575798281f, 0.9660395877041965f), \"color\", \"red_7319\")),\n new JSONObject(Map.of(\"id\", 2L, \"vector\", Arrays.asList(-0.8864122635045097f, 0.9260170474445351f, 0.801326976181461f, 0.6383943392381306f, 0.7563037341572827f), \"color\", \"white_6465\")),\n new JSONObject(Map.of(\"id\", 3L, \"vector\", Arrays.asList(0.14594326235891586f, -0.3775407299900644f, -0.3765479013078812f, 0.20612075380355122f, 0.4902678929632145f), \"color\", \"orange_7580\")),\n new JSONObject(Map.of(\"id\", 4L, \"vector\", Arrays.asList(0.4548498669607359f, -0.887610217681605f, 0.5655081329910452f, 0.19220509387904117f, 0.016513983433433577f), \"color\", \"red_3314\")),\n new JSONObject(Map.of(\"id\", 5L, \"vector\", Arrays.asList(0.11755001847051827f, -0.7295149788999611f, 0.2608115847524266f, -0.1719167007897875f, 0.7417611743754855f), \"color\", \"black_9955\")),\n new JSONObject(Map.of(\"id\", 6L, \"vector\", Arrays.asList(0.9363032158314308f, 0.030699901477745373f, 0.8365910312319647f, 0.7823840208444011f, 0.2625222076909237f), \"color\", \"yellow_2461\")),\n new JSONObject(Map.of(\"id\", 7L, \"vector\", Arrays.asList(0.0754823906014721f, -0.6390658668265143f, 0.5610517334334937f, -0.8986261118798251f, 0.9372056764266794f), \"color\", \"white_5015\")),\n new JSONObject(Map.of(\"id\", 8L, \"vector\", Arrays.asList(-0.3038434006935904f, 0.1279149203380523f, 0.503958664270957f, -0.2622661156746988f, 
0.7407627307791929f), \"color\", \"purple_6414\")),\n new JSONObject(Map.of(\"id\", 9L, \"vector\", Arrays.asList(-0.7125086947677588f, -0.8050968321012257f, -0.32608864121785786f, 0.3255654958645424f, 0.26227968923834233f), \"color\", \"brown_7231\"))\n);\n\nUpsertReq upsertReq = UpsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nUpsertResp upsertResp = client.upsert(upsertReq);\n\nSystem.out.println(JSONObject.toJSON(upsertResp));\n\n// Output:\n// {\"upsertCnt\": 10}\n","// 5. Upsert some data\ndata = [\n {id: 0, vector: [-0.619954382375778, 0.4479436794798608, -0.17493894838751745, -0.4248030059917294, -0.8648452746018911], color: \"black_9898\"},\n {id: 1, vector: [0.4762662251462588, -0.6942502138717026, -0.4490002642657902, -0.628696575798281, 0.9660395877041965], color: \"red_7319\"},\n {id: 2, vector: [-0.8864122635045097, 0.9260170474445351, 0.801326976181461, 0.6383943392381306, 0.7563037341572827], color: \"white_6465\"},\n {id: 3, vector: [0.14594326235891586, -0.3775407299900644, -0.3765479013078812, 0.20612075380355122, 0.4902678929632145], color: \"orange_7580\"},\n {id: 4, vector: [0.4548498669607359, -0.887610217681605, 0.5655081329910452, 0.19220509387904117, 0.016513983433433577], color: \"red_3314\"},\n {id: 5, vector: [0.11755001847051827, -0.7295149788999611, 0.2608115847524266, -0.1719167007897875, 0.7417611743754855], color: \"black_9955\"},\n {id: 6, vector: [0.9363032158314308, 0.030699901477745373, 0.8365910312319647, 0.7823840208444011, 0.2625222076909237], color: \"yellow_2461\"},\n {id: 7, vector: [0.0754823906014721, -0.6390658668265143, 0.5610517334334937, -0.8986261118798251, 0.9372056764266794], color: \"white_5015\"},\n {id: 8, vector: [-0.3038434006935904, 0.1279149203380523, 0.503958664270957, -0.2622661156746988, 0.7407627307791929], color: \"purple_6414\"},\n {id: 9, vector: [-0.7125086947677588, -0.8050968321012257, -0.32608864121785786, 0.3255654958645424, 0.26227968923834233], color: \"brown_7231\"}\n]\n\nres = await client.upsert({\n collection_name: \"quick_setup\",\n data: data,\n})\n\nconsole.log(res.upsert_cnt)\n\n// Output\n// \n// 10\n// \n","# 6. 
Upsert data in partitions\ndata=[\n {\"id\": 10, \"vector\": [0.06998888224297328, 0.8582816610326578, -0.9657938677934292, 0.6527905683627726, -0.8668460657158576], \"color\": \"black_3651\"},\n {\"id\": 11, \"vector\": [0.6060703043917468, -0.3765080534566074, -0.7710758854987239, 0.36993888322346136, 0.5507513364206531], \"color\": \"grey_2049\"},\n {\"id\": 12, \"vector\": [-0.9041813104515337, -0.9610546012461163, 0.20033003106083358, 0.11842506351635174, 0.8327356724591011], \"color\": \"blue_6168\"},\n {\"id\": 13, \"vector\": [0.3202914977909075, -0.7279137773695252, -0.04747830871620273, 0.8266053056909548, 0.8277957187455489], \"color\": \"blue_1672\"},\n {\"id\": 14, \"vector\": [0.2975811497890859, 0.2946936202691086, 0.5399463833894609, 0.8385334966677529, -0.4450543984655133], \"color\": \"pink_1601\"},\n {\"id\": 15, \"vector\": [-0.04697464305600074, -0.08509022265734134, 0.9067184632552001, -0.2281912685064822, -0.9747503428652762], \"color\": \"yellow_9925\"},\n {\"id\": 16, \"vector\": [-0.9363075919673911, -0.8153981031085669, 0.7943039120490902, -0.2093886809842529, 0.0771191335807897], \"color\": \"orange_9872\"},\n {\"id\": 17, \"vector\": [-0.050451522820639916, 0.18931572752321935, 0.7522886192190488, -0.9071793089474034, 0.6032647330692296], \"color\": \"red_6450\"},\n {\"id\": 18, \"vector\": [-0.9181544231141592, 0.6700755998126806, -0.014174674636136642, 0.6325780463623432, -0.49662222164032976], \"color\": \"purple_7392\"},\n {\"id\": 19, \"vector\": [0.11426945899602536, 0.6089190684002581, -0.5842735738352236, 0.057050610092692855, -0.035163433018196244], \"color\": \"pink_4996\"}\n]\n\nres = client.upsert(\n collection_name=\"quick_setup\",\n data=data,\n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"upsert_count\": 10\n# }\n","import io.milvus.v2.service.vector.request.UpsertReq;\nimport io.milvus.v2.service.vector.response.UpsertResp;\n\n// 6. 
Upsert data in parition\n\ndata = Arrays.asList(\n new JSONObject(Map.of(\"id\", 10L, \"vector\", Arrays.asList(0.06998888224297328f, 0.8582816610326578f, -0.9657938677934292f, 0.6527905683627726f, -0.8668460657158576f), \"color\", \"black_3651\")),\n new JSONObject(Map.of(\"id\", 11L, \"vector\", Arrays.asList(0.6060703043917468f, -0.3765080534566074f, -0.7710758854987239f, 0.36993888322346136f, 0.5507513364206531f), \"color\", \"grey_2049\")),\n new JSONObject(Map.of(\"id\", 12L, \"vector\", Arrays.asList(-0.9041813104515337f, -0.9610546012461163f, 0.20033003106083358f, 0.11842506351635174f, 0.8327356724591011f), \"color\", \"blue_6168\")),\n new JSONObject(Map.of(\"id\", 13L, \"vector\", Arrays.asList(0.3202914977909075f, -0.7279137773695252f, -0.04747830871620273f, 0.8266053056909548f, 0.8277957187455489f), \"color\", \"blue_1672\")),\n new JSONObject(Map.of(\"id\", 14L, \"vector\", Arrays.asList(0.2975811497890859f, 0.2946936202691086f, 0.5399463833894609f, 0.8385334966677529f, -0.4450543984655133f), \"color\", \"pink_1601\")),\n new JSONObject(Map.of(\"id\", 15L, \"vector\", Arrays.asList(-0.04697464305600074f, -0.08509022265734134f, 0.9067184632552001f, -0.2281912685064822f, -0.9747503428652762f), \"color\", \"yellow_9925\")),\n new JSONObject(Map.of(\"id\", 16L, \"vector\", Arrays.asList(-0.9363075919673911f, -0.8153981031085669f, 0.7943039120490902f, -0.2093886809842529f, 0.0771191335807897f), \"color\", \"orange_9872\")),\n new JSONObject(Map.of(\"id\", 17L, \"vector\", Arrays.asList(-0.050451522820639916f, 0.18931572752321935f, 0.7522886192190488f, -0.9071793089474034f, 0.6032647330692296f), \"color\", \"red_6450\")),\n new JSONObject(Map.of(\"id\", 18L, \"vector\", Arrays.asList(-0.9181544231141592f, 0.6700755998126806f, -0.014174674636136642f, 0.6325780463623432f, -0.49662222164032976f), \"color\", \"purple_7392\")),\n new JSONObject(Map.of(\"id\", 19L, \"vector\", Arrays.asList(0.11426945899602536f, 0.6089190684002581f, -0.5842735738352236f, 0.057050610092692855f, -0.035163433018196244f), \"color\", \"pink_4996\"))\n);\n\nupsertReq = UpsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"partitionA\")\n .build();\n\nupsertResp = client.upsert(upsertReq);\n\nSystem.out.println(JSONObject.toJSON(upsertResp));\n\n// Output:\n// {\"upsertCnt\": 10}\n","// 6. 
Upsert data in partitions\ndata = [\n {id: 10, vector: [0.06998888224297328, 0.8582816610326578, -0.9657938677934292, 0.6527905683627726, -0.8668460657158576], color: \"black_3651\"},\n {id: 11, vector: [0.6060703043917468, -0.3765080534566074, -0.7710758854987239, 0.36993888322346136, 0.5507513364206531], color: \"grey_2049\"},\n {id: 12, vector: [-0.9041813104515337, -0.9610546012461163, 0.20033003106083358, 0.11842506351635174, 0.8327356724591011], color: \"blue_6168\"},\n {id: 13, vector: [0.3202914977909075, -0.7279137773695252, -0.04747830871620273, 0.8266053056909548, 0.8277957187455489], color: \"blue_1672\"},\n {id: 14, vector: [0.2975811497890859, 0.2946936202691086, 0.5399463833894609, 0.8385334966677529, -0.4450543984655133], color: \"pink_1601\"},\n {id: 15, vector: [-0.04697464305600074, -0.08509022265734134, 0.9067184632552001, -0.2281912685064822, -0.9747503428652762], color: \"yellow_9925\"},\n {id: 16, vector: [-0.9363075919673911, -0.8153981031085669, 0.7943039120490902, -0.2093886809842529, 0.0771191335807897], color: \"orange_9872\"},\n {id: 17, vector: [-0.050451522820639916, 0.18931572752321935, 0.7522886192190488, -0.9071793089474034, 0.6032647330692296], color: \"red_6450\"},\n {id: 18, vector: [-0.9181544231141592, 0.6700755998126806, -0.014174674636136642, 0.6325780463623432, -0.49662222164032976], color: \"purple_7392\"},\n {id: 19, vector: [0.11426945899602536, 0.6089190684002581, -0.5842735738352236, 0.057050610092692855, -0.035163433018196244], color: \"pink_4996\"}\n]\n\nres = await client.upsert({\n collection_name: \"quick_setup\",\n data: data,\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.upsert_cnt)\n\n// Output\n// \n// 10\n// \n","# 7. Delete entities\nres = client.delete(\n collection_name=\"quick_setup\",\n filter=\"id in [4,5,6]\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"delete_count\": 3\n# }\n","import io.milvus.v2.service.vector.request.DeleteReq;\nimport io.milvus.v2.service.vector.response.DeleteResp;\n\n\n// 7. Delete entities\n\nDeleteReq deleteReq = DeleteReq.builder()\n .collectionName(\"quick_setup\")\n .filter(\"id in [4, 5, 6]\")\n .build();\n\nDeleteResp deleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 3}\n","// 7. 
Delete entities\nres = await client.delete({\n collection_name: \"quick_setup\",\n filter: \"id in [4,5,6]\"\n})\n\nconsole.log(res.delete_cnt)\n\n// Output\n// \n// 3\n// \n","res = client.delete(\n collection_name=\"quick_setup\",\n ids=[18, 19],\n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"delete_count\": 2\n# }\n","deleteReq = DeleteReq.builder()\n .collectionName(\"quick_setup\")\n .ids(Arrays.asList(18L, 19L))\n .partitionName(\"partitionA\")\n .build();\n\ndeleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 2}\n","res = await client.delete({\n collection_name: \"quick_setup\",\n ids: [18, 19],\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.delete_cnt)\n\n// Output\n// \n// 2\n// \n","res = client.delete(\ncollection_name='quick_setup',\npartition_name='partitionA',\nfilter='color like \"blue%\"'\n)\n\nprint(\"Entities deleted from partitionA: \", res['delete_count'])\n\n# Output:\n# Entities deleted from partitionA: 3\n","deleteReq = DeleteReq.builder()\n .collectionName(\"quick_setup\")\n .filter('color like \"blue%\"')\n .partitionName(\"partitionA\")\n .build();\n\ndeleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 3}\n","res = await client.delete({\ncollection_name: \"quick_setup\",\npartition_name: \"partitionA\",\nfilter: 'color like \"blue%\"'\n})\n\nconsole.log(\"Entities deleted from partitionA: \" + res.delete_cnt)\n\n// Output:\n// Entities deleted from partitionA: 3\n"],"headingContent":"","anchorList":[{"label":"Einfügen, Upsert und Löschen","href":"Insert-Upsert--Delete","type":1,"isActive":false},{"label":"Bevor Sie beginnen","href":"Before-you-start","type":2,"isActive":false},{"label":"Übersicht","href":"Overview","type":2,"isActive":false},{"label":"Vorbereitungen","href":"Preparations","type":2,"isActive":false},{"label":"Einfügen von Entitäten","href":"Insert-entities","type":2,"isActive":false},{"label":"Upsert Entitäten","href":"Upsert-entities","type":2,"isActive":false},{"label":"Entitäten löschen","href":"Delete-entities","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["from pymilvus import MilvusClient\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n metric_type=\"IP\"\n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .metricType(\"IP\")\n .build();\n\nclient.createCollection(quickSetupReq);\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n metric_type: \"IP\"\n}); \n","# 3. 
Insert some data\ndata=[\n {\"id\": 0, \"vector\": [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592], \"color\": \"pink_8682\"},\n {\"id\": 1, \"vector\": [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104], \"color\": \"red_7025\"},\n {\"id\": 2, \"vector\": [0.43742130801983836, -0.5597502546264526, 0.6457887650909682, 0.7894058910881185, 0.20785793220625592], \"color\": \"orange_6781\"},\n {\"id\": 3, \"vector\": [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345], \"color\": \"pink_9298\"},\n {\"id\": 4, \"vector\": [0.4452349528804562, -0.8757026943054742, 0.8220779437047674, 0.46406290649483184, 0.30337481143159106], \"color\": \"red_4794\"},\n {\"id\": 5, \"vector\": [0.985825131989184, -0.8144651566660419, 0.6299267002202009, 0.1206906911183383, -0.1446277761879955], \"color\": \"yellow_4222\"},\n {\"id\": 6, \"vector\": [0.8371977790571115, -0.015764369584852833, -0.31062937026679327, -0.562666951622192, -0.8984947637863987], \"color\": \"red_9392\"},\n {\"id\": 7, \"vector\": [-0.33445148015177995, -0.2567135004164067, 0.8987539745369246, 0.9402995886420709, 0.5378064918413052], \"color\": \"grey_8510\"},\n {\"id\": 8, \"vector\": [0.39524717779832685, 0.4000257286739164, -0.5890507376891594, -0.8650502298996872, -0.6140360785406336], \"color\": \"white_9381\"},\n {\"id\": 9, \"vector\": [0.5718280481994695, 0.24070317428066512, -0.3737913482606834, -0.06726932177492717, -0.6980531615588608], \"color\": \"purple_4976\"}\n]\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 10,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9\n# ]\n# }\n","import java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp;\n\n// 3. 
Insert some data\nList data = Arrays.asList(\n new JSONObject(Map.of(\"id\", 0L, \"vector\", Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f), \"color\", \"pink_8682\")),\n new JSONObject(Map.of(\"id\", 1L, \"vector\", Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f), \"color\", \"red_7025\")),\n new JSONObject(Map.of(\"id\", 2L, \"vector\", Arrays.asList(0.43742130801983836f, -0.5597502546264526f, 0.6457887650909682f, 0.7894058910881185f, 0.20785793220625592f), \"color\", \"orange_6781\")),\n new JSONObject(Map.of(\"id\", 3L, \"vector\", Arrays.asList(0.3172005263489739f, 0.9719044792798428f, -0.36981146090600725f, -0.4860894583077995f, 0.95791889146345f), \"color\", \"pink_9298\")),\n new JSONObject(Map.of(\"id\", 4L, \"vector\", Arrays.asList(0.4452349528804562f, -0.8757026943054742f, 0.8220779437047674f, 0.46406290649483184f, 0.30337481143159106f), \"color\", \"red_4794\")),\n new JSONObject(Map.of(\"id\", 5L, \"vector\", Arrays.asList(0.985825131989184f, -0.8144651566660419f, 0.6299267002202009f, 0.1206906911183383f, -0.1446277761879955f), \"color\", \"yellow_4222\")),\n new JSONObject(Map.of(\"id\", 6L, \"vector\", Arrays.asList(0.8371977790571115f, -0.015764369584852833f, -0.31062937026679327f, -0.562666951622192f, -0.8984947637863987f), \"color\", \"red_9392\")),\n new JSONObject(Map.of(\"id\", 7L, \"vector\", Arrays.asList(-0.33445148015177995f, -0.2567135004164067f, 0.8987539745369246f, 0.9402995886420709f, 0.5378064918413052f), \"color\", \"grey_8510\")),\n new JSONObject(Map.of(\"id\", 8L, \"vector\", Arrays.asList(0.39524717779832685f, 0.4000257286739164f, -0.5890507376891594f, -0.8650502298996872f, -0.6140360785406336f), \"color\", \"white_9381\")),\n new JSONObject(Map.of(\"id\", 9L, \"vector\", Arrays.asList(0.5718280481994695f, 0.24070317428066512f, -0.3737913482606834f, -0.06726932177492717f, -0.6980531615588608f), \"color\", \"purple_4976\"))\n);\n\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 10}\n","// 3. 
Insert some data\n\nvar data = [\n {id: 0, vector: [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592], color: \"pink_8682\"},\n {id: 1, vector: [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104], color: \"red_7025\"},\n {id: 2, vector: [0.43742130801983836, -0.5597502546264526, 0.6457887650909682, 0.7894058910881185, 0.20785793220625592], color: \"orange_6781\"},\n {id: 3, vector: [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345], color: \"pink_9298\"},\n {id: 4, vector: [0.4452349528804562, -0.8757026943054742, 0.8220779437047674, 0.46406290649483184, 0.30337481143159106], color: \"red_4794\"},\n {id: 5, vector: [0.985825131989184, -0.8144651566660419, 0.6299267002202009, 0.1206906911183383, -0.1446277761879955], color: \"yellow_4222\"},\n {id: 6, vector: [0.8371977790571115, -0.015764369584852833, -0.31062937026679327, -0.562666951622192, -0.8984947637863987], color: \"red_9392\"},\n {id: 7, vector: [-0.33445148015177995, -0.2567135004164067, 0.8987539745369246, 0.9402995886420709, 0.5378064918413052], color: \"grey_8510\"},\n {id: 8, vector: [0.39524717779832685, 0.4000257286739164, -0.5890507376891594, -0.8650502298996872, -0.6140360785406336], color: \"white_9381\"},\n {id: 9, vector: [0.5718280481994695, 0.24070317428066512, -0.3737913482606834, -0.06726932177492717, -0.6980531615588608], color: \"purple_4976\"} \n]\n\nvar res = await client.insert({\n collection_name: \"quick_setup\",\n data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 10\n// \n","# 4. Insert some more data into a specific partition\ndata=[\n {\"id\": 10, \"vector\": [-0.5570353903748935, -0.8997887893201304, -0.7123782431855732, -0.6298990746450119, 0.6699215060604258], \"color\": \"red_1202\"},\n {\"id\": 11, \"vector\": [0.6319019033373907, 0.6821488267878275, 0.8552303045704168, 0.36929791364943054, -0.14152860714878068], \"color\": \"blue_4150\"},\n {\"id\": 12, \"vector\": [0.9483947484855766, -0.32294203351925344, 0.9759290319978025, 0.8262982148666174, -0.8351194181285713], \"color\": \"orange_4590\"},\n {\"id\": 13, \"vector\": [-0.5449109892498731, 0.043511240563786524, -0.25105249484790804, -0.012030655265886425, -0.0010987671273892108], \"color\": \"pink_9619\"},\n {\"id\": 14, \"vector\": [0.6603339372951424, -0.10866551787442225, -0.9435597754324891, 0.8230244263466688, -0.7986720938400362], \"color\": \"orange_4863\"},\n {\"id\": 15, \"vector\": [-0.8825129181091456, -0.9204557711667729, -0.935350065513425, 0.5484069690287079, 0.24448151140671204], \"color\": \"orange_7984\"},\n {\"id\": 16, \"vector\": [0.6285586391568163, 0.5389064528263487, -0.3163366239905099, 0.22036279378888013, 0.15077052220816167], \"color\": \"blue_9010\"},\n {\"id\": 17, \"vector\": [-0.20151825016059233, -0.905239387635804, 0.6749305353372479, -0.7324272081377843, -0.33007998971889263], \"color\": \"blue_4521\"},\n {\"id\": 18, \"vector\": [0.2432286610792349, 0.01785636564206139, -0.651356982731391, -0.35848148851027895, -0.7387383128324057], \"color\": \"orange_2529\"},\n {\"id\": 19, \"vector\": [0.055512329053363674, 0.7100266349039421, 0.4956956543575197, 0.24541352586717702, 0.4209030729923515], \"color\": \"red_9437\"}\n]\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data,\n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# 
Output\n#\n# {\n# \"insert_count\": 10,\n# \"ids\": [\n# 10,\n# 11,\n# 12,\n# 13,\n# 14,\n# 15,\n# 16,\n# 17,\n# 18,\n# 19\n# ]\n# }\n","// 4. Insert some more data into a specific partition\ndata = Arrays.asList(\n new JSONObject(Map.of(\"id\", 10L, \"vector\", Arrays.asList(-0.5570353903748935f, -0.8997887893201304f, -0.7123782431855732f, -0.6298990746450119f, 0.6699215060604258f), \"color\", \"red_1202\")),\n new JSONObject(Map.of(\"id\", 11L, \"vector\", Arrays.asList(0.6319019033373907f, 0.6821488267878275f, 0.8552303045704168f, 0.36929791364943054f, -0.14152860714878068f), \"color\", \"blue_4150\")),\n new JSONObject(Map.of(\"id\", 12L, \"vector\", Arrays.asList(0.9483947484855766f, -0.32294203351925344f, 0.9759290319978025f, 0.8262982148666174f, -0.8351194181285713f), \"color\", \"orange_4590\")),\n new JSONObject(Map.of(\"id\", 13L, \"vector\", Arrays.asList(-0.5449109892498731f, 0.043511240563786524f, -0.25105249484790804f, -0.012030655265886425f, -0.0010987671273892108f), \"color\", \"pink_9619\")),\n new JSONObject(Map.of(\"id\", 14L, \"vector\", Arrays.asList(0.6603339372951424f, -0.10866551787442225f, -0.9435597754324891f, 0.8230244263466688f, -0.7986720938400362f), \"color\", \"orange_4863\")),\n new JSONObject(Map.of(\"id\", 15L, \"vector\", Arrays.asList(-0.8825129181091456f, -0.9204557711667729f, -0.935350065513425f, 0.5484069690287079f, 0.24448151140671204f), \"color\", \"orange_7984\")),\n new JSONObject(Map.of(\"id\", 16L, \"vector\", Arrays.asList(0.6285586391568163f, 0.5389064528263487f, -0.3163366239905099f, 0.22036279378888013f, 0.15077052220816167f), \"color\", \"blue_9010\")),\n new JSONObject(Map.of(\"id\", 17L, \"vector\", Arrays.asList(-0.20151825016059233f, -0.905239387635804f, 0.6749305353372479f, -0.7324272081377843f, -0.33007998971889263f), \"color\", \"blue_4521\")),\n new JSONObject(Map.of(\"id\", 18L, \"vector\", Arrays.asList(0.2432286610792349f, 0.01785636564206139f, -0.651356982731391f, -0.35848148851027895f, -0.7387383128324057f), \"color\", \"orange_2529\")),\n new JSONObject(Map.of(\"id\", 19L, \"vector\", Arrays.asList(0.055512329053363674f, 0.7100266349039421f, 0.4956956543575197f, 0.24541352586717702f, 0.4209030729923515f), \"color\", \"red_9437\"))\n);\n\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"partitionA\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 10}\n","// 4. 
Insert some more data into a specific partition\ndata = [\n {id: 10, vector: [-0.5570353903748935, -0.8997887893201304, -0.7123782431855732, -0.6298990746450119, 0.6699215060604258], color: \"red_1202\"},\n {id: 11, vector: [0.6319019033373907, 0.6821488267878275, 0.8552303045704168, 0.36929791364943054, -0.14152860714878068], color: \"blue_4150\"},\n {id: 12, vector: [0.9483947484855766, -0.32294203351925344, 0.9759290319978025, 0.8262982148666174, -0.8351194181285713], color: \"orange_4590\"},\n {id: 13, vector: [-0.5449109892498731, 0.043511240563786524, -0.25105249484790804, -0.012030655265886425, -0.0010987671273892108], color: \"pink_9619\"},\n {id: 14, vector: [0.6603339372951424, -0.10866551787442225, -0.9435597754324891, 0.8230244263466688, -0.7986720938400362], color: \"orange_4863\"},\n {id: 15, vector: [-0.8825129181091456, -0.9204557711667729, -0.935350065513425, 0.5484069690287079, 0.24448151140671204], color: \"orange_7984\"},\n {id: 16, vector: [0.6285586391568163, 0.5389064528263487, -0.3163366239905099, 0.22036279378888013, 0.15077052220816167], color: \"blue_9010\"},\n {id: 17, vector: [-0.20151825016059233, -0.905239387635804, 0.6749305353372479, -0.7324272081377843, -0.33007998971889263], color: \"blue_4521\"},\n {id: 18, vector: [0.2432286610792349, 0.01785636564206139, -0.651356982731391, -0.35848148851027895, -0.7387383128324057], color: \"orange_2529\"},\n {id: 19, vector: [0.055512329053363674, 0.7100266349039421, 0.4956956543575197, 0.24541352586717702, 0.4209030729923515], color: \"red_9437\"}\n]\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: data,\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 10\n// \n","# 5. 
Upsert some data\ndata=[\n {\"id\": 0, \"vector\": [-0.619954382375778, 0.4479436794798608, -0.17493894838751745, -0.4248030059917294, -0.8648452746018911], \"color\": \"black_9898\"},\n {\"id\": 1, \"vector\": [0.4762662251462588, -0.6942502138717026, -0.4490002642657902, -0.628696575798281, 0.9660395877041965], \"color\": \"red_7319\"},\n {\"id\": 2, \"vector\": [-0.8864122635045097, 0.9260170474445351, 0.801326976181461, 0.6383943392381306, 0.7563037341572827], \"color\": \"white_6465\"},\n {\"id\": 3, \"vector\": [0.14594326235891586, -0.3775407299900644, -0.3765479013078812, 0.20612075380355122, 0.4902678929632145], \"color\": \"orange_7580\"},\n {\"id\": 4, \"vector\": [0.4548498669607359, -0.887610217681605, 0.5655081329910452, 0.19220509387904117, 0.016513983433433577], \"color\": \"red_3314\"},\n {\"id\": 5, \"vector\": [0.11755001847051827, -0.7295149788999611, 0.2608115847524266, -0.1719167007897875, 0.7417611743754855], \"color\": \"black_9955\"},\n {\"id\": 6, \"vector\": [0.9363032158314308, 0.030699901477745373, 0.8365910312319647, 0.7823840208444011, 0.2625222076909237], \"color\": \"yellow_2461\"},\n {\"id\": 7, \"vector\": [0.0754823906014721, -0.6390658668265143, 0.5610517334334937, -0.8986261118798251, 0.9372056764266794], \"color\": \"white_5015\"},\n {\"id\": 8, \"vector\": [-0.3038434006935904, 0.1279149203380523, 0.503958664270957, -0.2622661156746988, 0.7407627307791929], \"color\": \"purple_6414\"},\n {\"id\": 9, \"vector\": [-0.7125086947677588, -0.8050968321012257, -0.32608864121785786, 0.3255654958645424, 0.26227968923834233], \"color\": \"brown_7231\"}\n]\n\nres = client.upsert(\n collection_name='quick_setup',\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"upsert_count\": 10\n# }\n","// 5. Upsert some data\ndata = Arrays.asList(\n new JSONObject(Map.of(\"id\", 0L, \"vector\", Arrays.asList(-0.619954382375778f, 0.4479436794798608f, -0.17493894838751745f, -0.4248030059917294f, -0.8648452746018911f), \"color\", \"black_9898\")),\n new JSONObject(Map.of(\"id\", 1L, \"vector\", Arrays.asList(0.4762662251462588f, -0.6942502138717026f, -0.4490002642657902f, -0.628696575798281f, 0.9660395877041965f), \"color\", \"red_7319\")),\n new JSONObject(Map.of(\"id\", 2L, \"vector\", Arrays.asList(-0.8864122635045097f, 0.9260170474445351f, 0.801326976181461f, 0.6383943392381306f, 0.7563037341572827f), \"color\", \"white_6465\")),\n new JSONObject(Map.of(\"id\", 3L, \"vector\", Arrays.asList(0.14594326235891586f, -0.3775407299900644f, -0.3765479013078812f, 0.20612075380355122f, 0.4902678929632145f), \"color\", \"orange_7580\")),\n new JSONObject(Map.of(\"id\", 4L, \"vector\", Arrays.asList(0.4548498669607359f, -0.887610217681605f, 0.5655081329910452f, 0.19220509387904117f, 0.016513983433433577f), \"color\", \"red_3314\")),\n new JSONObject(Map.of(\"id\", 5L, \"vector\", Arrays.asList(0.11755001847051827f, -0.7295149788999611f, 0.2608115847524266f, -0.1719167007897875f, 0.7417611743754855f), \"color\", \"black_9955\")),\n new JSONObject(Map.of(\"id\", 6L, \"vector\", Arrays.asList(0.9363032158314308f, 0.030699901477745373f, 0.8365910312319647f, 0.7823840208444011f, 0.2625222076909237f), \"color\", \"yellow_2461\")),\n new JSONObject(Map.of(\"id\", 7L, \"vector\", Arrays.asList(0.0754823906014721f, -0.6390658668265143f, 0.5610517334334937f, -0.8986261118798251f, 0.9372056764266794f), \"color\", \"white_5015\")),\n new JSONObject(Map.of(\"id\", 8L, \"vector\", Arrays.asList(-0.3038434006935904f, 0.1279149203380523f, 0.503958664270957f, -0.2622661156746988f, 
0.7407627307791929f), \"color\", \"purple_6414\")),\n new JSONObject(Map.of(\"id\", 9L, \"vector\", Arrays.asList(-0.7125086947677588f, -0.8050968321012257f, -0.32608864121785786f, 0.3255654958645424f, 0.26227968923834233f), \"color\", \"brown_7231\"))\n);\n\nUpsertReq upsertReq = UpsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nUpsertResp upsertResp = client.upsert(upsertReq);\n\nSystem.out.println(JSONObject.toJSON(upsertResp));\n\n// Output:\n// {\"upsertCnt\": 10}\n","// 5. Upsert some data\ndata = [\n {id: 0, vector: [-0.619954382375778, 0.4479436794798608, -0.17493894838751745, -0.4248030059917294, -0.8648452746018911], color: \"black_9898\"},\n {id: 1, vector: [0.4762662251462588, -0.6942502138717026, -0.4490002642657902, -0.628696575798281, 0.9660395877041965], color: \"red_7319\"},\n {id: 2, vector: [-0.8864122635045097, 0.9260170474445351, 0.801326976181461, 0.6383943392381306, 0.7563037341572827], color: \"white_6465\"},\n {id: 3, vector: [0.14594326235891586, -0.3775407299900644, -0.3765479013078812, 0.20612075380355122, 0.4902678929632145], color: \"orange_7580\"},\n {id: 4, vector: [0.4548498669607359, -0.887610217681605, 0.5655081329910452, 0.19220509387904117, 0.016513983433433577], color: \"red_3314\"},\n {id: 5, vector: [0.11755001847051827, -0.7295149788999611, 0.2608115847524266, -0.1719167007897875, 0.7417611743754855], color: \"black_9955\"},\n {id: 6, vector: [0.9363032158314308, 0.030699901477745373, 0.8365910312319647, 0.7823840208444011, 0.2625222076909237], color: \"yellow_2461\"},\n {id: 7, vector: [0.0754823906014721, -0.6390658668265143, 0.5610517334334937, -0.8986261118798251, 0.9372056764266794], color: \"white_5015\"},\n {id: 8, vector: [-0.3038434006935904, 0.1279149203380523, 0.503958664270957, -0.2622661156746988, 0.7407627307791929], color: \"purple_6414\"},\n {id: 9, vector: [-0.7125086947677588, -0.8050968321012257, -0.32608864121785786, 0.3255654958645424, 0.26227968923834233], color: \"brown_7231\"}\n]\n\nres = await client.upsert({\n collection_name: \"quick_setup\",\n data: data,\n})\n\nconsole.log(res.upsert_cnt)\n\n// Output\n// \n// 10\n// \n","# 6. 
Upsert data in partitions\ndata=[\n {\"id\": 10, \"vector\": [0.06998888224297328, 0.8582816610326578, -0.9657938677934292, 0.6527905683627726, -0.8668460657158576], \"color\": \"black_3651\"},\n {\"id\": 11, \"vector\": [0.6060703043917468, -0.3765080534566074, -0.7710758854987239, 0.36993888322346136, 0.5507513364206531], \"color\": \"grey_2049\"},\n {\"id\": 12, \"vector\": [-0.9041813104515337, -0.9610546012461163, 0.20033003106083358, 0.11842506351635174, 0.8327356724591011], \"color\": \"blue_6168\"},\n {\"id\": 13, \"vector\": [0.3202914977909075, -0.7279137773695252, -0.04747830871620273, 0.8266053056909548, 0.8277957187455489], \"color\": \"blue_1672\"},\n {\"id\": 14, \"vector\": [0.2975811497890859, 0.2946936202691086, 0.5399463833894609, 0.8385334966677529, -0.4450543984655133], \"color\": \"pink_1601\"},\n {\"id\": 15, \"vector\": [-0.04697464305600074, -0.08509022265734134, 0.9067184632552001, -0.2281912685064822, -0.9747503428652762], \"color\": \"yellow_9925\"},\n {\"id\": 16, \"vector\": [-0.9363075919673911, -0.8153981031085669, 0.7943039120490902, -0.2093886809842529, 0.0771191335807897], \"color\": \"orange_9872\"},\n {\"id\": 17, \"vector\": [-0.050451522820639916, 0.18931572752321935, 0.7522886192190488, -0.9071793089474034, 0.6032647330692296], \"color\": \"red_6450\"},\n {\"id\": 18, \"vector\": [-0.9181544231141592, 0.6700755998126806, -0.014174674636136642, 0.6325780463623432, -0.49662222164032976], \"color\": \"purple_7392\"},\n {\"id\": 19, \"vector\": [0.11426945899602536, 0.6089190684002581, -0.5842735738352236, 0.057050610092692855, -0.035163433018196244], \"color\": \"pink_4996\"}\n]\n\nres = client.upsert(\n collection_name=\"quick_setup\",\n data=data,\n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"upsert_count\": 10\n# }\n","import io.milvus.v2.service.vector.request.UpsertReq;\nimport io.milvus.v2.service.vector.response.UpsertResp;\n\n// 6. 
Upsert data in parition\n\ndata = Arrays.asList(\n new JSONObject(Map.of(\"id\", 10L, \"vector\", Arrays.asList(0.06998888224297328f, 0.8582816610326578f, -0.9657938677934292f, 0.6527905683627726f, -0.8668460657158576f), \"color\", \"black_3651\")),\n new JSONObject(Map.of(\"id\", 11L, \"vector\", Arrays.asList(0.6060703043917468f, -0.3765080534566074f, -0.7710758854987239f, 0.36993888322346136f, 0.5507513364206531f), \"color\", \"grey_2049\")),\n new JSONObject(Map.of(\"id\", 12L, \"vector\", Arrays.asList(-0.9041813104515337f, -0.9610546012461163f, 0.20033003106083358f, 0.11842506351635174f, 0.8327356724591011f), \"color\", \"blue_6168\")),\n new JSONObject(Map.of(\"id\", 13L, \"vector\", Arrays.asList(0.3202914977909075f, -0.7279137773695252f, -0.04747830871620273f, 0.8266053056909548f, 0.8277957187455489f), \"color\", \"blue_1672\")),\n new JSONObject(Map.of(\"id\", 14L, \"vector\", Arrays.asList(0.2975811497890859f, 0.2946936202691086f, 0.5399463833894609f, 0.8385334966677529f, -0.4450543984655133f), \"color\", \"pink_1601\")),\n new JSONObject(Map.of(\"id\", 15L, \"vector\", Arrays.asList(-0.04697464305600074f, -0.08509022265734134f, 0.9067184632552001f, -0.2281912685064822f, -0.9747503428652762f), \"color\", \"yellow_9925\")),\n new JSONObject(Map.of(\"id\", 16L, \"vector\", Arrays.asList(-0.9363075919673911f, -0.8153981031085669f, 0.7943039120490902f, -0.2093886809842529f, 0.0771191335807897f), \"color\", \"orange_9872\")),\n new JSONObject(Map.of(\"id\", 17L, \"vector\", Arrays.asList(-0.050451522820639916f, 0.18931572752321935f, 0.7522886192190488f, -0.9071793089474034f, 0.6032647330692296f), \"color\", \"red_6450\")),\n new JSONObject(Map.of(\"id\", 18L, \"vector\", Arrays.asList(-0.9181544231141592f, 0.6700755998126806f, -0.014174674636136642f, 0.6325780463623432f, -0.49662222164032976f), \"color\", \"purple_7392\")),\n new JSONObject(Map.of(\"id\", 19L, \"vector\", Arrays.asList(0.11426945899602536f, 0.6089190684002581f, -0.5842735738352236f, 0.057050610092692855f, -0.035163433018196244f), \"color\", \"pink_4996\"))\n);\n\nupsertReq = UpsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"partitionA\")\n .build();\n\nupsertResp = client.upsert(upsertReq);\n\nSystem.out.println(JSONObject.toJSON(upsertResp));\n\n// Output:\n// {\"upsertCnt\": 10}\n","// 6. 
Upsert data in partitions\ndata = [\n {id: 10, vector: [0.06998888224297328, 0.8582816610326578, -0.9657938677934292, 0.6527905683627726, -0.8668460657158576], color: \"black_3651\"},\n {id: 11, vector: [0.6060703043917468, -0.3765080534566074, -0.7710758854987239, 0.36993888322346136, 0.5507513364206531], color: \"grey_2049\"},\n {id: 12, vector: [-0.9041813104515337, -0.9610546012461163, 0.20033003106083358, 0.11842506351635174, 0.8327356724591011], color: \"blue_6168\"},\n {id: 13, vector: [0.3202914977909075, -0.7279137773695252, -0.04747830871620273, 0.8266053056909548, 0.8277957187455489], color: \"blue_1672\"},\n {id: 14, vector: [0.2975811497890859, 0.2946936202691086, 0.5399463833894609, 0.8385334966677529, -0.4450543984655133], color: \"pink_1601\"},\n {id: 15, vector: [-0.04697464305600074, -0.08509022265734134, 0.9067184632552001, -0.2281912685064822, -0.9747503428652762], color: \"yellow_9925\"},\n {id: 16, vector: [-0.9363075919673911, -0.8153981031085669, 0.7943039120490902, -0.2093886809842529, 0.0771191335807897], color: \"orange_9872\"},\n {id: 17, vector: [-0.050451522820639916, 0.18931572752321935, 0.7522886192190488, -0.9071793089474034, 0.6032647330692296], color: \"red_6450\"},\n {id: 18, vector: [-0.9181544231141592, 0.6700755998126806, -0.014174674636136642, 0.6325780463623432, -0.49662222164032976], color: \"purple_7392\"},\n {id: 19, vector: [0.11426945899602536, 0.6089190684002581, -0.5842735738352236, 0.057050610092692855, -0.035163433018196244], color: \"pink_4996\"}\n]\n\nres = await client.upsert({\n collection_name: \"quick_setup\",\n data: data,\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.upsert_cnt)\n\n// Output\n// \n// 10\n// \n","# 7. Delete entities\nres = client.delete(\n collection_name=\"quick_setup\",\n filter=\"id in [4,5,6]\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"delete_count\": 3\n# }\n","import io.milvus.v2.service.vector.request.DeleteReq;\nimport io.milvus.v2.service.vector.response.DeleteResp;\n\n\n// 7. Delete entities\n\nDeleteReq deleteReq = DeleteReq.builder()\n .collectionName(\"quick_setup\")\n .filter(\"id in [4, 5, 6]\")\n .build();\n\nDeleteResp deleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 3}\n","// 7. 
Delete entities\nres = await client.delete({\n collection_name: \"quick_setup\",\n filter: \"id in [4,5,6]\"\n})\n\nconsole.log(res.delete_cnt)\n\n// Output\n// \n// 3\n// \n","res = client.delete(\n collection_name=\"quick_setup\",\n ids=[18, 19],\n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"delete_count\": 2\n# }\n","deleteReq = DeleteReq.builder()\n .collectionName(\"quick_setup\")\n .ids(Arrays.asList(18L, 19L))\n .partitionName(\"partitionA\")\n .build();\n\ndeleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 2}\n","res = await client.delete({\n collection_name: \"quick_setup\",\n ids: [18, 19],\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.delete_cnt)\n\n// Output\n// \n// 2\n// \n","res = client.delete(\ncollection_name='quick_setup',\npartition_name='partitionA',\nfilter='color like \"blue%\"'\n)\n\nprint(\"Entities deleted from partitionA: \", res['delete_count'])\n\n# Output:\n# Entities deleted from partitionA: 3\n","deleteReq = DeleteReq.builder()\n .collectionName(\"quick_setup\")\n .filter('color like \"blue%\"')\n .partitionName(\"partitionA\")\n .build();\n\ndeleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 3}\n","res = await client.delete({\ncollection_name: \"quick_setup\",\npartition_name: \"partitionA\",\nfilter: 'color like \"blue%\"'\n})\n\nconsole.log(\"Entities deleted from partitionA: \" + res.delete_cnt)\n\n// Output:\n// Entities deleted from partitionA: 3\n"],"headingContent":"Insert, Upsert & Delete","anchorList":[{"label":"Einfügen, Upsert und Löschen","href":"Insert-Upsert--Delete","type":1,"isActive":false},{"label":"Bevor Sie beginnen","href":"Before-you-start","type":2,"isActive":false},{"label":"Übersicht","href":"Overview","type":2,"isActive":false},{"label":"Vorbereitungen","href":"Preparations","type":2,"isActive":false},{"label":"Einfügen von Entitäten","href":"Insert-entities","type":2,"isActive":false},{"label":"Upsert Entitäten","href":"Upsert-entities","type":2,"isActive":false},{"label":"Entitäten löschen","href":"Delete-entities","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/userGuide/insert-update-delete.md b/localization/v2.4.x/site/de/userGuide/insert-update-delete.md index eeadf9fa9..f463054ba 100644 --- a/localization/v2.4.x/site/de/userGuide/insert-update-delete.md +++ b/localization/v2.4.x/site/de/userGuide/insert-update-delete.md @@ -3,7 +3,7 @@ id: insert-update-delete.md summary: >- Dieses Handbuch führt Sie durch die Datenmanipulationsoperationen innerhalb einer Sammlung, einschließlich Einfügen, Hochstellen und Löschen. -title: 'Einfügen, Hochstellen und Löschen' +title: 'Einfügen, Upsert und Löschen' ---

    Einfügen, Upsert und Löschen

    Eine Entität im Kontext von Milvus-Sammlungen ist eine einzelne, identifizierbare Instanz innerhalb einer Sammlung. Sie stellt ein eindeutiges Mitglied einer bestimmten Klasse dar, sei es ein Buch in einer Bibliothek, ein Gen in einem Genom oder eine andere identifizierbare Einheit.

    -

    Entitäten innerhalb einer Sammlung haben einen gemeinsamen Satz von Attributen, die als Schema bezeichnet werden und die Struktur beschreiben, die jede Entität einhalten muss, einschließlich Feldnamen, Datentypen und andere Einschränkungen.

    +

    Entitäten innerhalb einer Sammlung haben einen gemeinsamen Satz von Attributen, der als Schema bezeichnet wird und die Struktur umreißt, die jede Entität einhalten muss, einschließlich Feldnamen, Datentypen und andere Beschränkungen.

    Das erfolgreiche Einfügen von Entitäten in eine Sammlung erfordert, dass die bereitgestellten Daten alle schema-definierten Felder der Zielsammlung enthalten. Darüber hinaus können Sie auch nicht schema-definierte Felder einfügen, wenn Sie das dynamische Feld aktiviert haben. Weitere Informationen finden Sie unter Dynamisches Feld aktivieren.
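    Zur Veranschaulichung eine minimale Skizze (pymilvus), unter der Annahme einer Sammlung quick_setup mit aktiviertem dynamischen Feld; der Schlüssel color ist hier nicht im Schema definiert und wird im dynamischen Feld abgelegt:

    res = client.insert(
        collection_name="quick_setup",
        data=[{
            "id": 0,                              # Schema-defined primary key
            "vector": [0.1, 0.2, 0.3, 0.4, 0.5],  # Schema-defined vector field
            "color": "pink_8682"                  # Non-schema key, stored in the dynamic field
        }]
    )

    print(res)

    # Expected output
    #
    # {
    #     "insert_count": 1,
    #     "ids": [0]
    # }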

    Vorbereitungen

    +

    Eine Sammlung teilweise laden (öffentliche Vorschau)

    +

    Diese Funktion befindet sich derzeit in der öffentlichen Vorschau. Die API und die Funktionalität können sich in Zukunft noch ändern.

    +
    +

    Beim Empfang Ihrer Ladeanforderung lädt Milvus alle Vektorfeld-Indizes und alle skalaren Felddaten in den Speicher. Wenn einige Felder nicht in Suchen und Abfragen einbezogen werden sollen, können Sie sie vom Laden ausschließen, um die Speichernutzung zu reduzieren und die Suchleistung zu verbessern.

    +
    +
    # 7. Load the collection
    +client.load_collection(
    +    collection_name="customized_setup_2",
    +    load_fields=["my_id", "my_vector"] # Load only the specified fields
    +    skip_load_dynamic_field=True # Skip loading the dynamic field
    +)
    +
    +res = client.get_load_state(
    +    collection_name="customized_setup_2"
    +)
    +
    +print(res)
    +
    +# Output
    +#
    +# {
    +#     "state": "<LoadState: Loaded>"
    +# }
    +
    +

    Beachten Sie, dass nur die in load_fields aufgeführten Felder als Filterbedingungen und Ausgabefelder in Suchen und Abfragen verwendet werden können. Sie sollten immer den Primärschlüssel in die Liste aufnehmen. Die Feldnamen, die vom Laden ausgeschlossen sind, sind nicht für die Filterung oder Ausgabe verfügbar.

    +

    Sie können skip_load_dynamic_field=True verwenden, um das Laden des dynamischen Feldes zu überspringen. Milvus behandelt das dynamische Feld als ein einziges Feld, so dass alle Schlüssel des dynamischen Feldes zusammen einbezogen oder ausgeschlossen werden.

    +

    Eine Sammlung freigeben

    Um eine Sammlung freizugeben, verwenden Sie die release_collection() Methode und geben Sie den Namen der Sammlung an.

    -

    Um eine Sammlung freizugeben, verwenden Sie die releaseCollection() Methode und geben Sie den Namen der Sammlung an.

    +

    Um eine Sammlung freizugeben, verwenden Sie die Methode releaseCollection() unter Angabe des Sammlungsnamens.

    Um eine Sammlung freizugeben, verwenden Sie die releaseCollection() Methode und geben Sie den Namen der Sammlung an.
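    Eine minimale Skizze (pymilvus), unter der Annahme des Clients und der Sammlung quick_setup aus den vorherigen Schritten:

    client.release_collection(collection_name="quick_setup")

    res = client.get_load_state(collection_name="quick_setup")

    print(res)

    # Expected output
    #
    # {
    #     "state": "<LoadState: NotLoad>"
    # }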

    @@ -2323,7 +2350,7 @@ collection.set_properties(
    }
    )
    -

    MMAP einstellen

    Konfigurieren Sie die Eigenschaft Speicherzuordnung (MMAP) für die Sammlung, die festlegt, ob Daten im Speicher abgebildet werden, um die Abfrageleistung zu verbessern. Weitere Informationen finden Sie unter Konfigurieren der Speicherzuordnung .

    +

    MMAP einstellen

    Konfigurieren Sie die Eigenschaft Speicherzuordnung (MMAP) für die Sammlung, die festlegt, ob Daten im Speicher abgebildet werden, um die Abfrageleistung zu verbessern. Weitere Informationen finden Sie unter Konfigurieren der Speicherzuordnung.

    Bevor Sie die MMAP-Eigenschaft einstellen, müssen Sie die Sammlung zuerst freigeben. Andernfalls tritt ein Fehler auf.
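    Eine minimale Skizze mit der pymilvus-ORM-API, unter der Annahme einer bestehenden Sammlung quick_setup:

    from pymilvus import Collection

    collection = Collection("quick_setup")
    collection.release()  # Release the collection first; otherwise setting MMAP raises an error
    collection.set_properties(
        properties={"mmap.enabled": True}  # Enable memory mapping for the collection
    )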

    diff --git a/localization/v2.4.x/site/de/userGuide/manage-indexes/index-vector-fields.json b/localization/v2.4.x/site/de/userGuide/manage-indexes/index-vector-fields.json index b63efa60f..902b7b6d5 100644 --- a/localization/v2.4.x/site/de/userGuide/manage-indexes/index-vector-fields.json +++ b/localization/v2.4.x/site/de/userGuide/manage-indexes/index-vector-fields.json @@ -1 +1 @@ -{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create schema\n# 2.1. Create schema\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n)\n\n# 2.2. Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\n\n# 3. Create collection\nclient.create_collection(\n collection_name=\"customized_setup\", \n schema=schema, \n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder().fieldName(\"id\").dataType(DataType.Int64).isPrimaryKey(true).autoID(false).build());\nschema.addField(AddFieldReq.builder().fieldName(\"vector\").dataType(DataType.FloatVector).dimension(5).build());\n\n// 3 Create a collection without schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n.collectionName(\"customized_setup\")\n.collectionSchema(schema)\n.build();\n\nclient.createCollection(customizedSetupReq);\n","// 1. Set up a Milvus Client\nclient = new MilvusClient({address, token});\n\n// 2. Define fields for the collection\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n autoID: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n]\n\n// 3. Create a collection\nres = await client.createCollection({\n collection_name: \"customized_setup\",\n fields: fields,\n})\n\nconsole.log(res.error_code) \n\n// Output\n// \n// Success\n// \n","# 4.1. Set up the index parameters\nindex_params = MilvusClient.prepare_index_params()\n\n# 4.2. Add an index on the vector field.\nindex_params.add_index(\n field_name=\"vector\",\n metric_type=\"COSINE\",\n index_type=\"IVF_FLAT\",\n index_name=\"vector_index\",\n params={ \"nlist\": 128 }\n)\n\n# 4.3. 
Create an index file\nclient.create_index(\n collection_name=\"customized_setup\",\n index_params=index_params\n)\n","import io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.index.request.CreateIndexReq;\n\n// 4 Prepare index parameters\n\n// 4.2 Add an index for the vector field \"vector\"\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexName(\"vector_index\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.COSINE)\n .extraParams(Map.of(\"nlist\", 128))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n\n// 4.3 Crate an index file\nCreateIndexReq createIndexReq = CreateIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexParams(indexParams)\n .build();\n\nclient.createIndex(createIndexReq);\n","// 4. Set up index for the collection\n// 4.1. Set up the index parameters\nres = await client.createIndex({\n collection_name: \"customized_setup\",\n field_name: \"vector\",\n index_type: \"AUTOINDEX\",\n metric_type: \"COSINE\", \n index_name: \"vector_index\",\n params: { \"nlist\": 128 }\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","# 5. Describe index\nres = client.list_indexes(\n collection_name=\"customized_setup\"\n)\n\nprint(res)\n\n# Output\n#\n# [\n# \"vector_index\",\n# ]\n\nres = client.describe_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"index_type\": ,\n# \"metric_type\": \"COSINE\",\n# \"field_name\": \"vector\",\n# \"index_name\": \"vector_index\"\n# }\n","import io.milvus.v2.service.index.request.DescribeIndexReq;\nimport io.milvus.v2.service.index.response.DescribeIndexResp;\n\n// 5. Describe index\n// 5.1 List the index names\nListIndexesReq listIndexesReq = ListIndexesReq.builder()\n .collectionName(\"customized_setup\")\n .build();\n\nList indexNames = client.listIndexes(listIndexesReq);\n\nSystem.out.println(indexNames);\n\n// Output:\n// [\n// \"vector_index\"\n// ]\n\n// 5.2 Describe an index\nDescribeIndexReq describeIndexReq = DescribeIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nDescribeIndexResp describeIndexResp = client.describeIndex(describeIndexReq);\n\nSystem.out.println(JSONObject.toJSON(describeIndexResp));\n\n// Output:\n// {\n// \"metricType\": \"COSINE\",\n// \"indexType\": \"AUTOINDEX\",\n// \"fieldName\": \"vector\",\n// \"indexName\": \"vector_index\"\n// }\n","// 5. Describe the index\nres = await client.describeIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(JSON.stringify(res.index_descriptions, null, 2))\n\n// Output\n// \n// [\n// {\n// \"params\": [\n// {\n// \"key\": \"index_type\",\n// \"value\": \"AUTOINDEX\"\n// },\n// {\n// \"key\": \"metric_type\",\n// \"value\": \"COSINE\"\n// }\n// ],\n// \"index_name\": \"vector_index\",\n// \"indexID\": \"449007919953063141\",\n// \"field_name\": \"vector\",\n// \"indexed_rows\": \"0\",\n// \"total_rows\": \"0\",\n// \"state\": \"Finished\",\n// \"index_state_fail_reason\": \"\",\n// \"pending_index_rows\": \"0\"\n// }\n// ]\n// \n","# 6. Drop index\nclient.drop_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n","// 6. Drop index\n\nDropIndexReq dropIndexReq = DropIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nclient.dropIndex(dropIndexReq);\n","// 6. 
Drop the index\nres = await client.dropIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n"],"headingContent":"","anchorList":[{"label":"Vektorfelder indizieren","href":"Index-Vector-Fields","type":1,"isActive":false},{"label":"Übersicht","href":"Overview","type":2,"isActive":false},{"label":"Vorbereitungen","href":"Preparations","type":2,"isActive":false},{"label":"Indizieren einer Sammlung","href":"Index-a-Collection","type":2,"isActive":false},{"label":"Indexdetails prüfen","href":"Check-Index-Details","type":2,"isActive":false},{"label":"Einen Index löschen","href":"Drop-an-Index","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create schema\n# 2.1. Create schema\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n)\n\n# 2.2. Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\n\n# 3. Create collection\nclient.create_collection(\n collection_name=\"customized_setup\", \n schema=schema, \n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder().fieldName(\"id\").dataType(DataType.Int64).isPrimaryKey(true).autoID(false).build());\nschema.addField(AddFieldReq.builder().fieldName(\"vector\").dataType(DataType.FloatVector).dimension(5).build());\n\n// 3 Create a collection without schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n.collectionName(\"customized_setup\")\n.collectionSchema(schema)\n.build();\n\nclient.createCollection(customizedSetupReq);\n","// 1. Set up a Milvus Client\nclient = new MilvusClient({address, token});\n\n// 2. Define fields for the collection\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n autoID: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n]\n\n// 3. Create a collection\nres = await client.createCollection({\n collection_name: \"customized_setup\",\n fields: fields,\n})\n\nconsole.log(res.error_code) \n\n// Output\n// \n// Success\n// \n","# 4.1. Set up the index parameters\nindex_params = MilvusClient.prepare_index_params()\n\n# 4.2. Add an index on the vector field.\nindex_params.add_index(\n field_name=\"vector\",\n metric_type=\"COSINE\",\n index_type=\"IVF_FLAT\",\n index_name=\"vector_index\",\n params={ \"nlist\": 128 }\n)\n\n# 4.3. Create an index file\nclient.create_index(\n collection_name=\"customized_setup\",\n index_params=index_params,\n sync=False # Whether to wait for index creation to complete before returning. 
Defaults to True.\n)\n","import io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.index.request.CreateIndexReq;\n\n// 4 Prepare index parameters\n\n// 4.2 Add an index for the vector field \"vector\"\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexName(\"vector_index\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.COSINE)\n .extraParams(Map.of(\"nlist\", 128))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n\n// 4.3 Crate an index file\nCreateIndexReq createIndexReq = CreateIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexParams(indexParams)\n .build();\n\nclient.createIndex(createIndexReq);\n","// 4. Set up index for the collection\n// 4.1. Set up the index parameters\nres = await client.createIndex({\n collection_name: \"customized_setup\",\n field_name: \"vector\",\n index_type: \"AUTOINDEX\",\n metric_type: \"COSINE\", \n index_name: \"vector_index\",\n params: { \"nlist\": 128 }\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","# 5. Describe index\nres = client.list_indexes(\n collection_name=\"customized_setup\"\n)\n\nprint(res)\n\n# Output\n#\n# [\n# \"vector_index\",\n# ]\n\nres = client.describe_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"index_type\": ,\n# \"metric_type\": \"COSINE\",\n# \"field_name\": \"vector\",\n# \"index_name\": \"vector_index\"\n# }\n","import io.milvus.v2.service.index.request.DescribeIndexReq;\nimport io.milvus.v2.service.index.response.DescribeIndexResp;\n\n// 5. Describe index\n// 5.1 List the index names\nListIndexesReq listIndexesReq = ListIndexesReq.builder()\n .collectionName(\"customized_setup\")\n .build();\n\nList indexNames = client.listIndexes(listIndexesReq);\n\nSystem.out.println(indexNames);\n\n// Output:\n// [\n// \"vector_index\"\n// ]\n\n// 5.2 Describe an index\nDescribeIndexReq describeIndexReq = DescribeIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nDescribeIndexResp describeIndexResp = client.describeIndex(describeIndexReq);\n\nSystem.out.println(JSONObject.toJSON(describeIndexResp));\n\n// Output:\n// {\n// \"metricType\": \"COSINE\",\n// \"indexType\": \"AUTOINDEX\",\n// \"fieldName\": \"vector\",\n// \"indexName\": \"vector_index\"\n// }\n","// 5. Describe the index\nres = await client.describeIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(JSON.stringify(res.index_descriptions, null, 2))\n\n// Output\n// \n// [\n// {\n// \"params\": [\n// {\n// \"key\": \"index_type\",\n// \"value\": \"AUTOINDEX\"\n// },\n// {\n// \"key\": \"metric_type\",\n// \"value\": \"COSINE\"\n// }\n// ],\n// \"index_name\": \"vector_index\",\n// \"indexID\": \"449007919953063141\",\n// \"field_name\": \"vector\",\n// \"indexed_rows\": \"0\",\n// \"total_rows\": \"0\",\n// \"state\": \"Finished\",\n// \"index_state_fail_reason\": \"\",\n// \"pending_index_rows\": \"0\"\n// }\n// ]\n// \n","# 6. Drop index\nclient.drop_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n","// 6. Drop index\n\nDropIndexReq dropIndexReq = DropIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nclient.dropIndex(dropIndexReq);\n","// 6. 
Drop the index\nres = await client.dropIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n"],"headingContent":"Index Vector Fields","anchorList":[{"label":"Vektorfelder indizieren","href":"Index-Vector-Fields","type":1,"isActive":false},{"label":"Übersicht","href":"Overview","type":2,"isActive":false},{"label":"Vorbereitungen","href":"Preparations","type":2,"isActive":false},{"label":"Indizieren einer Sammlung","href":"Index-a-Collection","type":2,"isActive":false},{"label":"Indexdetails prüfen","href":"Check-Index-Details","type":2,"isActive":false},{"label":"Einen Index löschen","href":"Drop-an-Index","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/userGuide/manage-indexes/index-vector-fields.md b/localization/v2.4.x/site/de/userGuide/manage-indexes/index-vector-fields.md index fe9ca92b3..1806e40ec 100644 --- a/localization/v2.4.x/site/de/userGuide/manage-indexes/index-vector-fields.md +++ b/localization/v2.4.x/site/de/userGuide/manage-indexes/index-vector-fields.md @@ -2,9 +2,9 @@ id: index-vector-fields.md order: 1 summary: >- - Dieses Handbuch führt Sie durch die grundlegenden Vorgänge zum Erstellen und - Verwalten von Indizes für Vektorfelder in einer Sammlung. -title: Index-Vektorfelder + Dieses Handbuch führt Sie durch die grundlegenden Operationen zum Erstellen + und Verwalten von Indizes für Vektorfelder in einer Sammlung. +title: Vektorfelder indizieren ---

    Vektorfelder indizieren

    Durch die Nutzung der in einer Indexdatei gespeicherten Metadaten organisiert Milvus Ihre Daten in einer spezialisierten Struktur, die ein schnelles Auffinden der gewünschten Informationen bei Suchen oder Abfragen erleichtert.

    -

    Milvus bietet verschiedene Indextypen und Metriken zum Sortieren von Feldwerten für effiziente Ähnlichkeitssuchen. In der folgenden Tabelle sind die unterstützten Indextypen und Metriken für verschiedene Vektorfeldtypen aufgeführt. Einzelheiten finden Sie unter In-memory Index und Ähnlichkeitsmetriken.

    +

    Milvus bietet verschiedene Indextypen und Metriken zum Sortieren von Feldwerten für effiziente Ähnlichkeitssuchen. In der folgenden Tabelle sind die unterstützten Indextypen und Metriken für verschiedene Vektorfeldtypen aufgeführt. Details finden Sie unter In-memory Index und Ähnlichkeitsmetriken.

    @@ -249,7 +249,8 @@ index_params.add_index(
    # 4.3. Create an index file
    client.create_index(
        collection_name="customized_setup",
    -   index_params=index_params
    +   index_params=index_params,
    +   sync=False # Whether to wait for index creation to complete before returning. Defaults to True.
    )
    import io.milvus.v2.common.IndexParam;
    @@ -331,6 +332,10 @@ res = await client.
    index_params
    Ein IndexParams-Objekt, das eine Liste von IndexParam-Objekten enthält.
         
    +    
    +      sync
    +      Steuert, wie der Index in Bezug auf die Anfrage des Clients aufgebaut wird. Gültige Werte:
    • True (Standard): Der Client wartet, bis der Index vollständig aufgebaut ist, bevor er zurückkehrt. Das bedeutet, dass Sie keine Antwort erhalten, bis der Prozess abgeschlossen ist.
    • False: Der Client kehrt sofort zurück, nachdem die Anfrage eingegangen ist, und der Index wird im Hintergrund aufgebaut. Um herauszufinden, ob die Indexerstellung abgeschlossen ist, verwenden Sie die Methode describe_index().
    +

    @@ -347,7 +352,7 @@ res = await client.
    indexName
    -
    +

    @@ -527,7 +532,7 @@ res = await client.
    // ]
    //
    -
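    Zur Veranschaulichung eine minimale Skizze (pymilvus), unter der Annahme der oben vorbereiteten index_params und der Sammlung customized_setup:

    client.create_index(
        collection_name="customized_setup",
        index_params=index_params,
        sync=False  # Return immediately; the index is built in the background
    )

    # Poll the build status afterwards
    res = client.describe_index(
        collection_name="customized_setup",
        index_name="vector_index"
    )

    print(res)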

    Sie können die Indexdatei überprüfen, die für ein bestimmtes Feld erstellt wurde, und die Statistik über die Anzahl der Zeilen, die mit dieser Indexdatei indiziert wurden, sammeln.

    +

    Sie können die Indexdatei prüfen, die für ein bestimmtes Feld erstellt wurde, und die Statistik über die Anzahl der mit dieser Indexdatei indizierten Zeilen sammeln.

    Einen Index löschen

    +

    Um bestimmte Felder in einer oder mehreren Partitionen zu laden, gehen Sie wie folgt vor:

    +
    client.load_partitions(
    +    collection_name="quick_setup",
    +    partition_names=["partitionA"],
    +    load_fields=["id", "vector"],
    +    skip_load_dynamic_field=True
    +)
    +
    +

    Beachten Sie, dass nur die in load_fields aufgeführten Felder als Filterbedingungen und Ausgabefelder in Suchen und Abfragen verwendet werden können. Sie sollten immer den Primärschlüssel in die Liste aufnehmen. Die Feldnamen, die vom Laden ausgeschlossen sind, stehen nicht für die Filterung oder Ausgabe zur Verfügung.

    +

    Sie können skip_load_dynamic_field=True verwenden, um das Laden des dynamischen Feldes zu überspringen. Milvus behandelt das dynamische Feld als ein einziges Feld, so dass alle Schlüssel des dynamischen Feldes gemeinsam ein- oder ausgeschlossen werden.

    Partitionen freigeben

    Um alle Partitionen einer Sammlung freizugeben, können Sie einfach release_collection() aufrufen. Um bestimmte Partitionen einer Sammlung freizugeben, verwenden Sie release_partitions().
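    Eine minimale Skizze (pymilvus), unter der Annahme der Sammlung quick_setup mit der Partition partitionA:

    # Release a specific partition
    client.release_partitions(
        collection_name="quick_setup",
        partition_names=["partitionA"]
    )

    # Release all partitions by releasing the entire collection
    client.release_collection(collection_name="quick_setup")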

    @@ -930,7 +939,7 @@ res = await client.
    rootCoord.maxPartitionNum konfigurieren. Einzelheiten finden Sie unter Systemkonfigurationen.

    +

    Standardmäßig erlaubt Milvus die Erstellung von maximal 1.024 Partitionen. Sie können die maximale Anzahl der Partitionen anpassen, indem Sie rootCoord.maxPartitionNum konfigurieren. Einzelheiten finden Sie unter Systemkonfigurationen.

  • Wie kann ich zwischen Partitionen und Partitionsschlüsseln unterscheiden?

    Partitionen sind physische Speichereinheiten, während Partitionsschlüssel logische Konzepte sind, die Daten automatisch bestimmten Partitionen auf der Grundlage einer bestimmten Spalte zuweisen.

    Wenn Sie beispielsweise in Milvus eine Sammlung mit einem Partitionsschlüssel haben, der als Feld color definiert ist, ordnet das System die Daten automatisch den Partitionen zu, basierend auf den Hash-Werten des Feldes color für jede Entität. Dieser automatisierte Prozess entbindet den Benutzer von der Verantwortung, die Partition beim Einfügen oder Suchen von Daten manuell anzugeben.
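    Zur Veranschaulichung eine minimale, hypothetische Skizze (pymilvus); Sammlungs- und Feldnamen sind frei gewählt:

    from pymilvus import MilvusClient, DataType

    client = MilvusClient(uri="http://localhost:19530")

    schema = MilvusClient.create_schema(auto_id=False, enable_dynamic_field=True)
    schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
    schema.add_field(field_name="vector", datatype=DataType.FLOAT_VECTOR, dim=5)
    schema.add_field(
        field_name="color",
        datatype=DataType.VARCHAR,
        max_length=64,
        is_partition_key=True  # Milvus hashes this field to assign each entity to a partition
    )

    client.create_collection(collection_name="partition_key_demo", schema=schema)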

    diff --git a/localization/v2.4.x/site/de/userGuide/search-query-get/single-vector-search.json b/localization/v2.4.x/site/de/userGuide/search-query-get/single-vector-search.json index 80d81e6b4..af3d69f7f 100644 --- a/localization/v2.4.x/site/de/userGuide/search-query-get/single-vector-search.json +++ b/localization/v2.4.x/site/de/userGuide/search-query-get/single-vector-search.json @@ -1 +1 @@ -{"codeList":["# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=CLUSTER_ENDPOINT,\n token=TOKEN \n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n metric_type=\"IP\"\n)\n\n# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"color_tag\": f\"{current_color}_{str(random.randint(1000, 9999))}\"\n })\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n\n# 6.1 Create partitions \nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"red\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"blue\"\n)\n\n# 6.1 Insert data into partitions\nred_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"red\", \"color_tag\": f\"red_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\nblue_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"blue\", \"color_tag\": f\"blue_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=red_data,\n partition_name=\"red\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=blue_data,\n partition_name=\"blue\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Random;\n\nimport com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp; \n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig); \n\n// 2. 
Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .metricType(\"IP\")\n .build();\n\nclient.createCollection(quickSetupReq);\n\nGetLoadStateReq loadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(loadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\n// 3. Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n\n// 6.1. Create a partition\nCreatePartitionReq partitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"red\")\n .build();\n\nclient.createPartition(partitionReq);\n\npartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"blue\")\n .build();\n\nclient.createPartition(partitionReq);\n\n// 6.2 Insert data into the partition\ndata = new ArrayList<>();\n\nfor (int i=1000; i<1500; i++) {\n Random rand = new Random();\n String current_color = \"red\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n} \n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"red\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n\ndata = new ArrayList<>();\n\nfor (int i=1500; i<2000; i++) {\n Random rand = new Random();\n String current_color = \"blue\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"blue\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. 
Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n metric_type: \"IP\"\n}); \n\n// 3. Insert randomly generated vectors\nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor (let i = 0; i < 1000; i++) {\n current_color = colors[Math.floor(Math.random() * colors.length)]\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n color_tag: `${current_color}_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nvar res = await client.insert({\n collection_name: \"quick_setup\",\n data: data\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"red\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"blue\"\n})\n\n// 6.1 Insert data into partitions\nvar red_data = []\nvar blue_data = []\n\nfor (let i = 1000; i < 1500; i++) {\n red_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"red\",\n color_tag: `red_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nfor (let i = 1500; i < 2000; i++) {\n blue_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"blue\",\n color_tag: `blue_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: red_data,\n partition_name: \"red\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: blue_data,\n partition_name: \"blue\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n","# Single vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n # Replace with your query vector\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\n# Convert the output to a formatted JSON string\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 4. Single vector search\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(3) // The number of results to return\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 4. 
Single vector search\nvar query_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 3, // The number of results to return\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {}\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {}\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {}\n },\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {}\n },\n {\n \"id\": 2,\n \"distance\": 0.5928734540939331,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [[\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\n \"vector\": [\n 0.87928146,\n 0.05324632,\n 0.6312755,\n 0.28005534,\n 0.9542448\n ],\n \"id\": 455\n }\n }\n]]}\n","[\n { score: 1.7463608980178833, id: '854' },\n { score: 1.744946002960205, id: '425' },\n { score: 1.7258622646331787, id: '718' }\n]\n","# Bulk-vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104],\n [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345]\n ], # Replace with your query vectors\n limit=2, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 5. Batch vector search\nquery_vectors = Arrays.asList(\n Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f),\n Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f)\n);\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(2)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 5. 
Batch vector search\nvar query_vectors = [\n [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104]\n]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: query_vectors,\n limit: 2,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 1,\n \"distance\": 1.3017789125442505,\n \"entity\": {}\n },\n {\n \"id\": 7,\n \"distance\": 1.2419954538345337,\n \"entity\": {}\n }\n ], # Result set 1\n [\n {\n \"id\": 3,\n \"distance\": 2.3358664512634277,\n \"entity\": {}\n },\n {\n \"id\": 8,\n \"distance\": 0.5642921924591064,\n \"entity\": {}\n }\n ] # Result set 2\n]\n","// Two sets of vectors are returned as expected\n\n{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n }\n ],\n [\n {\n \"score\": 1.8654699,\n \"fields\": {\n \"vector\": [\n 0.4671427,\n 0.8378432,\n 0.98844475,\n 0.82763994,\n 0.9729997\n ],\n \"id\": 638\n }\n },\n {\n \"score\": 1.8581753,\n \"fields\": {\n \"vector\": [\n 0.735541,\n 0.60140246,\n 0.86730254,\n 0.93152493,\n 0.98603314\n ],\n \"id\": 855\n }\n }\n ]\n]}\n","[\n [\n { score: 2.3590476512908936, id: '854' },\n { score: 2.2896690368652344, id: '59' }\n [\n { score: 2.664059638977051, id: '59' },\n { score: 2.59483003616333, id: '854' }\n ]\n]\n","# 6.2 Search within a partition\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"red\"]\n)\n\nprint(res)\n","// 6.3 Search within partitions\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"red\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 6.2 Search within partitions\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"red\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 16,\n \"distance\": 0.9200337529182434,\n \"entity\": {}\n },\n {\n \"id\": 14,\n \"distance\": 0.4505271911621094,\n \"entity\": {}\n },\n {\n \"id\": 15,\n \"distance\": 0.19924677908420563,\n \"entity\": {}\n },\n {\n \"id\": 17,\n \"distance\": 0.0075093843042850494,\n \"entity\": {}\n },\n {\n \"id\": 13,\n \"distance\": -0.14609718322753906,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1677284,\n \"fields\": {\n \"vector\": [\n 0.9986977,\n 0.17964739,\n 0.49086612,\n 0.23155272,\n 0.98438674\n ],\n \"id\": 1435\n }\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\n \"vector\": [\n 0.6952647,\n 0.13417172,\n 0.91045254,\n 0.119336545,\n 0.9338931\n ],\n \"id\": 1291\n }\n },\n {\n \"score\": 
1.0969629,\n \"fields\": {\n \"vector\": [\n 0.3363194,\n 0.028906643,\n 0.6675426,\n 0.030419827,\n 0.9735209\n ],\n \"id\": 1168\n }\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\n \"vector\": [\n 0.9980543,\n 0.36063594,\n 0.66427994,\n 0.17359233,\n 0.94954175\n ],\n \"id\": 1164\n }\n },\n {\n \"score\": 1.0584627,\n \"fields\": {\n \"vector\": [\n 0.7187005,\n 0.12674773,\n 0.987718,\n 0.3110777,\n 0.86093885\n ],\n \"id\": 1085\n }\n }\n ],\n [\n {\n \"score\": 1.8030131,\n \"fields\": {\n \"vector\": [\n 0.59726167,\n 0.7054632,\n 0.9573117,\n 0.94529945,\n 0.8664103\n ],\n \"id\": 1203\n }\n },\n {\n \"score\": 1.7728865,\n \"fields\": {\n \"vector\": [\n 0.6672442,\n 0.60448086,\n 0.9325822,\n 0.80272985,\n 0.8861626\n ],\n \"id\": 1448\n }\n },\n {\n \"score\": 1.7536311,\n \"fields\": {\n \"vector\": [\n 0.59663296,\n 0.77831805,\n 0.8578314,\n 0.88818026,\n 0.9030075\n ],\n \"id\": 1010\n }\n },\n {\n \"score\": 1.7520742,\n \"fields\": {\n \"vector\": [\n 0.854198,\n 0.72294194,\n 0.9245805,\n 0.86126596,\n 0.7969224\n ],\n \"id\": 1219\n }\n },\n {\n \"score\": 1.7452049,\n \"fields\": {\n \"vector\": [\n 0.96419,\n 0.943535,\n 0.87611496,\n 0.8268136,\n 0.79786557\n ],\n \"id\": 1149\n }\n }\n ]\n]}\n","[\n { score: 3.0258803367614746, id: '1201' },\n { score: 3.004319190979004, id: '1458' },\n { score: 2.880324363708496, id: '1187' },\n { score: 2.8246407508850098, id: '1347' },\n { score: 2.797295093536377, id: '1406' }\n]\n","res = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"blue\"]\n)\n\nprint(res)\n","searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"blue\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","res = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"blue\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 20,\n \"distance\": 2.363696813583374,\n \"entity\": {}\n },\n {\n \"id\": 26,\n \"distance\": 1.0665391683578491,\n \"entity\": {}\n },\n {\n \"id\": 23,\n \"distance\": 1.066049575805664,\n \"entity\": {}\n },\n {\n \"id\": 29,\n \"distance\": 0.8353596925735474,\n \"entity\": {}\n },\n {\n \"id\": 28,\n \"distance\": 0.7484277486801147,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1628494,\n \"fields\": {\n \"vector\": [\n 0.7442872,\n 0.046407282,\n 0.71031404,\n 0.3544345,\n 0.9819991\n ],\n \"id\": 1992\n }\n },\n {\n \"score\": 1.1470042,\n \"fields\": {\n \"vector\": [\n 0.5505825,\n 0.04367262,\n 0.9985836,\n 0.18922359,\n 0.93255126\n ],\n \"id\": 1977\n }\n },\n {\n \"score\": 1.1450152,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.1439825,\n \"fields\": {\n \"vector\": [\n 0.9253267,\n 0.15890503,\n 0.7999555,\n 0.19126713,\n 0.898583\n ],\n \"id\": 1552\n }\n },\n {\n \"score\": 1.1029172,\n \"fields\": {\n \"vector\": [\n 0.95661926,\n 0.18777144,\n 0.38115507,\n 0.14323527,\n 0.93137646\n ],\n \"id\": 1823\n }\n }\n ],\n [\n {\n \"score\": 1.8005109,\n \"fields\": {\n \"vector\": [\n 0.5953582,\n 0.7794224,\n 0.9388869,\n 0.79825854,\n 0.9197286\n ],\n \"id\": 1888\n }\n },\n {\n \"score\": 1.7714822,\n \"fields\": {\n \"vector\": [\n 0.56805456,\n 
0.89422905,\n 0.88187534,\n 0.914824,\n 0.8944365\n ],\n \"id\": 1648\n }\n },\n {\n \"score\": 1.7561421,\n \"fields\": {\n \"vector\": [\n 0.83421993,\n 0.39865613,\n 0.92319834,\n 0.42695504,\n 0.96633124\n ],\n \"id\": 1688\n }\n },\n {\n \"score\": 1.7553532,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.7543385,\n \"fields\": {\n \"vector\": [\n 0.16542226,\n 0.38248396,\n 0.9888778,\n 0.80913955,\n 0.9501492\n ],\n \"id\": 1544\n }\n }\n ]\n]}\n","[\n { score: 2.8421106338500977, id: '1745' },\n { score: 2.838560104370117, id: '1782' },\n { score: 2.8134000301361084, id: '1511' },\n { score: 2.718268871307373, id: '1679' },\n { score: 2.7014894485473633, id: '1597' }\n]\n","# Search with output fields\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"] # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 7. Search with output fields\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 7. Search with output fields\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n output_fields: [\"color\"],\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {\n \"color\": \"pink_8682\"\n }\n },\n {\n \"id\": 16,\n \"distance\": 1.0159327983856201,\n \"entity\": {\n \"color\": \"yellow_1496\"\n }\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {}\n }\n ]\n]}\n","\n[\n { score: 3.036271572113037, id: '59', color: 'orange' },\n { score: 3.0267879962921143, id: '1745', color: 'blue' },\n { score: 3.0069446563720703, id: '854', color: 'black' },\n { score: 2.984386682510376, id: '718', color: 'black' },\n { score: 2.916019916534424, id: '425', color: 'purple' }\n]\n","# Search with filter\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. 
number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"red%\"'\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color_tag like \\\"red%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"red%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n },\n {\n \"id\": 6,\n \"distance\": -0.4113418459892273,\n \"entity\": {\n \"color\": \"red_9392\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1677284,\n \"fields\": {\"color_tag\": \"red_9030\"}\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\"color_tag\": \"red_3744\"}\n },\n {\n \"score\": 1.0969629,\n \"fields\": {\"color_tag\": \"red_4168\"}\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\"color_tag\": \"red_9678\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'red_8904' },\n { score: 2.491129159927368, id: '425', color_tag: 'purple_8212' },\n { score: 2.4889798164367676, id: '1458', color_tag: 'red_6891' },\n { score: 2.42964243888855, id: '724', color_tag: 'black_9885' },\n { score: 2.4004223346710205, id: '854', color_tag: 'black_5990' }\n]\n","# Infix match on color field\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"%ll%\"' # Filter on color field, infix match on \"ll\"\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color like \\\"%ll%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. 
Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"%ll%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {\n \"color\": \"yellow_4222\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"yellow_4222\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'yellow_4222' }\n]\n","# Conduct a range search\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\n \"radius\": 0.8, # Radius of the search circle\n \"range_filter\": 1.0 # Range filter to filter out vectors that are not within the search circle\n }\n}\n\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=3, # Max. number of search results to return\n search_params=search_params, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 9. Range search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .searchParams(Map.of(\"radius\", 0.1, \"range\", 1.0))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 9. 
Range search\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n params: {\n radius: 0.1,\n range: 1.0\n },\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\"color_tag\": \"green_2052\"}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\"color_tag\": \"purple_3709\"}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {\"color_tag\": \"black_1646\"}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {\"color_tag\": \"green_4853\"}\n }\n ]\n]}\n","[\n { score: 2.3387961387634277, id: '718', color_tag: 'black_7154' },\n { score: 2.3352415561676025, id: '1745', color_tag: 'blue_8741' },\n { score: 2.290485382080078, id: '1408', color_tag: 'red_2324' },\n { score: 2.285870313644409, id: '854', color_tag: 'black_5990' },\n { score: 2.2593345642089844, id: '1309', color_tag: 'red_8458' }\n]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Group search results\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=[[0.14529211512077012, 0.9147257273453546, 0.7965055218724449, 0.7009258593102812, 0.5605206522382088]], # Query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=10, # Max. number of search results to return\n group_by_field=\"doc_id\", # Group results by document ID\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\n\nprint(doc_ids)\n","[5, 10, 1, 7, 9, 6, 3, 4, 8, 2]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Search without `group_by_field`\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=query_passage_vector, # Replace with your query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=10, # Max. 
number of search results to return\n # group_by_field=\"doc_id\", # Group results by document ID\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\n\nprint(doc_ids)\n","[1, 10, 3, 10, 1, 9, 4, 4, 8, 6]\n","# In normal cases, you do not need to set search parameters manually\n# Except for range searches.\nsearch_parameters = {\n 'metric_type': 'L2',\n 'params': {\n 'nprobe': 10,\n 'level': 1,\n 'radius': 1.0\n 'range_filter': 0.8\n }\n}\n"],"headingContent":"","anchorList":[{"label":"Einzelvektor-Suche","href":"Single-Vector-Search","type":1,"isActive":false},{"label":"Überblick","href":"Overview","type":2,"isActive":false},{"label":"Vorbereitungen","href":"Preparations","type":2,"isActive":false},{"label":"Einfache Suche","href":"Basic-search","type":2,"isActive":false},{"label":"Gefilterte Suche","href":"Filtered-search","type":2,"isActive":false},{"label":"Bereichssuche","href":"Range-search","type":2,"isActive":false},{"label":"Gruppierungssuche","href":"Grouping-search","type":2,"isActive":false},{"label":"Suchparameter","href":"Search-parameters","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=CLUSTER_ENDPOINT,\n token=TOKEN \n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n metric_type=\"IP\"\n)\n\n# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"color_tag\": f\"{current_color}_{str(random.randint(1000, 9999))}\"\n })\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n\n# 6.1 Create partitions \nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"red\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"blue\"\n)\n\n# 6.1 Insert data into partitions\nred_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"red\", \"color_tag\": f\"red_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\nblue_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"blue\", \"color_tag\": f\"blue_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=red_data,\n partition_name=\"red\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=blue_data,\n partition_name=\"blue\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Random;\n\nimport com.alibaba.fastjson.JSONObject;\n\nimport 
io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp; \n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig); \n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .metricType(\"IP\")\n .build();\n\nclient.createCollection(quickSetupReq);\n\nGetLoadStateReq loadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(loadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\n// 3. Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n\n// 6.1. 
Create a partition\nCreatePartitionReq partitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"red\")\n .build();\n\nclient.createPartition(partitionReq);\n\npartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"blue\")\n .build();\n\nclient.createPartition(partitionReq);\n\n// 6.2 Insert data into the partition\ndata = new ArrayList<>();\n\nfor (int i=1000; i<1500; i++) {\n Random rand = new Random();\n String current_color = \"red\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n} \n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"red\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n\ndata = new ArrayList<>();\n\nfor (int i=1500; i<2000; i++) {\n Random rand = new Random();\n String current_color = \"blue\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"blue\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n metric_type: \"IP\"\n}); \n\n// 3. 
Insert randomly generated vectors\nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor (let i = 0; i < 1000; i++) {\n current_color = colors[Math.floor(Math.random() * colors.length)]\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n color_tag: `${current_color}_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nvar res = await client.insert({\n collection_name: \"quick_setup\",\n data: data\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"red\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"blue\"\n})\n\n// 6.1 Insert data into partitions\nvar red_data = []\nvar blue_data = []\n\nfor (let i = 1000; i < 1500; i++) {\n red_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"red\",\n color_tag: `red_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nfor (let i = 1500; i < 2000; i++) {\n blue_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"blue\",\n color_tag: `blue_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: red_data,\n partition_name: \"red\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: blue_data,\n partition_name: \"blue\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n","# Single vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n # Replace with your query vector\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\n# Convert the output to a formatted JSON string\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 4. Single vector search\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(3) // The number of results to return\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 4. 
Single vector search\nvar query_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 3, // The number of results to return\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {}\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {}\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {}\n },\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {}\n },\n {\n \"id\": 2,\n \"distance\": 0.5928734540939331,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [[\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\n \"vector\": [\n 0.87928146,\n 0.05324632,\n 0.6312755,\n 0.28005534,\n 0.9542448\n ],\n \"id\": 455\n }\n }\n]]}\n","[\n { score: 1.7463608980178833, id: '854' },\n { score: 1.744946002960205, id: '425' },\n { score: 1.7258622646331787, id: '718' }\n]\n","# Bulk-vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104],\n [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345]\n ], # Replace with your query vectors\n limit=2, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 5. Batch vector search\nquery_vectors = Arrays.asList(\n Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f),\n Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f)\n);\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(2)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 5. 
Batch vector search\nvar query_vectors = [\n [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104]\n]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: query_vectors,\n limit: 2,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 1,\n \"distance\": 1.3017789125442505,\n \"entity\": {}\n },\n {\n \"id\": 7,\n \"distance\": 1.2419954538345337,\n \"entity\": {}\n }\n ], # Result set 1\n [\n {\n \"id\": 3,\n \"distance\": 2.3358664512634277,\n \"entity\": {}\n },\n {\n \"id\": 8,\n \"distance\": 0.5642921924591064,\n \"entity\": {}\n }\n ] # Result set 2\n]\n","// Two sets of vectors are returned as expected\n\n{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n }\n ],\n [\n {\n \"score\": 1.8654699,\n \"fields\": {\n \"vector\": [\n 0.4671427,\n 0.8378432,\n 0.98844475,\n 0.82763994,\n 0.9729997\n ],\n \"id\": 638\n }\n },\n {\n \"score\": 1.8581753,\n \"fields\": {\n \"vector\": [\n 0.735541,\n 0.60140246,\n 0.86730254,\n 0.93152493,\n 0.98603314\n ],\n \"id\": 855\n }\n }\n ]\n]}\n","[\n [\n { score: 2.3590476512908936, id: '854' },\n { score: 2.2896690368652344, id: '59' }\n [\n { score: 2.664059638977051, id: '59' },\n { score: 2.59483003616333, id: '854' }\n ]\n]\n","# 6.2 Search within a partition\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"red\"]\n)\n\nprint(res)\n","// 6.3 Search within partitions\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"red\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 6.2 Search within partitions\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"red\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 16,\n \"distance\": 0.9200337529182434,\n \"entity\": {}\n },\n {\n \"id\": 14,\n \"distance\": 0.4505271911621094,\n \"entity\": {}\n },\n {\n \"id\": 15,\n \"distance\": 0.19924677908420563,\n \"entity\": {}\n },\n {\n \"id\": 17,\n \"distance\": 0.0075093843042850494,\n \"entity\": {}\n },\n {\n \"id\": 13,\n \"distance\": -0.14609718322753906,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1677284,\n \"fields\": {\n \"vector\": [\n 0.9986977,\n 0.17964739,\n 0.49086612,\n 0.23155272,\n 0.98438674\n ],\n \"id\": 1435\n }\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\n \"vector\": [\n 0.6952647,\n 0.13417172,\n 0.91045254,\n 0.119336545,\n 0.9338931\n ],\n \"id\": 1291\n }\n },\n {\n \"score\": 
1.0969629,\n \"fields\": {\n \"vector\": [\n 0.3363194,\n 0.028906643,\n 0.6675426,\n 0.030419827,\n 0.9735209\n ],\n \"id\": 1168\n }\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\n \"vector\": [\n 0.9980543,\n 0.36063594,\n 0.66427994,\n 0.17359233,\n 0.94954175\n ],\n \"id\": 1164\n }\n },\n {\n \"score\": 1.0584627,\n \"fields\": {\n \"vector\": [\n 0.7187005,\n 0.12674773,\n 0.987718,\n 0.3110777,\n 0.86093885\n ],\n \"id\": 1085\n }\n }\n ],\n [\n {\n \"score\": 1.8030131,\n \"fields\": {\n \"vector\": [\n 0.59726167,\n 0.7054632,\n 0.9573117,\n 0.94529945,\n 0.8664103\n ],\n \"id\": 1203\n }\n },\n {\n \"score\": 1.7728865,\n \"fields\": {\n \"vector\": [\n 0.6672442,\n 0.60448086,\n 0.9325822,\n 0.80272985,\n 0.8861626\n ],\n \"id\": 1448\n }\n },\n {\n \"score\": 1.7536311,\n \"fields\": {\n \"vector\": [\n 0.59663296,\n 0.77831805,\n 0.8578314,\n 0.88818026,\n 0.9030075\n ],\n \"id\": 1010\n }\n },\n {\n \"score\": 1.7520742,\n \"fields\": {\n \"vector\": [\n 0.854198,\n 0.72294194,\n 0.9245805,\n 0.86126596,\n 0.7969224\n ],\n \"id\": 1219\n }\n },\n {\n \"score\": 1.7452049,\n \"fields\": {\n \"vector\": [\n 0.96419,\n 0.943535,\n 0.87611496,\n 0.8268136,\n 0.79786557\n ],\n \"id\": 1149\n }\n }\n ]\n]}\n","[\n { score: 3.0258803367614746, id: '1201' },\n { score: 3.004319190979004, id: '1458' },\n { score: 2.880324363708496, id: '1187' },\n { score: 2.8246407508850098, id: '1347' },\n { score: 2.797295093536377, id: '1406' }\n]\n","res = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"blue\"]\n)\n\nprint(res)\n","searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"blue\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","res = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"blue\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 20,\n \"distance\": 2.363696813583374,\n \"entity\": {}\n },\n {\n \"id\": 26,\n \"distance\": 1.0665391683578491,\n \"entity\": {}\n },\n {\n \"id\": 23,\n \"distance\": 1.066049575805664,\n \"entity\": {}\n },\n {\n \"id\": 29,\n \"distance\": 0.8353596925735474,\n \"entity\": {}\n },\n {\n \"id\": 28,\n \"distance\": 0.7484277486801147,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1628494,\n \"fields\": {\n \"vector\": [\n 0.7442872,\n 0.046407282,\n 0.71031404,\n 0.3544345,\n 0.9819991\n ],\n \"id\": 1992\n }\n },\n {\n \"score\": 1.1470042,\n \"fields\": {\n \"vector\": [\n 0.5505825,\n 0.04367262,\n 0.9985836,\n 0.18922359,\n 0.93255126\n ],\n \"id\": 1977\n }\n },\n {\n \"score\": 1.1450152,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.1439825,\n \"fields\": {\n \"vector\": [\n 0.9253267,\n 0.15890503,\n 0.7999555,\n 0.19126713,\n 0.898583\n ],\n \"id\": 1552\n }\n },\n {\n \"score\": 1.1029172,\n \"fields\": {\n \"vector\": [\n 0.95661926,\n 0.18777144,\n 0.38115507,\n 0.14323527,\n 0.93137646\n ],\n \"id\": 1823\n }\n }\n ],\n [\n {\n \"score\": 1.8005109,\n \"fields\": {\n \"vector\": [\n 0.5953582,\n 0.7794224,\n 0.9388869,\n 0.79825854,\n 0.9197286\n ],\n \"id\": 1888\n }\n },\n {\n \"score\": 1.7714822,\n \"fields\": {\n \"vector\": [\n 0.56805456,\n 
0.89422905,\n 0.88187534,\n 0.914824,\n 0.8944365\n ],\n \"id\": 1648\n }\n },\n {\n \"score\": 1.7561421,\n \"fields\": {\n \"vector\": [\n 0.83421993,\n 0.39865613,\n 0.92319834,\n 0.42695504,\n 0.96633124\n ],\n \"id\": 1688\n }\n },\n {\n \"score\": 1.7553532,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.7543385,\n \"fields\": {\n \"vector\": [\n 0.16542226,\n 0.38248396,\n 0.9888778,\n 0.80913955,\n 0.9501492\n ],\n \"id\": 1544\n }\n }\n ]\n]}\n","[\n { score: 2.8421106338500977, id: '1745' },\n { score: 2.838560104370117, id: '1782' },\n { score: 2.8134000301361084, id: '1511' },\n { score: 2.718268871307373, id: '1679' },\n { score: 2.7014894485473633, id: '1597' }\n]\n","# Search with output fields\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"] # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 7. Search with output fields\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 7. Search with output fields\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n output_fields: [\"color\"],\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {\n \"color\": \"pink_8682\"\n }\n },\n {\n \"id\": 16,\n \"distance\": 1.0159327983856201,\n \"entity\": {\n \"color\": \"yellow_1496\"\n }\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {}\n }\n ]\n]}\n","\n[\n { score: 3.036271572113037, id: '59', color: 'orange' },\n { score: 3.0267879962921143, id: '1745', color: 'blue' },\n { score: 3.0069446563720703, id: '854', color: 'black' },\n { score: 2.984386682510376, id: '718', color: 'black' },\n { score: 2.916019916534424, id: '425', color: 'purple' }\n]\n","# Search with filter\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. 
number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"red%\"'\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color_tag like \\\"red%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"red%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n },\n {\n \"id\": 6,\n \"distance\": -0.4113418459892273,\n \"entity\": {\n \"color\": \"red_9392\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1677284,\n \"fields\": {\"color_tag\": \"red_9030\"}\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\"color_tag\": \"red_3744\"}\n },\n {\n \"score\": 1.0969629,\n \"fields\": {\"color_tag\": \"red_4168\"}\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\"color_tag\": \"red_9678\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'red_8904' },\n { score: 2.491129159927368, id: '425', color_tag: 'purple_8212' },\n { score: 2.4889798164367676, id: '1458', color_tag: 'red_6891' },\n { score: 2.42964243888855, id: '724', color_tag: 'black_9885' },\n { score: 2.4004223346710205, id: '854', color_tag: 'black_5990' }\n]\n","# Infix match on color field\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"%ll%\"' # Filter on color field, infix match on \"ll\"\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color like \\\"%ll%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. 
Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"%ll%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {\n \"color\": \"yellow_4222\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"yellow_4222\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'yellow_4222' }\n]\n","# Conduct a range search\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\n \"radius\": 0.8, # Radius of the search circle\n \"range_filter\": 1.0 # Range filter to filter out vectors that are not within the search circle\n }\n}\n\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=3, # Max. number of search results to return\n search_params=search_params, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 9. Range search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .searchParams(Map.of(\"radius\", 0.1, \"range\", 1.0))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 9. 
Range search\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n params: {\n radius: 0.1,\n range: 1.0\n },\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\"color_tag\": \"green_2052\"}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\"color_tag\": \"purple_3709\"}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {\"color_tag\": \"black_1646\"}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {\"color_tag\": \"green_4853\"}\n }\n ]\n]}\n","[\n { score: 2.3387961387634277, id: '718', color_tag: 'black_7154' },\n { score: 2.3352415561676025, id: '1745', color_tag: 'blue_8741' },\n { score: 2.290485382080078, id: '1408', color_tag: 'red_2324' },\n { score: 2.285870313644409, id: '854', color_tag: 'black_5990' },\n { score: 2.2593345642089844, id: '1309', color_tag: 'red_8458' }\n]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Group search results\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=[[0.14529211512077012, 0.9147257273453546, 0.7965055218724449, 0.7009258593102812, 0.5605206522382088]], # Query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=5, # Max. number of groups to return\n group_by_field=\"doc_id\", # Group results by document ID\n group_size=2, # returned at most 2 passages per document, the default value is 1\n group_strict_size=True, # ensure every group contains exactly 3 passages\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\npassage_ids = [result['entity']['passage_id'] for result in res[0]]\n\nprint(doc_ids)\nprint(passage_ids)\n","[\"doc_11\", \"doc_11\", \"doc_7\", \"doc_7\", \"doc_3\", \"doc_3\", \"doc_2\", \"doc_2\", \"doc_8\", \"doc_8\"]\n[5, 10, 11, 10, 9, 6, 5, 4, 9, 2]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Search without `group_by_field`\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=query_passage_vector, # Replace with your query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=5, # Max. 
number of search results to return\n # group_by_field=\"doc_id\", # Group results by document ID\n # group_size=2, \n # group_strict_size=True,\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\npassage_ids = [result['entity']['passage_id'] for result in res[0]]\n\nprint(doc_ids)\nprint(passage_ids)\n","[\"doc_11\", \"doc_11\", \"doc_11\", \"doc_11\", \"doc_11\"]\n[1, 10, 3, 12, 9]\n","# In normal cases, you do not need to set search parameters manually\n# Except for range searches.\nsearch_parameters = {\n 'metric_type': 'L2',\n 'params': {\n 'nprobe': 10,\n 'level': 1,\n 'radius': 1.0\n 'range_filter': 0.8\n }\n}\n"],"headingContent":"Single-Vector Search","anchorList":[{"label":"Einzelvektor-Suche","href":"Single-Vector-Search","type":1,"isActive":false},{"label":"Überblick","href":"Overview","type":2,"isActive":false},{"label":"Vorbereitungen","href":"Preparations","type":2,"isActive":false},{"label":"Einfache Suche","href":"Basic-search","type":2,"isActive":false},{"label":"Gefilterte Suche","href":"Filtered-search","type":2,"isActive":false},{"label":"Bereichssuche","href":"Range-search","type":2,"isActive":false},{"label":"Gruppierungssuche","href":"Grouping-search","type":2,"isActive":false},{"label":"Suchparameter","href":"Search-parameters","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/userGuide/search-query-get/single-vector-search.md b/localization/v2.4.x/site/de/userGuide/search-query-get/single-vector-search.md index 667051107..9e870c1de 100644 --- a/localization/v2.4.x/site/de/userGuide/search-query-get/single-vector-search.md +++ b/localization/v2.4.x/site/de/userGuide/search-query-get/single-vector-search.md @@ -4,7 +4,7 @@ order: 1 summary: >- Dieser Artikel beschreibt die Suche nach Vektoren in einer Milvus-Sammlung anhand eines einzelnen Abfragevektors. -title: Ein-Vektor-Suche +title: Einzelvektor-Suche ---

Single-Vector Search

• Filtered search: Applies filtering criteria based on scalar fields to refine the search results.

• Range search: Finds vectors within a specified distance range from the query vector.

• - Grouping search: Groups the search results based on a specific field to ensure diversity in the results.

• + Grouping search: Groups search results based on a specific field to ensure diversity in the results.

• Preparations

- The name of the index file that will be created after this object is applied.
+ The name of the index file that was created after this object is applied.

indexType

- The number of records to return in the search result. This parameter uses the same syntax as the limit parameter, so you should set only one of the two.
You can use this parameter in combination with offset in param to enable pagination.
The sum of this value and offset in param should be less than 16,384.
+ The number of records to return in the search result. This parameter uses the same syntax as the limit parameter, so you should set only one of the two parameters.
You can use this parameter in combination with offset in param to enable pagination.
The sum of this value and offset in param should be less than 16,384.
@@ -1365,7 +1365,7 @@ res = await client.
… 'like "%suffix"'.

When matching a single character, the underscore (_) serves as a wildcard for one character, e.g. 'like "y_llow"'.

    -

Special characters in search strings

If you want to search for a string that contains special characters such as underscores (_) or percent signs (%), which are normally used as wildcards in search patterns (_ for a single character and % for any sequence of characters), you must escape these characters so that they are treated as literals. Use a backslash (\) to escape special characters, and remember to escape the backslash itself. For example:

    +

Special characters in search strings

If you want to search for a string that contains special characters such as underscores (_) or percent signs (%), which are normally used as wildcards in search patterns (_ for a single character and % for any sequence of characters), you must escape these characters so that they are treated as literals. Use a backslash (\) to escape special characters, and remember to escape the backslash itself. For example:

• To search for a literal underscore, use \\_.
• To search for a literal percent sign, use \\%.
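A minimal sketch of such an escaped filter in Python (assuming the quick_setup collection and client from the earlier examples; the query vector is a placeholder):

# The raw string keeps the double backslash intact, so the filter expression
# contains \\_ and matches a literal "_" rather than any single character.
res = client.search(
    collection_name="quick_setup",              # assumed collection from above
    data=[[0.35, -0.60, 0.18, -0.26, 0.90]],    # placeholder query vector
    limit=5,
    filter=r'color_tag like "red\\_%"',         # tags starting with literal "red_"
    output_fields=["color_tag"]
)
print(res)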
@@ -1696,7 +1696,7 @@ res = await client.

L2: Smaller L2 distances indicate higher similarity. To exclude the closest vectors from the results, ensure that: range_filter <= distance < radius
IP: Larger IP distances indicate higher similarity. To exclude the closest vectors from the results, ensure that: radius <= distance <= range_filter
- COSINE: A larger cosine value indicates greater similarity. To exclude the closest vectors from the results, ensure that: radius < distance <= range_filter
+ COSINE: A larger cosine value indicates greater similarity. To exclude the closest vectors from the results, ensure that: radius < distance <= range_filter
JACCARD: Smaller Jaccard distances indicate higher similarity. To exclude the closest vectors from the results, ensure that: range_filter <= distance < radius
HAMMING: Smaller Hamming distances indicate higher similarity. To exclude the closest vectors from the results, ensure that: range_filter <= distance < radius
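To make the L2 rule concrete, here is a minimal range-search sketch in Python (assuming a collection indexed with the L2 metric; the collection name and query vector are placeholders):

search_params = {
    "metric_type": "L2",
    "params": {
        "radius": 1.0,        # outer bound of the search circle
        "range_filter": 0.4   # inner bound; excludes the closest vectors
    }
}

# Hits are returned only where 0.4 <= distance < 1.0.
res = client.search(
    collection_name="test_collection",          # placeholder collection name
    data=[[0.35, -0.60, 0.18, -0.26, 0.90]],    # placeholder query vector
    limit=3,
    search_params=search_params
)
print(res)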
@@ -1717,8 +1717,9 @@ res = await client.
-

In Milvus, grouping the search by a specific field can avoid redundancy of the same field entry in the results. You can get a varied set of results for a specific field.

      -

Consider a collection of documents, where each document is split into various passages. Each passage is represented by one vector embedding and belongs to one document. To find relevant documents instead of similar passages, you can include the group_by_field argument in the search() operation to group the results by document ID. This returns the most relevant and unique documents rather than separate passages from the same document.

      +

In Milvus, grouping search is designed to improve the comprehensiveness and accuracy of search results.

      +

Consider a scenario in RAG, where a corpus of documents is split into various passages and each passage is represented by one vector embedding. Users want to find the most relevant passages to prompt the LLMs accurately. The ordinary Milvus search function can meet this requirement, but it can yield heavily skewed and biased results: most of the passages come from only a few documents, and the comprehensiveness of the search results is very poor. This can seriously impair the accuracy or even the correctness of the results given by the LLM and negatively affect the experience of LLM users.

      +

Grouping search can solve this problem effectively. By passing a group_by_field and a group_size, Milvus users can split the search results into several groups and ensure that the number of entities from each group does not exceed the given group_size. This feature can significantly increase the comprehensiveness and fairness of the search results and noticeably improve the quality of the LLM output.

Here is the example code for grouping search results by field:

      # Connect to Milvus
       client = MilvusClient(uri='http://localhost:19530') # Milvus server address
      @@ -1734,21 +1735,26 @@ res = client.search(
           "metric_type": "L2",
           "params": {"nprobe": 10},
           }, # Search parameters
      -    limit=10, # Max. number of search results to return
      +    limit=5, # Max. number of groups to return
           group_by_field="doc_id", # Group results by document ID
+    group_size=2, # return at most 2 passages per document; the default value is 1
+    group_strict_size=True, # ensure every group contains exactly 2 passages
           output_fields=["doc_id", "passage_id"]
       )
       
       # Retrieve the values in the `doc_id` column
       doc_ids = [result['entity']['doc_id'] for result in res[0]]
      +passage_ids = [result['entity']['passage_id'] for result in res[0]]
       
       print(doc_ids)
      +print(passage_ids)
       

The output is similar to the following:

      -
      [5, 10, 1, 7, 9, 6, 3, 4, 8, 2]
      +
      ["doc_11", "doc_11", "doc_7", "doc_7", "doc_3", "doc_3", "doc_2", "doc_2", "doc_8", "doc_8"]
      +[5, 10, 11, 10, 9, 6, 5, 4, 9, 2]
       
      -

In the given output, it can be seen that the returned entities contain no duplicate doc_id values.

      -

For comparison, let us comment out group_by_field and conduct a regular search:

      +

In the given output, exactly two passages are retrieved for each document, and a total of 5 documents make up the results.

      +

For comparison, we comment out the group-related parameters and conduct a regular search:

      # Connect to Milvus
       client = MilvusClient(uri='http://localhost:19530') # Milvus server address
       
      @@ -1763,27 +1769,33 @@ res = client.search(
           "metric_type": "L2",
           "params": {"nprobe": 10},
           }, # Search parameters
      -    limit=10, # Max. number of search results to return
      +    limit=5, # Max. number of search results to return
           # group_by_field="doc_id", # Group results by document ID
      +    # group_size=2, 
      +    # group_strict_size=True,
           output_fields=["doc_id", "passage_id"]
       )
       
       # Retrieve the values in the `doc_id` column
       doc_ids = [result['entity']['doc_id'] for result in res[0]]
      +passage_ids = [result['entity']['passage_id'] for result in res[0]]
       
       print(doc_ids)
      +print(passage_ids)
       

The output is similar to the following:

      -
      [1, 10, 3, 10, 1, 9, 4, 4, 8, 6]
      +
      ["doc_11", "doc_11", "doc_11", "doc_11", "doc_11"]
      +[1, 10, 3, 12, 9]
       
      -

In the given output, it can be seen that the returned entities contain duplicate doc_id values.

      +

In the given output, it can be observed that "doc_11" completely dominates the search results, overshadowing the high-quality passages from other documents, which can make for a poor prompt to the LLM.

      +

One more note: by default, grouping search returns results immediately once it has enough groups, which can mean that the number of results in each group falls short of group_size. If the number of results per group matters to you, set group_strict_size=True as shown in the code above. This makes Milvus strive to obtain enough results for each group, at a slight cost in performance.
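As a minimal sketch of the difference (same hypothetical group_search collection and client as above; only the flag changes):

# With the default group_strict_size=False, Milvus may return groups holding
# fewer than group_size passages; True asks it to try to fill each group.
res = client.search(
    collection_name="group_search",
    data=[[0.14, 0.91, 0.80, 0.70, 0.56]],   # placeholder query vector
    search_params={"metric_type": "L2", "params": {"nprobe": 10}},
    limit=5,                      # max. number of groups to return
    group_by_field="doc_id",
    group_size=2,
    group_strict_size=False,      # default behavior: groups may be under-filled
    output_fields=["doc_id", "passage_id"]
)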

Limitations

• Indexing: This grouping feature works only for collections indexed with the HNSW, IVF_FLAT, or FLAT index type. For more information, refer to In-memory Index.

• Vector: Currently, grouping search does not support a vector field of the BINARY_VECTOR type. For more information on data types, refer to Supported data types.

• Field: Currently, grouping search allows only a single column. You cannot specify multiple field names in the group_by_field configuration. Additionally, grouping search is incompatible with the JSON, FLOAT, DOUBLE, and ARRAY data types and with vector fields.

• Performance impact: Be aware that performance degrades as the number of query vectors increases. Using a cluster with 2 CPU cores and 8 GB of memory as an example, the execution time of grouping search increases proportionally with the number of input query vectors.

• - Functionality: Currently, grouping search is not supported by range search, search iterators, or hybrid search.

• + Functionality: Currently, grouping search is not supported by range search or search iterators.

Search parameters

Milvus provides search and query iterators for iterating over results with a large number of entities. Since Milvus caps TopK at 16,384, users can use iterators to return large numbers of entities, or even all entities, in a collection in batch mode.

      +

Milvus provides search and query iterators for iterating through a large volume of entities. Since Milvus caps TopK at 16,384, users can use iterators to return large numbers of entities, or even all entities, in a collection in batch mode.

Overview

Iterators are powerful tools that let you traverse a large amount of data, or all of the data, within a collection using primary key values and boolean expressions. This can significantly improve the way you retrieve data. Unlike the traditional use of offset and limit parameters, which can lose efficiency over time, iterators offer a more scalable solution.

      +

Iterators are an efficient tool for scanning an entire collection or iterating through a large number of entities by specifying primary key values or a filter expression. Compared with a search or query call with offset and limit parameters, using iterators is more efficient and scalable.
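A minimal sketch of that flow with a search iterator (assuming a pymilvus Collection named quick_setup with a vector field; parameter names follow the snippet further down this page):

from pymilvus import connections, Collection

connections.connect(uri="http://localhost:19530")  # assumed local server
collection = Collection("quick_setup")             # assumed collection name

iterator = collection.search_iterator(
    data=[[0.35, -0.60, 0.18, -0.26, 0.90]],  # placeholder query vector
    anns_field="vector",
    param={"metric_type": "IP", "params": {}},
    batch_size=10,     # entities returned per next() call
    limit=300,         # total number of entities to fetch
    output_fields=["color_tag"]
)

results = []
while True:
    batch = iterator.next()
    if not batch:
        iterator.close()   # release server-side resources when exhausted
        break
    results.extend(batch)

print(len(results))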

Benefits of using iterators

• - Simplicity: Eliminates complex offset and limit settings.

• - Efficiency: Scalable data retrieval by fetching only the required data.

• + Simplicity: The complex offset and limit settings are eliminated.

• + Efficiency: Enables scalable data retrieval by fetching only the required data.

• Consistency: Ensures a consistent dataset size with boolean filters.
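For the consistency point in particular, here is a minimal query-iterator sketch (assuming the pymilvus collection object from the search-iterator sketch above; the filter expression and field names are placeholders):

iterator = collection.query_iterator(
    batch_size=100,              # page size per next() call
    expr="id > 0",               # boolean filter keeps the scanned set consistent
    output_fields=["color_tag"]
)

while True:
    batch = iterator.next()
    if not batch:
        iterator.close()
        break
    print(len(batch))            # process each page of entities here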

@@ -64,7 +64,7 @@ title: Mit Iteratoren

The following steps repeat the code for connecting to Milvus, quickly setting up a collection, and inserting over 10,000 randomly generated entities into the collection.

      +

The following preparation step connects to Milvus and inserts randomly generated entities into a collection.

Step 1: Create a collection

Use MilvusClient to connect to the Milvus server and create_collection() to create a collection.
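A minimal sketch of this step (assuming a local server at localhost:19530 and the quick-setup parameters used elsewhere in this guide):

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")  # Milvus server address

# Quick-setup collection: a 5-dimensional vector field with the IP metric.
client.create_collection(
    collection_name="quick_setup",
    dimension=5,
    metric_type="IP"
)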

@@ -266,8 +266,9 @@
 iterator = collection.search_iterator(
     batch_size=10,
     param=search_params,
     output_fields=["color_tag"],
-    limit=3
+    limit=300
 )
+# search 300 entities totally with 10 entities per page

 results = []

@@ -366,7 +367,7 @@ System.out.println(results.size());

batch_size
- Die Anzahl der Entitäten, die jedes Mal zurückgegeben werden sollen, wenn Sie next() für den aktuellen Iterator aufrufen. Der Wert ist standardmäßig 1000. Setzen Sie ihn auf einen geeigneten Wert, um die Anzahl der pro Iteration zurückzugebenden Objekte zu steuern.
+ Die Anzahl der Entitäten, die jedes Mal zurückgegeben werden sollen, wenn Sie next() für den aktuellen Iterator aufrufen.
      Der Standardwert ist 1000. Setzen Sie ihn auf einen geeigneten Wert, um die Anzahl der pro Iteration zurückzugebenden Objekte zu steuern. param diff --git a/localization/v2.4.x/site/de/userGuide/tools/cli_commands.json b/localization/v2.4.x/site/de/userGuide/tools/cli_commands.json index 841f90a05..d73a2a0e0 100644 --- a/localization/v2.4.x/site/de/userGuide/tools/cli_commands.json +++ b/localization/v2.4.x/site/de/userGuide/tools/cli_commands.json @@ -1 +1 @@ -{"codeList":["clear\n","connect [-uri (text)] [-t (text)]\n","milvus_cli > connect -uri http://127.0.0.1:19530 \n","create database -db (text) \n","milvus_cli > create database -db testdb\n","use database -db (text) \n","milvus_cli > use database -db testdb\n","list databases \n","milvus_cli > list databases\n","delete database -db (text) \n","milvus_cli > delete database -db testdb\n","create user -u (text) -p (text)\n","milvus_cli > create user -u zilliz -p zilliz\n","create alias -c (text) -a (text) [-A] \n","milvus_cli > create alias -c car -a carAlias1\n","milvus_cli > create alias -c car2 -A -a carAlias1\n","create collection -c (text) -f (text) -p (text) [-a] [-d (text)]\n","## For array field: --schema-field support :::(:if Varchar)\n\nmilvus_cli > create collection -c car -f id:INT64:primary_field -f vector:FLOAT_VECTOR:128 -f color:INT64:color -f brand:ARRAY:64:VARCHAR:128 -p id -A -d 'car_collection'\n","create partition -c (text) -p (text) [-d (text)]\n","milvus_cli > create partition -c car -p new_partition -d test_add_partition\n","create index\n","milvus_cli > create index\n\nCollection name (car, car2): car2\n\nThe name of the field to create an index for (vector): vector\n\nIndex name: vectorIndex\n\n# Default is ''\nIndex type FLAT, IVF_FLAT, IVF_SQ8, IVF_PQ, RNSG, HNSW, ANNOY, AUTOINDEX, DISKANN, GPU_IVF_FLAT, GPU_IVF_PQ, SPARSE_INVERTED_INDEX, SPARSE_WAND, SCANN, STL_SORT, Trie, INVERTED, ) []: IVF_FLAT \n\n# Default is ''\nIndex metric type (L2, IP, HAMMING, TANIMOTO, COSINE, ) []: \n\nTimeout []:\n","delete user -u (text)\n","milvus_cli > delete user -u zilliz\n","delete alias -a (text) \n","delete collection -c (text) \n","milvus_cli > delete collection -c car\n","delete entities -c (text) -p (text) \n","milvus_cli > delete entities -c car\n\nThe expression to specify entities to be deleted, such as \"film_id in [ 0, 1 ]\": film_id in [ 0, 1 ]\n\nYou are trying to delete the entities of collection. This action cannot be undone!\n\nDo you want to continue? [y/N]: y\n","delete partition -c (text) -p (text)\n","milvus_cli > delete partition -c car -p new_partition\n","delete index -c (text) -in (text)\n","milvus_cli > delete index -c car -in indexName\n","show collection -c (text)\n","milvus_cli > show collection -c test_collection_insert\n","show partition -c (text) -p (text)\n","milvus_cli > show partition -c test_collection_insert -p _default\n","show index -c (text) -in (text)\n","milvus_cli > show index -c test_collection -in index_name\n","exit\n","help \n","import -c (text)[-p (text)] \n","milvus_cli > import -c car 'examples/import_csv/vectors.csv'\n\nReading csv file... 
[####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","milvus_cli > import -c car 'https://raw.githubusercontent.com/milvus-\nio/milvus_cli/main/examples/import_csv/vectors.csv'\n\nReading file from remote URL.\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","list users\n","list collections\n","list indexes -c (text)\n","list partitions -c (text)\n","load -c (text) [-p (text)]\n","query\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id in [ 428960801420883491, 428960801420883492,\n428960801420883493 ]\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: color, brand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id > 428960801420883491\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: id, color,\nbrand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","release -c (text) [-p (text)]\n","search\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file\nout headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. 
If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers):\n [[0.71, 0.76, 0.17, 0.13, 0.42, 0.07, 0.15, 0.67, 0.58, 0.02, 0.39, 0.47, 0.58, 0.88, 0.73, 0.31, 0.23, 0.57, 0.33, 0.2, 0.03, 0.43, 0.78, 0.49, 0.17, 0.56, 0.76, 0.54, 0.45, 0.46, 0.05, 0.1, 0.43, 0.63, 0.29, 0.44, 0.65, 0.01, 0.35, 0.46, 0.66, 0.7, 0.88, 0.07, 0.49, 0.92, 0.57, 0.5, 0.16, 0.77, 0.98, 0.1, 0.44, 0.88, 0.82, 0.16, 0.67, 0.63, 0.57, 0.55, 0.95, 0.13, 0.64, 0.43, 0.71, 0.81, 0.43, 0.65, 0.76, 0.7, 0.05, 0.24, 0.03, 0.9, 0.46, 0.28, 0.92, 0.25, 0.97, 0.79, 0.73, 0.97, 0.49, 0.28, 0.64, 0.19, 0.23, 0.51, 0.09, 0.1, 0.53, 0.03, 0.23, 0.94, 0.87, 0.14, 0.42, 0.82, 0.91, 0.11, 0.91, 0.37, 0.26, 0.6, 0.89, 0.6, 0.32, 0.11, 0.98, 0.67, 0.12, 0.66, 0.47, 0.02, 0.15, 0.6, 0.64, 0.57, 0.14, 0.81, 0.75, 0.11, 0.49, 0.78, 0.16, 0.63, 0.57, 0.18]]\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, car2): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []:\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []:\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. 
If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","list connections \n","show index_progress -c (text) [-i (text)]\n","show loading_progress -c (text) [-p (text)]\n","version\n","$ milvus_cli --version\nMilvus_CLI v0.4.0\n"],"headingContent":"","anchorList":[{"label":"Milvus_CLI Befehlsreferenz","href":"MilvusCLI-Command-Reference","type":1,"isActive":false},{"label":"löschen","href":"clear","type":2,"isActive":false},{"label":"verbinden","href":"connect","type":2,"isActive":false},{"label":"create Datenbank","href":"create-Database","type":2,"isActive":false},{"label":"Datenbank verwenden","href":"use-Database","type":2,"isActive":false},{"label":"Datenbanken auflisten","href":"List-Databases","type":2,"isActive":false},{"label":"Datenbank löschen","href":"Delete-Database","type":2,"isActive":false},{"label":"Benutzer anlegen","href":"create-user","type":2,"isActive":false},{"label":"alias erstellen","href":"create-alias","type":2,"isActive":false},{"label":"create collection","href":"create-collection","type":2,"isActive":false},{"label":"Partition erstellen","href":"create-partition","type":2,"isActive":false},{"label":"Index erstellen","href":"create-index","type":2,"isActive":false},{"label":"Benutzer löschen","href":"delete-user","type":2,"isActive":false},{"label":"alias löschen","href":"delete-alias","type":2,"isActive":false},{"label":"delete Sammlung","href":"delete-collection","type":2,"isActive":false},{"label":"Entitäten löschen","href":"delete-entities","type":2,"isActive":false},{"label":"Partition löschen","href":"delete-partition","type":2,"isActive":false},{"label":"delete index","href":"delete-index","type":2,"isActive":false},{"label":"show collection","href":"show-collection","type":2,"isActive":false},{"label":"show partition","href":"show-partition","type":2,"isActive":false},{"label":"index anzeigen","href":"show-index","type":2,"isActive":false},{"label":"exit","href":"exit","type":2,"isActive":false},{"label":"Hilfe","href":"help","type":2,"isActive":false},{"label":"importieren","href":"import","type":2,"isActive":false},{"label":"Benutzer auflisten","href":"list-users","type":2,"isActive":false},{"label":"Sammlungen auflisten","href":"list-collections","type":2,"isActive":false},{"label":"Indizes auflisten","href":"list-indexes","type":2,"isActive":false},{"label":"list partitions","href":"list-partitions","type":2,"isActive":false},{"label":"laden","href":"load","type":2,"isActive":false},{"label":"Abfrage","href":"query","type":2,"isActive":false},{"label":"freigeben","href":"release","type":2,"isActive":false},{"label":"Suche","href":"search","type":2,"isActive":false},{"label":"Verbindung auflisten","href":"List-connection","type":2,"isActive":false},{"label":"show index_progress","href":"show-indexprogress","type":2,"isActive":false},{"label":"show loading_progress","href":"show-loadingprogress","type":2,"isActive":false},{"label":"Version","href":"version","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["clear\n","connect [-uri (text)] [-t (text)]\n","milvus_cli > connect -uri http://127.0.0.1:19530 \n","create database -db (text) \n","milvus_cli > create database -db testdb\n","use database -db (text) \n","milvus_cli > use database -db testdb\n","list databases \n","milvus_cli > list databases\n","delete database -db (text) \n","milvus_cli > delete database -db testdb\n","create user -u (text) -p (text)\n","milvus_cli > create user -u zilliz -p zilliz\n","create alias -c (text) -a 
(text) [-A] \n","milvus_cli > create alias -c car -a carAlias1\n","milvus_cli > create alias -c car2 -A -a carAlias1\n","create collection -c (text) -f (text) -p (text) [-a] [-d (text)]\n","## For array field: --schema-field support :::(:if Varchar)\n\nmilvus_cli > create collection -c car -f id:INT64:primary_field -f vector:FLOAT_VECTOR:128 -f color:INT64:color -f brand:ARRAY:64:VARCHAR:128 -p id -A -d 'car_collection'\n","create partition -c (text) -p (text) [-d (text)]\n","milvus_cli > create partition -c car -p new_partition -d test_add_partition\n","create index\n","milvus_cli > create index\n\nCollection name (car, car2): car2\n\nThe name of the field to create an index for (vector): vector\n\nIndex name: vectorIndex\n\n# Default is ''\nIndex type FLAT, IVF_FLAT, IVF_SQ8, IVF_PQ, RNSG, HNSW, ANNOY, AUTOINDEX, DISKANN, GPU_IVF_FLAT, GPU_IVF_PQ, SPARSE_INVERTED_INDEX, SPARSE_WAND, SCANN, STL_SORT, Trie, INVERTED, ) []: IVF_FLAT \n\n# Default is ''\nIndex metric type (L2, IP, HAMMING, TANIMOTO, COSINE, ) []: \n\nTimeout []:\n","delete user -u (text)\n","milvus_cli > delete user -u zilliz\n","delete alias -a (text) \n","delete collection -c (text) \n","milvus_cli > delete collection -c car\n","delete entities -c (text) -p (text) \n","milvus_cli > delete entities -c car\n\nThe expression to specify entities to be deleted, such as \"film_id in [ 0, 1 ]\": film_id in [ 0, 1 ]\n\nYou are trying to delete the entities of collection. This action cannot be undone!\n\nDo you want to continue? [y/N]: y\n","delete partition -c (text) -p (text)\n","milvus_cli > delete partition -c car -p new_partition\n","delete index -c (text) -in (text)\n","milvus_cli > delete index -c car -in indexName\n","show collection -c (text)\n","milvus_cli > show collection -c test_collection_insert\n","show partition -c (text) -p (text)\n","milvus_cli > show partition -c test_collection_insert -p _default\n","show index -c (text) -in (text)\n","milvus_cli > show index -c test_collection -in index_name\n","exit\n","help \n","import -c (text)[-p (text)] \n","milvus_cli > import -c car 'examples/import_csv/vectors.csv'\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","milvus_cli > import -c car 'https://raw.githubusercontent.com/milvus-\nio/milvus_cli/main/examples/import_csv/vectors.csv'\n\nReading file from remote URL.\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","list users\n","list collections\n","list indexes -c (text)\n","list partitions -c (text)\n","load -c (text) [-p (text)]\n","query\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id in [ 428960801420883491, 428960801420883492,\n428960801420883493 ]\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: color, brand\n\ntimeout []:\n\nGuarantee timestamp. 
This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id > 428960801420883491\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: id, color,\nbrand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","release -c (text) [-p (text)]\n","search\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file\nout headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers):\n [[0.71, 0.76, 0.17, 0.13, 0.42, 0.07, 0.15, 0.67, 0.58, 0.02, 0.39, 0.47, 0.58, 0.88, 0.73, 0.31, 0.23, 0.57, 0.33, 0.2, 0.03, 0.43, 0.78, 0.49, 0.17, 0.56, 0.76, 0.54, 0.45, 0.46, 0.05, 0.1, 0.43, 0.63, 0.29, 0.44, 0.65, 0.01, 0.35, 0.46, 0.66, 0.7, 0.88, 0.07, 0.49, 0.92, 0.57, 0.5, 0.16, 0.77, 0.98, 0.1, 0.44, 0.88, 0.82, 0.16, 0.67, 0.63, 0.57, 0.55, 0.95, 0.13, 0.64, 0.43, 0.71, 0.81, 0.43, 0.65, 0.76, 0.7, 0.05, 0.24, 0.03, 0.9, 0.46, 0.28, 0.92, 0.25, 0.97, 0.79, 0.73, 0.97, 0.49, 0.28, 0.64, 0.19, 0.23, 0.51, 0.09, 0.1, 0.53, 0.03, 0.23, 0.94, 0.87, 0.14, 0.42, 0.82, 0.91, 0.11, 0.91, 0.37, 0.26, 0.6, 0.89, 0.6, 0.32, 0.11, 0.98, 0.67, 0.12, 0.66, 0.47, 0.02, 0.15, 0.6, 0.64, 0.57, 0.14, 0.81, 0.75, 0.11, 0.49, 0.78, 0.16, 0.63, 0.57, 0.18]]\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. 
If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, car2): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []:\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []:\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","list connections \n","show index_progress -c (text) [-i (text)]\n","show loading_progress -c (text) [-p (text)]\n","version\n","$ milvus_cli --version\nMilvus_CLI v0.4.0\n"],"headingContent":"Milvus_CLI Command Reference","anchorList":[{"label":"Milvus_CLI Befehlsreferenz","href":"MilvusCLI-Command-Reference","type":1,"isActive":false},{"label":"löschen","href":"clear","type":2,"isActive":false},{"label":"verbinden","href":"connect","type":2,"isActive":false},{"label":"create Datenbank","href":"create-Database","type":2,"isActive":false},{"label":"Datenbank verwenden","href":"use-Database","type":2,"isActive":false},{"label":"Datenbanken auflisten","href":"list-Databases","type":2,"isActive":false},{"label":"delete Datenbank","href":"delete-Database","type":2,"isActive":false},{"label":"Benutzer anlegen","href":"create-user","type":2,"isActive":false},{"label":"alias erstellen","href":"create-alias","type":2,"isActive":false},{"label":"create collection","href":"create-collection","type":2,"isActive":false},{"label":"Partition erstellen","href":"create-partition","type":2,"isActive":false},{"label":"Index erstellen","href":"create-index","type":2,"isActive":false},{"label":"Benutzer löschen","href":"delete-user","type":2,"isActive":false},{"label":"alias löschen","href":"delete-alias","type":2,"isActive":false},{"label":"delete Sammlung","href":"delete-collection","type":2,"isActive":false},{"label":"Entitäten löschen","href":"delete-entities","type":2,"isActive":false},{"label":"Partition löschen","href":"delete-partition","type":2,"isActive":false},{"label":"delete index","href":"delete-index","type":2,"isActive":false},{"label":"show collection","href":"show-collection","type":2,"isActive":false},{"label":"show partition","href":"show-partition","type":2,"isActive":false},{"label":"index anzeigen","href":"show-index","type":2,"isActive":false},{"label":"exit","href":"exit","type":2,"isActive":false},{"label":"Hilfe","href":"help","type":2,"isActive":false},{"label":"importieren","href":"import","type":2,"isActive":false},{"label":"Benutzer auflisten","href":"list-users","type":2,"isActive":false},{"label":"Sammlungen auflisten","href":"list-collections","type":2,"isActive":false},{"label":"Indizes auflisten","href":"list-indexes","type":2,"isActive":false},{"label":"list 
partitions","href":"list-partitions","type":2,"isActive":false},{"label":"laden","href":"load","type":2,"isActive":false},{"label":"Abfrage","href":"query","type":2,"isActive":false},{"label":"freigeben","href":"release","type":2,"isActive":false},{"label":"Suche","href":"search","type":2,"isActive":false},{"label":"Verbindung auflisten","href":"list-connection","type":2,"isActive":false},{"label":"show index_progress","href":"show-indexprogress","type":2,"isActive":false},{"label":"show loading_progress","href":"show-loadingprogress","type":2,"isActive":false},{"label":"Version","href":"version","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/userGuide/tools/cli_commands.md b/localization/v2.4.x/site/de/userGuide/tools/cli_commands.md index 679a85082..c0d50ff05 100644 --- a/localization/v2.4.x/site/de/userGuide/tools/cli_commands.md +++ b/localization/v2.4.x/site/de/userGuide/tools/cli_commands.md @@ -1,6 +1,6 @@ --- id: cli_commands.md -summary: Interagieren Sie mit Milvus über Befehle. +summary: Interaktion mit Milvus über Befehle. title: Milvus_CLI Befehlsreferenz ---

      Milvus_CLI Befehlsreferenz

      -

      Datenbanken auflisten

      Beispiel 1

      Das folgende Beispiel listet die Datenbanken in Milvus auf.

      milvus_cli > list databases
       
      -

      Datenbank löschen

      Datenbank löschen in Milvus

      +

      Datenbank in Milvus löschen

      Syntax

      delete database -db (text) 
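      Example (taken from this page's code list; testdb is the database created earlier):

      milvus_cli > delete database -db testdb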
       
      @@ -669,7 +669,7 @@ Timeout []:
      -

      Zeigt die detaillierten Informationen eines Indexes an.

      +

      Zeigt die detaillierten Informationen zu einem Index an.

      Syntax

      show index -c (text) -in (text)
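      Example (taken from this page's code list):

      milvus_cli > show index -c test_collection -in index_name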
       
      @@ -785,7 +785,7 @@ Timeout []:
       -c | -sammlung-name | Der Name der Sammlung, in die die Daten eingefügt werden.
       -p | -partition | (Optional) Der Name der Partition, in die die Daten eingefügt werden. Wenn Sie diese Partitionsoption nicht angeben, wird die Partition "_default" gewählt.
      - -help | n/a | Zeigt die Hilfe zur Verwendung des Befehls an.
      + -help | k.A. | Zeigt die Hilfe zur Verwendung des Befehls an.

      Beispiel 1

      @@ -1165,7 +1165,7 @@ timeout []: Guarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:
      -

      Verbindung auflisten

      Zeigt den Fortschritt des Ladens von Entitäten an.

      +

      Zeigt den Fortschritt beim Laden einer Sammlung an.

      Syntax

      show loading_progress -c (text) [-p (text)]
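      A usage sketch; test_collection is a placeholder collection name:

      milvus_cli > show loading_progress -c test_collection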
       
      diff --git a/localization/v2.4.x/site/de/userGuide/tools/milvus_backup_overview.json b/localization/v2.4.x/site/de/userGuide/tools/milvus_backup_overview.json index cc63c6196..1fe209c37 100644 --- a/localization/v2.4.x/site/de/userGuide/tools/milvus_backup_overview.json +++ b/localization/v2.4.x/site/de/userGuide/tools/milvus_backup_overview.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"","anchorList":[{"label":"Milvus-Sicherung","href":"Milvus-Backup","type":1,"isActive":false},{"label":"Voraussetzungen","href":"Prerequisites","type":2,"isActive":false},{"label":"Architektur","href":"Architecture","type":2,"isActive":false},{"label":"Neueste Version","href":"Latest-release","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Milvus Backup","anchorList":[{"label":"Milvus-Sicherung","href":"Milvus-Backup","type":1,"isActive":false},{"label":"Voraussetzungen","href":"Prerequisites","type":2,"isActive":false},{"label":"Architektur","href":"Architecture","type":2,"isActive":false},{"label":"Neueste Version","href":"Latest-release","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/userGuide/tools/milvus_backup_overview.md b/localization/v2.4.x/site/de/userGuide/tools/milvus_backup_overview.md index b88521d48..4b44f09e1 100644 --- a/localization/v2.4.x/site/de/userGuide/tools/milvus_backup_overview.md +++ b/localization/v2.4.x/site/de/userGuide/tools/milvus_backup_overview.md @@ -1,7 +1,7 @@ --- id: milvus_backup_overview.md summary: >- - Milvus-Backup ist ein Tool, mit dem Benutzer Milvus-Daten sichern und + Milvus-Backup ist ein Werkzeug, mit dem Benutzer Milvus-Daten sichern und wiederherstellen können. title: Milvus-Sicherung --- @@ -60,7 +60,7 @@ title: Milvus-Sicherung Milvus Backup architecture Aufbau von Milvus Backup

      -

      Milvus Backup ermöglicht die Sicherung und Wiederherstellung von Metadaten, Segmenten und Daten über Milvus-Instanzen hinweg. Es bietet Northbound-Schnittstellen, wie CLI, API und gRPC-basiertes Go-Modul, zur flexiblen Manipulation der Sicherungs- und Wiederherstellungsprozesse.

      +

      Milvus Backup ermöglicht die Sicherung und Wiederherstellung von Metadaten, Segmenten und Daten über Milvus-Instanzen hinweg. Es bietet Northbound-Schnittstellen, wie CLI, API und gRPC-basiertes Go-Modul, für eine flexible Manipulation der Sicherungs- und Wiederherstellungsprozesse.

      Milvus Backup liest Sammlungsmetadaten und -segmente aus der Milvus-Quellinstanz, um ein Backup zu erstellen. Dann kopiert es Sammlungsdaten aus dem Stammverzeichnis der Milvus-Quellinstanz und speichert die kopierten Daten im Stammverzeichnis der Sicherung.

      Um von einem Backup wiederherzustellen, erstellt Milvus Backup eine neue Sammlung in der Ziel-Milvus-Instanz, basierend auf den Metadaten und Segmentinformationen der Sammlung im Backup. Anschließend kopiert es die Sicherungsdaten aus dem Stammverzeichnis der Sicherung in das Stammverzeichnis der Zielinstanz.
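      As a rough sketch of this round trip with the Milvus Backup CLI (command and flag names follow the tool's README; my_backup and the _recover suffix are placeholders):

      # Create a backup of the source instance's collections
      ./milvus-backup create -n my_backup
      # Restore it; restored collections receive the _recover suffix
      ./milvus-backup restore -n my_backup -s _recover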

      Neueste Version

      diff --git a/localization/v2.4.x/site/de/userGuide/use-partition-key.json b/localization/v2.4.x/site/de/userGuide/use-partition-key.json index e7630a3f7..17285f201 100644 --- a/localization/v2.4.x/site/de/userGuide/use-partition-key.json +++ b/localization/v2.4.x/site/de/userGuide/use-partition-key.json @@ -1 +1 @@ -{"codeList":["import random, time\nfrom pymilvus import connections, MilvusClient, DataType\n\nSERVER_ADDR = \"http://localhost:19530\"\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=SERVER_ADDR\n)\n\n# 2. Create a collection\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n partition_key_field=\"color\",\n num_partitions=16 # Number of partitions. Defaults to 16.\n)\n\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\nschema.add_field(field_name=\"color\", datatype=DataType.VARCHAR, max_length=512)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.collection.request.AddFieldReq;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in customized setup mode\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder()\n .fieldName(\"id\")\n .dataType(DataType.Int64)\n .isPrimaryKey(true)\n .autoID(false)\n .build());\n\nschema.addField(AddFieldReq.builder()\n .fieldName(\"vector\")\n .dataType(DataType.FloatVector)\n .dimension(5)\n .build());\n \nschema.addField(AddFieldReq.builder()\n .fieldName(\"color\")\n .dataType(DataType.VarChar)\n .maxLength(512)\n .isPartitionKey(true)\n .build());\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\nasync function main() {\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address}); \n\n// 2. 
Create a collection\n// 2.1 Define fields\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n auto_id: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n {\n name: \"color\",\n data_type: DataType.VarChar,\n max_length: 512,\n is_partition_key: true\n }\n]\n","index_params = MilvusClient.prepare_index_params()\n\nindex_params.add_index(\n field_name=\"id\",\n index_type=\"STL_SORT\"\n)\n\nindex_params.add_index(\n field_name=\"color\",\n index_type=\"Trie\"\n)\n\nindex_params.add_index(\n field_name=\"vector\",\n index_type=\"IVF_FLAT\",\n metric_type=\"L2\",\n params={\"nlist\": 1024}\n)\n","// 2.3 Prepare index parameters\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.IP)\n .extraParams(Map.of(\"nlist\", 1024))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n","// 2.2 Prepare index parameters\nconst index_params = [{\n field_name: \"color\",\n index_type: \"Trie\"\n},{\n field_name: \"id\",\n index_type: \"STL_SORT\"\n},{\n field_name: \"vector\",\n index_type: \"IVF_FLAT\",\n metric_type: \"IP\",\n params: { nlist: 1024}\n}]\n","client.create_collection(\n collection_name=\"test_collection\",\n schema=schema,\n index_params=index_params\n)\n","// 2.4 Create a collection with schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n .collectionName(\"test_collection\")\n .collectionSchema(schema)\n .indexParams(indexParams) \n .build();\n\nclient.createCollection(customizedSetupReq);\n","// 2.3 Create a collection with fields and index parameters\nres = await client.createCollection({\n collection_name: \"test_collection\",\n fields: fields, \n index_params: index_params,\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n//\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n","// 3. Insert randomly generated vectors\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n int current_tag = rand.nextInt(8999) + 1000;\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"tag\", current_tag);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nSystem.out.println(JSONObject.toJSON(data.get(0))); \n","// 3. 
Insert randomly generated vectors \nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\nvar data = []\n\nfor (let i = 0; i < 1000; i++) {\n const current_color = colors[Math.floor(Math.random() * colors.length)]\n const current_tag = Math.floor(Math.random() * 8999 + 1000)\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n tag: current_tag,\n color_tag: `${current_color}_${current_tag}`\n })\n}\n\nconsole.log(data[0])\n","{\n id: 0,\n vector: [\n 0.1275656405044483,\n 0.47417858592773277,\n 0.13858264437643286,\n 0.2390904907020377,\n 0.8447862593689635\n ],\n color: 'blue',\n tag: 2064,\n color_tag: 'blue_2064'\n}\n","res = client.insert(\n collection_name=\"test_collection\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n","// 3.1 Insert data into the collection\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"test_collection\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n","res = await client.insert({\n collection_name: \"test_collection\",\n data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n","# 4. Search with partition key\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\n\nres = client.search(\n collection_name=\"test_collection\",\n data=query_vectors,\n filter=\"color == 'green'\",\n search_params={\"metric_type\": \"L2\", \"params\": {\"nprobe\": 10}},\n output_fields=[\"id\", \"color_tag\"],\n limit=3\n)\n\nprint(res)\n\n# Output\n#\n# [\n# [\n# {\n# \"id\": 970,\n# \"distance\": 0.5770174264907837,\n# \"entity\": {\n# \"id\": 970,\n# \"color_tag\": \"green_9828\"\n# }\n# },\n# {\n# \"id\": 115,\n# \"distance\": 0.6898155808448792,\n# \"entity\": {\n# \"id\": 115,\n# \"color_tag\": \"green_4073\"\n# }\n# },\n# {\n# \"id\": 899,\n# \"distance\": 0.7028976678848267,\n# \"entity\": {\n# \"id\": 899,\n# \"color_tag\": \"green_9897\"\n# }\n# }\n# ]\n# ]\n","// 4. Search with partition key\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"test_collection\")\n .data(query_vectors)\n .filter(\"color == \\\"green\\\"\")\n .topK(3)\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp)); \n\n// Output:\n// {\"searchResults\": [[\n// {\n// \"distance\": 1.0586997,\n// \"id\": 414,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.981384,\n// \"id\": 293,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.9548756,\n// \"id\": 325,\n// \"entity\": {}\n// }\n// ]]}\n","// 4. 
Search with partition key\nconst query_vectors = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"test_collection\",\n data: query_vectors,\n filter: \"color == 'green'\",\n output_fields: [\"color_tag\"],\n limit: 3\n})\n\nconsole.log(res.results)\n\n// Output\n// \n// [\n// { score: 2.402090549468994, id: '135', color_tag: 'green_2694' },\n// { score: 2.3938629627227783, id: '326', color_tag: 'green_7104' },\n// { score: 2.3235254287719727, id: '801', color_tag: 'green_3162' }\n// ]\n// \n"],"headingContent":"","anchorList":[{"label":"Use Partition Key","href":"Use-Partition-Key","type":1,"isActive":false},{"label":"Overview","href":"Overview","type":2,"isActive":false},{"label":"Enable partition key","href":"Enable-partition-key","type":2,"isActive":false},{"label":"List partitions","href":"List-partitions","type":2,"isActive":false},{"label":"Insert data","href":"Insert-data","type":2,"isActive":false},{"label":"Use partition key","href":"Use-partition-key","type":2,"isActive":false},{"label":"Typical use cases","href":"Typical-use-cases","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["import random, time\nfrom pymilvus import connections, MilvusClient, DataType\n\nSERVER_ADDR = \"http://localhost:19530\"\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=SERVER_ADDR\n)\n\n# 2. Create a collection\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n partition_key_field=\"color\",\n num_partitions=64 # Number of partitions. Defaults to 64.\n)\n\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\nschema.add_field(field_name=\"color\", datatype=DataType.VARCHAR, max_length=512)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.collection.request.AddFieldReq;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in customized setup mode\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder()\n .fieldName(\"id\")\n .dataType(DataType.Int64)\n .isPrimaryKey(true)\n .autoID(false)\n .build());\n\nschema.addField(AddFieldReq.builder()\n .fieldName(\"vector\")\n .dataType(DataType.FloatVector)\n .dimension(5)\n .build());\n \nschema.addField(AddFieldReq.builder()\n .fieldName(\"color\")\n .dataType(DataType.VarChar)\n .maxLength(512)\n .isPartitionKey(true)\n .build());\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\nasync function main() {\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address}); \n\n// 2. 
Create a collection\n// 2.1 Define fields\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n auto_id: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n {\n name: \"color\",\n data_type: DataType.VarChar,\n max_length: 512,\n is_partition_key: true\n }\n]\n","index_params = MilvusClient.prepare_index_params()\n\nindex_params.add_index(\n field_name=\"id\",\n index_type=\"STL_SORT\"\n)\n\nindex_params.add_index(\n field_name=\"color\",\n index_type=\"Trie\"\n)\n\nindex_params.add_index(\n field_name=\"vector\",\n index_type=\"IVF_FLAT\",\n metric_type=\"L2\",\n params={\"nlist\": 1024}\n)\n","// 2.3 Prepare index parameters\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.IP)\n .extraParams(Map.of(\"nlist\", 1024))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n","// 2.2 Prepare index parameters\nconst index_params = [{\n field_name: \"color\",\n index_type: \"Trie\"\n},{\n field_name: \"id\",\n index_type: \"STL_SORT\"\n},{\n field_name: \"vector\",\n index_type: \"IVF_FLAT\",\n metric_type: \"IP\",\n params: { nlist: 1024}\n}]\n","client.create_collection(\n collection_name=\"test_collection\",\n schema=schema,\n index_params=index_params\n)\n","// 2.4 Create a collection with schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n .collectionName(\"test_collection\")\n .collectionSchema(schema)\n .indexParams(indexParams) \n .build();\n\nclient.createCollection(customizedSetupReq);\n","// 2.3 Create a collection with fields and index parameters\nres = await client.createCollection({\n collection_name: \"test_collection\",\n fields: fields, \n index_params: index_params,\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n//\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n","// 3. Insert randomly generated vectors\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n int current_tag = rand.nextInt(8999) + 1000;\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"tag\", current_tag);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nSystem.out.println(JSONObject.toJSON(data.get(0))); \n","// 3. 
Insert randomly generated vectors \nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\nvar data = []\n\nfor (let i = 0; i < 1000; i++) {\n const current_color = colors[Math.floor(Math.random() * colors.length)]\n const current_tag = Math.floor(Math.random() * 8999 + 1000)\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n tag: current_tag,\n color_tag: `${current_color}_${current_tag}`\n })\n}\n\nconsole.log(data[0])\n","{\n id: 0,\n vector: [\n 0.1275656405044483,\n 0.47417858592773277,\n 0.13858264437643286,\n 0.2390904907020377,\n 0.8447862593689635\n ],\n color: 'blue',\n tag: 2064,\n color_tag: 'blue_2064'\n}\n","res = client.insert(\n collection_name=\"test_collection\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n","// 3.1 Insert data into the collection\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"test_collection\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n","res = await client.insert({\n collection_name: \"test_collection\",\n data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n","# 4. Search with partition key\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\n\nres = client.search(\n collection_name=\"test_collection\",\n data=query_vectors,\n filter=\"color == 'green'\",\n search_params={\"metric_type\": \"L2\", \"params\": {\"nprobe\": 10}},\n output_fields=[\"id\", \"color_tag\"],\n limit=3\n)\n\nprint(res)\n\n# Output\n#\n# [\n# [\n# {\n# \"id\": 970,\n# \"distance\": 0.5770174264907837,\n# \"entity\": {\n# \"id\": 970,\n# \"color_tag\": \"green_9828\"\n# }\n# },\n# {\n# \"id\": 115,\n# \"distance\": 0.6898155808448792,\n# \"entity\": {\n# \"id\": 115,\n# \"color_tag\": \"green_4073\"\n# }\n# },\n# {\n# \"id\": 899,\n# \"distance\": 0.7028976678848267,\n# \"entity\": {\n# \"id\": 899,\n# \"color_tag\": \"green_9897\"\n# }\n# }\n# ]\n# ]\n","// 4. Search with partition key\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"test_collection\")\n .data(query_vectors)\n .filter(\"color == \\\"green\\\"\")\n .topK(3)\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp)); \n\n// Output:\n// {\"searchResults\": [[\n// {\n// \"distance\": 1.0586997,\n// \"id\": 414,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.981384,\n// \"id\": 293,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.9548756,\n// \"id\": 325,\n// \"entity\": {}\n// }\n// ]]}\n","// 4. 
Search with partition key\nconst query_vectors = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"test_collection\",\n data: query_vectors,\n filter: \"color == 'green'\",\n output_fields: [\"color_tag\"],\n limit: 3\n})\n\nconsole.log(res.results)\n\n// Output\n// \n// [\n// { score: 2.402090549468994, id: '135', color_tag: 'green_2694' },\n// { score: 2.3938629627227783, id: '326', color_tag: 'green_7104' },\n// { score: 2.3235254287719727, id: '801', color_tag: 'green_3162' }\n// ]\n// \n"],"headingContent":"Use Partition Key","anchorList":[{"label":"Partitionsschlüssel verwenden","href":"Use-Partition-Key","type":1,"isActive":false},{"label":"Übersicht","href":"Overview","type":2,"isActive":false},{"label":"Aktivieren des Partitionsschlüssels","href":"Enable-partition-key","type":2,"isActive":false},{"label":"Partitionen auflisten","href":"List-partitions","type":2,"isActive":false},{"label":"Daten einfügen","href":"Insert-data","type":2,"isActive":false},{"label":"Partitionsschlüssel verwenden","href":"Use-partition-key","type":2,"isActive":false},{"label":"Typische Anwendungsfälle","href":"Typical-use-cases","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/de/userGuide/use-partition-key.md b/localization/v2.4.x/site/de/userGuide/use-partition-key.md index 990c59b03..910b9a6ff 100644 --- a/localization/v2.4.x/site/de/userGuide/use-partition-key.md +++ b/localization/v2.4.x/site/de/userGuide/use-partition-key.md @@ -1,9 +1,8 @@ --- id: use-partition-key.md -title: Use Partition Key -summary: '' +title: Partitionsschlüssel verwenden --- -

      Use Partition Key

      This guide walks you through using the partition key to accelerate data retrieval from your collection.

      -

      Overview

      Dieser Leitfaden führt Sie durch die Verwendung des Partitionsschlüssels zur Beschleunigung des Datenabrufs in Ihrer Sammlung.

      +

      Übersicht

      You can set a particular field in a collection as the partition key so that Milvus distributes incoming entities into different partitions according to their respective partition values in this field. This allows entities with the same key value to be grouped in a partition, accelerating search performance by avoiding the need to scan irrelevant partitions when filtering by the key field. When compared to traditional filtering methods, the partition key can greatly enhance query performance.

      -

      You can use the partition key to implement multi-tenancy. For details on multi-tenancy, read Multi-tenancy for more.

      -

      Enable partition key

      Sie können ein bestimmtes Feld in einer Sammlung als Partitionsschlüssel festlegen, so dass Milvus eingehende Entitäten entsprechend ihrer jeweiligen Partitionswerte in diesem Feld in verschiedene Partitionen verteilt. Dadurch werden Entitäten mit demselben Schlüsselwert in einer Partition gruppiert, was die Suche beschleunigt, da beim Filtern nach dem Schlüsselfeld keine irrelevanten Partitionen durchsucht werden müssen. Im Vergleich zu herkömmlichen Filtermethoden kann der Partitionsschlüssel die Abfrageleistung erheblich verbessern.

      +

      Sie können den Partitionsschlüssel verwenden, um Mehrmandantenfähigkeit zu implementieren. Weitere Details dazu finden Sie unter Mehrmandantenfähigkeit.

      +

      Aktivieren des Partitionsschlüssels

      To set a field as the partition key, specify partition_key_field when creating a collection schema.

      -

      In the example code below, num_partitions determines the number of partitions that will be created. By default, it is set to 16. We recommend you retain the default value.

      +

      Um ein Feld als Partitionsschlüssel festzulegen, geben Sie beim Erstellen eines Sammlungsschemas partition_key_field an.

      +

      Im folgenden Beispielcode bestimmt num_partitions die Anzahl der zu erstellenden Partitionen. Standardmäßig ist er auf 64 eingestellt. Es wird empfohlen, den Standardwert beizubehalten.

      -

      For more information on parameters, refer to MilvusClient, create_schema(), and add_field() in the SDK reference.

      +

      Weitere Informationen zu Parametern finden Sie unter MilvusClient, create_schema() und add_field() in der SDK-Referenz.

      -

      For more information on parameters, refer to MilvusClientV2, createSchema(), and addField() in the SDK reference.

      +

      Weitere Informationen zu Parametern finden Sie unter MilvusClientV2, createSchema() und addField() in der SDK-Referenz.

      -

      For more information on parameters, refer to MilvusClient and createCollection() in the SDK reference.

      +

      Weitere Informationen zu Parametern finden Sie unter MilvusClient und createCollection() in der SDK-Referenz.

      + Python Java Node.js
      import random, time
       from pymilvus import connections, MilvusClient, DataType
       
      @@ -82,7 +78,7 @@ schema = MilvusClient.create_schema(
           auto_id=False,
           enable_dynamic_field=True,
           partition_key_field="color",
      -    num_partitions=16 # Number of partitions. Defaults to 16.
      +    num_partitions=64 # Number of partitions. Defaults to 64.
       )
       
       schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
      @@ -161,12 +157,9 @@ client = new M
           }
       ]
       
      -

      After you have defined the fields, set up the index parameters.

      +

      Nachdem Sie die Felder definiert haben, richten Sie die Indexparameter ein.

      + Python Java Node.js
      index_params = MilvusClient.prepare_index_params()
       
       index_params.add_index(
      @@ -211,12 +204,9 @@ indexParams.add(indexParamForVectorFie
           params: { nlist: 1024}
       }]
       
      -

      Finally, you can create a collection.

      +

      Schließlich können Sie eine Sammlung erstellen.

      + Python Java Node.js
      client.create_collection(
           collection_name="test_collection",
           schema=schema,
      @@ -246,7 +236,7 @@ res = await client.// Success
       //
       
      -

      List partitions

      Once a field of a collection is used as the partition key, Milvus creates the specified number of partitions and manages them on your behalf. Therefore, you cannot manipulate the partitions in this collection anymore.

      -

      The following snippet demonstrates that 64 partitions in a collection once one of its fields is used as the partition key.

      -

      Insert data

      Sobald ein Feld einer Sammlung als Partitionsschlüssel verwendet wird, erstellt Milvus die angegebene Anzahl von Partitionen und verwaltet sie in Ihrem Namen. Daher können Sie die Partitionen in dieser Sammlung nicht mehr manipulieren.

      +

      Das folgende Snippet zeigt, dass in einer Sammlung 64 Partitionen erstellt werden, sobald eines ihrer Felder als Partitionsschlüssel verwendet wird.
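      A minimal sketch of that check, reusing the pymilvus MilvusClient set up above (test_collection as created in this guide):

      # Partitions are created and managed by Milvus automatically
      res = client.list_partitions(collection_name="test_collection")
      print(len(res))  # 64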

      +

      Daten einfügen

      Once the collection is ready, start inserting data as follows:

      -

      Prepare data

      +

      Sobald die Sammlung fertig ist, beginnen Sie mit dem Einfügen der Daten wie folgt:

      +

      Daten vorbereiten

      # 3. Insert randomly generated vectors 
       colors = ["green", "blue", "yellow", "red", "black", "white", "purple", "pink", "orange", "brown", "grey"]
       data = []
      @@ -338,7 +325,7 @@ data = []
       
       console.log(data[0])
       
      -

      You can view the structure of the generated data by checking its first entry.

      +

      Sie können die Struktur der generierten Daten sehen, indem Sie den ersten Eintrag überprüfen.

      {
           id: 0,
           vector: [
      @@ -353,20 +340,17 @@ data = []
           color_tag: 'blue_2064'
       }
       
      -

      Insert data

      -

      Use the insert() method to insert the data into the collection.

      +

      Daten einfügen

      +

      Verwenden Sie die insert() Methode, um die Daten in die Sammlung einzufügen.

      -

      Use the insert() method to insert the data into the collection.

      +

      Verwenden Sie die insert() Methode, um die Daten in die Sammlung einzufügen.

      -

      Use the insert() method to insert the data into the collection.

      +

      Verwenden Sie die insert() Methode, um die Daten in die Sammlung einzufügen.

      + Python Java Node.js
      res = client.insert(
           collection_name="test_collection",
           data=data
      @@ -418,7 +402,7 @@ data = []
       // 1000
       // 
       
      -

      Use partition key

      Once you have indexed and loaded the collection as well as inserted data, you can conduct a similarity search using the partition key.

      +

      Sobald Sie die Sammlung indiziert und geladen sowie Daten eingefügt haben, können Sie eine Ähnlichkeitssuche mit dem Partitionsschlüssel durchführen.
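      The search below assumes the collection has been loaded; with the pymilvus client from above this is a one-line sketch:

      client.load_collection(collection_name="test_collection")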

      -

      For more information on parameters, refer to search() in the SDK reference.

      +

      Weitere Informationen zu den Parametern finden Sie unter search() in der SDK-Referenz.

      -

      For more information on parameters, refer to search() in the SDK reference.

      +

      Weitere Informationen zu den Parametern finden Sie unter search() in der SDK-Referenz.

      -

      For more information on parameters, refer to search() in the SDK reference.

      +

      Weitere Informationen zu Parametern finden Sie unter search() in der SDK-Referenz.

      -

      notes

      -

      To conduct a similarity search using the partition key, you should include either of the following in the boolean expression of the search request:

      +

      Hinweise

      +

      Um eine Ähnlichkeitssuche unter Verwendung des Partitionsschlüssels durchzuführen, sollten Sie einen der folgenden Punkte in den booleschen Ausdruck der Suchanfrage aufnehmen:

      • expr='<partition_key>=="xxxx"'

      • expr='<partition_key> in ["xxx", "xxx"]'

      -

      Do replace <partition_key> with the name of the field that is designated as the partition key.

      +

      Ersetzen Sie <partition_key> durch den Namen des Feldes, das als Partitionsschlüssel festgelegt ist.

      + Python Java Node.js
      # 4. Search with partition key
       query_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]
       
      @@ -557,7 +538,7 @@ res = await client.// ]
       // 
       
      -

      Typical use cases

      You can utilize the partition key feature to achieve better search performance and enable multi-tenancy. This can be done by assigning a tenant-specific value as the partition key field for each entity. When searching or querying the collection, you can filter entities by the tenant-specific value by including the partition key field in the boolean expression. This approach ensures data isolation by tenants and avoids scanning unnecessary partitions.

      +

      Sie können die Partitionsschlüssel-Funktion nutzen, um eine bessere Suchleistung zu erzielen und Mandantenfähigkeit zu ermöglichen. Dies kann durch Zuweisung eines mandantenspezifischen Wertes im Partitionsschlüsselfeld jeder Entität erfolgen. Bei der Suche oder Abfrage der Sammlung können Sie Entitäten nach dem mandantenspezifischen Wert filtern, indem Sie das Partitionsschlüsselfeld in den booleschen Ausdruck aufnehmen. Dieser Ansatz gewährleistet die Datenisolierung nach Mandanten und vermeidet das Scannen unnötiger Partitionen.
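      A minimal pymilvus sketch of this pattern, assuming a collection whose partition key field is tenant_id (the field name and tenant value are placeholders, not part of this guide's schema):

      # Filtering on the partition key field confines the search
      # to the calling tenant's partitions.
      res = client.search(
          collection_name="test_collection",
          data=query_vectors,
          filter='tenant_id == "tenant_42"',
          output_fields=["id"],
          limit=3
      )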

      diff --git a/localization/v2.4.x/site/es/adminGuide/clouds/aws/s3.json b/localization/v2.4.x/site/es/adminGuide/clouds/aws/s3.json index 02cd26189..a09a377a4 100644 --- a/localization/v2.4.x/site/es/adminGuide/clouds/aws/s3.json +++ b/localization/v2.4.x/site/es/adminGuide/clouds/aws/s3.json @@ -1 +1 @@ -{"codeList":["milvus_bucket_name=\"milvus-bucket-$(openssl rand -hex 12)\"\n\naws s3api create-bucket --bucket \"$milvus_bucket_name\" --region 'us-east-2' --acl private --object-ownership ObjectWriter --create-bucket-configuration LocationConstraint='us-east-2'\n\n\n# Output\n#\n# \"Location\": \"http://milvus-bucket-039dd013c0712f085d60e21f.s3.amazonaws.com/\"\n","echo '{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n}' > milvus-s3-policy.json\n\naws iam create-policy --policy-name MilvusS3ReadWrite --policy-document file://milvus-s3-policy.json\n\n\n# Get the ARN from the command output as follows:\n# {\n# \"Policy\": {\n# \"PolicyName\": \"MilvusS3ReadWrite\",\n# \"PolicyId\": \"AN5QQVVPM1BVTFlBNkdZT\",\n# \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n# \"Path\": \"/\",\n# \"DefaultVersionId\": \"v1\",\n# \"AttachmentCount\": 0,\n# \"PermissionsBoundaryUsageCount\": 0,\n# \"IsAttachable\": true,\n# \"CreateDate\": \"2023-11-16T06:00:01+00:00\",\n# \"UpdateDate\": \"2023-11-16T06:00:01+00:00\"\n# }\n# } \n","eksctl create iamserviceaccount --name milvus-s3-access-sa --namespace milvus --cluster milvus-eks-cluster --role-name milvus-s3-access-sa \\\n --attach-policy-arn arn:aws:iam:::policy/MilvusS3ReadWrite --approve\n","aws iam get-role --role-name milvus-s3-access-sa --query Role.AssumeRolePolicyDocument\n# An example output is as follows\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Federated\": \"arn:aws:iam::111122223333:oidc-provider/oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE\"\n },\n \"Action\": \"sts:AssumeRoleWithWebIdentity\",\n \"Condition\": {\n \"StringEquals\": {\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:sub\": \"system:serviceaccount:default:my-service-account\",\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:aud\": \"sts.amazonaws.com\"\n }\n }\n }\n ]\n}\n","aws iam list-attached-role-policies --role-name milvus-s3-access-sa --query 'AttachedPolicies[].PolicyArn' --output text\n# An example output is as follows\narn:aws:iam::12345678901:policy/MilvusS3ReadWrite\n","export policy_arn='arn:aws:iam::12345678901:policy/MilvusS3ReadWrite'\naws iam get-policy --policy-arn $policy_arn\n# An example output is as follows\n{\n \"Policy\": {\n \"PolicyName\": \"MilvusS3ReadWrite\",\n \"PolicyId\": \"EXAMPLEBIOWGLDEXAMPLE\",\n \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n \"Path\": \"/\",\n \"DefaultVersionId\": \"v2\",\n [...]\n }\n}\n","aws iam get-policy-version --policy-arn $policy_arn --version-id v2\n# An example output is as follows\n{\n \"PolicyVersion\": {\n \"Document\": {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n },\n [...]\n }\n}\n","kubectl describe 
serviceaccount milvus-s3-access-sa -n milvus\n# An example output is as follows\nName: milvus-s3-access-sa\nNamespace: milvus\nLabels: app.kubernetes.io/managed-by=eksctl\nAnnotations: eks.amazonaws.com/role-arn: arn:aws:iam::12345678901:role/milvus-s3-access-sa\n[...]\n","helm repo add milvus https://zilliztech.github.io/milvus-helm/\nhelm repo update\n","cluster:\n enabled: true\n\nservice:\n type: LoadBalancer\n port: 19530\n annotations: \n service.beta.kubernetes.io/aws-load-balancer-type: external\n service.beta.kubernetes.io/aws-load-balancer-name: milvus-service\n service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing\n service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip\n\nserviceAccount:\n create: false\n name: milvus-s3-access-sa\n\nminio:\n enabled: false\n\nexternalS3:\n enabled: true\n host: \"s3.us-east-2.amazonaws.com\"\n port: \"443\"\n useSSL: true\n bucketName: \"\"\n useIAM: true\n cloudProvider: \"aws\"\n iamEndpoint: \"\"\n\nrootCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: 1\n memory: 2Gi\n\nindexCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nqueryCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\ndataCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nproxy:\n replicas: 2\n resources: \n limits:\n cpu: 1\n memory: 2Gi \n","helm upgrade --install milvus-demo milvus/milvus -n milvus -f milvus.yaml\n"],"headingContent":"","anchorList":[{"label":"Configurar Acceso S3 por Rol IAM","href":"Configure-S3-Access-by-IAM-Role","type":1,"isActive":false},{"label":"Antes de comenzar","href":"Before-you-start","type":2,"isActive":false},{"label":"Asociar un rol de IAM con una cuenta de servicio de Kubernetes","href":"Associate-an-IAM-role-with-a-Kubernetes-service-account","type":2,"isActive":false},{"label":"Verifique la configuración de la función y la cuenta de servicio","href":"Verify-the-role-and-service-account-setup","type":2,"isActive":false},{"label":"Despliegue de Milvus","href":"Deploy-Milvus","type":2,"isActive":false},{"label":"Verifique la instalación","href":"Verify-the-installation","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["milvus_bucket_name=\"milvus-bucket-$(openssl rand -hex 12)\"\n\naws s3api create-bucket --bucket \"$milvus_bucket_name\" --region 'us-east-2' --acl private --object-ownership ObjectWriter --create-bucket-configuration LocationConstraint='us-east-2'\n\n\n# Output\n#\n# \"Location\": \"http://milvus-bucket-039dd013c0712f085d60e21f.s3.amazonaws.com/\"\n","echo '{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:ListBucket\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\"\n ]\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:DeleteObject\",\n \"s3:GetObject\",\n \"s3:PutObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n}' > milvus-s3-policy.json\n\naws iam create-policy --policy-name MilvusS3ReadWrite --policy-document file://milvus-s3-policy.json\n\n\n# Get the ARN from the command output as follows:\n# {\n# \"Policy\": {\n# \"PolicyName\": \"MilvusS3ReadWrite\",\n# \"PolicyId\": \"AN5QQVVPM1BVTFlBNkdZT\",\n# \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n# \"Path\": \"/\",\n# \"DefaultVersionId\": \"v1\",\n# \"AttachmentCount\": 0,\n# 
\"PermissionsBoundaryUsageCount\": 0,\n# \"IsAttachable\": true,\n# \"CreateDate\": \"2023-11-16T06:00:01+00:00\",\n# \"UpdateDate\": \"2023-11-16T06:00:01+00:00\"\n# }\n# } \n","eksctl create iamserviceaccount --name milvus-s3-access-sa --namespace milvus --cluster milvus-eks-cluster --role-name milvus-s3-access-sa \\\n --attach-policy-arn arn:aws:iam:::policy/MilvusS3ReadWrite --approve\n","aws iam get-role --role-name milvus-s3-access-sa --query Role.AssumeRolePolicyDocument\n# An example output is as follows\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Federated\": \"arn:aws:iam::111122223333:oidc-provider/oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE\"\n },\n \"Action\": \"sts:AssumeRoleWithWebIdentity\",\n \"Condition\": {\n \"StringEquals\": {\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:sub\": \"system:serviceaccount:default:my-service-account\",\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:aud\": \"sts.amazonaws.com\"\n }\n }\n }\n ]\n}\n","aws iam list-attached-role-policies --role-name milvus-s3-access-sa --query 'AttachedPolicies[].PolicyArn' --output text\n# An example output is as follows\narn:aws:iam::12345678901:policy/MilvusS3ReadWrite\n","export policy_arn='arn:aws:iam::12345678901:policy/MilvusS3ReadWrite'\naws iam get-policy --policy-arn $policy_arn\n# An example output is as follows\n{\n \"Policy\": {\n \"PolicyName\": \"MilvusS3ReadWrite\",\n \"PolicyId\": \"EXAMPLEBIOWGLDEXAMPLE\",\n \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n \"Path\": \"/\",\n \"DefaultVersionId\": \"v2\",\n [...]\n }\n}\n","aws iam get-policy-version --policy-arn $policy_arn --version-id v2\n# An example output is as follows\n{\n \"PolicyVersion\": {\n \"Document\": {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n },\n [...]\n }\n}\n","kubectl describe serviceaccount milvus-s3-access-sa -n milvus\n# An example output is as follows\nName: milvus-s3-access-sa\nNamespace: milvus\nLabels: app.kubernetes.io/managed-by=eksctl\nAnnotations: eks.amazonaws.com/role-arn: arn:aws:iam::12345678901:role/milvus-s3-access-sa\n[...]\n","helm repo add milvus https://zilliztech.github.io/milvus-helm/\nhelm repo update\n","cluster:\n enabled: true\n\nservice:\n type: LoadBalancer\n port: 19530\n annotations: \n service.beta.kubernetes.io/aws-load-balancer-type: external\n service.beta.kubernetes.io/aws-load-balancer-name: milvus-service\n service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing\n service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip\n\nserviceAccount:\n create: false\n name: milvus-s3-access-sa\n\nminio:\n enabled: false\n\nexternalS3:\n enabled: true\n host: \"s3.us-east-2.amazonaws.com\"\n port: \"443\"\n useSSL: true\n bucketName: \"\"\n useIAM: true\n cloudProvider: \"aws\"\n iamEndpoint: \"\"\n\nrootCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: 1\n memory: 2Gi\n\nindexCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nqueryCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\ndataCoordinator:\n replicas: 2\n activeStandby:\n 
enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nproxy:\n replicas: 2\n resources: \n limits:\n cpu: 1\n memory: 2Gi \n","helm upgrade --install milvus-demo milvus/milvus -n milvus -f milvus.yaml\n"],"headingContent":"Configure S3 Access by IAM Role","anchorList":[{"label":"Configurar Acceso S3 por Rol IAM","href":"Configure-S3-Access-by-IAM-Role","type":1,"isActive":false},{"label":"Antes de comenzar","href":"Before-you-start","type":2,"isActive":false},{"label":"Asociar un rol de IAM con una cuenta de servicio de Kubernetes","href":"Associate-an-IAM-role-with-a-Kubernetes-service-account","type":2,"isActive":false},{"label":"Verifique la configuración de la función y la cuenta de servicio","href":"Verify-the-role-and-service-account-setup","type":2,"isActive":false},{"label":"Despliegue de Milvus","href":"Deploy-Milvus","type":2,"isActive":false},{"label":"Verifique la instalación","href":"Verify-the-installation","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/adminGuide/clouds/aws/s3.md b/localization/v2.4.x/site/es/adminGuide/clouds/aws/s3.md index b149cb158..5dfc7a9d5 100644 --- a/localization/v2.4.x/site/es/adminGuide/clouds/aws/s3.md +++ b/localization/v2.4.x/site/es/adminGuide/clouds/aws/s3.md @@ -19,7 +19,7 @@ summary: Aprenda a configurar s3 con IAM Role. d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

      This topic introduces how to configure s3 access by IAM Role when you install Milvus with helm. For more details, refer to IAM Roles.

      +

      This topic introduces how to configure s3 access by IAM Role when you install Milvus with helm. For more details, refer to IAM Roles.

      Before you start

      Refer to IAM Roles.

      +

      Refer to IAM Roles.

      • Confirm that the trust policy of the IAM role is configured correctly.
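      A quick way to check this is the same command the verification section below relies on (the role name assumes you followed this guide's naming):

        aws iam get-role --role-name milvus-s3-access-sa --query Role.AssumeRolePolicyDocument
        # The output should reference your cluster's OIDC provider and allow
        # the sts:AssumeRoleWithWebIdentity action for the service account.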
      diff --git a/localization/v2.4.x/site/es/adminGuide/clouds/openshift/openshift.json b/localization/v2.4.x/site/es/adminGuide/clouds/openshift/openshift.json index d313d4b07..dec4e83af 100644 --- a/localization/v2.4.x/site/es/adminGuide/clouds/openshift/openshift.json +++ b/localization/v2.4.x/site/es/adminGuide/clouds/openshift/openshift.json @@ -1 +1 @@ -{"codeList":["# milvus-operator-certificate.yaml\napiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\nname: milvus-operator-serving-cert\nnamespace: milvus-operator\nspec:\ndnsNames:\n- milvus-operator-webhook-service.milvus-operator.svc\n- milvus-operator-webhook-service.milvus-operator.svc.cluster.local\nissuerRef:\n kind: Issuer\n name: milvus-operator-selfsigned-issuer\nsecretName: milvus-operator-webhook-cert\n---\napiVersion: cert-manager.io/v1\nkind: Issuer\nmetadata:\nname: milvus-operator-selfsigned-issuer\nnamespace: milvus-operator\nspec:\nselfSigned: {}\n","kubectl apply -f milvus-operator-certificate.yaml\n","helm repo add milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update milvus-operator\n","helm -n milvus-operator upgrade --install --create-namespace milvus-operator milvus-operator/milvus-operator\n"],"headingContent":"","anchorList":[{"label":"Desplegar un clúster Milvus en OpenShift","href":"Deploy-a-Milvus-Cluster-on-OpenShift","type":1,"isActive":false},{"label":"Requisitos previos","href":"Prerequisites","type":2,"isActive":false},{"label":"Paso 1: Instalar Cert Manager","href":"Step-1-Install-Cert-Manager","type":2,"isActive":false},{"label":"Paso 2: Emitir un certificado autofirmado para Milvus Operator","href":"Step-2-Issue-a-Self-Signed-Certificate-for-Milvus-Operator","type":2,"isActive":false},{"label":"Paso 3: Instalar Milvus Operator","href":"Step-3-Install-Milvus-Operator","type":2,"isActive":false},{"label":"Paso 4: Desplegar Milvus","href":"Step-4-Deploy-Milvus","type":2,"isActive":false},{"label":"Lo que sigue","href":"Whats-Next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["# milvus-operator-certificate.yaml\napiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\n name: milvus-operator-serving-cert\n namespace: milvus-operator\nspec:\n dnsNames:\n - milvus-operator-webhook-service.milvus-operator.svc\n - milvus-operator-webhook-service.milvus-operator.svc.cluster.local\n issuerRef:\n kind: Issuer\n name: milvus-operator-selfsigned-issuer\n secretName: milvus-operator-webhook-cert\n---\napiVersion: cert-manager.io/v1\nkind: Issuer\nmetadata:\n name: milvus-operator-selfsigned-issuer\n namespace: milvus-operator\nspec:\n selfSigned: {}\n","kubectl apply -f milvus-operator-certificate.yaml\n","helm repo add milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update milvus-operator\n","helm -n milvus-operator upgrade --install --create-namespace milvus-operator milvus-operator/milvus-operator\n"],"headingContent":"Deploy a Milvus Cluster on OpenShift","anchorList":[{"label":"Desplegar un clúster Milvus en OpenShift","href":"Deploy-a-Milvus-Cluster-on-OpenShift","type":1,"isActive":false},{"label":"Requisitos previos","href":"Prerequisites","type":2,"isActive":false},{"label":"Paso 1: Instalar Cert Manager","href":"Step-1-Install-Cert-Manager","type":2,"isActive":false},{"label":"Paso 2: Emitir un certificado autofirmado para Milvus Operator","href":"Step-2-Issue-a-Self-Signed-Certificate-for-Milvus-Operator","type":2,"isActive":false},{"label":"Paso 3: Instalar Milvus 
Operator","href":"Step-3-Install-Milvus-Operator","type":2,"isActive":false},{"label":"Paso 4: Desplegar Milvus","href":"Step-4-Deploy-Milvus","type":2,"isActive":false},{"label":"Lo que sigue","href":"Whats-Next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/adminGuide/clouds/openshift/openshift.md b/localization/v2.4.x/site/es/adminGuide/clouds/openshift/openshift.md index 8ead9e131..fddc146b8 100644 --- a/localization/v2.4.x/site/es/adminGuide/clouds/openshift/openshift.md +++ b/localization/v2.4.x/site/es/adminGuide/clouds/openshift/openshift.md @@ -1,6 +1,6 @@ --- id: openshift.md -title: Despliegue de un clúster Milvus en OpenShift +title: Desplegar un clúster Milvus en OpenShift related_key: cluster summary: Aprenda a desplegar un clúster Milvus en OpenShift. --- @@ -96,24 +96,24 @@ summary: Aprenda a desplegar un clúster Milvus en OpenShift. apiVersion: cert-manager.io/v1 kind: Certificate metadata: -name: milvus-operator-serving-cert -namespace: milvus-operator + name: milvus-operator-serving-cert + namespace: milvus-operator spec: -dnsNames: -- milvus-operator-webhook-service.milvus-operator.svc -- milvus-operator-webhook-service.milvus-operator.svc.cluster.local -issuerRef: + dnsNames: + - milvus-operator-webhook-service.milvus-operator.svc + - milvus-operator-webhook-service.milvus-operator.svc.cluster.local + issuerRef: kind: Issuer name: milvus-operator-selfsigned-issuer -secretName: milvus-operator-webhook-cert + secretName: milvus-operator-webhook-cert --- apiVersion: cert-manager.io/v1 kind: Issuer metadata: -name: milvus-operator-selfsigned-issuer -namespace: milvus-operator + name: milvus-operator-selfsigned-issuer + namespace: milvus-operator spec: -selfSigned: {} + selfSigned: {}
  • Apply the file:

    kubectl apply -f milvus-operator-certificate.yaml
    diff --git a/localization/v2.4.x/site/es/adminGuide/configure-docker.json b/localization/v2.4.x/site/es/adminGuide/configure-docker.json
    index 86559fcfd..4d47804dc 100644
    --- a/localization/v2.4.x/site/es/adminGuide/configure-docker.json
    +++ b/localization/v2.4.x/site/es/adminGuide/configure-docker.json
    @@ -1 +1 @@
    -{"codeList":["$ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.9/configs/milvus.yaml\n","# For Milvus standalone\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml\n","...\n  standalone:\n    container_name: milvus-standalone\n    image: milvusdb/milvus:v2.2.13\n    command: [\"milvus\", \"run\", \"standalone\"]\n    environment:\n      ETCD_ENDPOINTS: etcd:2379\n      MINIO_ADDRESS: minio:9000\n    volumes:\n      - /local/path/to/your/milvus.yaml:/milvus/configs/milvus.yaml   # Map the local path to the container path\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n    ports:\n      - \"19530:19530\"\n      - \"9091:9091\"\n    depends_on:\n      - \"etcd\"\n      - \"minio\"\n...\n","$ sudo docker compose up -d\n"],"headingContent":"","anchorList":[{"label":"Configurar Milvus con Docker Compose","href":"Configure-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"Descargar un archivo de configuración","href":"Download-a-configuration-file","type":2,"isActive":false},{"label":"Modifique el archivo de configuración","href":"Modify-the-configuration-file","type":2,"isActive":false},{"label":"Descargar un archivo de instalación","href":"Download-an-installation-file","type":2,"isActive":false},{"label":"Modifique el archivo de instalación","href":"Modify-the-installation-file","type":2,"isActive":false},{"label":"Iniciar Milvus","href":"Start-Milvus","type":2,"isActive":false},{"label":"A continuación","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.13-hotfix/configs/milvus.yaml\n","# For Milvus standalone\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml\n","...\n  standalone:\n    container_name: milvus-standalone\n    image: milvusdb/milvus:v2.2.13\n    command: [\"milvus\", \"run\", \"standalone\"]\n    environment:\n      ETCD_ENDPOINTS: etcd:2379\n      MINIO_ADDRESS: minio:9000\n    volumes:\n      - /local/path/to/your/milvus.yaml:/milvus/configs/milvus.yaml   # Map the local path to the container path\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n    ports:\n      - \"19530:19530\"\n      - \"9091:9091\"\n    depends_on:\n      - \"etcd\"\n      - \"minio\"\n...\n","$ sudo docker compose up -d\n"],"headingContent":"Configure Milvus with Docker Compose","anchorList":[{"label":"Configurar Milvus con Docker Compose","href":"Configure-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"Descargar un archivo de configuración","href":"Download-a-configuration-file","type":2,"isActive":false},{"label":"Modifique el archivo de configuración","href":"Modify-the-configuration-file","type":2,"isActive":false},{"label":"Descargar un archivo de instalación","href":"Download-an-installation-file","type":2,"isActive":false},{"label":"Modifique el archivo de instalación","href":"Modify-the-installation-file","type":2,"isActive":false},{"label":"Iniciar Milvus","href":"Start-Milvus","type":2,"isActive":false},{"label":"A continuación","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/es/adminGuide/configure-docker.md b/localization/v2.4.x/site/es/adminGuide/configure-docker.md
    index 7e7686f18..29651cfce 100644
    --- a/localization/v2.4.x/site/es/adminGuide/configure-docker.md
    +++ b/localization/v2.4.x/site/es/adminGuide/configure-docker.md
    @@ -38,8 +38,8 @@ En la versión actual, todos los parámetros tienen efecto sólo después de rei
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

    Download milvus.yaml directly or with the following command.

    -
    $ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.9/configs/milvus.yaml
    +    

    Download milvus.yaml directly or with the following command.

    +
    $ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.13-hotfix/configs/milvus.yaml
     

    Modify the configuration file

    Download the Milvus standalone installation file and save it as docker-compose.yml.

    +

    Download the Milvus standalone installation file and save it as docker-compose.yml.

    You can also run the following command.

    # For Milvus standalone
    -$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml
    +$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml
     

    Modify the installation file
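    The key change (shown in full in the codeList above) is the volume mapping that mounts your local milvus.yaml into the container; the local path is a placeholder for wherever you saved the file:

      volumes:
        - /local/path/to/your/milvus.yaml:/milvus/configs/milvus.yaml   # Map the local path to the container path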

    To configure the QueryNode of Milvus Distributed to use NVMe disk storage, you need to configure the worker nodes of the target Kubernetes clusters to store the containers and images on an NVMe disk. The procedure for this varies depending on the cloud provider.

    +

    To configure the QueryNode of Milvus Distributed to use NVMe disk storage, you need to configure the worker nodes of the target Kubernetes clusters to store the containers and images on an NVMe disk. The procedure for this varies depending on the cloud provider.

    AWS

    When using Amazon EKS, you can customize managed nodes with launch templates, in which you can specify the configuration settings for your node groups. The following is an example of how to mount an NVMe disk on the worker nodes of your Amazon EKS cluster:

    MIME-Version: 1.0
     Content-Type: multipart/mixed; boundary="==MYBOUNDARY=="
    @@ -215,7 +215,7 @@ apt-get install fio -y
     cd /data
     
     # write 10GB
    -fio -direct=1-iodepth=128 -rw=randwrite -ioengine=libaio -bs=4K -size=10G -numjobs=10 -runtime=600 -group_reporting -filename=test -name=Rand_Write_IOPS_Test
    +fio -direct=1 -iodepth=128 -rw=randwrite -ioengine=libaio -bs=4K -size=10G -numjobs=10 -runtime=600 -group_reporting -filename=test -name=Rand_Write_IOPS_Test
     
     # verify the read speed
     # compare with the disk performance indicators provided by various cloud providers.
    diff --git a/localization/v2.4.x/site/es/adminGuide/limit_collection_counts.json b/localization/v2.4.x/site/es/adminGuide/limit_collection_counts.json
    index 97ad7e9ad..f8f357d4c 100644
    --- a/localization/v2.4.x/site/es/adminGuide/limit_collection_counts.json
    +++ b/localization/v2.4.x/site/es/adminGuide/limit_collection_counts.json
    @@ -1 +1 @@
    -{"codeList":["rootCoord:\n    maxGeneralCapacity: 1024\n","60 (collections) x 2 (shards) x 4 (partitions) + 40 (collections) x 1 (shard) x 12 (partitions) = 960\n","failed checking constraint: sum_collections(parition*shard) exceeding the max general capacity:\n"],"headingContent":"","anchorList":[{"label":"Limitar el número de colecciones","href":"Limit-Collection-Counts","type":1,"isActive":false},{"label":"Opciones de configuración","href":"Configuration-options","type":2,"isActive":false},{"label":"Cálculo del número de colecciones","href":"Calculating-the-number-of-collections","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["rootCoord:\n    maxGeneralCapacity: 65536\n","60 (collections) x 2 (shards) x 4 (partitions) + 40 (collections) x 1 (shard) x 12 (partitions) = 960\n","failed checking constraint: sum_collections(parition*shard) exceeding the max general capacity:\n"],"headingContent":"Limit Collection Counts","anchorList":[{"label":"Limitar el número de colecciones","href":"Limit-Collection-Counts","type":1,"isActive":false},{"label":"Opciones de configuración","href":"Configuration-options","type":2,"isActive":false},{"label":"Cálculo del número de colecciones","href":"Calculating-the-number-of-collections","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/es/adminGuide/limit_collection_counts.md b/localization/v2.4.x/site/es/adminGuide/limit_collection_counts.md
    index dcf3f8f0f..1bc46a0e0 100644
    --- a/localization/v2.4.x/site/es/adminGuide/limit_collection_counts.md
    +++ b/localization/v2.4.x/site/es/adminGuide/limit_collection_counts.md
    @@ -1,7 +1,6 @@
     ---
     id: limit_collection_counts.md
     title: Limitar el número de cobros
    -summary: ''
     ---
     

Limit Collection Counts

    rootCoord:
    -    maxGeneralCapacity: 1024
    +    maxGeneralCapacity: 65536
     
    -

El parámetro maxGeneralCapacity sets the maximum number of collections that the current Milvus instance can hold. The default value is 1024.

    +

The maxGeneralCapacity parameter sets the maximum number of collections that the current Milvus instance can hold. The default value is 65536.

Calculating the number of collections

In a collection, you can configure multiple shards and partitions. Shards are logical units used to distribute data write operations across multiple data nodes. Partitions are logical units used to improve data retrieval efficiency by loading only a subset of the collection's data. When calculating the number of collections in the current Milvus instance, you also need to count the shards and partitions.

    +

In a collection, you can configure multiple shards and partitions. Shards are logical units used to distribute data write operations across multiple data nodes. Partitions are logical units used to improve data retrieval efficiency by loading only a subset of the collection's data. When calculating the number of collections in the current Milvus instance, you also need to count the shards and partitions.

For example, suppose you have already created 100 collections, with 2 shards and 4 partitions in 60 of them and with 1 shard and 12 partitions in the remaining 40 collections. The current number of collections can then be calculated as:

    60 (collections) x 2 (shards) x 4 (partitions) + 40 (collections) x 1 (shard) x 12 (partitions) = 960
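    As a cross-check, the same bookkeeping in a few lines of Python (the numbers are taken from the example above):

      # Milvus counts each collection as shards x partitions against
      # rootCoord.maxGeneralCapacity.
      groups = [
          (60, 2, 4),   # 60 collections with 2 shards and 4 partitions each
          (40, 1, 12),  # 40 collections with 1 shard and 12 partitions each
      ]
      usage = sum(n * shards * partitions for n, shards, partitions in groups)
      print(usage)  # 960, well under the default maxGeneralCapacity of 65536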
     
    diff --git a/localization/v2.4.x/site/es/adminGuide/rbac.json b/localization/v2.4.x/site/es/adminGuide/rbac.json index ab939ad05..470c69eb9 100644 --- a/localization/v2.4.x/site/es/adminGuide/rbac.json +++ b/localization/v2.4.x/site/es/adminGuide/rbac.json @@ -1 +1 @@ -{"codeList":["from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri='http://localhost:19530', # replace with your own Milvus server address\n token='root:Milvus' # replace with your own Milvus server token\n)\n","client.create_user(\n user_name='user_1',\n password='P@ssw0rd'\n)\n","client.update_password(\n user_name='user_1',\n old_password='P@ssw0rd',\n new_password='P@ssw0rd123'\n)\n","client.list_users()\n\n# output:\n# ['root', 'user_1']\n","client.describe_user(user_name='user_1')\n\n# output:\n# {'user_name': 'user_1', 'roles': ()}\n","client.create_role(\n role_name=\"roleA\",\n)\n","client.list_roles()\n\n# output:\n# ['admin', 'public', 'roleA']\n","# grant privilege to a role\n\nclient.grant_privilege(\n role_name='roleA',\n object_type='User', # value here can be Global, Collection or User, object type also depends on the API defined in privilegeName\n object_name='user_1', # value here can be * or a specific user name if object type is 'User'\n privilege='SelectUser'\n)\n","client.describe_role(\n role_name='roleA'\n)\n\n# output:\n# {'role': 'roleA',\n# 'privileges': [{'object_type': 'User',\n# 'object_name': 'user_1',\n# 'db_name': 'default',\n# 'role_name': 'roleA',\n# 'privilege': 'SelectUser',\n# 'grantor_name': 'root'}]}\n","# grant a role to a user\n\nclient.grant_role(\n user_name='user_1',\n role_name='roleA'\n)\n","client.describe_user(\n user_name='user_1'\n)\n\n# output:\n# {'user_name': 'user_1', 'roles': ('roleA')}\n","client.revoke_privilege(\n role_name='roleA',\n object_type='User', # value here can be Global, Collection or User, object type also depends on the API defined in privilegeName\n object_name='user_1', # value here can be * or a specific user name if object type is 'User'\n privilege='SelectUser'\n)\n","client.revoke_role(\n user_name='user_1',\n role_name='roleA'\n)\n","client.drop_role(role_name='roleA')\n","client.drop_user(user_name='user_1')\n"],"headingContent":"","anchorList":[{"label":"Habilitar RBAC","href":"Enable-RBAC","type":1,"isActive":false},{"label":"1. Inicie un cliente Milvus para establecer una conexión","href":"1-Initiate-a-Milvus-client-to-establish-a-connection","type":2,"isActive":false},{"label":"2. Cree un usuario","href":"2-Create-a-user","type":2,"isActive":false},{"label":"3. Crear un rol","href":"3-Create-a-role","type":2,"isActive":false},{"label":"4. 4. Conceder un privilegio a un rol","href":"4-Grant-a-privilege-to-a-role","type":2,"isActive":false},{"label":"5. Conceder un rol a un usuario","href":"5-Grant-a-role-to-a-user","type":2,"isActive":false},{"label":"6. 
Revocar privilegios","href":"6-Revoke-privileges","type":2,"isActive":false},{"label":"A continuación","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri='http://localhost:19530', # replace with your own Milvus server address\n token='root:Milvus' # replace with your own Milvus server token\n)\n","client.create_user(\n user_name='user_1',\n password='P@ssw0rd'\n)\n","client.update_password(\n user_name='user_1',\n old_password='P@ssw0rd',\n new_password='P@ssw0rd123'\n)\n","client.list_users()\n\n# output:\n# ['root', 'user_1']\n","client.describe_user(user_name='user_1')\n\n# output:\n# {'user_name': 'user_1', 'roles': ()}\n","client.create_role(\n role_name=\"roleA\",\n)\n","client.list_roles()\n\n# output:\n# ['admin', 'public', 'roleA']\n","# grant privilege to a role\n\nclient.grant_privilege(\n role_name='roleA',\n object_type='User', # value here can be Global, Collection or User, object type also depends on the API defined in privilegeName\n object_name='user_1', # value here can be * or a specific user name if object type is 'User'\n privilege='SelectUser'\n)\n","client.describe_role(\n role_name='roleA'\n)\n\n# output:\n# {'role': 'roleA',\n# 'privileges': [{'object_type': 'User',\n# 'object_name': 'user_1',\n# 'db_name': 'default',\n# 'role_name': 'roleA',\n# 'privilege': 'SelectUser',\n# 'grantor_name': 'root'}]}\n","# grant a role to a user\n\nclient.grant_role(\n user_name='user_1',\n role_name='roleA'\n)\n","client.describe_user(\n user_name='user_1'\n)\n\n# output:\n# {'user_name': 'user_1', 'roles': ('roleA')}\n","client.revoke_privilege(\n role_name='roleA',\n object_type='User', # value here can be Global, Collection or User, object type also depends on the API defined in privilegeName\n object_name='user_1', # value here can be * or a specific user name if object type is 'User'\n privilege='SelectUser'\n)\n","client.revoke_role(\n user_name='user_1',\n role_name='roleA'\n)\n","client.drop_role(role_name='roleA')\n","client.drop_user(user_name='user_1')\n"],"headingContent":"Enable RBAC","anchorList":[{"label":"Habilitar RBAC","href":"Enable-RBAC","type":1,"isActive":false},{"label":"1. Inicie un cliente Milvus para establecer una conexión","href":"1-Initiate-a-Milvus-client-to-establish-a-connection","type":2,"isActive":false},{"label":"2. Cree un usuario","href":"2-Create-a-user","type":2,"isActive":false},{"label":"3. Crear un rol","href":"3-Create-a-role","type":2,"isActive":false},{"label":"4. 4. Conceder un privilegio a un rol","href":"4-Grant-a-privilege-to-a-role","type":2,"isActive":false},{"label":"5. Conceder un rol a un usuario","href":"5-Grant-a-role-to-a-user","type":2,"isActive":false},{"label":"6. Revocar privilegios","href":"6-Revoke-privileges","type":2,"isActive":false},{"label":"A continuación","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/adminGuide/rbac.md b/localization/v2.4.x/site/es/adminGuide/rbac.md index 8f446a8f5..0f50a32bc 100644 --- a/localization/v2.4.x/site/es/adminGuide/rbac.md +++ b/localization/v2.4.x/site/es/adminGuide/rbac.md @@ -2,7 +2,7 @@ id: rbac.md related_key: enable RBAC summary: 'Aprenda a gestionar usuarios, funciones y privilegios.' -title: Activar RBAC +title: Habilitar RBAC ---

    Enable RBAC

    By enabling RBAC, you can control access to specific Milvus resources (e.g., a collection or a partition) or permissions based on the user's role and privileges. Currently, this feature is only available in Python and Java.

    This topic describes how to enable RBAC and manage users and roles.
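    Note that RBAC is switched on in the server configuration rather than from the client. A minimal sketch, assuming the milvus.yaml layout of Milvus 2.4 (as noted earlier, configuration changes take effect only after a restart):

      common:
        security:
          authorizationEnabled: true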

    -

    The code snippets on this page use the new MilvusClient (Python) to interact with Milvus. New MilvusClient SDKs for other languages will be released in future updates.

    +

    The code snippets on this page use the new MilvusClient (Python) to interact with Milvus. New MilvusClient SDKs for other languages will be released in future updates.

    1. Initiate a Milvus client to establish a connection

    -

    All code examples on this page are in PyMilvus 2.4.5. Upgrade your PyMilvus installation before running them.

    +

    All code examples on this page are in PyMilvus 2.4.8. Upgrade your PyMilvus installation before running them.
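    For reference, the connection step from the codeList above looks like this; the URI and token are placeholders for your own deployment:

      from pymilvus import MilvusClient

      client = MilvusClient(
          uri='http://localhost:19530',  # replace with your own Milvus server address
          token='root:Milvus'            # replace with your own Milvus server token
      )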

    1. Create a resource group.

      @@ -183,7 +183,7 @@ partition.load(replica_number=2, _resource_grou

      Note that _resource_groups is an optional parameter; if you leave it unspecified, Milvus loads the replicas onto the query nodes in the default resource group.

      To have Milvus load each replica of a collection into a separate resource group, make sure that the number of resource groups equals the number of replicas.

    2. Transfer replicas between resource groups.

      -

      Milvus uses replicas to achieve load balancing among segments distributed across multiple query nodes. You can move certain replicas of a collection from one resource group to another as follows:

      +

      Milvus uses replicas to balance the load among segments distributed across multiple query nodes. You can move certain replicas of a collection from one resource group to another as follows:

      source = '__default_resource_group'
       target = 'rg'
       collection_name = 'c'
      @@ -212,7 +212,7 @@ num_replicas = 1
           print(f"Something went wrong while dropping {source}.")
       
    -

    For more details, refer to the relevant examples in pymilvus

    +

    For more information, refer to the corresponding examples in pymilvus.

    A good practice for managing cluster scaling

    Currently, Milvus cannot scale independently in cloud-native environments. However, by using the Declarative Resource Group API together with container orchestration, Milvus can easily achieve resource isolation and management for QueryNodes. Here is a good practice for managing QueryNodes in a cloud environment:

      -
    1. By default, Milvus creates a __default_resource_group. This resource group cannot be deleted, it also serves as the default loading resource group for all collections, and redundant QueryNodes are always assigned to it. Therefore, we can create a pending resource group to hold unused QueryNode resources, preventing QueryNode resources from being occupied by the __default_resource_group.

      +
    2. By default, Milvus creates a __default_resource_group. This resource group cannot be deleted, it also serves as the default loading resource group for all collections, and redundant QueryNodes are always assigned to it. Therefore, we can create a pending resource group to retain unused QueryNode resources, preventing QueryNode resources from being occupied by the __default_resource_group.

      In addition, if we strictly enforce the constraint sum(.requests.nodeNum) <= queryNodeNum, we can precisely control the allocation of QueryNodes in the cluster. Suppose there is currently only one QueryNode in the cluster, and let's initialize the cluster. Here is an example configuration:

      from pymilvus import utility
       from pymilvus.client.types import ResourceGroupConfig
      @@ -296,7 +296,7 @@ scale_to(5)
       # rg1 has 3 nodes, rg2 has 1 node, __default_resource_group has 1 node.
       
    3. Scale in the cluster

      -

      Similarly, we can set scale-in rules that prioritize selecting QueryNodes from the __pending_nodes resource group. This information can be obtained through the describe_resource_group API, achieving the goal of scaling in within the specified resource group.

      +

      Similarly, we can set scaling rules that prioritize selecting QueryNodes from the __pending_nodes resource group. This information can be obtained through the describe_resource_group API, achieving the goal of scaling in within the specified resource group.

      # scale rg1 from 3 nodes into 2 nodes
       utility.update_resource_groups({
           "rg1": ResourceGroupConfig(
      diff --git a/localization/v2.4.x/site/es/adminGuide/tls.json b/localization/v2.4.x/site/es/adminGuide/tls.json
      index 78a195500..e84144c0a 100644
      --- a/localization/v2.4.x/site/es/adminGuide/tls.json
      +++ b/localization/v2.4.x/site/es/adminGuide/tls.json
      @@ -1 +1 @@
      -{"codeList":["openssl version\n","sudo apt install openssl\n","mkdir cert && cd cert\ntouch openssl.cnf gen.sh\n","#\n# OpenSSL example configuration file.\n# This is mostly being used for generation of certificate requests.\n#\n\n# This definition stops the following lines choking if HOME isn't\n# defined.\nHOME            = .\nRANDFILE        = $ENV::HOME/.rnd\n\n# Extra OBJECT IDENTIFIER info:\n#oid_file       = $ENV::HOME/.oid\noid_section     = new_oids\n\n# To use this configuration file with the \"-extfile\" option of the\n# \"openssl x509\" utility, name here the section containing the\n# X.509v3 extensions to use:\n# extensions        = \n# (Alternatively, use a configuration file that has only\n# X.509v3 extensions in its main [= default] section.)\n\n[ new_oids ]\n\n# We can add new OIDs in here for use by 'ca', 'req' and 'ts'.\n# Add a simple OID like this:\n# testoid1=1.2.3.4\n# Or use config file substitution like this:\n# testoid2=${testoid1}.5.6\n\n# Policies used by the TSA examples.\ntsa_policy1 = 1.2.3.4.1\ntsa_policy2 = 1.2.3.4.5.6\ntsa_policy3 = 1.2.3.4.5.7\n\n####################################################################\n[ ca ]\ndefault_ca  = CA_default        # The default ca section\n\n####################################################################\n[ CA_default ]\n\ndir     = ./demoCA      # Where everything is kept\ncerts       = $dir/certs        # Where the issued certs are kept\ncrl_dir     = $dir/crl      # Where the issued crl are kept\ndatabase    = $dir/index.txt    # database index file.\n#unique_subject = no            # Set to 'no' to allow creation of\n                    # several ctificates with same subject.\nnew_certs_dir   = $dir/newcerts     # default place for new certs.\n\ncertificate = $dir/cacert.pem   # The CA certificate\nserial      = $dir/serial       # The current serial number\ncrlnumber   = $dir/crlnumber    # the current crl number\n                    # must be commented out to leave a V1 CRL\ncrl     = $dir/crl.pem      # The current CRL\nprivate_key = $dir/private/cakey.pem# The private key\nRANDFILE    = $dir/private/.rand    # private random number file\n\nx509_extensions = usr_cert      # The extentions to add to the cert\n\n# Comment out the following two lines for the \"traditional\"\n# (and highly broken) format.\nname_opt    = ca_default        # Subject Name options\ncert_opt    = ca_default        # Certificate field options\n\n# Extension copying option: use with caution.\ncopy_extensions = copy\n\n# Extensions to add to a CRL. 
Note: Netscape communicator chokes on V2 CRLs\n# so this is commented out by default to leave a V1 CRL.\n# crlnumber must also be commented out to leave a V1 CRL.\n# crl_extensions    = crl_ext\n\ndefault_days    = 365           # how long to certify for\ndefault_crl_days= 30            # how long before next CRL\ndefault_md  = default       # use public key default MD\npreserve    = no            # keep passed DN ordering\n\n# A few difference way of specifying how similar the request should look\n# For type CA, the listed attributes must be the same, and the optional\n# and supplied fields are just that :-)\npolicy      = policy_match\n\n# For the CA policy\n[ policy_match ]\ncountryName     = match\nstateOrProvinceName = match\norganizationName    = match\norganizationalUnitName  = optional\ncommonName      = supplied\nemailAddress        = optional\n\n# For the 'anything' policy\n# At this point in time, you must list all acceptable 'object'\n# types.\n[ policy_anything ]\ncountryName     = optional\nstateOrProvinceName = optional\nlocalityName        = optional\norganizationName    = optional\norganizationalUnitName  = optional\ncommonName      = supplied\nemailAddress        = optional\n\n####################################################################\n[ req ]\ndefault_bits        = 2048\ndefault_keyfile     = privkey.pem\ndistinguished_name  = req_distinguished_name\nattributes      = req_attributes\nx509_extensions = v3_ca # The extentions to add to the self signed cert\n\n# Passwords for private keys if not present they will be prompted for\n# input_password = secret\n# output_password = secret\n\n# This sets a mask for permitted string types. There are several options. \n# default: PrintableString, T61String, BMPString.\n# pkix   : PrintableString, BMPString (PKIX recommendation before 2004)\n# utf8only: only UTF8Strings (PKIX recommendation after 2004).\n# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).\n# MASK:XXXX a literal mask value.\n# WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings.\nstring_mask = utf8only\n\nreq_extensions = v3_req # The extensions to add to a certificate request\n\n[ req_distinguished_name ]\ncountryName         = Country Name (2 letter code)\ncountryName_default     = AU\ncountryName_min         = 2\ncountryName_max         = 2\n\nstateOrProvinceName     = State or Province Name (full name)\nstateOrProvinceName_default = Some-State\n\nlocalityName            = Locality Name (eg, city)\n\n0.organizationName      = Organization Name (eg, company)\n0.organizationName_default  = Internet Widgits Pty Ltd\n\n# we can do this but it is not needed normally :-)\n#1.organizationName     = Second Organization Name (eg, company)\n#1.organizationName_default = World Wide Web Pty Ltd\n\norganizationalUnitName      = Organizational Unit Name (eg, section)\n#organizationalUnitName_default =\n\ncommonName          = Common Name (e.g. 
server FQDN or YOUR name)\ncommonName_max          = 64\n\nemailAddress            = Email Address\nemailAddress_max        = 64\n\n# SET-ex3           = SET extension number 3\n\n[ req_attributes ]\nchallengePassword       = A challenge password\nchallengePassword_min       = 4\nchallengePassword_max       = 20\n\nunstructuredName        = An optional company name\n\n[ usr_cert ]\n\n# These extensions are added when 'ca' signs a request.\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType            = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment           = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl      = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This is required for TSA certificates.\n# extendedKeyUsage = critical,timeStamping\n\n[ v3_req ]\n\n# Extensions to add to a certificate request\n\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n\n[ v3_ca ]\n\n\n# Extensions for a typical CA\n\n\n# PKIX recommendation.\n\nsubjectKeyIdentifier=hash\n\nauthorityKeyIdentifier=keyid:always,issuer\n\n# This is what PKIX recommends but some broken software chokes on critical\n# extensions.\n#basicConstraints = critical,CA:true\n# So we do this instead.\nbasicConstraints = CA:true\n\n# Key usage: this is typical for a CA certificate. 
However since it will\n# prevent it being used as an test self-signed certificate it is best\n# left out by default.\n# keyUsage = cRLSign, keyCertSign\n\n# Some might want this also\n# nsCertType = sslCA, emailCA\n\n# Include email address in subject alt name: another PKIX recommendation\n# subjectAltName=email:copy\n# Copy issuer details\n# issuerAltName=issuer:copy\n\n# DER hex encoding of an extension: beware experts only!\n# obj=DER:02:03\n# Where 'obj' is a standard or added object\n# You can even override a supported extension:\n# basicConstraints= critical, DER:30:03:01:01:FF\n\n[ crl_ext ]\n\n# CRL extensions.\n# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.\n\n# issuerAltName=issuer:copy\nauthorityKeyIdentifier=keyid:always\n\n[ proxy_cert_ext ]\n# These extensions should be added when creating a proxy certificate\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType            = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment           = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl      = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This really needs to be in place for it to be a proxy certificate.\nproxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo\n\n####################################################################\n[ tsa ]\n\ndefault_tsa = tsa_config1   # the default TSA section\n\n[ tsa_config1 ]\n\n# These are used by the TSA reply generation only.\ndir     = ./demoCA      # TSA root directory\nserial      = $dir/tsaserial    # The current serial number (mandatory)\ncrypto_device   = builtin       # OpenSSL engine to use for signing\nsigner_cert = $dir/tsacert.pem  # The TSA signing certificate\n                    # (optional)\ncerts       = $dir/cacert.pem   # Certificate chain to include in reply\n                    # (optional)\nsigner_key  = $dir/private/tsakey.pem # The TSA private key (optional)\n\ndefault_policy  = tsa_policy1       # Policy if request did not specify it\n                    # (optional)\nother_policies  = tsa_policy2, tsa_policy3  # acceptable policies (optional)\ndigests     = md5, sha1     # Acceptable message digests (mandatory)\naccuracy    = secs:1, millisecs:500, microsecs:100  # (optional)\nclock_precision_digits  = 0 # number of digits after dot. 
(optional)\nordering        = yes   # Is ordering defined for timestamps?\n                # (optional, default: no)\ntsa_name        = yes   # Must the TSA name be included in the reply?\n                # (optional, default: no)\ness_cert_id_chain   = no    # Must the ESS cert id chain be included?\n                # (optional, default: no)\n","#!/usr/bin/env sh\n# your variables\nCountry=\"CN\"\nState=\"Shanghai\"\nLocation=\"Shanghai\"\nOrganization=\"milvus\"\nOrganizational=\"milvus\"\nCommonName=\"localhost\"\n\necho \"generate ca.key\"\nopenssl genrsa -out ca.key 2048\n\necho \"generate ca.pem\"\nopenssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n\necho \"generate server SAN certificate\"\nopenssl genpkey -algorithm RSA -out server.key\nopenssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\necho \"generate client SAN certificate\"\nopenssl genpkey -algorithm RSA -out client.key\nopenssl req -new -nodes -key client.key -out client.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in client.csr -out client.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\n","chmod +x gen.sh\n./gen.sh\n","openssl genpkey -algorithm RSA -out ca.key\n","openssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n","openssl genpkey -algorithm RSA -out server.key\n","openssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\n","openssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n","tls:\n  serverPemPath: /milvus/tls/server.pem\n  serverKeyPath: /milvus/tls/server.key\n  caPemPath: /milvus/tls/ca.pem\n\ncommon:\n  security:\n    tlsMode: 1\n","├── docker-compose.yml\n├── milvus.yaml\n└── tls\n     ├── server.pem\n     ├── server.key\n     └── ca.pem\n","  standalone:\n    container_name: milvus-standalone\n    image: milvusdb/milvus:latest\n    command: [\"milvus\", \"run\", \"standalone\"]\n    security_opt:\n    - seccomp:unconfined\n    environment:\n      ETCD_ENDPOINTS: etcd:2379\n      MINIO_ADDRESS: minio:9000\n    volumes:\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/tls:/milvus/tls\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/milvus.yaml:/milvus/configs/milvus.yaml\n","sudo docker compose up -d\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n    uri=\"http://localhost:19530\",\n    secure=True,\n    server_pem_path=\"path_to/server.pem\",\n    server_name=\"localhost\"\n)\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n    uri=\"http://localhost:19530\",\n    secure=True,\n    client_pem_path=\"path_to/client.pem\",\n    client_key_path=\"path_to/client.key\",\n    ca_pem_path=\"path_to/ca.pem\",\n    server_name=\"localhost\"\n)\n"],"headingContent":"","anchorList":[{"label":"Cifrado en 
tránsito","href":"Encryption-in-Transit","type":1,"isActive":false},{"label":"Cree su propio certificado","href":"Create-your-own-certificate","type":2,"isActive":false},{"label":"Configurar un servidor Milvus con TLS","href":"Set-up-a-Milvus-server-with-TLS","type":2,"isActive":false},{"label":"Conectarse al servidor Milvus con TLS","href":"Connect-to-the-Milvus-server-with-TLS","type":2,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":["openssl version\n","sudo apt install openssl\n","mkdir cert && cd cert\ntouch openssl.cnf gen.sh\n","#\n# OpenSSL example configuration file.\n# This is mostly being used for generation of certificate requests.\n#\n\n# This definition stops the following lines choking if HOME isn't\n# defined.\nHOME            = .\nRANDFILE        = $ENV::HOME/.rnd\n\n# Extra OBJECT IDENTIFIER info:\n#oid_file       = $ENV::HOME/.oid\noid_section     = new_oids\n\n# To use this configuration file with the \"-extfile\" option of the\n# \"openssl x509\" utility, name here the section containing the\n# X.509v3 extensions to use:\n# extensions        = \n# (Alternatively, use a configuration file that has only\n# X.509v3 extensions in its main [= default] section.)\n\n[ new_oids ]\n\n# We can add new OIDs in here for use by 'ca', 'req' and 'ts'.\n# Add a simple OID like this:\n# testoid1=1.2.3.4\n# Or use config file substitution like this:\n# testoid2=${testoid1}.5.6\n\n# Policies used by the TSA examples.\ntsa_policy1 = 1.2.3.4.1\ntsa_policy2 = 1.2.3.4.5.6\ntsa_policy3 = 1.2.3.4.5.7\n\n####################################################################\n[ ca ]\ndefault_ca  = CA_default        # The default ca section\n\n####################################################################\n[ CA_default ]\n\ndir     = ./demoCA      # Where everything is kept\ncerts       = $dir/certs        # Where the issued certs are kept\ncrl_dir     = $dir/crl      # Where the issued crl are kept\ndatabase    = $dir/index.txt    # database index file.\n#unique_subject = no            # Set to 'no' to allow creation of\n                    # several ctificates with same subject.\nnew_certs_dir   = $dir/newcerts     # default place for new certs.\n\ncertificate = $dir/cacert.pem   # The CA certificate\nserial      = $dir/serial       # The current serial number\ncrlnumber   = $dir/crlnumber    # the current crl number\n                    # must be commented out to leave a V1 CRL\ncrl     = $dir/crl.pem      # The current CRL\nprivate_key = $dir/private/cakey.pem# The private key\nRANDFILE    = $dir/private/.rand    # private random number file\n\nx509_extensions = usr_cert      # The extentions to add to the cert\n\n# Comment out the following two lines for the \"traditional\"\n# (and highly broken) format.\nname_opt    = ca_default        # Subject Name options\ncert_opt    = ca_default        # Certificate field options\n\n# Extension copying option: use with caution.\ncopy_extensions = copy\n\n# Extensions to add to a CRL. 
Note: Netscape communicator chokes on V2 CRLs\n# so this is commented out by default to leave a V1 CRL.\n# crlnumber must also be commented out to leave a V1 CRL.\n# crl_extensions    = crl_ext\n\ndefault_days    = 365           # how long to certify for\ndefault_crl_days= 30            # how long before next CRL\ndefault_md  = default       # use public key default MD\npreserve    = no            # keep passed DN ordering\n\n# A few difference way of specifying how similar the request should look\n# For type CA, the listed attributes must be the same, and the optional\n# and supplied fields are just that :-)\npolicy      = policy_match\n\n# For the CA policy\n[ policy_match ]\ncountryName     = match\nstateOrProvinceName = match\norganizationName    = match\norganizationalUnitName  = optional\ncommonName      = supplied\nemailAddress        = optional\n\n# For the 'anything' policy\n# At this point in time, you must list all acceptable 'object'\n# types.\n[ policy_anything ]\ncountryName     = optional\nstateOrProvinceName = optional\nlocalityName        = optional\norganizationName    = optional\norganizationalUnitName  = optional\ncommonName      = supplied\nemailAddress        = optional\n\n####################################################################\n[ req ]\ndefault_bits        = 2048\ndefault_keyfile     = privkey.pem\ndistinguished_name  = req_distinguished_name\nattributes      = req_attributes\nx509_extensions = v3_ca # The extentions to add to the self signed cert\n\n# Passwords for private keys if not present they will be prompted for\n# input_password = secret\n# output_password = secret\n\n# This sets a mask for permitted string types. There are several options. \n# default: PrintableString, T61String, BMPString.\n# pkix   : PrintableString, BMPString (PKIX recommendation before 2004)\n# utf8only: only UTF8Strings (PKIX recommendation after 2004).\n# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).\n# MASK:XXXX a literal mask value.\n# WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings.\nstring_mask = utf8only\n\nreq_extensions = v3_req # The extensions to add to a certificate request\n\n[ req_distinguished_name ]\ncountryName         = Country Name (2 letter code)\ncountryName_default     = AU\ncountryName_min         = 2\ncountryName_max         = 2\n\nstateOrProvinceName     = State or Province Name (full name)\nstateOrProvinceName_default = Some-State\n\nlocalityName            = Locality Name (eg, city)\n\n0.organizationName      = Organization Name (eg, company)\n0.organizationName_default  = Internet Widgits Pty Ltd\n\n# we can do this but it is not needed normally :-)\n#1.organizationName     = Second Organization Name (eg, company)\n#1.organizationName_default = World Wide Web Pty Ltd\n\norganizationalUnitName      = Organizational Unit Name (eg, section)\n#organizationalUnitName_default =\n\ncommonName          = Common Name (e.g. 
server FQDN or YOUR name)\ncommonName_max          = 64\n\nemailAddress            = Email Address\nemailAddress_max        = 64\n\n# SET-ex3           = SET extension number 3\n\n[ req_attributes ]\nchallengePassword       = A challenge password\nchallengePassword_min       = 4\nchallengePassword_max       = 20\n\nunstructuredName        = An optional company name\n\n[ usr_cert ]\n\n# These extensions are added when 'ca' signs a request.\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType            = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment           = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl      = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This is required for TSA certificates.\n# extendedKeyUsage = critical,timeStamping\n\n[ v3_req ]\n\n# Extensions to add to a certificate request\n\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n\n[ v3_ca ]\n\n\n# Extensions for a typical CA\n\n\n# PKIX recommendation.\n\nsubjectKeyIdentifier=hash\n\nauthorityKeyIdentifier=keyid:always,issuer\n\n# This is what PKIX recommends but some broken software chokes on critical\n# extensions.\n#basicConstraints = critical,CA:true\n# So we do this instead.\nbasicConstraints = CA:true\n\n# Key usage: this is typical for a CA certificate. 
However since it will\n# prevent it being used as an test self-signed certificate it is best\n# left out by default.\n# keyUsage = cRLSign, keyCertSign\n\n# Some might want this also\n# nsCertType = sslCA, emailCA\n\n# Include email address in subject alt name: another PKIX recommendation\n# subjectAltName=email:copy\n# Copy issuer details\n# issuerAltName=issuer:copy\n\n# DER hex encoding of an extension: beware experts only!\n# obj=DER:02:03\n# Where 'obj' is a standard or added object\n# You can even override a supported extension:\n# basicConstraints= critical, DER:30:03:01:01:FF\n\n[ crl_ext ]\n\n# CRL extensions.\n# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.\n\n# issuerAltName=issuer:copy\nauthorityKeyIdentifier=keyid:always\n\n[ proxy_cert_ext ]\n# These extensions should be added when creating a proxy certificate\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType            = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment           = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl      = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This really needs to be in place for it to be a proxy certificate.\nproxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo\n\n####################################################################\n[ tsa ]\n\ndefault_tsa = tsa_config1   # the default TSA section\n\n[ tsa_config1 ]\n\n# These are used by the TSA reply generation only.\ndir     = ./demoCA      # TSA root directory\nserial      = $dir/tsaserial    # The current serial number (mandatory)\ncrypto_device   = builtin       # OpenSSL engine to use for signing\nsigner_cert = $dir/tsacert.pem  # The TSA signing certificate\n                    # (optional)\ncerts       = $dir/cacert.pem   # Certificate chain to include in reply\n                    # (optional)\nsigner_key  = $dir/private/tsakey.pem # The TSA private key (optional)\n\ndefault_policy  = tsa_policy1       # Policy if request did not specify it\n                    # (optional)\nother_policies  = tsa_policy2, tsa_policy3  # acceptable policies (optional)\ndigests     = md5, sha1     # Acceptable message digests (mandatory)\naccuracy    = secs:1, millisecs:500, microsecs:100  # (optional)\nclock_precision_digits  = 0 # number of digits after dot. 
(optional)\nordering        = yes   # Is ordering defined for timestamps?\n                # (optional, default: no)\ntsa_name        = yes   # Must the TSA name be included in the reply?\n                # (optional, default: no)\ness_cert_id_chain   = no    # Must the ESS cert id chain be included?\n                # (optional, default: no)\n","#!/usr/bin/env sh\n# your variables\nCountry=\"CN\"\nState=\"Shanghai\"\nLocation=\"Shanghai\"\nOrganization=\"milvus\"\nOrganizational=\"milvus\"\nCommonName=\"localhost\"\n\necho \"generate ca.key\"\nopenssl genrsa -out ca.key 2048\n\necho \"generate ca.pem\"\nopenssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n\necho \"generate server SAN certificate\"\nopenssl genpkey -algorithm RSA -out server.key\nopenssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\necho \"generate client SAN certificate\"\nopenssl genpkey -algorithm RSA -out client.key\nopenssl req -new -nodes -key client.key -out client.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in client.csr -out client.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\n","chmod +x gen.sh\n./gen.sh\n","openssl genpkey -algorithm RSA -out ca.key\n","openssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n","openssl genpkey -algorithm RSA -out server.key\n","openssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\n","openssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n","tls:\n  serverPemPath: /milvus/tls/server.pem\n  serverKeyPath: /milvus/tls/server.key\n  caPemPath: /milvus/tls/ca.pem\n\ncommon:\n  security:\n    tlsMode: 1\n","├── docker-compose.yml\n├── milvus.yaml\n└── tls\n     ├── server.pem\n     ├── server.key\n     └── ca.pem\n","  standalone:\n    container_name: milvus-standalone\n    image: milvusdb/milvus:latest\n    command: [\"milvus\", \"run\", \"standalone\"]\n    security_opt:\n    - seccomp:unconfined\n    environment:\n      ETCD_ENDPOINTS: etcd:2379\n      MINIO_ADDRESS: minio:9000\n    volumes:\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/tls:/milvus/tls\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/milvus.yaml:/milvus/configs/milvus.yaml\n","sudo docker compose up -d\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n    uri=\"https://localhost:19530\",\n    secure=True,\n    server_pem_path=\"path_to/server.pem\",\n    server_name=\"localhost\"\n)\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n    uri=\"https://localhost:19530\",\n    secure=True,\n    client_pem_path=\"path_to/client.pem\",\n    client_key_path=\"path_to/client.key\",\n    ca_pem_path=\"path_to/ca.pem\",\n    server_name=\"localhost\"\n)\n","curl --cacert path_to/ca.pem 
https://localhost:19530/v2/vectordb/collections/list\n","curl --cert path_to/client.pem --key path_to/client.key --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list\n"],"headingContent":"Encryption in Transit","anchorList":[{"label":"Cifrado en tránsito","href":"Encryption-in-Transit","type":1,"isActive":false},{"label":"Cree su propio certificado","href":"Create-your-own-certificate","type":2,"isActive":false},{"label":"Configurar un servidor Milvus con TLS","href":"Set-up-a-Milvus-server-with-TLS","type":2,"isActive":false},{"label":"Conectarse al servidor Milvus con TLS","href":"Connect-to-the-Milvus-server-with-TLS","type":2,"isActive":false},{"label":"Conectarse al servidor RESTful de Milvus con TLS","href":"Connect-to-the-Milvus-RESTful-server-with-TLS","type":2,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/es/adminGuide/tls.md b/localization/v2.4.x/site/es/adminGuide/tls.md
      index 5a195a197..580d98a9a 100644
      --- a/localization/v2.4.x/site/es/adminGuide/tls.md
      +++ b/localization/v2.4.x/site/es/adminGuide/tls.md
      @@ -18,10 +18,10 @@ summary: Aprenda a activar el proxy TLS en Milvus.
                 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
               >
             
      -    

TLS (Transport Layer Security) is an encryption protocol that ensures communication security. The Milvus proxy supports one-way and two-way TLS authentication.

This topic describes how to enable TLS on the Milvus proxy for both gRPC and RESTful traffic.

TLS and user authentication are two distinct security approaches. If you have enabled both user authentication and TLS in your Milvus system, you will need to provide a username, a password, and the certificate file paths. For information on how to enable user authentication, refer to Authenticate User Access.
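When both protections are enabled, the connection carries the credentials and the TLS material together. A minimal sketch in Python, assuming user authentication with the default root credentials; the `token` value and file paths are illustrative placeholders, not values from this page:

```python
from pymilvus import MilvusClient

# Hypothetical combined setup: user authentication plus one-way TLS.
# "root:Milvus" and the certificate paths are assumptions for illustration.
client = MilvusClient(
    uri="https://localhost:19530",
    token="root:Milvus",                   # username:password for user authentication
    secure=True,                           # enable TLS
    server_pem_path="path_to/server.pem",  # server certificate to verify
    server_name="localhost",
)
```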

Create your own certificate

See example_tls1.py and example_tls2.py for more information.
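For context, the one-way and two-way connection snippets those examples cover appear in this page's code listing (the JSON above) as follows:

```python
from pymilvus import MilvusClient

# One-way TLS: the client verifies the server certificate only.
client = MilvusClient(
    uri="https://localhost:19530",
    secure=True,
    server_pem_path="path_to/server.pem",
    server_name="localhost"
)

# Two-way TLS: the client also presents its own certificate and key.
client = MilvusClient(
    uri="https://localhost:19530",
    secure=True,
    client_pem_path="path_to/client.pem",
    client_key_path="path_to/client.key",
    ca_pem_path="path_to/ca.pem",
    server_name="localhost"
)
```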

      +

Connect to the Milvus RESTful server with TLS

For RESTful APIs, you can verify TLS with the curl command.

      +

One-way TLS connection

      curl --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list
      +
      +

Two-way TLS connection

      curl --cert path_to/client.pem --key path_to/client.key --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list
      +
      diff --git a/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_cluster-docker.json b/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_cluster-docker.json index f6a33e86a..2c2bb7941 100644 --- a/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_cluster-docker.json +++ b/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_cluster-docker.json @@ -1 +1 @@ -{"codeList":["...\nrootcoord:\n container_name: milvus-rootcoord\n image: milvusdb/milvus:v2.4.9\n...\nproxy:\n container_name: milvus-proxy\n image: milvusdb/milvus:v2.4.9\n...\nquerycoord:\n container_name: milvus-querycoord\n image: milvusdb/milvus:v2.4.9 \n...\nquerynode:\n container_name: milvus-querynode\n image: milvusdb/milvus:v2.4.9\n...\nindexcoord:\n container_name: milvus-indexcoord\n image: milvusdb/milvus:v2.4.9\n...\nindexnode:\n container_name: milvus-indexnode\n image: milvusdb/milvus:v2.4.9 \n...\ndatacoord:\n container_name: milvus-datacoord\n image: milvusdb/milvus:v2.4.9 \n...\ndatanode:\n container_name: milvus-datanode\n image: milvusdb/milvus:v2.4.9\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n # Option: run/backup/rollback\n type: run\n runWithBackup: true\nconfig:\n sourceVersion: 2.1.4 # Specify your milvus version\n targetVersion: 2.4.9\n backupFilePath: /tmp/migration.bak\nmetastore:\n type: etcd\netcd:\n endpoints:\n - milvus-etcd:2379 # Use the etcd container name\n rootPath: by-dev # The root path where data is stored in etcd\n metaSubPath: meta\n kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvus/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","Update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"","anchorList":[{"label":"Actualizar Milvus Cluster con Docker Compose","href":"Upgrade-Milvus-Cluster-with-Docker-Compose","type":1,"isActive":false},{"label":"Actualizar Milvus cambiando su imagen","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrar los metadatos","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"Lo que sigue","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["...\nrootcoord:\n container_name: milvus-rootcoord\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nproxy:\n container_name: milvus-proxy\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nquerycoord:\n container_name: milvus-querycoord\n image: milvusdb/milvus:v2.4.13-hotfix \n...\nquerynode:\n container_name: milvus-querynode\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nindexcoord:\n container_name: milvus-indexcoord\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nindexnode:\n container_name: milvus-indexnode\n image: milvusdb/milvus:v2.4.13-hotfix \n...\ndatacoord:\n container_name: milvus-datacoord\n image: milvusdb/milvus:v2.4.13-hotfix \n...\ndatanode:\n container_name: milvus-datanode\n image: milvusdb/milvus:v2.4.13-hotfix\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n # Option: run/backup/rollback\n type: run\n runWithBackup: true\nconfig:\n sourceVersion: 2.1.4 # Specify your milvus version\n targetVersion: 2.4.13-hotfix\n backupFilePath: /tmp/migration.bak\nmetastore:\n type: etcd\netcd:\n endpoints:\n - 
milvus-etcd:2379 # Use the etcd container name\n rootPath: by-dev # The root path where data is stored in etcd\n metaSubPath: meta\n kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvus/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","Update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"Upgrade Milvus Cluster with Docker Compose","anchorList":[{"label":"Actualizar Milvus Cluster con Docker Compose","href":"Upgrade-Milvus-Cluster-with-Docker-Compose","type":1,"isActive":false},{"label":"Actualizar Milvus cambiando su imagen","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrar los metadatos","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"Lo que sigue","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_cluster-docker.md b/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_cluster-docker.md index f3fde123d..4fa76284b 100644 --- a/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_cluster-docker.md +++ b/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_cluster-docker.md @@ -20,7 +20,7 @@ title: Actualizar Milvus Cluster con Docker Compose >

This topic describes how to upgrade your Milvus using Docker Compose.

-In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before any upgrade from v2.1.x to v2.4.9.

+In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before any upgrade from v2.1.x to v2.4.13-hotfix.

Upgrade Milvus by changing its image

• Run the following commands to perform the upgrade.

docker compose down
docker compose up -d
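The image change itself happens in docker-compose.yaml. This page's code listing (the JSON above) bumps the tag of every component; a shortened excerpt:

```yaml
...
rootcoord:
  container_name: milvus-rootcoord
  image: milvusdb/milvus:v2.4.13-hotfix
...
proxy:
  container_name: milvus-proxy
  image: milvusdb/milvus:v2.4.13-hotfix
...
# and likewise for querycoord, querynode, indexcoord, indexnode,
# datacoord, and datanode
```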
    @@ -105,7 +105,7 @@ cmd:
       runWithBackup: true
     config:
       sourceVersion: 2.1.4   # Specify your milvus version
    -  targetVersion: 2.4.9
    +  targetVersion: 2.4.13-hotfix
       backupFilePath: /tmp/migration.bak
     metastore:
       type: etcd
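With migration.yaml in place, this page's code listing runs the migration in a one-off container, assuming the default Docker Compose network named milvus:

```bash
# Suppose your docker-compose runs with the default milvus network,
# and migration.yaml sits next to docker-compose.yaml.
docker run --rm -it --network milvus \
  -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml \
  milvus/meta-migration:v2.2.0 \
  /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml
```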
    diff --git a/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_cluster-helm.json b/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_cluster-helm.json
    index 220335201..cd00b0fac 100644
    --- a/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_cluster-helm.json
    +++ b/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_cluster-helm.json
    @@ -1 +1 @@
    -{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME                    CHART VERSION   APP VERSION             DESCRIPTION                                       \nzilliztech/milvus       4.1.34          2.4.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.33          2.4.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.32          2.4.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.31          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.30          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.29          2.4.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.24          2.3.11                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.23          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.22          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.21          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.20          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.18          2.3.10                  Milvus is an open-source vector database built ... \nzilliztech/milvus       4.1.18          2.3.9                   Milvus is an open-source vector database built ...                                       
\nzilliztech/milvus       4.1.17          2.3.8                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.16          2.3.7                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.15          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.14          2.3.6                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.13          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.12          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.11          2.3.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.10          2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.9           2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.8           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.7           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.6           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.5           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.4           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.3           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.2           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.1           2.3.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'\n","helm repo update\nhelm upgrade my-release zilliztech/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME                NAMESPACE   REVISION    UPDATED                                 STATUS      CHART           APP VERSION    \nnew-release         default     1           2022-11-21 15:41:25.51539 +0800 CST     deployed    milvus-3.2.18   2.1.4 \n","NAME                                             READY   STATUS      RESTARTS   AGE\nmy-release-etcd-0                               1/1     Running     0          21m\nmy-release-etcd-1                               1/1     Running     0          21m\nmy-release-etcd-2                               1/1     Running     0          21m\nmy-release-milvus-datacoord-664c58798d-fl75s    1/1     Running     0          21m\nmy-release-milvus-datanode-5f75686c55-xfg2r     1/1     Running     0          21m\nmy-release-milvus-indexcoord-5f98b97589-2l48r   1/1     Running     0          21m\nmy-release-milvus-indexnode-857b4ddf98-vmd75    1/1     Running     0          21m\nmy-release-milvus-proxy-6c548f787f-scspp        1/1     Running     0          21m\nmy-release-milvus-querycoord-c454f44cd-dwmwq    1/1     Running     0          21m\nmy-release-milvus-querynode-76bb4946d-lbrz6     1/1     Running     0          21m\nmy-release-milvus-rootcoord-7764c5b686-62msm    1/1     
Running     0          21m\nmy-release-minio-0                              1/1     Running     0          21m\nmy-release-minio-1                              1/1     Running     0          21m\nmy-release-minio-2                              1/1     Running     0          21m\nmy-release-minio-3                              1/1     Running     0          21m\nmy-release-pulsar-bookie-0                      1/1     Running     0          21m\nmy-release-pulsar-bookie-1                      1/1     Running     0          21m\nmy-release-pulsar-bookie-2                      1/1     Running     0          21m\nmy-release-pulsar-bookie-init-tjxpj             0/1     Completed   0          21m\nmy-release-pulsar-broker-0                      1/1     Running     0          21m\nmy-release-pulsar-proxy-0                       1/1     Running     0          21m\nmy-release-pulsar-pulsar-init-c8vvc             0/1     Completed   0          21m\nmy-release-pulsar-recovery-0                    1/1     Running     0          21m\nmy-release-pulsar-zookeeper-0                   1/1     Running     0          21m\nmy-release-pulsar-zookeeper-1                   1/1     Running     0          20m\nmy-release-pulsar-zookeeper-2                   1/1     Running     0          20m\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9\n"],"headingContent":"","anchorList":[{"label":"Actualizar Milvus Cluster con Helm Chart","href":"Upgrade-Milvus-Cluster-with-Helm-Chart","type":1,"isActive":false},{"label":"Comprobar Milvus Helm Chart","href":"Check-Milvus-Helm-Chart","type":2,"isActive":false},{"label":"Realizar una actualización continua","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Actualizar Milvus utilizando Helm","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"Migrar los metadatos","href":"Migrate-the-metadata","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME                    CHART VERSION   APP VERSION             DESCRIPTION                                       \nzilliztech/milvus       4.1.34          2.4.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.33          2.4.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.32          2.4.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.31          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.30          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.29          2.4.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.24          2.3.11                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.23          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.22          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.21          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.20          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.18          2.3.10                  Milvus is an open-source vector database built ... \nzilliztech/milvus       4.1.18          2.3.9                   Milvus is an open-source vector database built ...                                       
\nzilliztech/milvus       4.1.17          2.3.8                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.16          2.3.7                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.15          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.14          2.3.6                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.13          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.12          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.11          2.3.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.10          2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.9           2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.8           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.7           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.6           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.5           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.4           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.3           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.2           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.1           2.3.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'\n","helm repo update\nhelm upgrade my-release zilliztech/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME                NAMESPACE   REVISION    UPDATED                                 STATUS      CHART           APP VERSION    \nnew-release         default     1           2022-11-21 15:41:25.51539 +0800 CST     deployed    milvus-3.2.18   2.1.4 \n","NAME                                             READY   STATUS      RESTARTS   AGE\nmy-release-etcd-0                               1/1     Running     0          21m\nmy-release-etcd-1                               1/1     Running     0          21m\nmy-release-etcd-2                               1/1     Running     0          21m\nmy-release-milvus-datacoord-664c58798d-fl75s    1/1     Running     0          21m\nmy-release-milvus-datanode-5f75686c55-xfg2r     1/1     Running     0          21m\nmy-release-milvus-indexcoord-5f98b97589-2l48r   1/1     Running     0          21m\nmy-release-milvus-indexnode-857b4ddf98-vmd75    1/1     Running     0          21m\nmy-release-milvus-proxy-6c548f787f-scspp        1/1     Running     0          21m\nmy-release-milvus-querycoord-c454f44cd-dwmwq    1/1     Running     0          21m\nmy-release-milvus-querynode-76bb4946d-lbrz6     1/1     Running     0          
21m\nmy-release-milvus-rootcoord-7764c5b686-62msm    1/1     Running     0          21m\nmy-release-minio-0                              1/1     Running     0          21m\nmy-release-minio-1                              1/1     Running     0          21m\nmy-release-minio-2                              1/1     Running     0          21m\nmy-release-minio-3                              1/1     Running     0          21m\nmy-release-pulsar-bookie-0                      1/1     Running     0          21m\nmy-release-pulsar-bookie-1                      1/1     Running     0          21m\nmy-release-pulsar-bookie-2                      1/1     Running     0          21m\nmy-release-pulsar-bookie-init-tjxpj             0/1     Completed   0          21m\nmy-release-pulsar-broker-0                      1/1     Running     0          21m\nmy-release-pulsar-proxy-0                       1/1     Running     0          21m\nmy-release-pulsar-pulsar-init-c8vvc             0/1     Completed   0          21m\nmy-release-pulsar-recovery-0                    1/1     Running     0          21m\nmy-release-pulsar-zookeeper-0                   1/1     Running     0          21m\nmy-release-pulsar-zookeeper-1                   1/1     Running     0          20m\nmy-release-pulsar-zookeeper-2                   1/1     Running     0          20m\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix\n"],"headingContent":"Upgrade Milvus Cluster with Helm Chart","anchorList":[{"label":"Actualizar Milvus Cluster con Helm Chart","href":"Upgrade-Milvus-Cluster-with-Helm-Chart","type":1,"isActive":false},{"label":"Comprobar Milvus Helm Chart","href":"Check-Milvus-Helm-Chart","type":2,"isActive":false},{"label":"Realizar una actualización continua","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Actualizar Milvus usando Helm","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"Migrar los metadatos","href":"Migrate-the-metadata","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_cluster-helm.md b/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_cluster-helm.md
    index e6a7adf1d..72df28634 100644
    --- a/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_cluster-helm.md
    +++ b/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_cluster-helm.md
    @@ -86,10 +86,10 @@ zilliztech/milvus       4.1.1           2.3.0                   Milvus is an ope
     zilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...
     

You can choose the upgrade path for your Milvus as follows:

-- [Conduct a rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.9.
+- [Conduct a rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.13-hotfix.

Conduct a rolling upgrade

Since Milvus 2.2.3, you can configure Milvus coordinators to work in active-standby mode and enable the rolling upgrade feature for them, so that Milvus can respond to incoming requests during coordinator upgrades. In previous releases, coordinators had to be removed and then re-created during an upgrade, which could introduce some service downtime.

Rolling upgrades require coordinators to work in active-standby mode. You can use the script we provide to configure the coordinators to work in active-standby mode and start the rolling upgrade.

Based on the rolling update capabilities provided by Kubernetes, the script above enforces an ordered update of the deployments according to their dependencies. In addition, Milvus implements a mechanism to ensure that its components remain compatible with those that depend on them during the upgrade, significantly reducing potential service downtime.

The script applies only to the upgrade of Milvus installed with Helm. The following table lists the command flags available in the scripts.

@@ -123,18 +123,18 @@ zilliztech/milvus 4.1.0 2.3.0 Milvus is an ope
(flags table; recoverable row: flag o, meaning Operation, default update, required False)

Once you have made sure that all deployments in your Milvus instance are in their normal status, you can run the following command to upgrade the Milvus instance to 2.4.13-hotfix.

-sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'
+sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'
     
1. The script hard-codes the upgrade order of the deployments and cannot be changed.
2. The script uses kubectl patch to update the deployments and kubectl rollout status to watch their status (see the kubectl sketch after this list).
3. The script uses kubectl patch to update the app.kubernetes.io/version label of the deployments to the one specified after the -t flag in the command.
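A minimal sketch of what the script issues per deployment, assuming a deployment named my-release-milvus-proxy in the default namespace (the name and order are illustrative; the real script derives them from the release):

```bash
# Swap the container image (first container of the deployment).
kubectl -n default patch deployment my-release-milvus-proxy --type='json' \
  -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "milvusdb/milvus:v2.4.13-hotfix"}]'

# Update the version label to the value passed after -t.
kubectl -n default patch deployment my-release-milvus-proxy \
  -p '{"metadata": {"labels": {"app.kubernetes.io/version": "2.4.13-hotfix"}}}'

# Block until the rollout finishes before moving to the next deployment.
kubectl -n default rollout status deployment my-release-milvus-proxy
```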

Upgrade Milvus using Helm
      @@ -135,7 +135,7 @@ zilliztech/milvus 4.1.0 2.3.0 Milvus is an ope
    -

Upgrade Milvus using Helm

• Start the Milvus components with a new image.
  • -

-2. Upgrade Milvus from v2.1.x to 2.4.9

-The following commands assume that you upgrade Milvus from v2.1.4 to 2.4.9. Change them to the versions that fit your needs.

+2. Upgrade Milvus from v2.1.x to 2.4.13-hotfix

+The following commands assume that you upgrade Milvus from v2.1.4 to 2.4.13-hotfix. Change them to the versions that fit your needs.

1. Specify the Milvus instance name, the source Milvus version, and the target Milvus version.

   -./migrate.sh -i my-release -s 2.1.4 -t 2.4.9
   +./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix
       
2. Specify the namespace with -n if your Milvus is not installed in the default K8s namespace.

   -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9
   +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix
       
3. Specify the root path with -r if your Milvus is installed with a custom rootpath.

   -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev
   +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev
       
4. Specify the image tag with -w if your Milvus is installed with a custom image.

   -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9
   +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix
       
5. Set -d true if you want to automatically remove the migration pod after the migration is completed.

   -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true
   +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true
       
6. Roll back and migrate again if the migration fails.

   -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
   -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9
   +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
   +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix
       
    diff --git a/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_standalone-operator.json b/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_standalone-operator.json index 700189a6f..878d07ed3 100644 --- a/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_standalone-operator.json +++ b/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_standalone-operator.json @@ -1 +1 @@ -{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nlabels:\n app: milvus\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.9\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.9\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.9\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://raw.githubusercontent.com/zilliztech/milvus-operator/main/config/samples/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"","anchorList":[{"label":"Actualizar Milvus Standalone con Milvus Operator","href":"Upgrade-Milvus-Standalone-with-Milvus-Operator","type":1,"isActive":false},{"label":"Actualice su operador Milvus","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"Realizar una actualización continua","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Actualizar Milvus cambiando su imagen","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrar los metadatos","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: 
milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nlabels:\n app: milvus\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.13-hotfix\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.13-hotfix\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.13-hotfix\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://raw.githubusercontent.com/zilliztech/milvus-operator/main/config/samples/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"Upgrade Milvus Standalone with Milvus Operator","anchorList":[{"label":"Actualizar Milvus Standalone con Milvus Operator","href":"Upgrade-Milvus-Standalone-with-Milvus-Operator","type":1,"isActive":false},{"label":"Actualice su operador Milvus","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"Realizar una actualización continua","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Actualizar Milvus cambiando su imagen","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrar los metadatos","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_standalone-operator.md b/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_standalone-operator.md index e9a8ece85..83e3409ea 100644 --- a/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_standalone-operator.md +++ b/localization/v2.4.x/site/es/adminGuide/upgrade_milvus_standalone-operator.md @@ -46,9 +46,9 @@ helm -n milvus-operator upgrade milvus-

Once you have upgraded your Milvus operator to the latest version, you have the following options:

Conduct a rolling upgrade

Since Milvus 2.2.3, you can configure Milvus coordinators to work in active-standby mode and enable the rolling upgrade feature for them, so that Milvus can respond to incoming requests during coordinator upgrades. In previous releases, coordinators had to be removed and then re-created during an upgrade, which could introduce some service downtime.

Based on the rolling update capabilities provided by Kubernetes, the Milvus operator enforces an ordered update of the deployments according to their dependencies. In addition, Milvus implements a mechanism to ensure that its components remain compatible with those that depend on them during the upgrade, significantly reducing potential service downtime.

The rolling upgrade feature is disabled by default. You need to enable it explicitly through a configuration file.

    apiVersion: milvus.io/v1beta1
     kind: Milvus
     metadata:
    @@ -76,7 +76,7 @@ spec:
       components:
         enableRollingUpdate: true
         imageUpdateMode: rollingUpgrade # Default value, can be omitted
    -    image: milvusdb/milvus:v2.4.9
    +    image: milvusdb/milvus:v2.4.13-hotfix
     

In the configuration file above, set spec.components.enableRollingUpdate to true and set spec.components.image to the desired Milvus version.

By default, Milvus performs the rolling upgrade for coordinators in an ordered way, replacing the coordinator pod images one after another. To reduce the upgrade time, consider setting spec.components.imageUpdateMode to all so that Milvus replaces all pod images at the same time.

@@ -88,7 +88,7 @@ spec:
   components:
     enableRollingUpdate: true
     imageUpdateMode: all
-    image: milvusdb/milvus:v2.4.9
+    image: milvusdb/milvus:v2.4.13-hotfix

You can set spec.components.imageUpdateMode to rollingDowngrade to have Milvus replace the coordinator pod images with a lower version.

    apiVersion: milvus.io/v1beta1
    @@ -130,7 +130,7 @@ labels:
     spec:
       # Omit other fields ...
       components:
    -   image: milvusdb/milvus:v2.4.9
    +   image: milvusdb/milvus:v2.4.13-hotfix
     

Then, run the following to perform the upgrade:

    kubectl apply -f milvusupgrade.yaml
    @@ -150,8 +150,8 @@ spec:
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

-Since Milvus 2.2.0, the metadata is incompatible with that of previous releases. The following examples assume an upgrade from Milvus 2.1.4 to Milvus v2.4.9.

+Since Milvus 2.2.0, the metadata is incompatible with that of previous releases. The following examples assume an upgrade from Milvus 2.1.4 to Milvus v2.4.13-hotfix.

1. Create a .yaml file for the metadata migration

-Create a metadata migration file. The following is an example. You need to specify name, sourceVersion, and targetVersion in the configuration file. The following example sets name to my-release-upgrade, sourceVersion to v2.1.4, and targetVersion to v2.4.9. This means that your Milvus instance will be upgraded from v2.1.4 to v2.4.9.

+Create a metadata migration file. The following is an example. You need to specify name, sourceVersion, and targetVersion in the configuration file. The following example sets name to my-release-upgrade, sourceVersion to v2.1.4, and targetVersion to v2.4.13-hotfix. This means that your Milvus instance will be upgraded from v2.1.4 to v2.4.13-hotfix.

    apiVersion: milvus.io/v1beta1
     kind: MilvusUpgrade
     metadata:
    @@ -161,9 +161,9 @@ spec:
         namespace: default
         name: my-release
       sourceVersion: "v2.1.4"
    -  targetVersion: "v2.4.9"
    +  targetVersion: "v2.4.13-hotfix"
       # below are some omit default values:
    -  # targetImage: "milvusdb/milvus:v2.4.9"
    +  # targetImage: "milvusdb/milvus:v2.4.13-hotfix"
       # toolImage: "milvusdb/meta-migration:v2.2.0"
       # operation: upgrade
       # rollbackIfFailed: true
    @@ -173,7 +173,7 @@ spec:
     

2. Apply the new configuration

Run the following command to apply the new configuration.

    $ kubectl apply -f https://raw.githubusercontent.com/zilliztech/milvus-operator/main/config/samples/milvusupgrade.yaml
     
3. Check the status of the metadata migration

Run the following command to check the status of the metadata migration.

    kubectl describe milvus release-name
     

A ready state in the output means that the metadata migration was successful.

    diff --git a/localization/v2.4.x/site/es/embeddings/embed-with-cohere.json b/localization/v2.4.x/site/es/embeddings/embed-with-cohere.json index 40401b602..3ce35a476 100644 --- a/localization/v2.4.x/site/es/embeddings/embed-with-cohere.json +++ b/localization/v2.4.x/site/es/embeddings/embed-with-cohere.json @@ -1 +1 @@ -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","cohere_ef = CohereEmbeddingFunction(\n model_name=\"embed-english-light-v3.0\",\n api_key=\"YOUR_COHERE_API_KEY\",\n input_type=\"search_document\",\n embedding_types=[\"float\"]\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = cohere_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", cohere_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 3.43322754e-02, 1.16252899e-03, -5.25207520e-02, 1.32846832e-03,\n -6.80541992e-02, 6.10961914e-02, -7.06176758e-02, 1.48925781e-01,\n 1.54174805e-01, 1.98516846e-02, 2.43835449e-02, 3.55224609e-02,\n 1.82952881e-02, 7.57446289e-02, -2.40783691e-02, 4.40063477e-02,\n...\n 0.06359863, -0.01971436, -0.02253723, 0.00354195, 0.00222015,\n 0.00184727, 0.03408813, -0.00777817, 0.04919434, 0.01519775,\n -0.02862549, 0.04760742, -0.07891846, 0.0124054 ], dtype=float32)]\nDim: 384 (384,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = cohere_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", cohere_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-1.33361816e-02, 9.79423523e-04, -7.28759766e-02, -1.93786621e-02,\n -9.71679688e-02, 4.34875488e-02, -9.81445312e-02, 1.16882324e-01,\n 5.89904785e-02, -4.19921875e-02, 4.95910645e-02, 5.83496094e-02,\n 3.47595215e-02, -5.87463379e-03, -7.30514526e-03, 2.92816162e-02,\n...\n 0.00749969, -0.01192474, 0.02719116, 0.03347778, 0.07696533,\n 0.01409149, 0.00964355, -0.01681519, -0.0073204 , 0.00043154,\n -0.04577637, 0.03591919, -0.02807617, -0.04812622], dtype=float32)]\nDim 384 (384,)\n"],"headingContent":"","anchorList":[{"label":"Cohere","href":"Cohere","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import CohereEmbeddingFunction\n\ncohere_ef = CohereEmbeddingFunction(\n model_name=\"embed-english-light-v3.0\",\n api_key=\"YOUR_COHERE_API_KEY\",\n input_type=\"search_document\",\n embedding_types=[\"float\"]\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = cohere_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", cohere_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 3.43322754e-02, 1.16252899e-03, -5.25207520e-02, 1.32846832e-03,\n -6.80541992e-02, 6.10961914e-02, -7.06176758e-02, 1.48925781e-01,\n 1.54174805e-01, 1.98516846e-02, 2.43835449e-02, 3.55224609e-02,\n 1.82952881e-02, 7.57446289e-02, -2.40783691e-02, 4.40063477e-02,\n...\n 
0.06359863, -0.01971436, -0.02253723, 0.00354195, 0.00222015,\n 0.00184727, 0.03408813, -0.00777817, 0.04919434, 0.01519775,\n -0.02862549, 0.04760742, -0.07891846, 0.0124054 ], dtype=float32)]\nDim: 384 (384,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = cohere_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", cohere_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-1.33361816e-02, 9.79423523e-04, -7.28759766e-02, -1.93786621e-02,\n -9.71679688e-02, 4.34875488e-02, -9.81445312e-02, 1.16882324e-01,\n 5.89904785e-02, -4.19921875e-02, 4.95910645e-02, 5.83496094e-02,\n 3.47595215e-02, -5.87463379e-03, -7.30514526e-03, 2.92816162e-02,\n...\n 0.00749969, -0.01192474, 0.02719116, 0.03347778, 0.07696533,\n 0.01409149, 0.00964355, -0.01681519, -0.0073204 , 0.00043154,\n -0.04577637, 0.03591919, -0.02807617, -0.04812622], dtype=float32)]\nDim 384 (384,)\n"],"headingContent":"Cohere","anchorList":[{"label":"Cohere","href":"Cohere","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/embeddings/embed-with-cohere.md b/localization/v2.4.x/site/es/embeddings/embed-with-cohere.md index ce316690e..dcdc2d8ee 100644 --- a/localization/v2.4.x/site/es/embeddings/embed-with-cohere.md +++ b/localization/v2.4.x/site/es/embeddings/embed-with-cohere.md @@ -28,7 +28,9 @@ title: Incrustar Cohere pip install "pymilvus[model]"

Next, instantiate the CohereEmbeddingFunction class:

-cohere_ef = CohereEmbeddingFunction(
+from pymilvus.model.dense import CohereEmbeddingFunction
+
+cohere_ef = CohereEmbeddingFunction(
     model_name="embed-english-light-v3.0",
     api_key="YOUR_COHERE_API_KEY",
     input_type="search_document",
     embedding_types=["float"]
 )
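This page's code listing (the JSON above) then feeds documents to the function; abbreviated here:

```python
docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Alan Turing was the first person to conduct substantial research in AI.",
    "Born in Maida Vale, London, Turing was raised in southern England.",
]

# Encode documents and inspect the embedding dimensionality.
docs_embeddings = cohere_ef.encode_documents(docs)
print("Dim:", cohere_ef.dim, docs_embeddings[0].shape)  # e.g. 384 (384,)
```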
    diff --git a/localization/v2.4.x/site/es/embeddings/embed-with-jina.json b/localization/v2.4.x/site/es/embeddings/embed-with-jina.json
    index 3ba04b590..4cee744ff 100644
    --- a/localization/v2.4.x/site/es/embeddings/embed-with-jina.json
    +++ b/localization/v2.4.x/site/es/embeddings/embed-with-jina.json
    @@ -1 +1 @@
    -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n    model_name=\"jina-embeddings-v2-base-en\", # Defaults to `jina-embeddings-v2-base-en`\n    api_key=JINAAI_API_KEY # Provide your Jina AI API key\n)\n","docs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = jina_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", jina_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([-4.88487840e-01, -4.28095880e-01,  4.90086500e-01, -1.63274320e-01,\n        3.43437800e-01,  3.21476880e-01,  2.83173790e-02, -3.10403670e-01,\n        4.76985040e-01, -1.77410420e-01, -3.84803180e-01, -2.19224200e-01,\n       -2.52898000e-01,  6.62411900e-02, -8.58173100e-01,  1.05221800e+00,\n...\n       -2.04462400e-01,  7.14229800e-01, -1.66823000e-01,  8.72551440e-01,\n        5.53560140e-01,  8.92506300e-01, -2.39408610e-01, -4.22413560e-01,\n       -3.19551350e-01,  5.59153850e-01,  2.44338100e-01, -8.60452100e-01])]\nDim: 768 (768,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = jina_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", jina_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-5.99164660e-01, -3.49827350e-01,  8.22405160e-01, -1.18632730e-01,\n        5.78107540e-01,  1.09789170e-01,  2.91604200e-01, -3.29306450e-01,\n        2.93779640e-01, -2.17880800e-01, -6.84535440e-01, -3.79752000e-01,\n       -3.47541800e-01,  9.20846100e-02, -6.13804400e-01,  6.31312800e-01,\n...\n       -1.84993740e-02,  9.38629150e-01,  2.74858470e-02,  1.09396360e+00,\n        3.96270750e-01,  7.44445800e-01, -1.95404050e-01, -6.08383200e-01,\n       -3.75076300e-01,  3.87512200e-01,  8.11889650e-01, -3.76407620e-01])]\nDim 768 (768,)\n"],"headingContent":"","anchorList":[{"label":"Jina AI","href":"Jina-AI","type":1,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n    model_name=\"jina-embeddings-v3\", # Defaults to `jina-embeddings-v3`\n    api_key=JINAAI_API_KEY, # Provide your Jina AI API key\n    task=\"retrieval.passage\", # Specify the task\n    dimensions=1024, # Defaults to 1024\n)\n","\n```python\ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = jina_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", jina_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([9.80641991e-02, -8.51697400e-02,  7.36531913e-02,  1.42558888e-02,\n       -2.23589484e-02,  1.68494112e-03, -3.50753777e-02, -3.11530549e-02,\n       -3.26012149e-02,  5.04568312e-03,  3.69836427e-02,  3.48948985e-02,\n        8.19722563e-03,  5.88679723e-02, -6.71099266e-03, -1.82369724e-02,\n...\n        2.48654783e-02,  3.43279652e-02, -1.66154150e-02, -9.90478322e-03,\n       -2.96043139e-03, -8.57473817e-03, -7.39028037e-04,  6.25024503e-03,\n       -1.08831357e-02, -4.00776342e-02,  3.25369164e-02, -1.42691191e-03])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = jina_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", jina_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([8.79201014e-03,  1.47551354e-02,  4.02722731e-02, -2.52991207e-02,\n        1.12719582e-02,  3.75947170e-02,  3.97946090e-02, -7.36681819e-02,\n       -2.17952449e-02, -1.16298944e-02, -6.83426252e-03, -5.12507409e-02,\n        5.26071340e-02,  6.75181448e-02,  3.92445624e-02, -1.40817231e-02,\n...\n        8.81703943e-03,  4.24629413e-02, -2.32944116e-02, -2.05193572e-02,\n       -3.22035812e-02,  2.81896023e-03,  3.85326855e-02,  3.64372656e-02,\n       -1.65050142e-02, -4.26847413e-02,  2.02664156e-02, -1.72684863e-02])]\nDim 1024 (1024,)\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n    model_name=\"jina-embeddings-v3\", # Defaults to `jina-embeddings-v3`\n    api_key=JINA_API_KEY, # Provide your Jina AI API key\n    task=\"text-matching\",\n    dimensions=1024, # Defaults to 1024\n)\n\ntexts = [\n    \"Follow the white rabbit.\",  # English\n    \"Sigue al conejo blanco.\",  # Spanish\n    \"Suis le lapin blanc.\",  # French\n    \"跟着白兔走。\",  # Chinese\n    \"اتبع الأرنب الأبيض.\",  # Arabic\n    \"Folge dem weißen Kaninchen.\",  # German\n]\n\nembeddings = jina_ef(texts)\n\n# Compute similarities\nprint(embeddings[0] @ embeddings[1].T)\n"],"headingContent":"Jina AI","anchorList":[{"label":"Jina AI","href":"Jina-AI","type":1,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/es/embeddings/embed-with-jina.md b/localization/v2.4.x/site/es/embeddings/embed-with-jina.md
    index b12881a7e..5ddcdad01 100644
    --- a/localization/v2.4.x/site/es/embeddings/embed-with-jina.md
    +++ b/localization/v2.4.x/site/es/embeddings/embed-with-jina.md
    @@ -31,19 +31,36 @@ pip install "pymilvus[model]"
     
    from pymilvus.model.dense import JinaEmbeddingFunction
     
     jina_ef = JinaEmbeddingFunction(
    -    model_name="jina-embeddings-v2-base-en", # Defaults to `jina-embeddings-v2-base-en`
    -    api_key=JINAAI_API_KEY # Provide your Jina AI API key
    +    model_name="jina-embeddings-v3", # Defaults to `jina-embeddings-v3`
    +    api_key=JINAAI_API_KEY, # Provide your Jina AI API key
    +    task="retrieval.passage", # Specify the task
    +    dimensions=1024, # Defaults to 1024
     )
     

    Parámetros:

    • model_name (cadena)

      -

      Nombre del modelo de incrustación de Jina AI que se utilizará para la codificación. Puede especificar cualquiera de los nombres de modelo de incrustación de Jina AI disponibles, por ejemplo, jina-embeddings-v2-base-en, jina-embeddings-v2-small-en, etc. Si deja este parámetro sin especificar, se utilizará jina-embeddings-v2-base-en. Para obtener una lista de los modelos disponibles, consulte Jina Embeddings.

    • +

      Nombre del modelo de incrustación de Jina AI que se utilizará para la codificación. Puede especificar cualquiera de los nombres de modelo de incrustación de Jina AI disponibles, por ejemplo, jina-embeddings-v3, jina-embeddings-v2-base-en, etc. Si deja este parámetro sin especificar, se utilizará jina-embeddings-v3. Para obtener una lista de los modelos disponibles, consulte Jina Embeddings.

    • api_key (cadena)

      La clave API para acceder a la API de Jina AI.

    • +
    • task (cadena)

      +

      Tipo de entrada que se pasa al modelo. Requerido para modelos de incrustación v3 y superiores.

      +
        +
      • "retrieval.passage": Se utiliza para codificar documentos de gran tamaño en tareas de recuperación en el momento de la indexación.
      • +
      • "retrieval.query": Se utiliza para codificar las consultas o preguntas de los usuarios en las tareas de recuperación.
      • +
      • "classification": Se utiliza para codificar texto en tareas de clasificación de texto.
      • +
      • "text-matching": Se utiliza para codificar texto en tareas de comparación de similitudes, como la medición de la similitud entre dos frases.
      • +
      • "clustering": Se utiliza para tareas de clustering o reranking.
      • +
    • +
    • dimensions (int)

      +

      Número de dimensiones que deben tener las incrustaciones resultantes. El valor predeterminado es 1024. Sólo se admite para modelos de incrustación v3 y superiores.

    • +
    • late_chunking (bool)

      +

Este parámetro controla si se utiliza el nuevo método de fragmentación que Jina AI introdujo recientemente para codificar un lote de frases. El valor predeterminado es False. Si se establece en True, la API de Jina AI concatenará todas las frases del campo de entrada y las introducirá como una única cadena en el modelo. Internamente, el modelo incrusta esta larga cadena concatenada y, a continuación, realiza un chunking tardío, devolviendo una lista de incrustaciones que coincide con el tamaño de la lista de entrada.
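A modo de ilustración, el siguiente boceto muestra cómo se activaría late_chunking al instanciar la clase (las frases de ejemplo y la variable JINAAI_API_KEY son hipotéticas):

```python
from pymilvus.model.dense import JinaEmbeddingFunction

# Boceto: chunking tardío; el lote completo se codifica como un único contexto.
jina_ef_late = JinaEmbeddingFunction(
    model_name="jina-embeddings-v3",
    api_key=JINAAI_API_KEY,      # Proporcione su clave API de Jina AI
    task="retrieval.passage",
    dimensions=1024,
    late_chunking=True,          # concatena las frases antes de incrustarlas
)

# Pase juntas las frases que pertenecen a un mismo documento.
chunks = [
    "Milvus is a vector database.",
    "It supports dense and sparse embeddings.",
]
chunk_embeddings = jina_ef_late.encode_documents(chunks)
print(len(chunk_embeddings))  # una incrustación por frase de entrada
```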

    -

    Para crear incrustaciones para documentos, utilice el método encode_documents():

    -
    docs = [
    +

    Para crear incrustaciones de documentos, utilice el método encode_documents(). Este método está diseñado para incrustaciones de documentos en tareas de recuperación asimétrica, como la indexación de documentos para tareas de búsqueda o recomendación. Este método utiliza retrieval.passage como tarea.

    +
    
+docs = [
         "Artificial intelligence was founded as an academic discipline in 1956.",
         "Alan Turing was the first person to conduct substantial research in AI.",
         "Born in Maida Vale, London, Turing was raised in southern England.",
    @@ -57,17 +74,17 @@ docs_embeddings = jina_ef.encode_documents(docs)
     print("Dim:", jina_ef.dim, docs_embeddings[0].shape)
     

    El resultado esperado es similar al siguiente:

    -
    Embeddings: [array([-4.88487840e-01, -4.28095880e-01,  4.90086500e-01, -1.63274320e-01,
    -        3.43437800e-01,  3.21476880e-01,  2.83173790e-02, -3.10403670e-01,
    -        4.76985040e-01, -1.77410420e-01, -3.84803180e-01, -2.19224200e-01,
    -       -2.52898000e-01,  6.62411900e-02, -8.58173100e-01,  1.05221800e+00,
    +
    Embeddings: [array([9.80641991e-02, -8.51697400e-02,  7.36531913e-02,  1.42558888e-02,
    +       -2.23589484e-02,  1.68494112e-03, -3.50753777e-02, -3.11530549e-02,
    +       -3.26012149e-02,  5.04568312e-03,  3.69836427e-02,  3.48948985e-02,
    +        8.19722563e-03,  5.88679723e-02, -6.71099266e-03, -1.82369724e-02,
     ...
    -       -2.04462400e-01,  7.14229800e-01, -1.66823000e-01,  8.72551440e-01,
    -        5.53560140e-01,  8.92506300e-01, -2.39408610e-01, -4.22413560e-01,
    -       -3.19551350e-01,  5.59153850e-01,  2.44338100e-01, -8.60452100e-01])]
    -Dim: 768 (768,)
    +        2.48654783e-02,  3.43279652e-02, -1.66154150e-02, -9.90478322e-03,
    +       -2.96043139e-03, -8.57473817e-03, -7.39028037e-04,  6.25024503e-03,
    +       -1.08831357e-02, -4.00776342e-02,  3.25369164e-02, -1.42691191e-03])]
    +Dim: 1024 (1024,)
     
    -

    Para crear incrustaciones para consultas, utilice el método encode_queries():

    +

    Para crear incrustaciones para consultas, utilice el método encode_queries(). Este método está diseñado para incrustaciones de consultas en tareas de recuperación asimétricas, como consultas de búsqueda o preguntas. Este método utiliza retrieval.query como tarea.

    queries = ["When was artificial intelligence founded", 
                "Where was Alan Turing born?"]
     
    @@ -77,13 +94,37 @@ query_embeddings = jina_ef.encode_queries(queries)
     print("Dim", jina_ef.dim, query_embeddings[0].shape)
     

    El resultado esperado es similar al siguiente:

    -
    Embeddings: [array([-5.99164660e-01, -3.49827350e-01,  8.22405160e-01, -1.18632730e-01,
    -        5.78107540e-01,  1.09789170e-01,  2.91604200e-01, -3.29306450e-01,
    -        2.93779640e-01, -2.17880800e-01, -6.84535440e-01, -3.79752000e-01,
    -       -3.47541800e-01,  9.20846100e-02, -6.13804400e-01,  6.31312800e-01,
    +
    Embeddings: [array([8.79201014e-03,  1.47551354e-02,  4.02722731e-02, -2.52991207e-02,
    +        1.12719582e-02,  3.75947170e-02,  3.97946090e-02, -7.36681819e-02,
    +       -2.17952449e-02, -1.16298944e-02, -6.83426252e-03, -5.12507409e-02,
    +        5.26071340e-02,  6.75181448e-02,  3.92445624e-02, -1.40817231e-02,
     ...
    -       -1.84993740e-02,  9.38629150e-01,  2.74858470e-02,  1.09396360e+00,
    -        3.96270750e-01,  7.44445800e-01, -1.95404050e-01, -6.08383200e-01,
    -       -3.75076300e-01,  3.87512200e-01,  8.11889650e-01, -3.76407620e-01])]
    -Dim 768 (768,)
    +        8.81703943e-03,  4.24629413e-02, -2.32944116e-02, -2.05193572e-02,
    +       -3.22035812e-02,  2.81896023e-03,  3.85326855e-02,  3.64372656e-02,
    +       -1.65050142e-02, -4.26847413e-02,  2.02664156e-02, -1.72684863e-02])]
    +Dim 1024 (1024,)
    +
    +

    Para crear incrustaciones de entradas para la comparación de similitudes (como las tareas STS o de recuperación simétrica), la clasificación de textos, la agrupación o las tareas de reordenación, utilice el valor del parámetro task adecuado al instanciar la clase JinaEmbeddingFunction.

    +
    from pymilvus.model.dense import JinaEmbeddingFunction
    +
    +jina_ef = JinaEmbeddingFunction(
    +    model_name="jina-embeddings-v3", # Defaults to `jina-embeddings-v3`
    +    api_key=JINA_API_KEY, # Provide your Jina AI API key
    +    task="text-matching",
    +    dimensions=1024, # Defaults to 1024
    +)
    +
    +texts = [
    +    "Follow the white rabbit.",  # English
    +    "Sigue al conejo blanco.",  # Spanish
    +    "Suis le lapin blanc.",  # French
    +    "跟着白兔走。",  # Chinese
    +    "اتبع الأرنب الأبيض.",  # Arabic
    +    "Folge dem weißen Kaninchen.",  # German
    +]
    +
    +embeddings = jina_ef(texts)
    +
    +# Compute similarities
    +print(embeddings[0] @ embeddings[1].T)
     
    diff --git a/localization/v2.4.x/site/es/embeddings/embed-with-voyage.json b/localization/v2.4.x/site/es/embeddings/embed-with-voyage.json index c5fe18415..ba7547304 100644 --- a/localization/v2.4.x/site/es/embeddings/embed-with-voyage.json +++ b/localization/v2.4.x/site/es/embeddings/embed-with-voyage.json @@ -1 +1 @@ -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import VoyageEmbeddingFunction\n\nvoyage_ef = VoyageEmbeddingFunction(\n model_name=\"voyage-lite-02-instruct\", # Defaults to `voyage-2`\n api_key=VOYAGE_API_KEY # Provide your Voyage API key\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = voyage_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", voyage_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 0.02582654, -0.00907086, -0.04604037, ..., -0.01227521,\n 0.04420955, -0.00038829]), array([ 0.03844212, -0.01597065, -0.03728884, ..., -0.02118733,\n 0.03349845, 0.0065346 ]), array([ 0.05143557, -0.01096631, -0.02690451, ..., -0.02416254,\n 0.07658645, 0.03064499])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = voyage_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", voyage_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([ 0.01733501, -0.0230672 , -0.05208827, ..., -0.00957995,\n 0.04493361, 0.01485138]), array([ 0.05937521, -0.00729363, -0.02184347, ..., -0.02107683,\n 0.05706626, 0.0263358 ])]\nDim 1024 (1024,)\n"],"headingContent":"","anchorList":[{"label":"Voyage","href":"Voyage","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import VoyageEmbeddingFunction\n\nvoyage_ef = VoyageEmbeddingFunction(\n model_name=\"voyage-3\", # Defaults to `voyage-3`\n api_key=VOYAGE_API_KEY # Provide your Voyage API key\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = voyage_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", voyage_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 0.02582654, -0.00907086, -0.04604037, ..., -0.01227521,\n 0.04420955, -0.00038829]), array([ 0.03844212, -0.01597065, -0.03728884, ..., -0.02118733,\n 0.03349845, 0.0065346 ]), array([ 0.05143557, -0.01096631, -0.02690451, ..., -0.02416254,\n 0.07658645, 0.03064499])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = voyage_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", voyage_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([ 0.01733501, -0.0230672 , -0.05208827, ..., -0.00957995,\n 0.04493361, 0.01485138]), array([ 0.05937521, -0.00729363, -0.02184347, ..., -0.02107683,\n 0.05706626, 0.0263358 ])]\nDim 1024 
(1024,)\n"],"headingContent":"Voyage","anchorList":[{"label":"Voyage","href":"Voyage","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/embeddings/embed-with-voyage.md b/localization/v2.4.x/site/es/embeddings/embed-with-voyage.md index 5a9b0d391..a83585ede 100644 --- a/localization/v2.4.x/site/es/embeddings/embed-with-voyage.md +++ b/localization/v2.4.x/site/es/embeddings/embed-with-voyage.md @@ -30,13 +30,13 @@ pip install "pymilvus[model]"
    from pymilvus.model.dense import VoyageEmbeddingFunction
     
     voyage_ef = VoyageEmbeddingFunction(
    -    model_name="voyage-lite-02-instruct", # Defaults to `voyage-2`
    +    model_name="voyage-3", # Defaults to `voyage-3`
         api_key=VOYAGE_API_KEY # Provide your Voyage API key
     )
     

    Parámetros:

      -
    • model_name (cadena) El nombre del modelo Voyage a utilizar para la codificación. Puede especificar cualquiera de los nombres de modelo Voyage disponibles, por ejemplo, voyage-law-2, voyage-code-2, etc. Si deja este parámetro sin especificar, se utilizará voyage-2. Para obtener una lista de los modelos disponibles, consulte la documentación oficial de Voyage.
    • +
    • model_name (cadena) El nombre del modelo Voyage a utilizar para la codificación. Puede especificar cualquiera de los nombres de modelo Voyage disponibles, por ejemplo, voyage-3-lite, voyage-finance-2, etc. Si deja este parámetro sin especificar, se utilizará voyage-3. Para obtener una lista de los modelos disponibles, consulte la documentación oficial de Voyage.
    • api_key (cadena) La clave API para acceder a la API de Voyage. Para más información sobre cómo crear una clave API, consulte Clave API y Cliente Python.

    Para crear incrustaciones para documentos, utilice el método encode_documents():

    diff --git a/localization/v2.4.x/site/es/embeddings/embeddings.json b/localization/v2.4.x/site/es/embeddings/embeddings.json index 9b1c1c292..b655c6b3a 100644 --- a/localization/v2.4.x/site/es/embeddings/embeddings.json +++ b/localization/v2.4.x/site/es/embeddings/embeddings.json @@ -1 +1 @@ -{"codeList":["pip install \"pymilvus[model]\"\n","from pymilvus import model\n\n# This will download \"all-MiniLM-L6-v2\", a light weight model.\nef = model.DefaultEmbeddingFunction()\n\n# Data from which embeddings are to be generated \ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nembeddings = ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", ef.dim, embeddings[0].shape)\n","Embeddings: [array([-3.09392996e-02, -1.80662833e-02, 1.34775648e-02, 2.77156215e-02,\n -4.86349640e-03, -3.12581174e-02, -3.55921760e-02, 5.76934684e-03,\n 2.80773244e-03, 1.35783911e-01, 3.59678417e-02, 6.17732145e-02,\n...\n -4.61330153e-02, -4.85207550e-02, 3.13997865e-02, 7.82178566e-02,\n -4.75336798e-02, 5.21207601e-02, 9.04406682e-02, -5.36676683e-02],\n dtype=float32)]\nDim: 384 (384,)\n","from pymilvus.model.hybrid import BGEM3EmbeddingFunction\nfrom pymilvus import (\n utility,\n FieldSchema, CollectionSchema, DataType,\n Collection, AnnSearchRequest, RRFRanker, connections,\n)\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Who started AI research?\"\n\n# BGE-M3 model can embed texts as dense and sparse vectors.\n# It is included in the optional `model` module in pymilvus, to install it,\n# simply run \"pip install pymilvus[model]\".\n\nbge_m3_ef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\n\ndocs_embeddings = bge_m3_ef(docs)\nquery_embeddings = bge_m3_ef([query])\n","from pymilvus.model.sparse import BM25EmbeddingFunction\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Where was Turing born?\"\nbm25_ef = BM25EmbeddingFunction()\n\n# 2. fit the corpus to get BM25 model parameters on your documents.\nbm25_ef.fit(docs)\n\n# 3. store the fitted parameters to disk to expedite future processing.\nbm25_ef.save(\"bm25_params.json\")\n\n# 4. 
load the saved params\nnew_bm25_ef = BM25EmbeddingFunction()\nnew_bm25_ef.load(\"bm25_params.json\")\n\ndocs_embeddings = new_bm25_ef.encode_documents(docs)\nquery_embeddings = new_bm25_ef.encode_queries([query])\nprint(\"Dim:\", new_bm25_ef.dim, list(docs_embeddings)[0].shape)\n","Dim: 21 (1, 21)\n"],"headingContent":"","anchorList":[{"label":"Visión general de la incrustación","href":"Embedding-Overview","type":1,"isActive":false},{"label":"Ejemplo 1: Usar la función de incrustación por defecto para generar vectores densos","href":"Example-1-Use-default-embedding-function-to-generate-dense-vectors","type":2,"isActive":false},{"label":"Ejemplo 2: Generar vectores densos y dispersos en una llamada con el modelo BGE M3","href":"Example-2-Generate-dense-and-sparse-vectors-in-one-call-with-BGE-M3-model","type":2,"isActive":false},{"label":"Ejemplo 3: Generar vectores dispersos utilizando el modelo BM25","href":"Example-3-Generate--sparse-vectors-using-BM25-model","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["pip install \"pymilvus[model]\"\n","from pymilvus import model\n\n# This will download \"all-MiniLM-L6-v2\", a light weight model.\nef = model.DefaultEmbeddingFunction()\n\n# Data from which embeddings are to be generated \ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nembeddings = ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", ef.dim, embeddings[0].shape)\n","Embeddings: [array([-3.09392996e-02, -1.80662833e-02, 1.34775648e-02, 2.77156215e-02,\n -4.86349640e-03, -3.12581174e-02, -3.55921760e-02, 5.76934684e-03,\n 2.80773244e-03, 1.35783911e-01, 3.59678417e-02, 6.17732145e-02,\n...\n -4.61330153e-02, -4.85207550e-02, 3.13997865e-02, 7.82178566e-02,\n -4.75336798e-02, 5.21207601e-02, 9.04406682e-02, -5.36676683e-02],\n dtype=float32)]\nDim: 384 (384,)\n","from pymilvus.model.hybrid import BGEM3EmbeddingFunction\nfrom pymilvus import (\n utility,\n FieldSchema, CollectionSchema, DataType,\n Collection, AnnSearchRequest, RRFRanker, connections,\n)\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Who started AI research?\"\n\n# BGE-M3 model can embed texts as dense and sparse vectors.\n# It is included in the optional `model` module in pymilvus, to install it,\n# simply run \"pip install pymilvus[model]\".\n\nbge_m3_ef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\n\ndocs_embeddings = bge_m3_ef(docs)\nquery_embeddings = bge_m3_ef([query])\n","from pymilvus.model.sparse import BM25EmbeddingFunction\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Where was Turing born?\"\nbm25_ef = BM25EmbeddingFunction()\n\n# 2. fit the corpus to get BM25 model parameters on your documents.\nbm25_ef.fit(docs)\n\n# 3. 
store the fitted parameters to disk to expedite future processing.\nbm25_ef.save(\"bm25_params.json\")\n\n# 4. load the saved params\nnew_bm25_ef = BM25EmbeddingFunction()\nnew_bm25_ef.load(\"bm25_params.json\")\n\ndocs_embeddings = new_bm25_ef.encode_documents(docs)\nquery_embeddings = new_bm25_ef.encode_queries([query])\nprint(\"Dim:\", new_bm25_ef.dim, list(docs_embeddings)[0].shape)\n","Dim: 21 (1, 21)\n"],"headingContent":"Embedding Overview","anchorList":[{"label":"Visión general de la incrustación","href":"Embedding-Overview","type":1,"isActive":false},{"label":"Ejemplo 1: Utilizar la función de incrustación por defecto para generar vectores densos","href":"Example-1-Use-default-embedding-function-to-generate-dense-vectors","type":2,"isActive":false},{"label":"Ejemplo 2: Generar vectores densos y dispersos en una llamada con el modelo BGE M3","href":"Example-2-Generate-dense-and-sparse-vectors-in-one-call-with-BGE-M3-model","type":2,"isActive":false},{"label":"Ejemplo 3: Generar vectores dispersos utilizando el modelo BM25","href":"Example-3-Generate--sparse-vectors-using-BM25-model","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/embeddings/embeddings.md b/localization/v2.4.x/site/es/embeddings/embeddings.md index e76cd4d1b..7414586e0 100644 --- a/localization/v2.4.x/site/es/embeddings/embeddings.md +++ b/localization/v2.4.x/site/es/embeddings/embeddings.md @@ -19,13 +19,13 @@ title: Visión general de la incrustación d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

    La incrustación es un concepto de aprendizaje automático para mapear datos en un espacio de alta dimensión, donde los datos de semántica similar se colocan cerca unos de otros. Al tratarse normalmente de una red neuronal profunda de BERT u otras familias de transformadores, el modelo de incrustación puede representar eficazmente la semántica de texto, imágenes y otros tipos de datos con una serie de números conocidos como vectores. Una característica clave de estos modelos es que la distancia matemática entre vectores en el espacio de alta dimensión puede indicar la similitud de la semántica del texto o las imágenes originales. Esta propiedad desbloquea muchas aplicaciones de recuperación de información, como los motores de búsqueda web como Google y Bing, la búsqueda de productos y las recomendaciones en sitios de comercio electrónico, y el recientemente popular paradigma de Generación Aumentada de Recuperación (RAG) en IA generativa.

    +

    La incrustación es un concepto de aprendizaje automático para mapear datos en un espacio de alta dimensión, donde los datos de semántica similar se colocan cerca unos de otros. Al tratarse normalmente de una red neuronal profunda de BERT u otras familias de transformadores, el modelo de incrustación puede representar eficazmente la semántica de texto, imágenes y otros tipos de datos con una serie de números conocidos como vectores. Una característica clave de estos modelos es que la distancia matemática entre vectores en el espacio de alta dimensión puede indicar la similitud de la semántica del texto o las imágenes originales. Esta propiedad abre muchas aplicaciones de recuperación de información, como los motores de búsqueda web como Google y Bing, la búsqueda de productos y las recomendaciones en sitios de comercio electrónico, y el paradigma recientemente popular de la Generación Aumentada de Recuperación (RAG) en la IA generativa.

    Existen dos categorías principales de incrustaciones, cada una de las cuales produce un tipo diferente de vector:

    • Incrustación densa: La mayoría de los modelos de incrustación representan la información como un vector de coma flotante de cientos a miles de dimensiones. Los resultados se denominan vectores "densos", ya que la mayoría de las dimensiones tienen valores distintos de cero. Por ejemplo, el popular modelo de incrustación de código abierto BAAI/bge-base-en-v1.5 genera vectores de 768 números en coma flotante (vector flotante de 768 dimensiones).

    • Incrustación dispersa: Por el contrario, los vectores de salida de las incrustaciones dispersas tienen en su mayoría dimensiones cero, es decir, vectores "dispersos". Estos vectores suelen tener dimensiones mucho mayores (decenas de miles o más), lo que viene determinado por el tamaño del vocabulario de tokens. Los vectores dispersos pueden generarse mediante redes neuronales profundas o análisis estadísticos de corpus de texto. Debido a su interpretabilidad y a su mejor capacidad de generalización fuera del dominio, los desarrolladores adoptan cada vez más las incrustaciones dispersas como complemento de las incrustaciones densas.
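Como referencia rápida, el siguiente boceto (construido con las funciones de incrustación de PyMilvus que se muestran más adelante en esta misma página) contrasta la forma de ambos tipos de vectores:

```python
from pymilvus import model
from pymilvus.model.sparse import BM25EmbeddingFunction

docs = ["Artificial intelligence was founded as an academic discipline in 1956."]

# Vector denso: 384 dimensiones, casi todas con valores distintos de cero.
dense_ef = model.DefaultEmbeddingFunction()  # descarga "all-MiniLM-L6-v2"
dense_vec = dense_ef.encode_documents(docs)[0]
print("Denso:", dense_vec.shape)  # (384,)

# Vector disperso: tantas dimensiones como el vocabulario, casi todas cero.
bm25_ef = BM25EmbeddingFunction()
bm25_ef.fit(docs)  # ajusta los parámetros BM25 sobre el corpus
sparse_vecs = bm25_ef.encode_documents(docs)
print("Disperso:", list(sparse_vecs)[0].shape)
```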

    -

    Milvus es una base de datos vectorial diseñada para la gestión, el almacenamiento y la recuperación de datos vectoriales. Gracias a la integración de los principales modelos de incrustación y reordenación, puede transformar fácilmente el texto original en vectores susceptibles de búsqueda o reordenar los resultados utilizando potentes modelos para obtener resultados más precisos para la GAR. Esta integración simplifica la transformación del texto y elimina la necesidad de componentes adicionales de incrustación o renumeración, lo que agiliza el desarrollo y la validación de la GAR.

    +

Milvus es una base de datos vectorial diseñada para la gestión, el almacenamiento y la recuperación de datos vectoriales. Gracias a la integración de los principales modelos de incrustación y reordenación, puede transformar fácilmente el texto original en vectores susceptibles de búsqueda o reordenar los resultados utilizando potentes modelos para obtener resultados más precisos para la RAG. Esta integración simplifica la transformación del texto y elimina la necesidad de componentes adicionales de incrustación o reordenación, agilizando así el desarrollo y la validación de la RAG.

    Para crear incrustaciones en acción, consulte Uso del modelo de PyMilvus para generar incrustaciones de texto.

@@ -40,9 +40,13 @@ title: Visión general de la incrustación
| Función de incrustación | Tipo | API o código abierto |
| --- | --- | --- |
| voyageai | Denso | API |
| jina | Denso | API |
| cohere | Denso | API |
| Instructor | Denso | Fuente abierta |
| Mistral AI | Denso | API |
| Nomic | Denso | API |
| mGTE | Híbrido | Fuente abierta |
    -

    Ejemplo 1: Usar la función de incrustación por defecto para generar vectores densos

    En este ejemplo, utilizamos el modelo híbrido BGE M3 para incrustar texto en vectores densos y dispersos y utilizarlos para recuperar documentos relevantes. Los pasos generales son los siguientes


    1. Incrustar el texto como vectores densos y dispersos utilizando el modelo BGE-M3;

    2. Crear una colección Milvus para almacenar los vectores densos y dispersos;

3.

diff --git a/localization/v2.4.x/site/es/faq/operational_faq.json b/localization/v2.4.x/site/es/faq/operational_faq.json
index f10ad2034..e3d7b89c7 100644
--- a/localization/v2.4.x/site/es/faq/operational_faq.json
+++ b/localization/v2.4.x/site/es/faq/operational_faq.json
@@ -1 +1 @@
-{"codeList":["{\n \"registry-mirrors\": [\"https://registry.docker-cn.com\"]\n}\n","$ lscpu | grep -e sse4_2 -e avx -e avx2 -e avx512\n","pip install pymilvus>=2.4.2\n"],"headingContent":"","anchorList":[{"label":"FAQ Operativas","href":"Operational-FAQ","type":1,"isActive":false}]}
\ No newline at end of file
+{"codeList":["{\n \"registry-mirrors\": [\"https://registry.docker-cn.com\"]\n}\n","$ lscpu | grep -e sse4_2 -e avx -e avx2 -e avx512\n","pip install pymilvus>=2.4.2\n","# Python Example: result of len() str cannot be used as \"max-length\" in Milvus \n>>> s = \"你好,世界!\"\n>>> len(s) # Number of characters of s.\n6\n>>> len(bytes(s, \"utf-8\")) # Size in bytes of s, max-length in Milvus.\n18\n"],"headingContent":"Operational FAQ","anchorList":[{"label":"FAQ Operativas","href":"Operational-FAQ","type":1,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.4.x/site/es/faq/operational_faq.md b/localization/v2.4.x/site/es/faq/operational_faq.md
index 52f692d71..ca5b9d751 100644
--- a/localization/v2.4.x/site/es/faq/operational_faq.md
+++ b/localization/v2.4.x/site/es/faq/operational_faq.md
@@ -3,7 +3,7 @@ id: operational_faq.md
 summary: >-
   Encuentre respuestas a las preguntas más frecuentes sobre las operaciones en
   Milvus.
-title: Preguntas más frecuentes
+title: FAQ Operativas
---

      FAQ Operativas

    -

    ¿Es Docker la única forma de instalar y ejecutar Milvus?

    Docker es una forma eficiente de desplegar Milvus, pero no la única. También puede desplegar Milvus desde el código fuente. Esto requiere Ubuntu (18.04 o superior) o CentOS (7 o superior). Vea Construir Milvus desde el código fuente para más información.

    +

    ¿Es Docker la única forma de instalar y ejecutar Milvus?

    Docker es una forma eficiente de desplegar Milvus, pero no la única. También puede desplegar Milvus desde el código fuente. Esto requiere Ubuntu (18.04 o superior) o CentOS (7 o superior). Vea Construir Milvus desde el código fuente para más información.

    ¿Cuáles son los principales factores que afectan a la recuperación?

    La recuperación se ve afectada principalmente por el tipo de índice y los parámetros de búsqueda.

Para índices FLAT, Milvus realiza una búsqueda exhaustiva dentro de una colección, con una recuperación del 100%.

    En los índices IVF, el parámetro nprobe determina el alcance de la búsqueda dentro de la colección. Aumentar nprobe aumenta la proporción de vectores buscados y la recuperación, pero disminuye el rendimiento de la consulta.

    En los índices HNSW, el parámetro ef determina la amplitud de la búsqueda en el grafo. Aumentar ef incrementa el número de puntos buscados en el gráfico y la recuperación, pero disminuye el rendimiento de la consulta.

    Para más información, véase Indexación vectorial.
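A título orientativo, un boceto de cómo se ajustan estos parámetros con el cliente ORM de PyMilvus (el nombre de la colección, el campo y el vector de consulta son hipotéticos; use el parámetro que corresponda al índice creado sobre el campo):

```python
from pymilvus import Collection

# Requiere una conexión previa (connections.connect) y una colección cargada.
collection = Collection("demo_collection")  # colección hipotética
query_vec = [[0.1] * 128]                   # vector de consulta de ejemplo

# Con un índice IVF: aumentar nprobe amplía la búsqueda y mejora la recuperación.
res = collection.search(
    data=query_vec,
    anns_field="embedding",
    param={"metric_type": "L2", "params": {"nprobe": 64}},
    limit=10,
)

# Con un índice HNSW: aumentar ef amplía la búsqueda en el grafo.
res = collection.search(
    data=query_vec,
    anns_field="embedding",
    param={"metric_type": "L2", "params": {"ef": 128}},
    limit=10,
)
```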

    -

    ¿Por qué no surten efecto mis cambios en los archivos de configuración?

    Milvus no admite la modificación de los archivos de configuración durante el tiempo de ejecución. Debe reiniciar Milvus Docker para que los cambios en los archivos de configuración surtan efecto.

    +

    ¿Por qué no surten efecto mis cambios en los archivos de configuración?

    Milvus no soporta la modificación de los archivos de configuración durante el tiempo de ejecución. Debe reiniciar Milvus Docker para que los cambios en los archivos de configuración surtan efecto.

    ¿Cómo sé si Milvus se ha iniciado correctamente?

    Si Milvus se inicia utilizando Docker Compose, ejecute docker ps para observar cuántos contenedores Docker se están ejecutando y comprobar si los servicios de Milvus se han iniciado correctamente.

    Para Milvus standalone, debería poder observar al menos tres contenedores Docker en ejecución, siendo uno el servicio Milvus y los otros dos la gestión etcd y el servicio de almacenamiento. Para obtener más información, consulte Instalación de Milvus Standalone.

    ¿Por qué la hora en los archivos de registro es diferente de la hora del sistema?

    La diferencia horaria suele deberse a que la máquina anfitriona no utiliza el Tiempo Universal Coordinado (UTC).

    @@ -48,7 +48,7 @@ title: Preguntas más frecuentes
    $ lscpu | grep -e sse4_2 -e avx -e avx2 -e avx512
     

    ¿Por qué Milvus devuelve illegal instruction durante el arranque?

    Milvus requiere que su CPU soporte un conjunto de instrucciones SIMD: SSE4.2, AVX, AVX2, o AVX512. La CPU debe soportar al menos uno de estos conjuntos para asegurar que Milvus funciona con normalidad. Un error illegal instruction devuelto durante el arranque sugiere que su CPU no soporta ninguno de los cuatro conjuntos de instrucciones anteriores.

    -

    Consulte la compatibilidad de la CPU con el conjunto de instrucciones SIMD.

    +

    Consulte la compatibilidad de la CPU con el conjunto de instrucciones SIMD.

    ¿Puedo instalar Milvus en Windows?

    Sí. Puede instalar Milvus en Windows compilando desde el código fuente o desde un paquete binario.

    Consulte Ejecutar Milvus en Windows para saber cómo instalar Milvus en Windows.

    Obtuve un error al instalar pymilvus en Windows. ¿Qué debo hacer?

    No se recomienda instalar PyMilvus en Windows. Pero si tiene que instalar PyMilvus en Windows pero obtiene un error, intente instalarlo en un entorno Conda. Vea Instalar Milvus SDK para más información sobre cómo instalar PyMilvus en el entorno Conda.

    @@ -74,8 +74,33 @@ title: Preguntas más frecuentes
• Consulta (Coincidencia exacta): Milvus selecciona la última entidad con la PK coincidente. Búsqueda ANN: Milvus selecciona la entidad con la puntuación de similitud más alta, incluso si las entidades comparten la misma PK. Esta priorización puede dar lugar a menos resultados únicos que el límite si su colección tiene muchas claves primarias duplicadas.

  • Coincidencias insuficientes: Es posible que las expresiones de filtrado de la búsqueda sean demasiado estrictas, por lo que habrá menos entidades que cumplan el umbral de similitud. Si las condiciones establecidas para la búsqueda son demasiado restrictivas, no coincidirán suficientes entidades, lo que dará lugar a menos resultados de los esperados.

  • -

    ¿Aún tiene preguntas?

    Puede hacerlo:

    +

    MilvusClient("milvus_demo.db") gives an error: ModuleNotFoundError: No module named 'milvus_lite'. ¿Cuál es la causa y cómo puede solucionarse?

    Este error se produce cuando intenta utilizar Milvus Lite en una plataforma Windows. Milvus Lite está diseñado principalmente para entornos Linux y puede no tener soporte nativo para Windows.

    +

    La solución es utilizar un entorno Linux:

      -
    • Eche un vistazo a Milvus en GitHub. Siéntase libre de hacer preguntas, compartir ideas y ayudar a otros.
    • +
    • Utilice un sistema operativo basado en Linux o una máquina virtual para ejecutar Milvus Lite.
    • +
    • Este enfoque garantizará la compatibilidad con las dependencias y la funcionalidad de la biblioteca.
    • +
    +

    ¿Qué son los errores "la longitud excede la longitud máxima" en Milvus, y cómo pueden entenderse y solucionarse?

    Los errores "La longitud excede la longitud máxima" en Milvus se producen cuando el tamaño de un elemento de datos supera el tamaño máximo permitido para una colección o campo. He aquí algunos ejemplos y explicaciones:

    +
      +
    • Error de campo JSON: <MilvusException: (code=1100, message=the length (398324) of json field (metadata) exceeds max length (65536): expected=valid length json string, actual=length exceeds max length: invalid parameter)>

    • +
    • Error de longitud de cadena: <ParamError: (code=1, message=invalid input, length of string exceeds max length. length: 74238, max length: 60535)>

    • +
    • Error de campo VarChar: <MilvusException: (code=1100, message=the length (60540) of 0th VarChar paragraph exceeds max length (0)%!(EXTRA int64=60535): invalid parameter)>

    • +
    +

Para entender y solucionar estos errores:

    +
      +
    • Comprenda que len(str) en Python representa el número de caracteres, no el tamaño en bytes.
    • +
    • Para tipos de datos basados en cadenas como VARCHAR y JSON, utilice len(bytes(str, encoding='utf-8')) para determinar el tamaño real en bytes, que es lo que Milvus utiliza para "max-length".
    • +
    +

    Ejemplo en Python:

    +
    # Python Example: result of len() str cannot be used as "max-length" in Milvus 
    +>>> s = "你好,世界!"
    +>>> len(s) # Number of characters of s.
    +6
    +>>> len(bytes(s, "utf-8")) # Size in bytes of s, max-length in Milvus.
    +18
    +
    +
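Como comprobación previa a la inserción, puede validar el tamaño en bytes de sus cadenas; un boceto (la función fits_max_length es hipotética):

```python
# Boceto: comprobar si un valor cabe en el max_length (en bytes) de un campo VARCHAR.
def fits_max_length(value: str, max_length: int) -> bool:
    return len(value.encode("utf-8")) <= max_length

print(fits_max_length("你好,世界!", 18))  # True: ocupa exactamente 18 bytes
print(fits_max_length("你好,世界!", 17))  # False: excede el límite
```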

    ¿Todavía tiene preguntas?

    Puede hacerlo:

    +
      +
    • Echa un vistazo a Milvus en GitHub. Siéntase libre de hacer preguntas, compartir ideas y ayudar a otros.
    • Únase a nuestro Foro Milvus o Canal Slack para encontrar apoyo y participar con nuestra comunidad de código abierto.
diff --git a/localization/v2.4.x/site/es/faq/performance_faq.json b/localization/v2.4.x/site/es/faq/performance_faq.json
index 82b61b6e9..93ad4fecd 100644
--- a/localization/v2.4.x/site/es/faq/performance_faq.json
+++ b/localization/v2.4.x/site/es/faq/performance_faq.json
@@ -1 +1 @@
-{"codeList":[],"headingContent":"","anchorList":[{"label":"Preguntas más frecuentes","href":"Performance-FAQ","type":1,"isActive":false}]}
\ No newline at end of file
+{"codeList":[],"headingContent":"Performance FAQ","anchorList":[{"label":"Preguntas más frecuentes","href":"Performance-FAQ","type":1,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.4.x/site/es/faq/performance_faq.md b/localization/v2.4.x/site/es/faq/performance_faq.md
index c6b13339c..706f55dbb 100644
--- a/localization/v2.4.x/site/es/faq/performance_faq.md
+++ b/localization/v2.4.x/site/es/faq/performance_faq.md
@@ -4,7 +4,7 @@ summary: >-
   Encuentre respuestas a las preguntas más frecuentes sobre el rendimiento de
   las búsquedas, las mejoras de rendimiento y otras cuestiones relacionadas con
   el rendimiento.
-title: Preguntas frecuentes
+title: Preguntas más frecuentes
---

    Preguntas más frecuentes

¿Cómo configurar nlist y nprobe para los índices IVF?

    La configuración de nlist depende del escenario. Como regla general, el valor recomendado de nlist es 4 × sqrt(n), donde n es el número total de entidades en un segmento.

    El tamaño de cada segmento viene determinado por el parámetro datacoord.segment.maxSize, cuyo valor predeterminado es 512 MB. El número total de entidades en un segmento n puede estimarse dividiendo datacoord.segment.maxSize por el tamaño de cada entidad.

    El ajuste de nprobe es específico del conjunto de datos y del escenario, e implica un compromiso entre la precisión y el rendimiento de la consulta. Recomendamos encontrar el valor ideal mediante la experimentación repetida.
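Por ejemplo, un cálculo orientativo de nlist a partir del tamaño máximo de segmento (el tamaño por entidad es un supuesto y depende de su esquema):

```python
import math

# Boceto: nlist = 4 * sqrt(n), con n = entidades estimadas por segmento.
segment_max_size = 512 * 1024 * 1024      # datacoord.segment.maxSize: 512 MB
bytes_per_entity = 128 * 4                # supuesto: vector float de 128 dims
n = segment_max_size // bytes_per_entity  # ~1.048.576 entidades por segmento
nlist = 4 * int(math.sqrt(n))
print(nlist)  # 4096 con estos supuestos
```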

    -

    Los siguientes gráficos son los resultados de una prueba realizada en el conjunto de datos sift50m y el índice IVF_SQ8, que compara la recuperación y el rendimiento de consulta de diferentes pares nlist/nprobe.

    +

    Los siguientes gráficos son los resultados de una prueba realizada con el conjunto de datos sift50m y el índice IVF_SQ8, que compara la recuperación y el rendimiento de consulta de diferentes pares nlist/nprobe.

[Imágenes: Accuracy test (Prueba de precisión) y Performance test (Prueba de rendimiento)]

    -

    ¿Por qué a veces las consultas tardan más en conjuntos de datos más pequeños?

    Las operaciones de consulta se realizan en segmentos. Los índices reducen el tiempo que se tarda en consultar un segmento. Si un segmento no ha sido indexado, Milvus recurre a la búsqueda de fuerza bruta en los datos brutos, lo que aumenta drásticamente el tiempo de consulta.

+ [Imágenes: Accuracy test (Prueba de precisión) y Performance test (Prueba de rendimiento)]

    +

    ¿Por qué a veces las consultas tardan más en conjuntos de datos más pequeños?

    Las operaciones de consulta se realizan en segmentos. Los índices reducen el tiempo necesario para consultar un segmento. Si un segmento no ha sido indexado, Milvus recurre a la búsqueda de fuerza bruta en los datos brutos, lo que aumenta drásticamente el tiempo de consulta.

    Por lo tanto, normalmente se tarda más en consultar un conjunto de datos pequeño (colección) porque no se ha creado un índice. Esto se debe a que los tamaños de sus segmentos no han alcanzado el umbral de creación de índices establecido por rootCoord.minSegmentSizeToEnableindex. Llame a create_index() para forzar a Milvus a indexar los segmentos que han alcanzado el umbral pero que aún no han sido indexados automáticamente, mejorando significativamente el rendimiento de la consulta.
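Un boceto de esa llamada con el cliente ORM de PyMilvus (los nombres de la colección y del campo son hipotéticos):

```python
from pymilvus import Collection

collection = Collection("small_collection")  # colección hipotética
collection.create_index(
    field_name="embedding",
    index_params={
        "index_type": "IVF_FLAT",
        "metric_type": "L2",
        "params": {"nlist": 128},
    },
)
collection.load()  # vuelva a cargar la colección para buscar con el índice
```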

    ¿Qué factores afectan al uso de la CPU?

    El uso de la CPU aumenta cuando Milvus construye índices o ejecuta consultas. En general, la creación de índices requiere un uso intensivo de la CPU, excepto cuando se utiliza Annoy, que se ejecuta en un único subproceso.

    Cuando se ejecutan consultas, el uso de la CPU se ve afectado por nq y nprobe. Cuando nq y nprobe son pequeños, la concurrencia es baja y el uso de la CPU se mantiene bajo.

diff --git a/localization/v2.4.x/site/es/faq/product_faq.json b/localization/v2.4.x/site/es/faq/product_faq.json
index a5c0fdb58..a8ed2d398 100644
--- a/localization/v2.4.x/site/es/faq/product_faq.json
+++ b/localization/v2.4.x/site/es/faq/product_faq.json
@@ -1 +1 @@
-{"codeList":["60 * 2 * 4 + 40 * 1 * 12 = 960\n"],"headingContent":"","anchorList":[{"label":"Preguntas frecuentes sobre el producto","href":"Product-FAQ","type":1,"isActive":false}]}
\ No newline at end of file
+{"codeList":["60 * 2 * 4 + 40 * 1 * 12 = 960\n"],"headingContent":"Product FAQ","anchorList":[{"label":"Preguntas frecuentes sobre el producto","href":"Product-FAQ","type":1,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.4.x/site/es/faq/product_faq.md b/localization/v2.4.x/site/es/faq/product_faq.md
index 3d90554b5..c1466a454 100644
--- a/localization/v2.4.x/site/es/faq/product_faq.md
+++ b/localization/v2.4.x/site/es/faq/product_faq.md
@@ -3,7 +3,7 @@ id: product_faq.md
 summary: >-
   Encuentre respuestas a las preguntas más frecuentes sobre la base de datos
   vectorial más avanzada del mundo.
-title: Preguntas más frecuentes
+title: Preguntas frecuentes sobre el producto
---

    Preguntas frecuentes sobre el producto

    -

    ¿Por qué obtengo menos de k vectores cuando busco vectores en topk?

    Entre los índices que soporta Milvus, IVF_FLAT e IVF_SQ8 implementan el método de agrupación k-means. Un espacio de datos se divide en nlist clusters y los vectores insertados se distribuyen en estos clusters. A continuación, Milvus selecciona los nprobe clusters más cercanos y compara las distancias entre el vector objetivo y todos los vectores de los clusters seleccionados para devolver los resultados finales.

    +

    ¿Por qué obtengo menos de k vectores cuando busco vectores en topk?

    Entre los índices que soporta Milvus, IVF_FLAT e IVF_SQ8 implementan el método de agrupación k-means. Un espacio de datos se divide en nlist clusters y los vectores insertados se distribuyen en estos clusters. Milvus selecciona entonces los nprobe clusters más cercanos y compara las distancias entre el vector objetivo y todos los vectores en los clusters seleccionados para devolver los resultados finales.

    Si nlist y topk son grandes y nprobe es pequeño, el número de vectores en los clusters nprobe puede ser menor que k. Por lo tanto, cuando se buscan los vectores más cercanos topk, el número de vectores devueltos es menor que k.

Para evitarlo, pruebe con un valor de nprobe mayor y con valores de nlist y k menores.

    Consulte Índice de vectores para obtener más información.

    ¿Cuál es la dimensión máxima de vector soportada en Milvus?

    Milvus puede gestionar vectores con hasta 32.768 dimensiones por defecto. Puede aumentar el valor de Proxy.maxDimension para permitir un vector de mayor dimensión.

    -

    ¿Es Milvus compatible con la CPU Apple M1?

    La versión actual de Milvus no es compatible con la CPU Apple M1.

    -

    ¿Qué tipos de datos soporta Milvus en el campo de clave primaria?

    En la versión actual, Milvus soporta INT64 y string.

    +

    ¿Es Milvus compatible con la CPU Apple M1?

La versión actual de Milvus no es compatible directamente con la CPU Apple M1. A partir de Milvus 2.3, Milvus proporciona imágenes Docker para la arquitectura ARM64.

    +

    ¿Qué tipos de datos soporta Milvus en el campo de clave primaria?

    En la versión actual, Milvus soporta tanto INT64 como string.

    ¿Es Milvus escalable?

    Sí. Puede desplegar un clúster Milvus con múltiples nodos a través de Helm Chart en Kubernetes. Consulte la Guía de Escalado para más instrucciones.

    ¿La consulta se realiza en memoria? ¿Qué son los datos incrementales y los datos históricos?

    Sí. Cuando llega una petición de consulta, Milvus busca tanto datos incrementales como datos históricos cargándolos en memoria. Los datos incrementales se encuentran en los segmentos crecientes, que se almacenan en memoria antes de que alcancen el umbral para persistir en el motor de almacenamiento, mientras que los datos históricos son de los segmentos sellados que se almacenan en el almacenamiento de objetos. Los datos incrementales y los datos históricos juntos constituyen todo el conjunto de datos a buscar.

¿Busca Milvus en los datos incrementales y en los datos históricos al mismo tiempo?

Sí. Para consultas sobre la misma colección, Milvus busca simultáneamente en los datos incrementales e históricos. Sin embargo, las consultas sobre colecciones diferentes se realizan en serie. Mientras que los datos históricos pueden ser un conjunto de datos extremadamente grande, las búsquedas en los datos históricos consumen relativamente más tiempo y se realizan esencialmente en serie.

diff --git a/localization/v2.4.x/site/es/getstarted/install_SDKs/install-java.json b/localization/v2.4.x/site/es/getstarted/install_SDKs/install-java.json
index bad8e1bcd..e258cf273 100644
--- a/localization/v2.4.x/site/es/getstarted/install_SDKs/install-java.json
+++ b/localization/v2.4.x/site/es/getstarted/install_SDKs/install-java.json
@@ -1 @@
-{"codeList":["<dependency>\n    <groupId>io.milvus</groupId>\n    <artifactId>milvus-sdk-java</artifactId>\n    <version>2.4.3</version>\n</dependency>\n","implementation 'io.milvus:milvus-sdk-java:2.4.3'\n"],"headingContent":"","anchorList":[{"label":"Instalar Milvus Java SDK","href":"Install-Milvus-Java-SDK","type":1,"isActive":false},{"label":"Requisitos","href":"Requirement","type":2,"isActive":false},{"label":"Instalar Milvus Java SDK","href":"Install-Milvus-Java-SDK","type":2,"isActive":false},{"label":"A continuación","href":"Whats-next","type":2,"isActive":false}]}
\ No newline at end of file
+{"codeList":["<dependency>\n    <groupId>io.milvus</groupId>\n    <artifactId>milvus-sdk-java</artifactId>\n    <version>2.4.5</version>\n</dependency>\n","implementation 'io.milvus:milvus-sdk-java:2.4.5'\n"],"headingContent":"Install Milvus Java SDK","anchorList":[{"label":"Instalar Milvus Java SDK","href":"Install-Milvus-Java-SDK","type":1,"isActive":false},{"label":"Requisitos","href":"Requirement","type":2,"isActive":false},{"label":"Instalar Milvus Java SDK","href":"Install-Milvus-Java-SDK","type":2,"isActive":false},{"label":"A continuación","href":"Whats-next","type":2,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.4.x/site/es/getstarted/install_SDKs/install-java.md b/localization/v2.4.x/site/es/getstarted/install_SDKs/install-java.md
index 834425d38..9716b5c20 100644
--- a/localization/v2.4.x/site/es/getstarted/install_SDKs/install-java.md
+++ b/localization/v2.4.x/site/es/getstarted/install_SDKs/install-java.md
@@ -63,13 +63,13 @@ title: Instalar Milvus Java SDK
    <dependency>
         <groupId>io.milvus</groupId>
         <artifactId>milvus-sdk-java</artifactId>
    -    <version>2.4.3</version>
    +    <version>2.4.5</version>
     </dependency>
     
    • Gradle/Grails
    -
    implementation 'io.milvus:milvus-sdk-java:2.4.3'
    +
    implementation 'io.milvus:milvus-sdk-java:2.4.5'
     

    A continuación

    PyMilvus está disponible en Python Package Index.

    Se recomienda instalar una versión de PyMilvus que coincida con la versión del servidor Milvus que ha instalado. Para más información, consulte las Notas de la versión.
    -
    $ python3 -m pip install pymilvus==2.4.5
    +
    $ python3 -m pip install pymilvus==2.4.8
     

    Verifique la instalación

    Open In Colab

    +

    Open In Colab +GitHub Repository

    Los vectores, el formato de datos de salida de los modelos de redes neuronales, pueden codificar eficazmente la información y desempeñar un papel fundamental en aplicaciones de IA como la base de conocimientos, la búsqueda semántica, la generación aumentada de recuperación (RAG) y más.

    Milvus es una base de datos vectorial de código abierto que se adapta a aplicaciones de IA de todos los tamaños, desde la ejecución de un chatbot de demostración en el cuaderno Jupyter hasta la creación de búsquedas a escala web que sirven a miles de millones de usuarios. En esta guía, le mostraremos cómo configurar Milvus localmente en cuestión de minutos y utilizar la biblioteca cliente Python para generar, almacenar y buscar vectores.

    Instalar Milvus

    Por defecto, los campos escalares no se indexan. Si necesita realizar una búsqueda filtrada de metadatos en un conjunto de datos de gran tamaño, puede considerar la posibilidad de utilizar un esquema fijo y activar también el índice para mejorar el rendimiento de la búsqueda.
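A modo de ejemplo, un boceto con MilvusClient (el nombre del campo escalar es hipotético; client es el cliente creado anteriormente en esta guía):

```python
# Boceto: índice INVERTED sobre un campo escalar para acelerar el filtrado.
index_params = client.prepare_index_params()
index_params.add_index(field_name="subject", index_type="INVERTED")  # campo hipotético
client.create_index(collection_name="demo_collection", index_params=index_params)
```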

    Además de la búsqueda vectorial, también puede realizar otros tipos de búsqueda:

    -

    Consulta

    Una consulta() es una operación que recupera todas las entidades que coinciden con una cretria, como una expresión de filtro o que coinciden con algunos ids.

    +

    Consulta

    Una consulta() es una operación que recupera todas las entidades que coinciden con un criterio, como una expresión de filtro o la coincidencia con algunos ids.

    Por ejemplo, recuperar todas las entidades cuyo campo escalar tenga un valor determinado:

    res = client.query(
         collection_name="demo_collection",
    @@ -329,7 +331,7 @@ res = client.search(
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

    Si desea purgar datos, puede eliminar entidades especificando la clave primaria o eliminar todas las entidades que coincidan con una determinada expresión de filtro.

    +

    Si desea purgar datos, puede eliminar entidades especificando la clave primaria o eliminar todas las entidades que coincidan con una expresión de filtro determinada.

    # Delete entities by primary key
     res = client.delete(collection_name="demo_collection", ids=[0, 2])
     
    diff --git a/localization/v2.4.x/site/es/getstarted/run-milvus-docker/install_standalone-docker-compose.json b/localization/v2.4.x/site/es/getstarted/run-milvus-docker/install_standalone-docker-compose.json
    index 3baddf3b6..c405ba7b6 100644
    --- a/localization/v2.4.x/site/es/getstarted/run-milvus-docker/install_standalone-docker-compose.json
    +++ b/localization/v2.4.x/site/es/getstarted/run-milvus-docker/install_standalone-docker-compose.json
    @@ -1 +1 @@
    -{"codeList":["# Download the configuration file\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml\n\n# Start Milvus\n$ sudo docker compose up -d\n\nCreating milvus-etcd  ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker compose ps\n\n      Name                     Command                  State                            Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd         etcd -advertise-client-url ...   Up             2379/tcp, 2380/tcp\nmilvus-minio        /usr/bin/docker-entrypoint ...   Up (healthy)   9000/tcp\nmilvus-standalone   /tini -- milvus run standalone   Up             0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","# Stop Milvus\n$ sudo docker compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n"],"headingContent":"","anchorList":[{"label":"Ejecutar Milvus con Docker Compose","href":"Run-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"Requisitos previos","href":"Prerequisites","type":2,"isActive":false},{"label":"Instale Milvus","href":"Install-Milvus","type":2,"isActive":false},{"label":"Lo que sigue","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["# Download the configuration file\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml\n\n# Start Milvus\n$ sudo docker-compose up -d\n\nCreating milvus-etcd  ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker-compose ps\n\n      Name                     Command                  State                            Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd         etcd -advertise-client-url ...   Up             2379/tcp, 2380/tcp\nmilvus-minio        /usr/bin/docker-entrypoint ...   Up (healthy)   9000/tcp\nmilvus-standalone   /tini -- milvus run standalone   Up             0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","# Stop Milvus\n$ sudo docker-compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n"],"headingContent":"Run Milvus with Docker Compose","anchorList":[{"label":"Ejecutar Milvus con Docker Compose","href":"Run-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"Requisitos previos","href":"Prerequisites","type":2,"isActive":false},{"label":"Instale Milvus","href":"Install-Milvus","type":2,"isActive":false},{"label":"Lo que sigue","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/es/getstarted/run-milvus-docker/install_standalone-docker-compose.md b/localization/v2.4.x/site/es/getstarted/run-milvus-docker/install_standalone-docker-compose.md
    index 2bbfa939c..a3e67812d 100644
    --- a/localization/v2.4.x/site/es/getstarted/run-milvus-docker/install_standalone-docker-compose.md
    +++ b/localization/v2.4.x/site/es/getstarted/run-milvus-docker/install_standalone-docker-compose.md
    @@ -57,10 +57,10 @@ title: Ejecutar Milvus con Docker Compose
           
         

    Milvus proporciona un archivo de configuración Docker Compose en el repositorio Milvus. Para instalar Milvus utilizando Docker Compose, simplemente ejecute

    # Download the configuration file
    -$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml
    +$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml
     
     # Start Milvus
    -$ sudo docker compose up -d
    +$ sudo docker-compose up -d
     
     Creating milvus-etcd  ... done
     Creating milvus-minio ... done
    @@ -81,7 +81,7 @@ Creating milvus-standalone ... done
     
     
     

    Puede comprobar si los contenedores están en funcionamiento utilizando el siguiente comando:

    -
    $ sudo docker compose ps
    +
    $ sudo docker-compose ps
     
           Name                     Command                  State                            Ports
     --------------------------------------------------------------------------------------------------------------------
    @@ -91,7 +91,7 @@ milvus-standalone   /tini -- milvus run standalone   Up             0.0.0.0:1953
     

    Puede detener y eliminar este contenedor de la siguiente manera

    # Stop Milvus
    -$ sudo docker compose down
    +$ sudo docker-compose down
     
     # Delete service data
     $ sudo rm -rf volumes
    @@ -113,7 +113,7 @@ $ sudo rm
           
         

    Habiendo instalado Milvus en Docker, puede:

      -
    • Comprobar Quickstart para ver lo que Milvus puede hacer.

    • +
    • Comprobar Quickstart para ver qué puede hacer Milvus.

    • Aprender las operaciones básicas de Milvus:

    • Actualice Milvus utilizando Helm Chart.

    • Escale su cluster Milvus.

    • -
    • Despliegue su clúster Milvu en nubes:

      +
    • Despliegue su clúster Milvus en nubes:

      • Amazon EKS
      • Google Cloud
      • diff --git a/localization/v2.4.x/site/es/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json b/localization/v2.4.x/site/es/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json index 4499069de..4c72e0f58 100644 --- a/localization/v2.4.x/site/es/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json +++ b/localization/v2.4.x/site/es/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json @@ -1 +1 @@ -{"codeList":["$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml\n","...\nstandalone:\n ...\n deploy:\n resources:\n reservations:\n devices:\n - driver: nvidia\n capabilities: [\"gpu\"]\n device_ids: [\"0\"]\n...\n","...\nstandalone:\n ...\n deploy:\n resources:\n reservations:\n devices:\n - driver: nvidia\n capabilities: [\"gpu\"]\n device_ids: ['0', '1']\n...\n","$ sudo docker compose up -d\n\nCreating milvus-etcd ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker compose ps\n\n Name Command State Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd etcd -advertise-client-url ... Up 2379/tcp, 2380/tcp\nmilvus-minio /usr/bin/docker-entrypoint ... Up (healthy) 9000/tcp\nmilvus-standalone /tini -- milvus run standalone Up 0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","$ CUDA_VISIBLE_DEVICES=0 ./milvus run standalone\n","$ CUDA_VISIBLE_DEVICES=0,1 ./milvus run standalone\n","# Stop Milvus\n$ sudo docker compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n","docker cp :/milvus/configs/milvus.yaml milvus.yaml\n","vim milvus.yaml\n","...\ngpu:\n initMemSize: 0\n maxMemSize: 0\n...\n","docker cp milvus.yaml :/milvus/configs/milvus.yaml\n","docker stop \ndocker start \n"],"headingContent":"","anchorList":[{"label":"Ejecutar Milvus con soporte GPU usando Docker Compose","href":"Run-Milvus-with-GPU-Support-Using-Docker-Compose","type":1,"isActive":false},{"label":"Requisitos previos","href":"Prerequisites","type":2,"isActive":false},{"label":"Instalar Milvus","href":"Install-Milvus","type":2,"isActive":false},{"label":"Configurar el pool de memoria","href":"Configure-memory-pool","type":2,"isActive":false},{"label":"Lo que sigue","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml\n","...\nstandalone:\n ...\n deploy:\n resources:\n reservations:\n devices:\n - driver: nvidia\n capabilities: [\"gpu\"]\n device_ids: [\"0\"]\n...\n","...\nstandalone:\n ...\n deploy:\n resources:\n reservations:\n devices:\n - driver: nvidia\n capabilities: [\"gpu\"]\n device_ids: ['0', '1']\n...\n","$ sudo docker compose up -d\n\nCreating milvus-etcd ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker compose ps\n\n Name Command State Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd etcd -advertise-client-url ... Up 2379/tcp, 2380/tcp\nmilvus-minio /usr/bin/docker-entrypoint ... 
Up (healthy) 9000/tcp\nmilvus-standalone /tini -- milvus run standalone Up 0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","$ CUDA_VISIBLE_DEVICES=0 ./milvus run standalone\n","$ CUDA_VISIBLE_DEVICES=0,1 ./milvus run standalone\n","# Stop Milvus\n$ sudo docker compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n","docker cp :/milvus/configs/milvus.yaml milvus.yaml\n","vim milvus.yaml\n","...\ngpu:\n initMemSize: 0\n maxMemSize: 0\n...\n","docker cp milvus.yaml :/milvus/configs/milvus.yaml\n","docker stop \ndocker start \n"],"headingContent":"Run Milvus with GPU Support Using Docker Compose","anchorList":[{"label":"Ejecutar Milvus con soporte GPU usando Docker Compose","href":"Run-Milvus-with-GPU-Support-Using-Docker-Compose","type":1,"isActive":false},{"label":"Requisitos previos","href":"Prerequisites","type":2,"isActive":false},{"label":"Instalar Milvus","href":"Install-Milvus","type":2,"isActive":false},{"label":"Configurar el pool de memoria","href":"Configure-memory-pool","type":2,"isActive":false},{"label":"Lo que sigue","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md b/localization/v2.4.x/site/es/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md index 92823ae6a..cebf91450 100644 --- a/localization/v2.4.x/site/es/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md +++ b/localization/v2.4.x/site/es/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md @@ -3,7 +3,7 @@ id: install_standalone-docker-compose-gpu.md label: Standalone (Docker Compose) related_key: Kubernetes summary: Aprenda a instalar el clúster Milvus en Kubernetes. -title: Ejecutar Milvus con soporte GPU utilizando Docker Compose +title: Ejecutar Milvus con soporte GPU usando Docker Compose ---

        Run Milvus with GPU Support Using Docker Compose

      If you run into any problems while starting the image, contact us at community@zilliz.com with details about the problem, and we will provide you with the necessary support.

      Install Milvus

      To install Milvus with GPU support using Docker Compose, follow these steps.

      1. Download and configure the YAML file

      Download milvus-standalone-docker-compose-gpu.yml and save it as docker-compose.yml manually, or with the following command.

      $ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml
       

      You need to make some changes to the environment variables of the standalone service in the YAML file, as shown below:
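      For reference, this is the relevant fragment from the code list above: the standalone service reserves an NVIDIA GPU device. To assign more than one device, list more IDs, e.g. device_ids: ['0', '1'].

      ...
      standalone:
        ...
        deploy:
          resources:
            reservations:
              devices:
                - driver: nvidia
                  capabilities: ["gpu"]
                  device_ids: ["0"]
      ...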

        diff --git a/localization/v2.4.x/site/es/integrations/evaluation_with_deepeval.md b/localization/v2.4.x/site/es/integrations/evaluation_with_deepeval.md index 50f7f804e..8cd752105 100644 --- a/localization/v2.4.x/site/es/integrations/evaluation_with_deepeval.md +++ b/localization/v2.4.x/site/es/integrations/evaluation_with_deepeval.md @@ -20,7 +20,8 @@ title: Evaluación con DeepEval d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

        Open In Colab | GitHub Repository

        This guide shows how to use DeepEval to evaluate a Retrieval-Augmented Generation (RAG) pipeline built on top of Milvus.

        The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

        DeepEval is a framework that helps you evaluate your RAG pipelines. There are existing tools and frameworks that help build these pipelines, but evaluating them and quantifying their performance can be hard. This is where DeepEval comes in.

        @@ -297,7 +298,7 @@ Answering questions: 100%|██████████| 3/3 [00:03<00:00, 1 ¿Cuál es el lenguaje de programación utilizado... - [CMake & Conan\n\nLa biblioteca de algoritmos de Mil... + [CMake & Conan\nLa biblioteca de algoritmos de Mil... ¿Cuál es el lenguaje de programación utilizado... ¿Cuál es el lenguaje de programación utilizado... @@ -394,7 +395,7 @@ Evaluating 3 test case(s) in parallel: |██████████|100% (3/3

        To evaluate the quality of the outputs generated by large language models (LLMs), it is important to focus on two key aspects:

        1. Relevancy: Evaluate whether the prompt effectively guides the LLM to generate helpful and contextually appropriate answers.

        2. Faithfulness: Measure the accuracy of the output, ensuring that the model produces information that is factually correct and free from hallucinations or contradictions. The generated content should align with the factual information provided in the retrieval context.

        Together, these factors ensure that the outputs are both relevant and reliable.

        from deepeval.metrics import AnswerRelevancyMetric, FaithfulnessMetric
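        The import above is where the tutorial's evaluation cell begins; the diff truncates the rest. As a minimal sketch of how these two metrics are typically applied (the input, actual_output, and retrieval_context values here are placeholders, not from the tutorial):

        from deepeval import evaluate
        from deepeval.metrics import AnswerRelevancyMetric, FaithfulnessMetric
        from deepeval.test_case import LLMTestCase

        # One test case per question: the model's answer plus the retrieved context.
        test_case = LLMTestCase(
            input="What is the programming language used to write Knowhere?",
            actual_output="C++",
            retrieval_context=["Knowhere is written in C++ ..."],
        )

        # Both metrics score the case from 0 to 1; threshold sets the pass mark.
        evaluate(
            test_cases=[test_case],
            metrics=[AnswerRelevancyMetric(threshold=0.7), FaithfulnessMetric(threshold=0.7)],
        )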
        diff --git a/localization/v2.4.x/site/es/integrations/evaluation_with_phoenix.md b/localization/v2.4.x/site/es/integrations/evaluation_with_phoenix.md
        index 40ba2a9cf..aaff166bd 100644
        --- a/localization/v2.4.x/site/es/integrations/evaluation_with_phoenix.md
        +++ b/localization/v2.4.x/site/es/integrations/evaluation_with_phoenix.md
        @@ -20,7 +20,8 @@ title: Evaluación con Arize Pheonix
                   d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
                 >
               
        Open In Colab | GitHub Repository

        This guide shows how to use Arize Phoenix to evaluate a Retrieval-Augmented Generation (RAG) pipeline built on top of Milvus.

        The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

        Arize Phoenix is a framework that helps you evaluate your RAG pipelines. There are existing tools and frameworks that help build these pipelines, but evaluating them and quantifying their performance can be hard. This is where Arize Phoenix comes in.

        @@ -298,7 +299,7 @@ Answering questions: 100%|██████████| 3/3 [00:03<00:00, 1 ¿Cuál es el lenguaje de programación utilizado... [CMake & Conan\n\nLa biblioteca de algoritmos de Mil... - ¿Cuál es el lenguaje de programación utilizado... + El lenguaje de programación utilizado para escribir Knowher... ¿Cuál es el lenguaje de programación utilizado... @@ -332,7 +333,7 @@ Answering questions: 100%|██████████| 3/3 [00:03<00:00,
        • Hallucination Explanation: Explains why a response is factual or not.
      • QA Evaluation: Assesses the accuracy of the model's answers to input queries.
        • QA Explanation: Details why an answer is correct or incorrect.
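
        A minimal sketch of how these evaluators are typically wired up with Phoenix, assuming the phoenix.evals module and a results_df DataFrame with input, output, and reference columns, as in the tutorial:

        from phoenix.evals import (
            HallucinationEvaluator,
            OpenAIModel,
            QAEvaluator,
            run_evals,
        )

        # LLM used as the judge for both evaluators.
        eval_model = OpenAIModel(model="gpt-4o")

        hallucination_evaluator = HallucinationEvaluator(eval_model)
        qa_evaluator = QAEvaluator(eval_model)

        # results_df needs "input", "output", and "reference" columns.
        hallucination_eval_df, qa_eval_df = run_evals(
            dataframe=results_df,
            evaluators=[hallucination_evaluator, qa_evaluator],
            provide_explanation=True,  # adds the explanation columns described above
        )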
      @@ -359,7 +360,7 @@ OpenAIInstrumentor().instrument()

        Alt Text

        import nest_asyncio
         
        @@ -448,7 +449,7 @@ results_df.head()
               [CMake & Conan\n\nLa biblioteca de algoritmos de Mil...
               El lenguaje de programación utilizado para escribir Knowher...
               El lenguaje de programación utilizado para escribir Knowher...
        -      [CMake & Conan La librería de algoritmos de Mil...
        +      [CMake & Conan\nLa biblioteca de algoritmos de Mil...
               [CMake & Conan La biblioteca de algoritmos de Mil...
               factual
               Determinar si la respuesta es factual o hallu...
        diff --git a/localization/v2.4.x/site/es/integrations/integrate_with_bentoml.json b/localization/v2.4.x/site/es/integrations/integrate_with_bentoml.json
        index f921e76d9..bb8197fda 100644
        --- a/localization/v2.4.x/site/es/integrations/integrate_with_bentoml.json
        +++ b/localization/v2.4.x/site/es/integrations/integrate_with_bentoml.json
        @@ -1 +1 @@
        -{"codeList":["$ pip install -U pymilvus bentoml\n","import bentoml\n\nBENTO_EMBEDDING_MODEL_END_POINT = \"BENTO_EMBEDDING_MODEL_END_POINT\"\nBENTO_API_TOKEN = \"BENTO_API_TOKEN\"\n\nembedding_client = bentoml.SyncHTTPClient(\n    BENTO_EMBEDDING_MODEL_END_POINT, token=BENTO_API_TOKEN\n)\n","# naively chunk on newlines\ndef chunk_text(filename: str) -> list:\n    with open(filename, \"r\") as f:\n        text = f.read()\n    sentences = text.split(\"\\n\")\n    return sentences\n","import os\nimport requests\nimport urllib.request\n\n# set up the data source\nrepo = \"ytang07/bento_octo_milvus_RAG\"\ndirectory = \"data\"\nsave_dir = \"./city_data\"\napi_url = f\"https://api.github.com/repos/{repo}/contents/{directory}\"\n\n\nresponse = requests.get(api_url)\ndata = response.json()\n\nif not os.path.exists(save_dir):\n    os.makedirs(save_dir)\n\nfor item in data:\n    if item[\"type\"] == \"file\":\n        file_url = item[\"download_url\"]\n        file_path = os.path.join(save_dir, item[\"name\"])\n        urllib.request.urlretrieve(file_url, file_path)\n","# please upload your data directory under this file's folder\ncities = os.listdir(\"city_data\")\n# store chunked text for each of the cities in a list of dicts\ncity_chunks = []\nfor city in cities:\n    chunked = chunk_text(f\"city_data/{city}\")\n    cleaned = []\n    for chunk in chunked:\n        if len(chunk) > 7:\n            cleaned.append(chunk)\n    mapped = {\"city_name\": city.split(\".\")[0], \"chunks\": cleaned}\n    city_chunks.append(mapped)\n","def get_embeddings(texts: list) -> list:\n    if len(texts) > 25:\n        splits = [texts[x : x + 25] for x in range(0, len(texts), 25)]\n        embeddings = []\n        for split in splits:\n            embedding_split = embedding_client.encode(sentences=split)\n            embeddings += embedding_split\n        return embeddings\n    return embedding_client.encode(\n        sentences=texts,\n    )\n","entries = []\nfor city_dict in city_chunks:\n    # No need for the embeddings list if get_embeddings already returns a list of lists\n    embedding_list = get_embeddings(city_dict[\"chunks\"])  # returns a list of lists\n    # Now match texts with embeddings and city name\n    for i, embedding in enumerate(embedding_list):\n        entry = {\n            \"embedding\": embedding,\n            \"sentence\": city_dict[\"chunks\"][\n                i\n            ],  # Assume \"chunks\" has the corresponding texts for the embeddings\n            \"city\": city_dict[\"city_name\"],\n        }\n        entries.append(entry)\n    print(entries)\n","from pymilvus import MilvusClient\n\nCOLLECTION_NAME = \"Bento_Milvus_RAG\"  # random name for your collection\nDIMENSION = 384\n\n# Initialize a Milvus Lite client\nmilvus_client = MilvusClient(\"milvus_demo.db\")\n","from pymilvus import connections\n\nconnections.connect(uri=\"milvus_demo.db\")\n","from pymilvus import MilvusClient, DataType, Collection\n\n# Create schema\nschema = MilvusClient.create_schema(\n    auto_id=True,\n    enable_dynamic_field=True,\n)\n\n# 3.2. 
Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"embedding\", datatype=DataType.FLOAT_VECTOR, dim=DIMENSION)\n","# prepare index parameters\nindex_params = milvus_client.prepare_index_params()\n\n# add index\nindex_params.add_index(\n    field_name=\"embedding\",\n    index_type=\"AUTOINDEX\",  # use autoindex instead of other complex indexing method\n    metric_type=\"COSINE\",  # L2, COSINE, or IP\n)\n\n# create collection\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n    collection_name=COLLECTION_NAME, schema=schema, index_params=index_params\n)\n\n# Outside the loop, now you upsert all the entries at once\nmilvus_client.insert(collection_name=COLLECTION_NAME, data=entries)\n","BENTO_LLM_END_POINT = \"BENTO_LLM_END_POINT\"\n\nllm_client = bentoml.SyncHTTPClient(BENTO_LLM_END_POINT, token=BENTO_API_TOKEN)\n","def dorag(question: str, context: str):\n\n    prompt = (\n        f\"You are a helpful assistant. The user has a question. Answer the user question based only on the context: {context}. \\n\"\n        f\"The user question is {question}\"\n    )\n\n    results = llm_client.generate(\n        max_tokens=1024,\n        prompt=prompt,\n    )\n\n    res = \"\"\n    for result in results:\n        res += result\n\n    return res\n","question = \"What state is Cambridge in?\"\n\n\ndef ask_a_question(question):\n    embeddings = get_embeddings([question])\n    res = milvus_client.search(\n        collection_name=COLLECTION_NAME,\n        data=embeddings,  # search for the one (1) embedding returned as a list of lists\n        anns_field=\"embedding\",  # Search across embeddings\n        limit=5,  # get me the top 5 results\n        output_fields=[\"sentence\"],  # get the sentence/chunk and city\n    )\n\n    sentences = []\n    for hits in res:\n        for hit in hits:\n            print(hit)\n            sentences.append(hit[\"entity\"][\"sentence\"])\n    context = \". \".join(sentences)\n    return context\n\n\ncontext = ask_a_question(question=question)\nprint(context)\n","print(dorag(question=question, context=context))\n"],"headingContent":"","anchorList":[{"label":"Generación mejorada por recuperación (RAG) con Milvus y BentoML","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-BentoML","type":1,"isActive":false},{"label":"Introducción","href":"Introduction","type":2,"isActive":false},{"label":"Antes de empezar","href":"Before-you-begin","type":2,"isActive":false},{"label":"Sirviendo Embeddings con BentoML/BentoCloud","href":"Serving-Embeddings-with-BentoMLBentoCloud","type":2,"isActive":false},{"label":"Inserción de datos en una base de datos vectorial para su recuperación","href":"Inserting-Data-into-a-Vector-Database-for-Retrieval","type":2,"isActive":false},{"label":"Creación de su colección Milvus Lite","href":"Creating-Your-Milvus-Lite-Collection","type":2,"isActive":false},{"label":"Configura tu LLM para RAG","href":"Set-up-Your-LLM-for-RAG","type":2,"isActive":false},{"label":"Instrucciones LLM","href":"LLM-Instructions","type":2,"isActive":false},{"label":"Un ejemplo RAG","href":"A-RAG-Example","type":2,"isActive":false}]}
        \ No newline at end of file
        +{"codeList":["$ pip install -U pymilvus bentoml\n","import bentoml\n\nBENTO_EMBEDDING_MODEL_END_POINT = \"BENTO_EMBEDDING_MODEL_END_POINT\"\nBENTO_API_TOKEN = \"BENTO_API_TOKEN\"\n\nembedding_client = bentoml.SyncHTTPClient(\n    BENTO_EMBEDDING_MODEL_END_POINT, token=BENTO_API_TOKEN\n)\n","# naively chunk on newlines\ndef chunk_text(filename: str) -> list:\n    with open(filename, \"r\") as f:\n        text = f.read()\n    sentences = text.split(\"\\n\")\n    return sentences\n","import os\nimport requests\nimport urllib.request\n\n# set up the data source\nrepo = \"ytang07/bento_octo_milvus_RAG\"\ndirectory = \"data\"\nsave_dir = \"./city_data\"\napi_url = f\"https://api.github.com/repos/{repo}/contents/{directory}\"\n\n\nresponse = requests.get(api_url)\ndata = response.json()\n\nif not os.path.exists(save_dir):\n    os.makedirs(save_dir)\n\nfor item in data:\n    if item[\"type\"] == \"file\":\n        file_url = item[\"download_url\"]\n        file_path = os.path.join(save_dir, item[\"name\"])\n        urllib.request.urlretrieve(file_url, file_path)\n","# please upload your data directory under this file's folder\ncities = os.listdir(\"city_data\")\n# store chunked text for each of the cities in a list of dicts\ncity_chunks = []\nfor city in cities:\n    chunked = chunk_text(f\"city_data/{city}\")\n    cleaned = []\n    for chunk in chunked:\n        if len(chunk) > 7:\n            cleaned.append(chunk)\n    mapped = {\"city_name\": city.split(\".\")[0], \"chunks\": cleaned}\n    city_chunks.append(mapped)\n","def get_embeddings(texts: list) -> list:\n    if len(texts) > 25:\n        splits = [texts[x : x + 25] for x in range(0, len(texts), 25)]\n        embeddings = []\n        for split in splits:\n            embedding_split = embedding_client.encode(sentences=split)\n            embeddings += embedding_split\n        return embeddings\n    return embedding_client.encode(\n        sentences=texts,\n    )\n","entries = []\nfor city_dict in city_chunks:\n    # No need for the embeddings list if get_embeddings already returns a list of lists\n    embedding_list = get_embeddings(city_dict[\"chunks\"])  # returns a list of lists\n    # Now match texts with embeddings and city name\n    for i, embedding in enumerate(embedding_list):\n        entry = {\n            \"embedding\": embedding,\n            \"sentence\": city_dict[\"chunks\"][\n                i\n            ],  # Assume \"chunks\" has the corresponding texts for the embeddings\n            \"city\": city_dict[\"city_name\"],\n        }\n        entries.append(entry)\n    print(entries)\n","from pymilvus import MilvusClient\n\nCOLLECTION_NAME = \"Bento_Milvus_RAG\"  # random name for your collection\nDIMENSION = 384\n\n# Initialize a Milvus Lite client\nmilvus_client = MilvusClient(\"milvus_demo.db\")\n","from pymilvus import connections\n\nconnections.connect(uri=\"milvus_demo.db\")\n","from pymilvus import MilvusClient, DataType, Collection\n\n# Create schema\nschema = MilvusClient.create_schema(\n    auto_id=True,\n    enable_dynamic_field=True,\n)\n\n# 3.2. 
Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"embedding\", datatype=DataType.FLOAT_VECTOR, dim=DIMENSION)\n","# prepare index parameters\nindex_params = milvus_client.prepare_index_params()\n\n# add index\nindex_params.add_index(\n    field_name=\"embedding\",\n    index_type=\"AUTOINDEX\",  # use autoindex instead of other complex indexing method\n    metric_type=\"COSINE\",  # L2, COSINE, or IP\n)\n\n# create collection\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n    collection_name=COLLECTION_NAME, schema=schema, index_params=index_params\n)\n\n# Outside the loop, now you upsert all the entries at once\nmilvus_client.insert(collection_name=COLLECTION_NAME, data=entries)\n","BENTO_LLM_END_POINT = \"BENTO_LLM_END_POINT\"\n\nllm_client = bentoml.SyncHTTPClient(BENTO_LLM_END_POINT, token=BENTO_API_TOKEN)\n","def dorag(question: str, context: str):\n\n    prompt = (\n        f\"You are a helpful assistant. The user has a question. Answer the user question based only on the context: {context}. \\n\"\n        f\"The user question is {question}\"\n    )\n\n    results = llm_client.generate(\n        max_tokens=1024,\n        prompt=prompt,\n    )\n\n    res = \"\"\n    for result in results:\n        res += result\n\n    return res\n","question = \"What state is Cambridge in?\"\n\n\ndef ask_a_question(question):\n    embeddings = get_embeddings([question])\n    res = milvus_client.search(\n        collection_name=COLLECTION_NAME,\n        data=embeddings,  # search for the one (1) embedding returned as a list of lists\n        anns_field=\"embedding\",  # Search across embeddings\n        limit=5,  # get me the top 5 results\n        output_fields=[\"sentence\"],  # get the sentence/chunk and city\n    )\n\n    sentences = []\n    for hits in res:\n        for hit in hits:\n            print(hit)\n            sentences.append(hit[\"entity\"][\"sentence\"])\n    context = \". \".join(sentences)\n    return context\n\n\ncontext = ask_a_question(question=question)\nprint(context)\n","print(dorag(question=question, context=context))\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and BentoML","anchorList":[{"label":"Generación mejorada por recuperación (RAG) con Milvus y BentoML","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-BentoML","type":1,"isActive":false},{"label":"Introducción","href":"Introduction","type":2,"isActive":false},{"label":"Antes de empezar","href":"Before-you-begin","type":2,"isActive":false},{"label":"Sirviendo Embeddings con BentoML/BentoCloud","href":"Serving-Embeddings-with-BentoMLBentoCloud","type":2,"isActive":false},{"label":"Inserción de datos en una base de datos vectorial para su recuperación","href":"Inserting-Data-into-a-Vector-Database-for-Retrieval","type":2,"isActive":false},{"label":"Creación de su colección Milvus Lite","href":"Creating-Your-Milvus-Lite-Collection","type":2,"isActive":false},{"label":"Configura tu LLM para RAG","href":"Set-up-Your-LLM-for-RAG","type":2,"isActive":false},{"label":"Instrucciones LLM","href":"LLM-Instructions","type":2,"isActive":false},{"label":"Un ejemplo RAG","href":"A-RAG-Example","type":2,"isActive":false}]}
        \ No newline at end of file
        diff --git a/localization/v2.4.x/site/es/integrations/integrate_with_bentoml.md b/localization/v2.4.x/site/es/integrations/integrate_with_bentoml.md
        index 98c656a25..69f354b11 100644
        --- a/localization/v2.4.x/site/es/integrations/integrate_with_bentoml.md
        +++ b/localization/v2.4.x/site/es/integrations/integrate_with_bentoml.md
        @@ -22,7 +22,8 @@ title: Generación mejorada por recuperación (RAG) con Milvus y BentoML
                   d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
                 >
               
        Open In Colab | GitHub Repository

        Introduction

        Now we need to pair the embeddings and the text chunks. Since the list of embeddings and the list of sentences should match by index, we can enumerate through either list to match them up.

        entries = []
         for city_dict in city_chunks:
             # No need for the embeddings list if get_embeddings already returns a list of lists
        @@ -228,7 +229,7 @@ schema = MilvusClient.create_schema(
         schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
         schema.add_field(field_name="embedding", datatype=DataType.FLOAT_VECTOR, dim=DIMENSION)
         
        Now that we have our schema created and our data field correctly defined, we need to define the index. In terms of search, an "index" defines how we are going to map our data for retrieval. For this project we use the default AUTOINDEX option to index our data.

        Next, we create the collection with the name, schema, and index given above. Finally, we insert the previously processed data.

        # prepare index parameters
         index_params = milvus_client.prepare_index_params()
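         
        The diff truncates this cell; for reference, the remainder as it appears in the code list above:

        # add index
        index_params.add_index(
            field_name="embedding",
            index_type="AUTOINDEX",  # use autoindex instead of other complex indexing method
            metric_type="COSINE",  # L2, COSINE, or IP
        )

        # create collection
        if milvus_client.has_collection(collection_name=COLLECTION_NAME):
            milvus_client.drop_collection(collection_name=COLLECTION_NAME)
        milvus_client.create_collection(
            collection_name=COLLECTION_NAME, schema=schema, index_params=index_params
        )

        # insert all the processed entries at once
        milvus_client.insert(collection_name=COLLECTION_NAME, data=entries)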
        diff --git a/localization/v2.4.x/site/es/integrations/integrate_with_camel.json b/localization/v2.4.x/site/es/integrations/integrate_with_camel.json
        index d2527e135..296a8d514 100644
        --- a/localization/v2.4.x/site/es/integrations/integrate_with_camel.json
        +++ b/localization/v2.4.x/site/es/integrations/integrate_with_camel.json
        @@ -1 +1 @@
        -{"codeList":["$ pip install -U \"camel-ai[all]\" pymilvus\n","import os\nimport requests\n\nos.makedirs(\"local_data\", exist_ok=True)\n\nurl = \"https://arxiv.org/pdf/2303.17760.pdf\"\nresponse = requests.get(url)\nwith open(\"local_data/camel paper.pdf\", \"wb\") as file:\n    file.write(response.content)\n","os.environ[\"OPENAI_API_KEY\"] = \"Your Key\"\n","from camel.embeddings import OpenAIEmbedding\n\nembedding_instance = OpenAIEmbedding()\n","from camel.storages import MilvusStorage\n\nstorage_instance = MilvusStorage(\n    vector_dim=embedding_instance.get_output_dim(),\n    url_and_api_key=(\n        \"./milvus_demo.db\",  # Your Milvus connection URI\n        \"\",  # Your Milvus token\n    ),\n    collection_name=\"camel_paper\",\n)\n","from camel.retrievers import VectorRetriever\n\nvector_retriever = VectorRetriever(\n    embedding_model=embedding_instance, storage=storage_instance\n)\n","vector_retriever.process(content_input_path=\"local_data/camel paper.pdf\")\n","retrieved_info = vector_retriever.query(query=\"What is CAMEL?\", top_k=1)\nprint(retrieved_info)\n","retrieved_info_irrelevant = vector_retriever.query(\n    query=\"Compared with dumpling and rice, which should I take for dinner?\", top_k=1\n)\n\nprint(retrieved_info_irrelevant)\n","from camel.retrievers import AutoRetriever\nfrom camel.types import StorageType\n\nauto_retriever = AutoRetriever(\n    url_and_api_key=(\n        \"./milvus_demo.db\",  # Your Milvus connection URI\n        \"\",  # Your Milvus token\n    ),\n    storage_type=StorageType.MILVUS,\n    embedding_model=embedding_instance,\n)\n\nretrieved_info = auto_retriever.run_vector_retriever(\n    query=\"What is CAMEL-AI\",\n    content_input_paths=[\n        \"local_data/camel paper.pdf\",  # example local path\n        \"https://www.camel-ai.org/\",  # example remote url\n    ],\n    top_k=1,\n    return_detailed_info=True,\n)\n\nprint(retrieved_info)\n","from camel.agents import ChatAgent\nfrom camel.messages import BaseMessage\nfrom camel.types import RoleType\nfrom camel.retrievers import AutoRetriever\nfrom camel.types import StorageType\n\n\ndef single_agent(query: str) -> str:\n    # Set agent role\n    assistant_sys_msg = BaseMessage(\n        role_name=\"Assistant\",\n        role_type=RoleType.ASSISTANT,\n        meta_dict=None,\n        content=\"\"\"You are a helpful assistant to answer question,\n         I will give you the Original Query and Retrieved Context,\n        answer the Original Query based on the Retrieved Context,\n        if you can't answer the question just say I don't know.\"\"\",\n    )\n\n    # Add auto retriever\n    auto_retriever = AutoRetriever(\n        url_and_api_key=(\n            \"./milvus_demo.db\",  # Your Milvus connection URI\n            \"\",  # Your Milvus token\n        ),\n        storage_type=StorageType.MILVUS,\n        embedding_model=embedding_instance,\n    )\n\n    retrieved_info = auto_retriever.run_vector_retriever(\n        query=query,\n        content_input_paths=[\n            \"local_data/camel paper.pdf\",  # example local path\n            \"https://www.camel-ai.org/\",  # example remote url\n        ],\n        # vector_storage_local_path=\"storage_default_run\",\n        top_k=1,\n        return_detailed_info=True,\n    )\n\n    # Pass the retrieved infomation to agent\n    user_msg = BaseMessage.make_user_message(role_name=\"User\", content=retrieved_info)\n    agent = ChatAgent(assistant_sys_msg)\n\n    # Get response\n    assistant_response = agent.step(user_msg)\n  
  return assistant_response.msg.content\n\n\nprint(single_agent(\"What is CAMEL-AI\"))\n","from typing import List\nfrom colorama import Fore\n\nfrom camel.agents.chat_agent import FunctionCallingRecord\nfrom camel.configs import ChatGPTConfig\nfrom camel.functions import (\n    MATH_FUNCS,\n    RETRIEVAL_FUNCS,\n)\nfrom camel.societies import RolePlaying\nfrom camel.types import ModelType\nfrom camel.utils import print_text_animated\n\n\ndef role_playing_with_rag(\n    task_prompt, model_type=ModelType.GPT_4O, chat_turn_limit=10\n) -> None:\n    task_prompt = task_prompt\n\n    user_model_config = ChatGPTConfig(temperature=0.0)\n\n    function_list = [\n        *MATH_FUNCS,\n        *RETRIEVAL_FUNCS,\n    ]\n    assistant_model_config = ChatGPTConfig(\n        tools=function_list,\n        temperature=0.0,\n    )\n\n    role_play_session = RolePlaying(\n        assistant_role_name=\"Searcher\",\n        user_role_name=\"Professor\",\n        assistant_agent_kwargs=dict(\n            model_type=model_type,\n            model_config=assistant_model_config,\n            tools=function_list,\n        ),\n        user_agent_kwargs=dict(\n            model_type=model_type,\n            model_config=user_model_config,\n        ),\n        task_prompt=task_prompt,\n        with_task_specify=False,\n    )\n\n    print(\n        Fore.GREEN\n        + f\"AI Assistant sys message:\\n{role_play_session.assistant_sys_msg}\\n\"\n    )\n    print(Fore.BLUE + f\"AI User sys message:\\n{role_play_session.user_sys_msg}\\n\")\n\n    print(Fore.YELLOW + f\"Original task prompt:\\n{task_prompt}\\n\")\n    print(\n        Fore.CYAN\n        + f\"Specified task prompt:\\n{role_play_session.specified_task_prompt}\\n\"\n    )\n    print(Fore.RED + f\"Final task prompt:\\n{role_play_session.task_prompt}\\n\")\n\n    n = 0\n    input_msg = role_play_session.init_chat()\n    while n < chat_turn_limit:\n        n += 1\n        assistant_response, user_response = role_play_session.step(input_msg)\n\n        if assistant_response.terminated:\n            print(\n                Fore.GREEN\n                + (\n                    \"AI Assistant terminated. Reason: \"\n                    f\"{assistant_response.info['termination_reasons']}.\"\n                )\n            )\n            break\n        if user_response.terminated:\n            print(\n                Fore.GREEN\n                + (\n                    \"AI User terminated. \"\n                    f\"Reason: {user_response.info['termination_reasons']}.\"\n                )\n            )\n            break\n\n        # Print output from the user\n        print_text_animated(Fore.BLUE + f\"AI User:\\n\\n{user_response.msg.content}\\n\")\n\n        # Print output from the assistant, including any function\n        # execution information\n        print_text_animated(Fore.GREEN + \"AI Assistant:\")\n        tool_calls: List[FunctionCallingRecord] = assistant_response.info[\"tool_calls\"]\n        for func_record in tool_calls:\n            print_text_animated(f\"{func_record}\")\n        print_text_animated(f\"{assistant_response.msg.content}\\n\")\n\n        if \"CAMEL_TASK_DONE\" in user_response.msg.content:\n            break\n\n        input_msg = assistant_response.msg\n","role_playing_with_rag(\n    task_prompt=\"\"\"What is the main termination reasons for AI Society\n                   dataset, how many number of messages did camel decided to\n                   limit, what's the value plus 100? 
You should refer to the\n                   content in path camel/local_data/camel paper.pdf\"\"\"\n)\n"],"headingContent":"","anchorList":[{"label":"Retrieval-Augmented Generation (RAG) con Milvus y Camel","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Camel","type":1,"isActive":false},{"label":"Cargar datos","href":"Load-Data","type":2,"isActive":false},{"label":"1. RAG personalizado","href":"1-Customized-RAG","type":2,"isActive":false},{"label":"2. Auto RAG","href":"2-Auto-RAG","type":2,"isActive":false},{"label":"3. Agente Único con Auto RAG","href":"3-Single-Agent-with-Auto-RAG","type":2,"isActive":false},{"label":"4. Role-playing con Auto RAG","href":"4-Role-playing-with-Auto-RAG","type":2,"isActive":false}]}
        \ No newline at end of file
        +{"codeList":["$ pip install -U \"camel-ai[all]\" pymilvus\n","import os\nimport requests\n\nos.makedirs(\"local_data\", exist_ok=True)\n\nurl = \"https://arxiv.org/pdf/2303.17760.pdf\"\nresponse = requests.get(url)\nwith open(\"local_data/camel paper.pdf\", \"wb\") as file:\n    file.write(response.content)\n","os.environ[\"OPENAI_API_KEY\"] = \"Your Key\"\n","from camel.embeddings import OpenAIEmbedding\n\nembedding_instance = OpenAIEmbedding()\n","from camel.storages import MilvusStorage\n\nstorage_instance = MilvusStorage(\n    vector_dim=embedding_instance.get_output_dim(),\n    url_and_api_key=(\n        \"./milvus_demo.db\",  # Your Milvus connection URI\n        \"\",  # Your Milvus token\n    ),\n    collection_name=\"camel_paper\",\n)\n","from camel.retrievers import VectorRetriever\n\nvector_retriever = VectorRetriever(\n    embedding_model=embedding_instance, storage=storage_instance\n)\n","vector_retriever.process(content_input_path=\"local_data/camel paper.pdf\")\n","retrieved_info = vector_retriever.query(query=\"What is CAMEL?\", top_k=1)\nprint(retrieved_info)\n","retrieved_info_irrelevant = vector_retriever.query(\n    query=\"Compared with dumpling and rice, which should I take for dinner?\", top_k=1\n)\n\nprint(retrieved_info_irrelevant)\n","from camel.retrievers import AutoRetriever\nfrom camel.types import StorageType\n\nauto_retriever = AutoRetriever(\n    url_and_api_key=(\n        \"./milvus_demo.db\",  # Your Milvus connection URI\n        \"\",  # Your Milvus token\n    ),\n    storage_type=StorageType.MILVUS,\n    embedding_model=embedding_instance,\n)\n\nretrieved_info = auto_retriever.run_vector_retriever(\n    query=\"What is CAMEL-AI\",\n    content_input_paths=[\n        \"local_data/camel paper.pdf\",  # example local path\n        \"https://www.camel-ai.org/\",  # example remote url\n    ],\n    top_k=1,\n    return_detailed_info=True,\n)\n\nprint(retrieved_info)\n","from camel.agents import ChatAgent\nfrom camel.messages import BaseMessage\nfrom camel.types import RoleType\nfrom camel.retrievers import AutoRetriever\nfrom camel.types import StorageType\n\n\ndef single_agent(query: str) -> str:\n    # Set agent role\n    assistant_sys_msg = BaseMessage(\n        role_name=\"Assistant\",\n        role_type=RoleType.ASSISTANT,\n        meta_dict=None,\n        content=\"\"\"You are a helpful assistant to answer question,\n         I will give you the Original Query and Retrieved Context,\n        answer the Original Query based on the Retrieved Context,\n        if you can't answer the question just say I don't know.\"\"\",\n    )\n\n    # Add auto retriever\n    auto_retriever = AutoRetriever(\n        url_and_api_key=(\n            \"./milvus_demo.db\",  # Your Milvus connection URI\n            \"\",  # Your Milvus token\n        ),\n        storage_type=StorageType.MILVUS,\n        embedding_model=embedding_instance,\n    )\n\n    retrieved_info = auto_retriever.run_vector_retriever(\n        query=query,\n        content_input_paths=[\n            \"local_data/camel paper.pdf\",  # example local path\n            \"https://www.camel-ai.org/\",  # example remote url\n        ],\n        # vector_storage_local_path=\"storage_default_run\",\n        top_k=1,\n        return_detailed_info=True,\n    )\n\n    # Pass the retrieved infomation to agent\n    user_msg = BaseMessage.make_user_message(role_name=\"User\", content=retrieved_info)\n    agent = ChatAgent(assistant_sys_msg)\n\n    # Get response\n    assistant_response = agent.step(user_msg)\n  
  return assistant_response.msg.content\n\n\nprint(single_agent(\"What is CAMEL-AI\"))\n","from typing import List\nfrom colorama import Fore\n\nfrom camel.agents.chat_agent import FunctionCallingRecord\nfrom camel.configs import ChatGPTConfig\nfrom camel.functions import (\n    MATH_FUNCS,\n    RETRIEVAL_FUNCS,\n)\nfrom camel.societies import RolePlaying\nfrom camel.types import ModelType\nfrom camel.utils import print_text_animated\n\n\ndef role_playing_with_rag(\n    task_prompt, model_type=ModelType.GPT_4O, chat_turn_limit=10\n) -> None:\n    task_prompt = task_prompt\n\n    user_model_config = ChatGPTConfig(temperature=0.0)\n\n    function_list = [\n        *MATH_FUNCS,\n        *RETRIEVAL_FUNCS,\n    ]\n    assistant_model_config = ChatGPTConfig(\n        tools=function_list,\n        temperature=0.0,\n    )\n\n    role_play_session = RolePlaying(\n        assistant_role_name=\"Searcher\",\n        user_role_name=\"Professor\",\n        assistant_agent_kwargs=dict(\n            model_type=model_type,\n            model_config=assistant_model_config,\n            tools=function_list,\n        ),\n        user_agent_kwargs=dict(\n            model_type=model_type,\n            model_config=user_model_config,\n        ),\n        task_prompt=task_prompt,\n        with_task_specify=False,\n    )\n\n    print(\n        Fore.GREEN\n        + f\"AI Assistant sys message:\\n{role_play_session.assistant_sys_msg}\\n\"\n    )\n    print(Fore.BLUE + f\"AI User sys message:\\n{role_play_session.user_sys_msg}\\n\")\n\n    print(Fore.YELLOW + f\"Original task prompt:\\n{task_prompt}\\n\")\n    print(\n        Fore.CYAN\n        + f\"Specified task prompt:\\n{role_play_session.specified_task_prompt}\\n\"\n    )\n    print(Fore.RED + f\"Final task prompt:\\n{role_play_session.task_prompt}\\n\")\n\n    n = 0\n    input_msg = role_play_session.init_chat()\n    while n < chat_turn_limit:\n        n += 1\n        assistant_response, user_response = role_play_session.step(input_msg)\n\n        if assistant_response.terminated:\n            print(\n                Fore.GREEN\n                + (\n                    \"AI Assistant terminated. Reason: \"\n                    f\"{assistant_response.info['termination_reasons']}.\"\n                )\n            )\n            break\n        if user_response.terminated:\n            print(\n                Fore.GREEN\n                + (\n                    \"AI User terminated. \"\n                    f\"Reason: {user_response.info['termination_reasons']}.\"\n                )\n            )\n            break\n\n        # Print output from the user\n        print_text_animated(Fore.BLUE + f\"AI User:\\n\\n{user_response.msg.content}\\n\")\n\n        # Print output from the assistant, including any function\n        # execution information\n        print_text_animated(Fore.GREEN + \"AI Assistant:\")\n        tool_calls: List[FunctionCallingRecord] = assistant_response.info[\"tool_calls\"]\n        for func_record in tool_calls:\n            print_text_animated(f\"{func_record}\")\n        print_text_animated(f\"{assistant_response.msg.content}\\n\")\n\n        if \"CAMEL_TASK_DONE\" in user_response.msg.content:\n            break\n\n        input_msg = assistant_response.msg\n","role_playing_with_rag(\n    task_prompt=\"\"\"What is the main termination reasons for AI Society\n                   dataset, how many number of messages did camel decided to\n                   limit, what's the value plus 100? 
You should refer to the\n                   content in path camel/local_data/camel paper.pdf\"\"\"\n)\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and Camel","anchorList":[{"label":"Retrieval-Augmented Generation (RAG) con Milvus y Camel","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Camel","type":1,"isActive":false},{"label":"Cargar datos","href":"Load-Data","type":2,"isActive":false},{"label":"1. RAG personalizado","href":"1-Customized-RAG","type":2,"isActive":false},{"label":"2. Auto RAG","href":"2-Auto-RAG","type":2,"isActive":false},{"label":"3. Agente Único con Auto RAG","href":"3-Single-Agent-with-Auto-RAG","type":2,"isActive":false},{"label":"4. Role-playing con Auto RAG","href":"4-Role-playing-with-Auto-RAG","type":2,"isActive":false}]}
        \ No newline at end of file
        diff --git a/localization/v2.4.x/site/es/integrations/integrate_with_camel.md b/localization/v2.4.x/site/es/integrations/integrate_with_camel.md
        index 420306fb9..ec8a5d613 100644
        --- a/localization/v2.4.x/site/es/integrations/integrate_with_camel.md
        +++ b/localization/v2.4.x/site/es/integrations/integrate_with_camel.md
        @@ -22,7 +22,8 @@ title: Generación mejorada por recuperación (RAG) con Milvus y BentoML
                   d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
                 >
               
        Open In Colab | GitHub Repository

        This guide shows how to build a Retrieval-Augmented Generation (RAG) system using CAMEL and Milvus.

        The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

        CAMEL is a multi-agent framework. Milvus is the world's most advanced open-source vector database, built to power embedding similarity search and AI applications.
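
        For orientation, the core retrieval setup from the code list above: a MilvusStorage backed by a local Milvus Lite file is wrapped in a VectorRetriever, the downloaded paper is ingested, and the store is queried.

        from camel.embeddings import OpenAIEmbedding
        from camel.storages import MilvusStorage
        from camel.retrievers import VectorRetriever

        embedding_instance = OpenAIEmbedding()

        # Milvus Lite: a local file path serves as the connection URI; the token is empty.
        storage_instance = MilvusStorage(
            vector_dim=embedding_instance.get_output_dim(),
            url_and_api_key=("./milvus_demo.db", ""),
            collection_name="camel_paper",
        )

        vector_retriever = VectorRetriever(
            embedding_model=embedding_instance, storage=storage_instance
        )

        # Chunk, embed, and store the paper, then run a similarity query against it.
        vector_retriever.process(content_input_path="local_data/camel paper.pdf")
        retrieved_info = vector_retriever.query(query="What is CAMEL?", top_k=1)
        print(retrieved_info)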

        diff --git a/localization/v2.4.x/site/es/integrations/integrate_with_dspy.json b/localization/v2.4.x/site/es/integrations/integrate_with_dspy.json index 6bc0e4d77..7cca24c33 100644 --- a/localization/v2.4.x/site/es/integrations/integrate_with_dspy.json +++ b/localization/v2.4.x/site/es/integrations/integrate_with_dspy.json @@ -1 +1 @@ -{"codeList":["$ pip install \"dspy-ai[milvus]\"\n$ pip install -U pymilvus\n","from dspy.datasets import HotPotQA\n\n# Load the dataset.\ndataset = HotPotQA(\n train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0\n)\n\n# Tell DSPy that the 'question' field is the input. Any other fields are labels and/or metadata.\ntrainset = [x.with_inputs(\"question\") for x in dataset.train]\ndevset = [x.with_inputs(\"question\") for x in dataset.dev]\n","import requests\nimport os\n\nos.environ[\"OPENAI_API_KEY\"] = \"\"\nMILVUS_URI = \"example.db\"\nMILVUS_TOKEN = \"\"\n\nfrom pymilvus import MilvusClient, DataType, Collection\nfrom dspy.retrieve.milvus_rm import openai_embedding_function\n\nclient = MilvusClient(uri=MILVUS_URI, token=MILVUS_TOKEN)\n\nif \"dspy_example\" not in client.list_collections():\n client.create_collection(\n collection_name=\"dspy_example\",\n overwrite=True,\n dimension=1536,\n primary_field_name=\"id\",\n vector_field_name=\"embedding\",\n id_type=\"int\",\n metric_type=\"IP\",\n max_length=65535,\n enable_dynamic=True,\n )\ntext = requests.get(\n \"https://raw.githubusercontent.com/wxywb/dspy_dataset_sample/master/sample_data.txt\"\n).text\n\nfor idx, passage in enumerate(text.split(\"\\n\")):\n if len(passage) == 0:\n continue\n client.insert(\n collection_name=\"dspy_example\",\n data=[\n {\n \"id\": idx,\n \"embedding\": openai_embedding_function(passage)[0],\n \"text\": passage,\n }\n ],\n )\n","from dspy.retrieve.milvus_rm import MilvusRM\nimport dspy\n\nretriever_model = MilvusRM(\n collection_name=\"dspy_example\",\n uri=MILVUS_URI,\n token=MILVUS_TOKEN, # ignore this if no token is required for Milvus connection\n embedding_function=openai_embedding_function,\n)\nturbo = dspy.OpenAI(model=\"gpt-3.5-turbo\")\ndspy.settings.configure(lm=turbo)\n","class GenerateAnswer(dspy.Signature):\n \"\"\"Answer questions with short factoid answers.\"\"\"\n\n context = dspy.InputField(desc=\"may contain relevant facts\")\n question = dspy.InputField()\n answer = dspy.OutputField(desc=\"often between 1 and 5 words\")\n","class RAG(dspy.Module):\n def __init__(self, rm):\n super().__init__()\n self.retrieve = rm\n\n # This signature indicates the task imposed on the COT module.\n self.generate_answer = dspy.ChainOfThought(GenerateAnswer)\n\n def forward(self, question):\n # Use milvus_rm to retrieve context for the question.\n context = self.retrieve(question).passages\n # COT module takes \"context, query\" and output \"answer\".\n prediction = self.generate_answer(context=context, question=question)\n return dspy.Prediction(\n context=[item.long_text for item in context], answer=prediction.answer\n )\n","rag = RAG(retriever_model)\nprint(rag(\"who write At My Window\").answer)\n","from dspy.evaluate.evaluate import Evaluate\nfrom dspy.datasets import HotPotQA\n\nevaluate_on_hotpotqa = Evaluate(\n devset=devset, num_threads=1, display_progress=False, display_table=5\n)\n\nmetric = dspy.evaluate.answer_exact_match\nscore = evaluate_on_hotpotqa(rag, metric=metric)\nprint(\"rag:\", score)\n","from dspy.teleprompt import BootstrapFewShot\n\n# Validation logic: check that the predicted answer is correct.# Also check that the 
retrieved context does contain that answer.\n\n\ndef validate_context_and_answer(example, pred, trace=None):\n answer_EM = dspy.evaluate.answer_exact_match(example, pred)\n answer_PM = dspy.evaluate.answer_passage_match(example, pred)\n return answer_EM and answer_PM\n\n\n# Set up a basic teleprompter, which will compile our RAG program.\nteleprompter = BootstrapFewShot(metric=validate_context_and_answer)\n\n# Compile!\ncompiled_rag = teleprompter.compile(rag, trainset=trainset)\n\n# Now compiled_rag is optimized and ready to answer your new question!\n# Now, let’s evaluate the compiled RAG program.\nscore = evaluate_on_hotpotqa(compiled_rag, metric=metric)\nprint(score)\nprint(\"compile_rag:\", score)\n"],"headingContent":"","anchorList":[{"label":"Integrar Milvus con DSPy","href":"Integrate-Milvus-with-DSPy","type":1,"isActive":false},{"label":"Qué es DSPy","href":"What-is-DSPy","type":2,"isActive":false},{"label":"Ventajas del uso de DSPy","href":"Benefits-of-using-DSPy","type":2,"isActive":false},{"label":"Módulos","href":"Modules","type":2,"isActive":false},{"label":"Por qué Milvus en DSPy","href":"Why-Milvus-in-DSPy","type":2,"isActive":false},{"label":"Ejemplos","href":"Examples","type":2,"isActive":false},{"label":"Resumen","href":"Summary","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install \"dspy-ai[milvus]\"\n$ pip install -U pymilvus\n","from dspy.datasets import HotPotQA\n\n# Load the dataset.\ndataset = HotPotQA(\n train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0\n)\n\n# Tell DSPy that the 'question' field is the input. Any other fields are labels and/or metadata.\ntrainset = [x.with_inputs(\"question\") for x in dataset.train]\ndevset = [x.with_inputs(\"question\") for x in dataset.dev]\n","import requests\nimport os\n\nos.environ[\"OPENAI_API_KEY\"] = \"\"\nMILVUS_URI = \"example.db\"\nMILVUS_TOKEN = \"\"\n\nfrom pymilvus import MilvusClient, DataType, Collection\nfrom dspy.retrieve.milvus_rm import openai_embedding_function\n\nclient = MilvusClient(uri=MILVUS_URI, token=MILVUS_TOKEN)\n\nif \"dspy_example\" not in client.list_collections():\n client.create_collection(\n collection_name=\"dspy_example\",\n overwrite=True,\n dimension=1536,\n primary_field_name=\"id\",\n vector_field_name=\"embedding\",\n id_type=\"int\",\n metric_type=\"IP\",\n max_length=65535,\n enable_dynamic=True,\n )\ntext = requests.get(\n \"https://raw.githubusercontent.com/wxywb/dspy_dataset_sample/master/sample_data.txt\"\n).text\n\nfor idx, passage in enumerate(text.split(\"\\n\")):\n if len(passage) == 0:\n continue\n client.insert(\n collection_name=\"dspy_example\",\n data=[\n {\n \"id\": idx,\n \"embedding\": openai_embedding_function(passage)[0],\n \"text\": passage,\n }\n ],\n )\n","from dspy.retrieve.milvus_rm import MilvusRM\nimport dspy\n\nretriever_model = MilvusRM(\n collection_name=\"dspy_example\",\n uri=MILVUS_URI,\n token=MILVUS_TOKEN, # ignore this if no token is required for Milvus connection\n embedding_function=openai_embedding_function,\n)\nturbo = dspy.OpenAI(model=\"gpt-3.5-turbo\")\ndspy.settings.configure(lm=turbo)\n","class GenerateAnswer(dspy.Signature):\n \"\"\"Answer questions with short factoid answers.\"\"\"\n\n context = dspy.InputField(desc=\"may contain relevant facts\")\n question = dspy.InputField()\n answer = dspy.OutputField(desc=\"often between 1 and 5 words\")\n","class RAG(dspy.Module):\n def __init__(self, rm):\n super().__init__()\n self.retrieve = rm\n\n # This signature indicates the task imposed on 
the COT module.\n self.generate_answer = dspy.ChainOfThought(GenerateAnswer)\n\n def forward(self, question):\n # Use milvus_rm to retrieve context for the question.\n context = self.retrieve(question).passages\n # COT module takes \"context, query\" and output \"answer\".\n prediction = self.generate_answer(context=context, question=question)\n return dspy.Prediction(\n context=[item.long_text for item in context], answer=prediction.answer\n )\n","rag = RAG(retriever_model)\nprint(rag(\"who write At My Window\").answer)\n","from dspy.evaluate.evaluate import Evaluate\nfrom dspy.datasets import HotPotQA\n\nevaluate_on_hotpotqa = Evaluate(\n devset=devset, num_threads=1, display_progress=False, display_table=5\n)\n\nmetric = dspy.evaluate.answer_exact_match\nscore = evaluate_on_hotpotqa(rag, metric=metric)\nprint(\"rag:\", score)\n","from dspy.teleprompt import BootstrapFewShot\n\n# Validation logic: check that the predicted answer is correct.# Also check that the retrieved context does contain that answer.\n\n\ndef validate_context_and_answer(example, pred, trace=None):\n answer_EM = dspy.evaluate.answer_exact_match(example, pred)\n answer_PM = dspy.evaluate.answer_passage_match(example, pred)\n return answer_EM and answer_PM\n\n\n# Set up a basic teleprompter, which will compile our RAG program.\nteleprompter = BootstrapFewShot(metric=validate_context_and_answer)\n\n# Compile!\ncompiled_rag = teleprompter.compile(rag, trainset=trainset)\n\n# Now compiled_rag is optimized and ready to answer your new question!\n# Now, let’s evaluate the compiled RAG program.\nscore = evaluate_on_hotpotqa(compiled_rag, metric=metric)\nprint(score)\nprint(\"compile_rag:\", score)\n"],"headingContent":"Integrate Milvus with DSPy","anchorList":[{"label":"Integrar Milvus con DSPy","href":"Integrate-Milvus-with-DSPy","type":1,"isActive":false},{"label":"Qué es DSPy","href":"What-is-DSPy","type":2,"isActive":false},{"label":"Ventajas del uso de DSPy","href":"Benefits-of-using-DSPy","type":2,"isActive":false},{"label":"Módulos","href":"Modules","type":2,"isActive":false},{"label":"Por qué Milvus en DSPy","href":"Why-Milvus-in-DSPy","type":2,"isActive":false},{"label":"Ejemplos","href":"Examples","type":2,"isActive":false},{"label":"Resumen","href":"Summary","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/integrations/integrate_with_dspy.md b/localization/v2.4.x/site/es/integrations/integrate_with_dspy.md index 3dfdbf0b0..3c278549f 100644 --- a/localization/v2.4.x/site/es/integrations/integrate_with_dspy.md +++ b/localization/v2.4.x/site/es/integrations/integrate_with_dspy.md @@ -20,7 +20,8 @@ title: Integrar Milvus con DSPy d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

        Open In Colab | GitHub Repository

        What is DSPy

        We include brief descriptions of the context and answer fields to define clearer guidelines on what the model will receive and should generate.
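
        For reference, the signature this paragraph describes, as it appears in the code list above:

        class GenerateAnswer(dspy.Signature):
            """Answer questions with short factoid answers."""

            context = dspy.InputField(desc="may contain relevant facts")
            question = dspy.InputField()
            answer = dspy.OutputField(desc="often between 1 and 5 words")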

        Building the pipeline

        Now let's define the RAG pipeline.

        class RAG(dspy.Module):
             def __init__(self, rm):
                 super().__init__()
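                 # The diff truncates this cell; the remainder, from the code list above:
                 self.retrieve = rm

                 # This signature indicates the task imposed on the COT module.
                 self.generate_answer = dspy.ChainOfThought(GenerateAnswer)

             def forward(self, question):
                 # Use milvus_rm to retrieve context for the question.
                 context = self.retrieve(question).passages
                 # COT module takes "context, query" and outputs "answer".
                 prediction = self.generate_answer(context=context, question=question)
                 return dspy.Prediction(
                     context=[item.long_text for item in context], answer=prediction.answer
                 )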
        diff --git a/localization/v2.4.x/site/es/integrations/integrate_with_haystack.json b/localization/v2.4.x/site/es/integrations/integrate_with_haystack.json
        index cd41a14e7..1fb0f0d72 100644
        --- a/localization/v2.4.x/site/es/integrations/integrate_with_haystack.json
        +++ b/localization/v2.4.x/site/es/integrations/integrate_with_haystack.json
        @@ -1 +1 @@
        -{"codeList":["! pip install --upgrade --quiet pymilvus milvus-haystack markdown-it-py mdit_plain\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","import os\nimport urllib.request\n\nurl = \"https://www.gutenberg.org/cache/epub/7785/pg7785.txt\"\nfile_path = \"./davinci.txt\"\n\nif not os.path.exists(file_path):\n    urllib.request.urlretrieve(url, file_path)\n","from haystack import Pipeline\nfrom haystack.components.converters import MarkdownToDocument\nfrom haystack.components.embedders import OpenAIDocumentEmbedder, OpenAITextEmbedder\nfrom haystack.components.preprocessors import DocumentSplitter\nfrom haystack.components.writers import DocumentWriter\nfrom haystack.utils import Secret\n\nfrom milvus_haystack import MilvusDocumentStore\nfrom milvus_haystack.milvus_embedding_retriever import MilvusEmbeddingRetriever\n\n\ndocument_store = MilvusDocumentStore(\n    connection_args={\"uri\": \"./milvus.db\"},\n    # connection_args={\"uri\": \"http://localhost:19530\"},\n    # connection_args={\"uri\": YOUR_ZILLIZ_CLOUD_URI, \"token\": Secret.from_env_var(\"ZILLIZ_CLOUD_API_KEY\")},\n    drop_old=True,\n)\n","indexing_pipeline = Pipeline()\nindexing_pipeline.add_component(\"converter\", MarkdownToDocument())\nindexing_pipeline.add_component(\n    \"splitter\", DocumentSplitter(split_by=\"sentence\", split_length=2)\n)\nindexing_pipeline.add_component(\"embedder\", OpenAIDocumentEmbedder())\nindexing_pipeline.add_component(\"writer\", DocumentWriter(document_store))\nindexing_pipeline.connect(\"converter\", \"splitter\")\nindexing_pipeline.connect(\"splitter\", \"embedder\")\nindexing_pipeline.connect(\"embedder\", \"writer\")\nindexing_pipeline.run({\"converter\": {\"sources\": [file_path]}})\n\nprint(\"Number of documents:\", document_store.count_documents())\n","question = 'Where is the painting \"Warrior\" currently stored?'\n\nretrieval_pipeline = Pipeline()\nretrieval_pipeline.add_component(\"embedder\", OpenAITextEmbedder())\nretrieval_pipeline.add_component(\n    \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nretrieval_pipeline.connect(\"embedder\", \"retriever\")\n\nretrieval_results = retrieval_pipeline.run({\"embedder\": {\"text\": question}})\n\nfor doc in retrieval_results[\"retriever\"][\"documents\"]:\n    print(doc.content)\n    print(\"-\" * 10)\n","from haystack.utils import Secret\nfrom haystack.components.builders import PromptBuilder\nfrom haystack.components.generators import OpenAIGenerator\n\nprompt_template = \"\"\"Answer the following query based on the provided context. 
If the context does\n                     not include an answer, reply with 'I don't know'.\\n\n                     Query: {{query}}\n                     Documents:\n                     {% for doc in documents %}\n                        {{ doc.content }}\n                     {% endfor %}\n                     Answer:\n                  \"\"\"\n\nrag_pipeline = Pipeline()\nrag_pipeline.add_component(\"text_embedder\", OpenAITextEmbedder())\nrag_pipeline.add_component(\n    \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nrag_pipeline.add_component(\"prompt_builder\", PromptBuilder(template=prompt_template))\nrag_pipeline.add_component(\n    \"generator\",\n    OpenAIGenerator(\n        api_key=Secret.from_token(os.getenv(\"OPENAI_API_KEY\")),\n        generation_kwargs={\"temperature\": 0},\n    ),\n)\nrag_pipeline.connect(\"text_embedder.embedding\", \"retriever.query_embedding\")\nrag_pipeline.connect(\"retriever.documents\", \"prompt_builder.documents\")\nrag_pipeline.connect(\"prompt_builder\", \"generator\")\n\nresults = rag_pipeline.run(\n    {\n        \"text_embedder\": {\"text\": question},\n        \"prompt_builder\": {\"query\": question},\n    }\n)\nprint(\"RAG answer:\", results[\"generator\"][\"replies\"][0])\n"],"headingContent":"","anchorList":[{"label":"Retrieval-Augmented Generation (RAG) con Milvus y Haystack","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Haystack","type":1,"isActive":false},{"label":"Requisitos previos","href":"Prerequisites","type":2,"isActive":false},{"label":"Preparar los datos","href":"Prepare-the-data","type":2,"isActive":false},{"label":"Crear el proceso de indexación","href":"Create-the-indexing-Pipeline","type":2,"isActive":false},{"label":"Crear la canalización de recuperación","href":"Create-the-retrieval-pipeline","type":2,"isActive":false},{"label":"Crear la canalización RAG","href":"Create-the-RAG-pipeline","type":2,"isActive":false}]}
        \ No newline at end of file
        +{"codeList":["! pip install --upgrade --quiet pymilvus milvus-haystack markdown-it-py mdit_plain\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","import os\nimport urllib.request\n\nurl = \"https://www.gutenberg.org/cache/epub/7785/pg7785.txt\"\nfile_path = \"./davinci.txt\"\n\nif not os.path.exists(file_path):\n    urllib.request.urlretrieve(url, file_path)\n","from haystack import Pipeline\nfrom haystack.components.converters import MarkdownToDocument\nfrom haystack.components.embedders import OpenAIDocumentEmbedder, OpenAITextEmbedder\nfrom haystack.components.preprocessors import DocumentSplitter\nfrom haystack.components.writers import DocumentWriter\nfrom haystack.utils import Secret\n\nfrom milvus_haystack import MilvusDocumentStore\nfrom milvus_haystack.milvus_embedding_retriever import MilvusEmbeddingRetriever\n\n\ndocument_store = MilvusDocumentStore(\n    connection_args={\"uri\": \"./milvus.db\"},\n    # connection_args={\"uri\": \"http://localhost:19530\"},\n    # connection_args={\"uri\": YOUR_ZILLIZ_CLOUD_URI, \"token\": Secret.from_env_var(\"ZILLIZ_CLOUD_API_KEY\")},\n    drop_old=True,\n)\n","indexing_pipeline = Pipeline()\nindexing_pipeline.add_component(\"converter\", MarkdownToDocument())\nindexing_pipeline.add_component(\n    \"splitter\", DocumentSplitter(split_by=\"sentence\", split_length=2)\n)\nindexing_pipeline.add_component(\"embedder\", OpenAIDocumentEmbedder())\nindexing_pipeline.add_component(\"writer\", DocumentWriter(document_store))\nindexing_pipeline.connect(\"converter\", \"splitter\")\nindexing_pipeline.connect(\"splitter\", \"embedder\")\nindexing_pipeline.connect(\"embedder\", \"writer\")\nindexing_pipeline.run({\"converter\": {\"sources\": [file_path]}})\n\nprint(\"Number of documents:\", document_store.count_documents())\n","question = 'Where is the painting \"Warrior\" currently stored?'\n\nretrieval_pipeline = Pipeline()\nretrieval_pipeline.add_component(\"embedder\", OpenAITextEmbedder())\nretrieval_pipeline.add_component(\n    \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nretrieval_pipeline.connect(\"embedder\", \"retriever\")\n\nretrieval_results = retrieval_pipeline.run({\"embedder\": {\"text\": question}})\n\nfor doc in retrieval_results[\"retriever\"][\"documents\"]:\n    print(doc.content)\n    print(\"-\" * 10)\n","from haystack.utils import Secret\nfrom haystack.components.builders import PromptBuilder\nfrom haystack.components.generators import OpenAIGenerator\n\nprompt_template = \"\"\"Answer the following query based on the provided context. 
If the context does\n                     not include an answer, reply with 'I don't know'.\\n\n                     Query: {{query}}\n                     Documents:\n                     {% for doc in documents %}\n                        {{ doc.content }}\n                     {% endfor %}\n                     Answer:\n                  \"\"\"\n\nrag_pipeline = Pipeline()\nrag_pipeline.add_component(\"text_embedder\", OpenAITextEmbedder())\nrag_pipeline.add_component(\n    \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nrag_pipeline.add_component(\"prompt_builder\", PromptBuilder(template=prompt_template))\nrag_pipeline.add_component(\n    \"generator\",\n    OpenAIGenerator(\n        api_key=Secret.from_token(os.getenv(\"OPENAI_API_KEY\")),\n        generation_kwargs={\"temperature\": 0},\n    ),\n)\nrag_pipeline.connect(\"text_embedder.embedding\", \"retriever.query_embedding\")\nrag_pipeline.connect(\"retriever.documents\", \"prompt_builder.documents\")\nrag_pipeline.connect(\"prompt_builder\", \"generator\")\n\nresults = rag_pipeline.run(\n    {\n        \"text_embedder\": {\"text\": question},\n        \"prompt_builder\": {\"query\": question},\n    }\n)\nprint(\"RAG answer:\", results[\"generator\"][\"replies\"][0])\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and Haystack","anchorList":[{"label":"Retrieval-Augmented Generation (RAG) con Milvus y Haystack","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Haystack","type":1,"isActive":false},{"label":"Requisitos previos","href":"Prerequisites","type":2,"isActive":false},{"label":"Preparar los datos","href":"Prepare-the-data","type":2,"isActive":false},{"label":"Crear el proceso de indexación","href":"Create-the-indexing-Pipeline","type":2,"isActive":false},{"label":"Crear la canalización de recuperación","href":"Create-the-retrieval-pipeline","type":2,"isActive":false},{"label":"Crear la canalización RAG","href":"Create-the-RAG-pipeline","type":2,"isActive":false}]}
        \ No newline at end of file
        diff --git a/localization/v2.4.x/site/es/integrations/integrate_with_haystack.md b/localization/v2.4.x/site/es/integrations/integrate_with_haystack.md
        index 65d366edf..79692372c 100644
        --- a/localization/v2.4.x/site/es/integrations/integrate_with_haystack.md
        +++ b/localization/v2.4.x/site/es/integrations/integrate_with_haystack.md
        @@ -3,7 +3,7 @@ id: integrate_with_haystack.md
         summary: >-
           Esta guía muestra cómo construir un sistema de Generación Mejorada por
           Recuperación (RAG) utilizando Haystack y Milvus.
        -title: Generación mejorada por recuperación (RAG) con Milvus y Haystack
        +title: Retrieval-Augmented Generation (RAG) con Milvus y Haystack
         ---
         

Retrieval-Augmented Generation (RAG) with Milvus and Haystack

-Open In Colab
+Open In Colab +GitHub Repository

This guide shows how to build a Retrieval-Augmented Generation (RAG) system using Haystack and Milvus.

A RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

Haystack is deepset's open-source Python framework for building custom applications with large language models (LLMs). Milvus is the world's most advanced open-source vector database, built to power embedding similarity search and AI applications.
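As a minimal sketch of the retrieval stage described above, drawn from this guide's own code listing (it assumes milvus-haystack is installed, OPENAI_API_KEY is set, and documents were previously indexed into ./milvus.db):

```python
from haystack import Pipeline
from haystack.components.embedders import OpenAITextEmbedder
from milvus_haystack import MilvusDocumentStore
from milvus_haystack.milvus_embedding_retriever import MilvusEmbeddingRetriever

# Reuse the Milvus Lite file that the indexing pipeline wrote to.
document_store = MilvusDocumentStore(connection_args={"uri": "./milvus.db"})

retrieval_pipeline = Pipeline()
retrieval_pipeline.add_component("embedder", OpenAITextEmbedder())
retrieval_pipeline.add_component(
    "retriever", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)
)
retrieval_pipeline.connect("embedder", "retriever")

results = retrieval_pipeline.run({"embedder": {"text": 'Where is the painting "Warrior" stored?'}})
for doc in results["retriever"]["documents"]:
    print(doc.content)
```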

        @@ -43,7 +44,7 @@ title: Generación mejorada por recuperación (RAG) con Milvus y Haystack
        ! pip install --upgrade --quiet pymilvus milvus-haystack markdown-it-py mdit_plain
         
-If you are using Google Colab, to enable the dependencies you have just installed, you may need to restart the runtime (click the "Runtime" menu at the top of the screen and select "Restart session" from the dropdown menu).
+If you are using Google Colab, to enable the newly installed dependencies, you may need to restart the runtime (click the "Runtime" menu at the top of the screen and select "Restart session" from the dropdown menu).

We will use OpenAI models. You should have the API key OPENAI_API_KEY ready as an environment variable.

        import os
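The snippet is truncated here by the diff; per this guide's code listing, the full setup is simply (the key value is a placeholder):

```python
import os

# Provide the OpenAI API key as an environment variable (placeholder shown).
os.environ["OPENAI_API_KEY"] = "sk-***********"
```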
        diff --git a/localization/v2.4.x/site/es/integrations/integrate_with_hugging-face.json b/localization/v2.4.x/site/es/integrations/integrate_with_hugging-face.json
        index fc2350959..bd8df6114 100644
        --- a/localization/v2.4.x/site/es/integrations/integrate_with_hugging-face.json
        +++ b/localization/v2.4.x/site/es/integrations/integrate_with_hugging-face.json
        @@ -1 +1 @@
        -{"codeList":["$ pip install --upgrade pymilvus transformers datasets torch\n","from datasets import load_dataset\n\n\nDATASET = \"squad\"  # Name of dataset from HuggingFace Datasets\nINSERT_RATIO = 0.001  # Ratio of example dataset to be inserted\n\ndata = load_dataset(DATASET, split=\"validation\")\n# Generates a fixed subset. To generate a random subset, remove the seed.\ndata = data.train_test_split(test_size=INSERT_RATIO, seed=42)[\"test\"]\n# Clean up the data structure in the dataset.\ndata = data.map(\n    lambda val: {\"answer\": val[\"answers\"][\"text\"][0]},\n    remove_columns=[\"id\", \"answers\", \"context\"],\n)\n\n# View summary of example data\nprint(data)\n","from transformers import AutoTokenizer, AutoModel\nimport torch\n\nMODEL = (\n    \"sentence-transformers/all-MiniLM-L6-v2\"  # Name of model from HuggingFace Models\n)\nINFERENCE_BATCH_SIZE = 64  # Batch size of model inference\n\n# Load tokenizer & model from HuggingFace Hub\ntokenizer = AutoTokenizer.from_pretrained(MODEL)\nmodel = AutoModel.from_pretrained(MODEL)\n\n\ndef encode_text(batch):\n    # Tokenize sentences\n    encoded_input = tokenizer(\n        batch[\"question\"], padding=True, truncation=True, return_tensors=\"pt\"\n    )\n\n    # Compute token embeddings\n    with torch.no_grad():\n        model_output = model(**encoded_input)\n\n    # Perform pooling\n    token_embeddings = model_output[0]\n    attention_mask = encoded_input[\"attention_mask\"]\n    input_mask_expanded = (\n        attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n    )\n    sentence_embeddings = torch.sum(\n        token_embeddings * input_mask_expanded, 1\n    ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n\n    # Normalize embeddings\n    batch[\"question_embedding\"] = torch.nn.functional.normalize(\n        sentence_embeddings, p=2, dim=1\n    )\n    return batch\n\n\ndata = data.map(encode_text, batched=True, batch_size=INFERENCE_BATCH_SIZE)\ndata_list = data.to_list()\n","from pymilvus import MilvusClient\n\n\nMILVUS_URI = \"./huggingface_milvus_test.db\"  # Connection URI\nCOLLECTION_NAME = \"huggingface_test\"  # Collection name\nDIMENSION = 384  # Embedding dimension depending on model\n\nmilvus_client = MilvusClient(MILVUS_URI)\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n    collection_name=COLLECTION_NAME,\n    dimension=DIMENSION,\n    auto_id=True,  # Enable auto id\n    enable_dynamic_field=True,  # Enable dynamic fields\n    vector_field_name=\"question_embedding\",  # Map vector field name and embedding column in dataset\n    consistency_level=\"Strong\",  # To enable search with latest data\n)\n","milvus_client.insert(collection_name=COLLECTION_NAME, data=data_list)\n","questions = {\n    \"question\": [\n        \"What is LGM?\",\n        \"When did Massachusetts first mandate that children be educated in schools?\",\n    ]\n}\n\n# Generate question embeddings\nquestion_embeddings = [v.tolist() for v in encode_text(questions)[\"question_embedding\"]]\n\n# Search across Milvus\nsearch_results = milvus_client.search(\n    collection_name=COLLECTION_NAME,\n    data=question_embeddings,\n    limit=3,  # How many search results to output\n    output_fields=[\"answer\", \"question\"],  # Include these fields in search results\n)\n\n# Print out results\nfor q, res in zip(questions[\"question\"], search_results):\n    print(\"Question:\", q)\n    for r in 
res:\n        print(\n            {\n                \"answer\": r[\"entity\"][\"answer\"],\n                \"score\": r[\"distance\"],\n                \"original question\": r[\"entity\"][\"question\"],\n            }\n        )\n    print(\"\\n\")\n"],"headingContent":"","anchorList":[{"label":"Respuesta a preguntas con Milvus y Hugging Face","href":"Question-Answering-Using-Milvus-and-Hugging-Face","type":1,"isActive":false},{"label":"Antes de empezar","href":"Before-you-begin","type":2,"isActive":false},{"label":"Preparar los datos","href":"Prepare-data","type":2,"isActive":false},{"label":"Insertar datos","href":"Insert-data","type":2,"isActive":false},{"label":"Hacer preguntas","href":"Ask-questions","type":2,"isActive":false}]}
        \ No newline at end of file
        +{"codeList":["$ pip install --upgrade pymilvus transformers datasets torch\n","from datasets import load_dataset\n\n\nDATASET = \"squad\"  # Name of dataset from HuggingFace Datasets\nINSERT_RATIO = 0.001  # Ratio of example dataset to be inserted\n\ndata = load_dataset(DATASET, split=\"validation\")\n# Generates a fixed subset. To generate a random subset, remove the seed.\ndata = data.train_test_split(test_size=INSERT_RATIO, seed=42)[\"test\"]\n# Clean up the data structure in the dataset.\ndata = data.map(\n    lambda val: {\"answer\": val[\"answers\"][\"text\"][0]},\n    remove_columns=[\"id\", \"answers\", \"context\"],\n)\n\n# View summary of example data\nprint(data)\n","from transformers import AutoTokenizer, AutoModel\nimport torch\n\nMODEL = (\n    \"sentence-transformers/all-MiniLM-L6-v2\"  # Name of model from HuggingFace Models\n)\nINFERENCE_BATCH_SIZE = 64  # Batch size of model inference\n\n# Load tokenizer & model from HuggingFace Hub\ntokenizer = AutoTokenizer.from_pretrained(MODEL)\nmodel = AutoModel.from_pretrained(MODEL)\n\n\ndef encode_text(batch):\n    # Tokenize sentences\n    encoded_input = tokenizer(\n        batch[\"question\"], padding=True, truncation=True, return_tensors=\"pt\"\n    )\n\n    # Compute token embeddings\n    with torch.no_grad():\n        model_output = model(**encoded_input)\n\n    # Perform pooling\n    token_embeddings = model_output[0]\n    attention_mask = encoded_input[\"attention_mask\"]\n    input_mask_expanded = (\n        attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n    )\n    sentence_embeddings = torch.sum(\n        token_embeddings * input_mask_expanded, 1\n    ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n\n    # Normalize embeddings\n    batch[\"question_embedding\"] = torch.nn.functional.normalize(\n        sentence_embeddings, p=2, dim=1\n    )\n    return batch\n\n\ndata = data.map(encode_text, batched=True, batch_size=INFERENCE_BATCH_SIZE)\ndata_list = data.to_list()\n","from pymilvus import MilvusClient\n\n\nMILVUS_URI = \"./huggingface_milvus_test.db\"  # Connection URI\nCOLLECTION_NAME = \"huggingface_test\"  # Collection name\nDIMENSION = 384  # Embedding dimension depending on model\n\nmilvus_client = MilvusClient(MILVUS_URI)\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n    collection_name=COLLECTION_NAME,\n    dimension=DIMENSION,\n    auto_id=True,  # Enable auto id\n    enable_dynamic_field=True,  # Enable dynamic fields\n    vector_field_name=\"question_embedding\",  # Map vector field name and embedding column in dataset\n    consistency_level=\"Strong\",  # To enable search with latest data\n)\n","milvus_client.insert(collection_name=COLLECTION_NAME, data=data_list)\n","questions = {\n    \"question\": [\n        \"What is LGM?\",\n        \"When did Massachusetts first mandate that children be educated in schools?\",\n    ]\n}\n\n# Generate question embeddings\nquestion_embeddings = [v.tolist() for v in encode_text(questions)[\"question_embedding\"]]\n\n# Search across Milvus\nsearch_results = milvus_client.search(\n    collection_name=COLLECTION_NAME,\n    data=question_embeddings,\n    limit=3,  # How many search results to output\n    output_fields=[\"answer\", \"question\"],  # Include these fields in search results\n)\n\n# Print out results\nfor q, res in zip(questions[\"question\"], search_results):\n    print(\"Question:\", q)\n    for r in 
res:\n        print(\n            {\n                \"answer\": r[\"entity\"][\"answer\"],\n                \"score\": r[\"distance\"],\n                \"original question\": r[\"entity\"][\"question\"],\n            }\n        )\n    print(\"\\n\")\n"],"headingContent":"Question Answering Using Milvus and Hugging Face","anchorList":[{"label":"Respuesta a preguntas con Milvus y Hugging Face","href":"Question-Answering-Using-Milvus-and-Hugging-Face","type":1,"isActive":false},{"label":"Antes de empezar","href":"Before-you-begin","type":2,"isActive":false},{"label":"Preparar los datos","href":"Prepare-data","type":2,"isActive":false},{"label":"Insertar datos","href":"Insert-data","type":2,"isActive":false},{"label":"Hacer preguntas","href":"Ask-questions","type":2,"isActive":false}]}
        \ No newline at end of file
        diff --git a/localization/v2.4.x/site/es/integrations/integrate_with_hugging-face.md b/localization/v2.4.x/site/es/integrations/integrate_with_hugging-face.md
        index 6ec2038d7..0e0db3994 100644
        --- a/localization/v2.4.x/site/es/integrations/integrate_with_hugging-face.md
        +++ b/localization/v2.4.x/site/es/integrations/integrate_with_hugging-face.md
        @@ -22,7 +22,8 @@ title: Respuesta a preguntas con Milvus y Hugging Face
                   d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
                 >
               
-Open In Colab
+Open In Colab +GitHub Repository

A question answering system based on semantic search works by finding the most similar question in a dataset of question-answer pairs for a given query. Once the most similar question is identified, the corresponding answer in the dataset is taken as the answer to the query. This approach relies on semantic similarity measures to determine the similarity between questions and retrieve relevant answers.

This tutorial shows how to build a question answering system using Hugging Face as the data loader and embedding generator for data processing, and Milvus as the vector database for semantic search.
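As a hedged sketch of the search step, drawn from this tutorial's code listing (it assumes the encode_text helper, milvus_client, and COLLECTION_NAME defined there):

```python
questions = {"question": ["What is LGM?"]}

# Embed the query with the same model used for the stored questions.
question_embeddings = [v.tolist() for v in encode_text(questions)["question_embedding"]]

# Retrieve the most similar stored questions and their answers.
search_results = milvus_client.search(
    collection_name=COLLECTION_NAME,
    data=question_embeddings,
    limit=3,  # top-3 most similar questions
    output_fields=["answer", "question"],
)
```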

Before you begin

-If you are using Google Colab, to enable the dependencies you have just installed, you may need to restart the runtime. (Click the "Runtime" menu at the top of the screen and select "Restart session" from the dropdown menu.)
+If you use Google Colab, to enable the dependencies you have just installed, you may need to restart the runtime. (Click the "Runtime" menu at the top of the screen and select "Restart session" from the dropdown menu.)

Prepare the data

-Open In Colab
+Open In Colab +GitHub Repository

This guide shows how to use Jina AI embeddings and Milvus to perform similarity search and retrieval tasks.

Who is Jina AI

-Founded in 2020 in Berlin, Jina AI is a pioneering AI company focused on revolutionizing the future of artificial intelligence through its search foundation. Specializing in multimodal AI, Jina AI aims to empower businesses and developers to harness the power of multimodal data for value creation and cost savings through its integrated suite of components, including embeddings, rerankers, prompt operations, and core infrastructure. Jina AI's cutting-edge embeddings boast top-tier performance, with an 8192-token-length model ideal for complete data representation. Offering multilingual support and seamless integration with leading platforms such as OpenAI, these embeddings facilitate multilingual applications.
+Founded in 2020 in Berlin, Jina AI is a pioneering AI company focused on revolutionizing the future of artificial intelligence through its search foundation. Specializing in multimodal AI, Jina AI aims to empower businesses and developers to harness the power of multimodal data for value creation and cost savings through its integrated suite of components, including embeddings, rerankers, prompt operations, and core infrastructure. Jina AI's cutting-edge embeddings boast top-tier performance, with an 8192-token-length model ideal for comprehensive data representation. Offering multilingual support and seamless integration with leading platforms such as OpenAI, these embeddings facilitate multilingual applications.

Milvus and Jina AI's Embedding

-Jina AI's flagship embedding model excels at detailed text understanding, making it ideal for semantic search, content classification, advanced sentiment analysis, text summarization, and personalized recommendation systems.
+Jina AI's core embedding model excels at detailed text understanding, making it ideal for semantic search, content classification, advanced sentiment analysis, text summarization, and personalized recommendation systems.

 from pymilvus.model.dense import JinaEmbeddingFunction
         
         jina_api_key = "<YOUR_JINA_API_KEY>"
        -ef = JinaEmbeddingFunction("jina-embeddings-v2-base-en", jina_api_key)
        +ef = JinaEmbeddingFunction(
        +    "jina-embeddings-v3", 
        +    jina_api_key,
        +    task="retrieval.passage",
        +    dimensions=1024
        +)
         
         query = "what is information retrieval?"
         doc = "Information retrieval is the process of finding relevant information from a large collection of data or documents."
         
        -qvecs = ef.encode_queries([query])
        -dvecs = ef.encode_documents([doc])
        +qvecs = ef.encode_queries([query])  # This method uses `retrieval.query` as the task
        +dvecs = ef.encode_documents([doc])  # This method uses `retrieval.passage` as the task
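As a hedged follow-up (the collection name and local Milvus Lite file below are illustrative, not part of the guide), the resulting vectors can be stored and searched with MilvusClient:

```python
from pymilvus import MilvusClient

client = MilvusClient(uri="./milvus_jina_demo.db")  # illustrative Milvus Lite file
client.create_collection(collection_name="jina_demo", dimension=1024)

# Store the passage vector together with its raw text.
client.insert(
    collection_name="jina_demo",
    data=[{"id": 0, "vector": dvecs[0].tolist(), "text": doc}],
)

# Search with the query vector and return the matching text.
res = client.search(
    collection_name="jina_demo",
    data=[qvecs[0].tolist()],
    limit=1,
    output_fields=["text"],
)
print(res[0][0]["entity"]["text"])
```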
         

Bilingual embeddings

-Open In Colab
+Open In Colab +GitHub Repository

This guide shows how to build a Retrieval-Augmented Generation (RAG) system using LlamaIndex and Milvus.

A RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

LlamaIndex is a simple and flexible data framework for connecting custom data sources to large language models (LLMs). Milvus is the world's most advanced open-source vector database, built to power embedding similarity search and AI applications.
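No code from this fragment survives in the diff excerpt; a minimal hedged sketch of the pattern looks as follows (the file path is illustrative, dim=1536 assumes OpenAI's default embedding model, and OPENAI_API_KEY must be set):

```python
from llama_index.core import SimpleDirectoryReader, StorageContext, VectorStoreIndex
from llama_index.vector_stores.milvus import MilvusVectorStore

# Milvus Lite local file; dim must match the embedding model in use.
vector_store = MilvusVectorStore(uri="./milvus_llamaindex.db", dim=1536, overwrite=True)
storage_context = StorageContext.from_defaults(vector_store=vector_store)

documents = SimpleDirectoryReader(input_files=["./data/essay.txt"]).load_data()
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)

query_engine = index.as_query_engine()
print(query_engine.query("What is this document about?"))
```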

        diff --git a/localization/v2.4.x/site/es/integrations/integrate_with_openai.json b/localization/v2.4.x/site/es/integrations/integrate_with_openai.json index e26783888..be972a2f6 100644 --- a/localization/v2.4.x/site/es/integrations/integrate_with_openai.json +++ b/localization/v2.4.x/site/es/integrations/integrate_with_openai.json @@ -1 +1 @@ -{"codeList":["pip install --upgrade openai pymilvus\n","from openai import OpenAI\nfrom pymilvus import MilvusClient\n\nMODEL_NAME = \"text-embedding-3-small\" # Which model to use, please check https://platform.openai.com/docs/guides/embeddings for available models\nDIMENSION = 1536 # Dimension of vector embedding\n\n# Connect to OpenAI with API Key.\nopenai_client = OpenAI(api_key=\"\")\n\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = [\n vec.embedding\n for vec in openai_client.embeddings.create(input=docs, model=MODEL_NAME).data\n]\n\n# Prepare data to be stored in Milvus vector database.\n# We can store the id, vector representation, raw text and labels such as \"subject\" in this case in Milvus.\ndata = [\n {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n for i in range(len(docs))\n]\n\n\n# Connect to Milvus, all data is stored in a local file named \"milvus_openai_demo.db\"\n# in current directory. You can also connect to a remote Milvus server following this\n# instruction: https://milvus.io/docs/install_standalone-docker.md.\nmilvus_client = MilvusClient(uri=\"milvus_openai_demo.db\")\nCOLLECTION_NAME = \"demo_collection\" # Milvus collection name\n# Create a collection to store the vectors and text.\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\n# Insert all data into Milvus vector database.\nres = milvus_client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res[\"insert_count\"])\n","queries = [\"When was artificial intelligence founded?\"]\n\nquery_vectors = [\n vec.embedding\n for vec in openai_client.embeddings.create(input=queries, model=MODEL_NAME).data\n]\n\nres = milvus_client.search(\n collection_name=COLLECTION_NAME, # target collection\n data=query_vectors, # query vectors\n limit=2, # number of returned entities\n output_fields=[\"text\", \"subject\"], # specifies fields to be returned\n)\n\nfor q in queries:\n print(\"Query:\", q)\n for result in res:\n print(result)\n print(\"\\n\")\n","[\n {\n \"id\": 0,\n \"distance\": -0.772376537322998,\n \"entity\": {\n \"text\": \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"subject\": \"history\",\n },\n },\n {\n \"id\": 1,\n \"distance\": -0.58596271276474,\n \"entity\": {\n \"text\": \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"subject\": \"history\",\n },\n },\n]\n"],"headingContent":"","anchorList":[{"label":"Búsqueda semántica con Milvus y OpenAI","href":"Semantic-Search-with-Milvus-and-OpenAI","type":1,"isActive":false},{"label":"Para empezar","href":"Getting-started","type":2,"isActive":false},{"label":"Búsqueda de títulos de libros con OpenAI y Milvus","href":"Searching-book-titles-with-OpenAI--Milvus","type":2,"isActive":false}]} \ No newline at end of file 
+{"codeList":["pip install --upgrade openai pymilvus\n","from openai import OpenAI\nfrom pymilvus import MilvusClient\n\nMODEL_NAME = \"text-embedding-3-small\" # Which model to use, please check https://platform.openai.com/docs/guides/embeddings for available models\nDIMENSION = 1536 # Dimension of vector embedding\n\n# Connect to OpenAI with API Key.\nopenai_client = OpenAI(api_key=\"\")\n\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = [\n vec.embedding\n for vec in openai_client.embeddings.create(input=docs, model=MODEL_NAME).data\n]\n\n# Prepare data to be stored in Milvus vector database.\n# We can store the id, vector representation, raw text and labels such as \"subject\" in this case in Milvus.\ndata = [\n {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n for i in range(len(docs))\n]\n\n\n# Connect to Milvus, all data is stored in a local file named \"milvus_openai_demo.db\"\n# in current directory. You can also connect to a remote Milvus server following this\n# instruction: https://milvus.io/docs/install_standalone-docker.md.\nmilvus_client = MilvusClient(uri=\"milvus_openai_demo.db\")\nCOLLECTION_NAME = \"demo_collection\" # Milvus collection name\n# Create a collection to store the vectors and text.\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\n# Insert all data into Milvus vector database.\nres = milvus_client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res[\"insert_count\"])\n","queries = [\"When was artificial intelligence founded?\"]\n\nquery_vectors = [\n vec.embedding\n for vec in openai_client.embeddings.create(input=queries, model=MODEL_NAME).data\n]\n\nres = milvus_client.search(\n collection_name=COLLECTION_NAME, # target collection\n data=query_vectors, # query vectors\n limit=2, # number of returned entities\n output_fields=[\"text\", \"subject\"], # specifies fields to be returned\n)\n\nfor q in queries:\n print(\"Query:\", q)\n for result in res:\n print(result)\n print(\"\\n\")\n","[\n {\n \"id\": 0,\n \"distance\": -0.772376537322998,\n \"entity\": {\n \"text\": \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"subject\": \"history\",\n },\n },\n {\n \"id\": 1,\n \"distance\": -0.58596271276474,\n \"entity\": {\n \"text\": \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"subject\": \"history\",\n },\n },\n]\n"],"headingContent":"Semantic Search with Milvus and OpenAI","anchorList":[{"label":"Búsqueda semántica con Milvus y OpenAI","href":"Semantic-Search-with-Milvus-and-OpenAI","type":1,"isActive":false},{"label":"Para empezar","href":"Getting-started","type":2,"isActive":false},{"label":"Búsqueda de títulos de libros con OpenAI y Milvus","href":"Searching-book-titles-with-OpenAI--Milvus","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/integrations/integrate_with_openai.md b/localization/v2.4.x/site/es/integrations/integrate_with_openai.md index e7c62ed1f..5c200c281 100644 --- a/localization/v2.4.x/site/es/integrations/integrate_with_openai.md +++ b/localization/v2.4.x/site/es/integrations/integrate_with_openai.md @@ 
-20,8 +20,9 @@ summary: >- d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

-Open In Colab
+Open In Colab +GitHub Repository

This guide shows how OpenAI's Embedding API can be used with the Milvus vector database to perform semantic search over text.

Getting started

Before you start, make sure you have the OpenAI API key ready, or get one from the OpenAI website.

The data used in this example consists of book titles. You can download the dataset here and place it in the same directory where you run the following code.

First, install the package for Milvus and OpenAI:

        pip install --upgrade openai pymilvus
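The embedding step that follows, condensed from this guide's code listing (the API key is a placeholder), generates one vector per document and stores the vectors in Milvus:

```python
from openai import OpenAI
from pymilvus import MilvusClient

MODEL_NAME = "text-embedding-3-small"  # see the OpenAI docs for available models
DIMENSION = 1536  # output dimension of text-embedding-3-small

openai_client = OpenAI(api_key="<YOUR_OPENAI_API_KEY>")

docs = ["Artificial intelligence was founded as an academic discipline in 1956."]
vectors = [
    vec.embedding
    for vec in openai_client.embeddings.create(input=docs, model=MODEL_NAME).data
]

# Store vectors in a local Milvus Lite file.
milvus_client = MilvusClient(uri="milvus_openai_demo.db")
milvus_client.create_collection(collection_name="demo_collection", dimension=DIMENSION)
milvus_client.insert(
    collection_name="demo_collection",
    data=[{"id": 0, "vector": vectors[0], "text": docs[0]}],
)
```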
        diff --git a/localization/v2.4.x/site/es/integrations/integrate_with_ragas.json b/localization/v2.4.x/site/es/integrations/integrate_with_ragas.json
        index 4a9068ddc..d52d2c9ef 100644
        --- a/localization/v2.4.x/site/es/integrations/integrate_with_ragas.json
        +++ b/localization/v2.4.x/site/es/integrations/integrate_with_ragas.json
        @@ -1 +1 @@
        -{"codeList":["$ pip install --upgrade pymilvus openai requests tqdm pandas ragas\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","from typing import List\nfrom tqdm import tqdm\nfrom openai import OpenAI\nfrom pymilvus import MilvusClient\n\n\nclass RAG:\n    \"\"\"\n    RAG (Retrieval-Augmented Generation) class built upon OpenAI and Milvus.\n    \"\"\"\n\n    def __init__(self, openai_client: OpenAI, milvus_client: MilvusClient):\n        self._prepare_openai(openai_client)\n        self._prepare_milvus(milvus_client)\n\n    def _emb_text(self, text: str) -> List[float]:\n        return (\n            self.openai_client.embeddings.create(input=text, model=self.embedding_model)\n            .data[0]\n            .embedding\n        )\n\n    def _prepare_openai(\n        self,\n        openai_client: OpenAI,\n        embedding_model: str = \"text-embedding-3-small\",\n        llm_model: str = \"gpt-3.5-turbo\",\n    ):\n        self.openai_client = openai_client\n        self.embedding_model = embedding_model\n        self.llm_model = llm_model\n        self.SYSTEM_PROMPT = \"\"\"\nHuman: You are an AI assistant. You are able to find answers to the questions from the contextual passage snippets provided.\n\"\"\"\n        self.USER_PROMPT = \"\"\"\nUse the following pieces of information enclosed in  tags to provide an answer to the question enclosed in  tags.\n\n{context}\n\n\n{question}\n\n\"\"\"\n\n    def _prepare_milvus(\n        self, milvus_client: MilvusClient, collection_name: str = \"rag_collection\"\n    ):\n        self.milvus_client = milvus_client\n        self.collection_name = collection_name\n        if self.milvus_client.has_collection(self.collection_name):\n            self.milvus_client.drop_collection(self.collection_name)\n        embedding_dim = len(self._emb_text(\"foo\"))\n        self.milvus_client.create_collection(\n            collection_name=self.collection_name,\n            dimension=embedding_dim,\n            metric_type=\"IP\",  # Inner product distance\n            consistency_level=\"Strong\",  # Strong consistency level\n        )\n\n    def load(self, texts: List[str]):\n        \"\"\"\n        Load the text data into Milvus.\n        \"\"\"\n        data = []\n        for i, line in enumerate(tqdm(texts, desc=\"Creating embeddings\")):\n            data.append({\"id\": i, \"vector\": self._emb_text(line), \"text\": line})\n\n        self.milvus_client.insert(collection_name=self.collection_name, data=data)\n\n    def retrieve(self, question: str, top_k: int = 3) -> List[str]:\n        \"\"\"\n        Retrieve the most similar text data to the given question.\n        \"\"\"\n        search_res = self.milvus_client.search(\n            collection_name=self.collection_name,\n            data=[self._emb_text(question)],\n            limit=top_k,\n            search_params={\"metric_type\": \"IP\", \"params\": {}},  # Inner product distance\n            output_fields=[\"text\"],  # Return the text field\n        )\n        retrieved_texts = [res[\"entity\"][\"text\"] for res in search_res[0]]\n        return retrieved_texts[:top_k]\n\n    def answer(\n        self,\n        question: str,\n        retrieval_top_k: int = 3,\n        return_retrieved_text: bool = False,\n    ):\n        \"\"\"\n        Answer the given question with the retrieved knowledge.\n        \"\"\"\n        retrieved_texts = self.retrieve(question, top_k=retrieval_top_k)\n        user_prompt = self.USER_PROMPT.format(\n            
context=\"\\n\".join(retrieved_texts), question=question\n        )\n        response = self.openai_client.chat.completions.create(\n            model=self.llm_model,\n            messages=[\n                {\"role\": \"system\", \"content\": self.SYSTEM_PROMPT},\n                {\"role\": \"user\", \"content\": user_prompt},\n            ],\n        )\n        if not return_retrieved_text:\n            return response.choices[0].message.content\n        else:\n            return response.choices[0].message.content, retrieved_texts\n","openai_client = OpenAI()\nmilvus_client = MilvusClient(uri=\"./milvus_demo.db\")\n\nmy_rag = RAG(openai_client=openai_client, milvus_client=milvus_client)\n","import os\nimport urllib.request\n\nurl = \"https://raw.githubusercontent.com/milvus-io/milvus/master/DEVELOPMENT.md\"\nfile_path = \"./Milvus_DEVELOPMENT.md\"\n\nif not os.path.exists(file_path):\n    urllib.request.urlretrieve(url, file_path)\nwith open(file_path, \"r\") as file:\n    file_text = file.read()\n\n# We simply use \"# \" to separate the content in the file, which can roughly separate the content of each main part of the markdown file.\ntext_lines = file_text.split(\"# \")\nmy_rag.load(text_lines)  # Load the text data into RAG pipeline\n","question = \"what is the hardware requirements specification if I want to build Milvus and run from source code?\"\nmy_rag.answer(question, return_retrieved_text=True)\n","from datasets import Dataset\nimport pandas as pd\n\nquestion_list = [\n    \"what is the hardware requirements specification if I want to build Milvus and run from source code?\",\n    \"What is the programming language used to write Knowhere?\",\n    \"What should be ensured before running code coverage?\",\n]\nground_truth_list = [\n    \"If you want to build Milvus and run from source code, the recommended hardware requirements specification is:\\n\\n- 8GB of RAM\\n- 50GB of free disk space.\",\n    \"The programming language used to write Knowhere is C++.\",\n    \"Before running code coverage, you should make sure that your code changes are covered by unit tests.\",\n]\ncontexts_list = []\nanswer_list = []\nfor question in tqdm(question_list, desc=\"Answering questions\"):\n    answer, contexts = my_rag.answer(question, return_retrieved_text=True)\n    contexts_list.append(contexts)\n    answer_list.append(answer)\n\ndf = pd.DataFrame(\n    {\n        \"question\": question_list,\n        \"contexts\": contexts_list,\n        \"answer\": answer_list,\n        \"ground_truth\": ground_truth_list,\n    }\n)\nrag_results = Dataset.from_pandas(df)\ndf\n","from ragas import evaluate\nfrom ragas.metrics import (\n    answer_relevancy,\n    faithfulness,\n    context_recall,\n    context_precision,\n)\n\nresult = evaluate(\n    rag_results,\n    metrics=[\n        answer_relevancy,\n        faithfulness,\n        context_recall,\n        context_precision,\n    ],\n)\n\nresult\n"],"headingContent":"","anchorList":[{"label":"Evaluación con Ragas","href":"Evaluation-with-Ragas","type":1,"isActive":false},{"label":"Requisitos previos","href":"Prerequisites","type":2,"isActive":false},{"label":"Definir el pipeline RAG","href":"Define-the-RAG-pipeline","type":2,"isActive":false},{"label":"Ejecutar la canalización RAG y obtener resultados","href":"Run-the-RAG-pipeline-and-get-results","type":2,"isActive":false},{"label":"Evaluación con Ragas","href":"Evaluation-with-Ragas","type":2,"isActive":false}]}
        \ No newline at end of file
        +{"codeList":["$ pip install --upgrade pymilvus openai requests tqdm pandas ragas\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","from typing import List\nfrom tqdm import tqdm\nfrom openai import OpenAI\nfrom pymilvus import MilvusClient\n\n\nclass RAG:\n    \"\"\"\n    RAG (Retrieval-Augmented Generation) class built upon OpenAI and Milvus.\n    \"\"\"\n\n    def __init__(self, openai_client: OpenAI, milvus_client: MilvusClient):\n        self._prepare_openai(openai_client)\n        self._prepare_milvus(milvus_client)\n\n    def _emb_text(self, text: str) -> List[float]:\n        return (\n            self.openai_client.embeddings.create(input=text, model=self.embedding_model)\n            .data[0]\n            .embedding\n        )\n\n    def _prepare_openai(\n        self,\n        openai_client: OpenAI,\n        embedding_model: str = \"text-embedding-3-small\",\n        llm_model: str = \"gpt-3.5-turbo\",\n    ):\n        self.openai_client = openai_client\n        self.embedding_model = embedding_model\n        self.llm_model = llm_model\n        self.SYSTEM_PROMPT = \"\"\"\nHuman: You are an AI assistant. You are able to find answers to the questions from the contextual passage snippets provided.\n\"\"\"\n        self.USER_PROMPT = \"\"\"\nUse the following pieces of information enclosed in  tags to provide an answer to the question enclosed in  tags.\n\n{context}\n\n\n{question}\n\n\"\"\"\n\n    def _prepare_milvus(\n        self, milvus_client: MilvusClient, collection_name: str = \"rag_collection\"\n    ):\n        self.milvus_client = milvus_client\n        self.collection_name = collection_name\n        if self.milvus_client.has_collection(self.collection_name):\n            self.milvus_client.drop_collection(self.collection_name)\n        embedding_dim = len(self._emb_text(\"foo\"))\n        self.milvus_client.create_collection(\n            collection_name=self.collection_name,\n            dimension=embedding_dim,\n            metric_type=\"IP\",  # Inner product distance\n            consistency_level=\"Strong\",  # Strong consistency level\n        )\n\n    def load(self, texts: List[str]):\n        \"\"\"\n        Load the text data into Milvus.\n        \"\"\"\n        data = []\n        for i, line in enumerate(tqdm(texts, desc=\"Creating embeddings\")):\n            data.append({\"id\": i, \"vector\": self._emb_text(line), \"text\": line})\n\n        self.milvus_client.insert(collection_name=self.collection_name, data=data)\n\n    def retrieve(self, question: str, top_k: int = 3) -> List[str]:\n        \"\"\"\n        Retrieve the most similar text data to the given question.\n        \"\"\"\n        search_res = self.milvus_client.search(\n            collection_name=self.collection_name,\n            data=[self._emb_text(question)],\n            limit=top_k,\n            search_params={\"metric_type\": \"IP\", \"params\": {}},  # Inner product distance\n            output_fields=[\"text\"],  # Return the text field\n        )\n        retrieved_texts = [res[\"entity\"][\"text\"] for res in search_res[0]]\n        return retrieved_texts[:top_k]\n\n    def answer(\n        self,\n        question: str,\n        retrieval_top_k: int = 3,\n        return_retrieved_text: bool = False,\n    ):\n        \"\"\"\n        Answer the given question with the retrieved knowledge.\n        \"\"\"\n        retrieved_texts = self.retrieve(question, top_k=retrieval_top_k)\n        user_prompt = self.USER_PROMPT.format(\n            
context=\"\\n\".join(retrieved_texts), question=question\n        )\n        response = self.openai_client.chat.completions.create(\n            model=self.llm_model,\n            messages=[\n                {\"role\": \"system\", \"content\": self.SYSTEM_PROMPT},\n                {\"role\": \"user\", \"content\": user_prompt},\n            ],\n        )\n        if not return_retrieved_text:\n            return response.choices[0].message.content\n        else:\n            return response.choices[0].message.content, retrieved_texts\n","openai_client = OpenAI()\nmilvus_client = MilvusClient(uri=\"./milvus_demo.db\")\n\nmy_rag = RAG(openai_client=openai_client, milvus_client=milvus_client)\n","import os\nimport urllib.request\n\nurl = \"https://raw.githubusercontent.com/milvus-io/milvus/master/DEVELOPMENT.md\"\nfile_path = \"./Milvus_DEVELOPMENT.md\"\n\nif not os.path.exists(file_path):\n    urllib.request.urlretrieve(url, file_path)\nwith open(file_path, \"r\") as file:\n    file_text = file.read()\n\n# We simply use \"# \" to separate the content in the file, which can roughly separate the content of each main part of the markdown file.\ntext_lines = file_text.split(\"# \")\nmy_rag.load(text_lines)  # Load the text data into RAG pipeline\n","question = \"what is the hardware requirements specification if I want to build Milvus and run from source code?\"\nmy_rag.answer(question, return_retrieved_text=True)\n","from datasets import Dataset\nimport pandas as pd\n\nquestion_list = [\n    \"what is the hardware requirements specification if I want to build Milvus and run from source code?\",\n    \"What is the programming language used to write Knowhere?\",\n    \"What should be ensured before running code coverage?\",\n]\nground_truth_list = [\n    \"If you want to build Milvus and run from source code, the recommended hardware requirements specification is:\\n\\n- 8GB of RAM\\n- 50GB of free disk space.\",\n    \"The programming language used to write Knowhere is C++.\",\n    \"Before running code coverage, you should make sure that your code changes are covered by unit tests.\",\n]\ncontexts_list = []\nanswer_list = []\nfor question in tqdm(question_list, desc=\"Answering questions\"):\n    answer, contexts = my_rag.answer(question, return_retrieved_text=True)\n    contexts_list.append(contexts)\n    answer_list.append(answer)\n\ndf = pd.DataFrame(\n    {\n        \"question\": question_list,\n        \"contexts\": contexts_list,\n        \"answer\": answer_list,\n        \"ground_truth\": ground_truth_list,\n    }\n)\nrag_results = Dataset.from_pandas(df)\ndf\n","from ragas import evaluate\nfrom ragas.metrics import (\n    answer_relevancy,\n    faithfulness,\n    context_recall,\n    context_precision,\n)\n\nresult = evaluate(\n    rag_results,\n    metrics=[\n        answer_relevancy,\n        faithfulness,\n        context_recall,\n        context_precision,\n    ],\n)\n\nresult\n"],"headingContent":"Evaluation with Ragas","anchorList":[{"label":"Evaluación con Ragas","href":"Evaluation-with-Ragas","type":1,"isActive":false},{"label":"Requisitos previos","href":"Prerequisites","type":2,"isActive":false},{"label":"Definir el pipeline RAG","href":"Define-the-RAG-pipeline","type":2,"isActive":false},{"label":"Ejecutar la canalización RAG y obtener resultados","href":"Run-the-RAG-pipeline-and-get-results","type":2,"isActive":false},{"label":"Evaluación con Ragas","href":"Evaluation-with-Ragas","type":2,"isActive":false}]}
        \ No newline at end of file
        diff --git a/localization/v2.4.x/site/es/integrations/integrate_with_ragas.md b/localization/v2.4.x/site/es/integrations/integrate_with_ragas.md
        index 8ce27d2ab..da1789fe2 100644
        --- a/localization/v2.4.x/site/es/integrations/integrate_with_ragas.md
        +++ b/localization/v2.4.x/site/es/integrations/integrate_with_ragas.md
        @@ -20,7 +20,8 @@ title: Evaluación con Ragas
                   d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
                 >
               
-Open In Colab
+Open In Colab +GitHub Repository

This guide shows how to use Ragas to evaluate a Retrieval-Augmented Generation (RAG) system built on Milvus.

A RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

Ragas is a framework that helps you evaluate your RAG pipelines. There are tools and frameworks that help you build these pipelines, but evaluating them and quantifying their performance can be difficult. This is where Ragas (RAG Assessment) comes in.
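As a hedged sketch of a Ragas evaluation run, following this guide's code listing (the single row below is illustrative; in practice the columns are filled from your RAG pipeline's outputs):

```python
from datasets import Dataset
from ragas import evaluate
from ragas.metrics import (
    answer_relevancy,
    faithfulness,
    context_recall,
    context_precision,
)

# One illustrative row; real evaluations use the pipeline's questions,
# retrieved contexts, generated answers, and ground-truth references.
rag_results = Dataset.from_dict(
    {
        "question": ["What is the programming language used to write Knowhere?"],
        "contexts": [["The algorithm library of Milvus, Knowhere, is written in C++."]],
        "answer": ["Knowhere is written in C++."],
        "ground_truth": ["The programming language used to write Knowhere is C++."],
    }
)

result = evaluate(
    rag_results,
    metrics=[answer_relevancy, faithfulness, context_recall, context_precision],
)
print(result)
```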

        @@ -296,7 +297,7 @@ df 1 - ¿Cuál es el lenguaje de programación utilizado... + ¿Cuál es el lenguaje de programac... [CMake & Conan\n\nLa biblioteca de algoritmos de Mil... ¿Cuál es el lenguaje de programación utilizado... ¿Cuál es el lenguaje de programación utilizado... diff --git a/localization/v2.4.x/site/es/integrations/integrate_with_vanna.json b/localization/v2.4.x/site/es/integrations/integrate_with_vanna.json index 16f3f50a3..600b86e08 100644 --- a/localization/v2.4.x/site/es/integrations/integrate_with_vanna.json +++ b/localization/v2.4.x/site/es/integrations/integrate_with_vanna.json @@ -1 +1 @@ -{"codeList":["$ pip install \"vanna[milvus,openai]\"\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","from pymilvus import MilvusClient, model\nfrom vanna.milvus import Milvus_VectorStore\nfrom vanna.openai import OpenAI_Chat\n\n\nclass VannaMilvus(Milvus_VectorStore, OpenAI_Chat):\n def __init__(self, config=None):\n Milvus_VectorStore.__init__(self, config=config)\n OpenAI_Chat.__init__(self, config=config)\n","milvus_uri = \"./milvus_vanna.db\"\n\nmilvus_client = MilvusClient(uri=milvus_uri)\n\nvn_milvus = VannaMilvus(\n config={\n \"api_key\": os.getenv(\"OPENAI_API_KEY\"),\n \"model\": \"gpt-3.5-turbo\",\n \"milvus_client\": milvus_client,\n \"embedding_function\": model.DefaultEmbeddingFunction(),\n \"n_results\": 2, # The number of results to return from Milvus semantic search.\n }\n)\n","import sqlite3\n\nsqlite_path = \"./my-database.sqlite\"\nsql_connect = sqlite3.connect(sqlite_path)\nc = sql_connect.cursor()\n\ninit_sqls = \"\"\"\nCREATE TABLE IF NOT EXISTS Customer (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Name TEXT NOT NULL,\n Company TEXT NOT NULL,\n City TEXT NOT NULL,\n Phone TEXT NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS Company (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Name TEXT NOT NULL,\n Industry TEXT NOT NULL,\n Location TEXT NOT NULL,\n EmployeeCount INTEGER NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS User (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Username TEXT NOT NULL UNIQUE,\n Email TEXT NOT NULL UNIQUE\n);\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('John Doe', 'ABC Corp', 'New York', '123-456-7890');\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('Jane Smith', 'XYZ Inc', 'Los Angeles', '098-765-4321');\n\nINSERT INTO Company (Name, Industry, Location, EmployeeCount)\nVALUES ('ABC Corp', 'cutting-edge technology', 'New York', 100);\n\nINSERT INTO User (Username, Email)\nVALUES ('johndoe123', 'johndoe123@example.com');\n\"\"\"\n\nfor sql in init_sqls.split(\";\"):\n c.execute(sql)\n\nsql_connect.commit()\n\n# Connect to the SQLite database\nvn_milvus.connect_to_sqlite(sqlite_path)\n","# If there exists training data, we should remove it before training.\nexisting_training_data = vn_milvus.get_training_data()\nif len(existing_training_data) > 0:\n for _, training_data in existing_training_data.iterrows():\n vn_milvus.remove_training_data(training_data[\"id\"])\n\n# Get the DDL of the SQLite database\ndf_ddl = vn_milvus.run_sql(\"SELECT type, sql FROM sqlite_master WHERE sql is not null\")\n\n# Train the model on the DDL data\nfor ddl in df_ddl[\"sql\"].to_list():\n vn_milvus.train(ddl=ddl)\n","# Add documentation about your business terminology or definitions.\nvn_milvus.train(\n documentation=\"ABC Corp specializes in cutting-edge technology solutions and innovation.\"\n)\nvn_milvus.train(\n documentation=\"XYZ Inc is a global leader in manufacturing and supply chain management.\"\n)\n\n# 
You can also add SQL queries to your training data.\nvn_milvus.train(sql=\"SELECT * FROM Customer WHERE Name = 'John Doe'\")\n","training_data = vn_milvus.get_training_data()\ntraining_data\n","sql = vn_milvus.generate_sql(\"what is the phone number of John Doe?\")\nvn_milvus.run_sql(sql)\n","sql = vn_milvus.generate_sql(\"which customer works for a manufacturing corporation?\")\nvn_milvus.run_sql(sql)\n","sql_connect.close()\nmilvus_client.close()\n\nos.remove(sqlite_path)\nif os.path.exists(milvus_uri):\n os.remove(milvus_uri)\n"],"headingContent":"","anchorList":[{"label":"Escribir SQL con Vanna y Milvus","href":"Write-SQL-with-Vanna-and-Milvus","type":1,"isActive":false},{"label":"Requisitos previos","href":"Prerequisites","type":2,"isActive":false},{"label":"Preparación de datos","href":"Data-preparation","type":2,"isActive":false},{"label":"Entrenar con datos","href":"Train-with-data","type":2,"isActive":false},{"label":"Generar SQLs y ejecutarlos","href":"Generate-SQLs-and-execute-them","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install \"vanna[milvus,openai]\"\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","from pymilvus import MilvusClient, model\nfrom vanna.milvus import Milvus_VectorStore\nfrom vanna.openai import OpenAI_Chat\n\n\nclass VannaMilvus(Milvus_VectorStore, OpenAI_Chat):\n def __init__(self, config=None):\n Milvus_VectorStore.__init__(self, config=config)\n OpenAI_Chat.__init__(self, config=config)\n","milvus_uri = \"./milvus_vanna.db\"\n\nmilvus_client = MilvusClient(uri=milvus_uri)\n\nvn_milvus = VannaMilvus(\n config={\n \"api_key\": os.getenv(\"OPENAI_API_KEY\"),\n \"model\": \"gpt-3.5-turbo\",\n \"milvus_client\": milvus_client,\n \"embedding_function\": model.DefaultEmbeddingFunction(),\n \"n_results\": 2, # The number of results to return from Milvus semantic search.\n }\n)\n","import sqlite3\n\nsqlite_path = \"./my-database.sqlite\"\nsql_connect = sqlite3.connect(sqlite_path)\nc = sql_connect.cursor()\n\ninit_sqls = \"\"\"\nCREATE TABLE IF NOT EXISTS Customer (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Name TEXT NOT NULL,\n Company TEXT NOT NULL,\n City TEXT NOT NULL,\n Phone TEXT NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS Company (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Name TEXT NOT NULL,\n Industry TEXT NOT NULL,\n Location TEXT NOT NULL,\n EmployeeCount INTEGER NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS User (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Username TEXT NOT NULL UNIQUE,\n Email TEXT NOT NULL UNIQUE\n);\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('John Doe', 'ABC Corp', 'New York', '123-456-7890');\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('Jane Smith', 'XYZ Inc', 'Los Angeles', '098-765-4321');\n\nINSERT INTO Company (Name, Industry, Location, EmployeeCount)\nVALUES ('ABC Corp', 'cutting-edge technology', 'New York', 100);\n\nINSERT INTO User (Username, Email)\nVALUES ('johndoe123', 'johndoe123@example.com');\n\"\"\"\n\nfor sql in init_sqls.split(\";\"):\n c.execute(sql)\n\nsql_connect.commit()\n\n# Connect to the SQLite database\nvn_milvus.connect_to_sqlite(sqlite_path)\n","# If there exists training data, we should remove it before training.\nexisting_training_data = vn_milvus.get_training_data()\nif len(existing_training_data) > 0:\n for _, training_data in existing_training_data.iterrows():\n vn_milvus.remove_training_data(training_data[\"id\"])\n\n# Get the DDL of the SQLite database\ndf_ddl = vn_milvus.run_sql(\"SELECT type, sql FROM 
sqlite_master WHERE sql is not null\")\n\n# Train the model on the DDL data\nfor ddl in df_ddl[\"sql\"].to_list():\n vn_milvus.train(ddl=ddl)\n","# Add documentation about your business terminology or definitions.\nvn_milvus.train(\n documentation=\"ABC Corp specializes in cutting-edge technology solutions and innovation.\"\n)\nvn_milvus.train(\n documentation=\"XYZ Inc is a global leader in manufacturing and supply chain management.\"\n)\n\n# You can also add SQL queries to your training data.\nvn_milvus.train(sql=\"SELECT * FROM Customer WHERE Name = 'John Doe'\")\n","training_data = vn_milvus.get_training_data()\ntraining_data\n","sql = vn_milvus.generate_sql(\"what is the phone number of John Doe?\")\nvn_milvus.run_sql(sql)\n","sql = vn_milvus.generate_sql(\"which customer works for a manufacturing corporation?\")\nvn_milvus.run_sql(sql)\n","sql_connect.close()\nmilvus_client.close()\n\nos.remove(sqlite_path)\nif os.path.exists(milvus_uri):\n os.remove(milvus_uri)\n"],"headingContent":"Write SQL with Vanna and Milvus","anchorList":[{"label":"Escribir SQL con Vanna y Milvus","href":"Write-SQL-with-Vanna-and-Milvus","type":1,"isActive":false},{"label":"Requisitos previos","href":"Prerequisites","type":2,"isActive":false},{"label":"Preparación de datos","href":"Data-preparation","type":2,"isActive":false},{"label":"Entrenar con datos","href":"Train-with-data","type":2,"isActive":false},{"label":"Generar SQLs y ejecutarlos","href":"Generate-SQLs-and-execute-them","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/integrations/integrate_with_vanna.md b/localization/v2.4.x/site/es/integrations/integrate_with_vanna.md index 1714856b1..2dc9a71c1 100644 --- a/localization/v2.4.x/site/es/integrations/integrate_with_vanna.md +++ b/localization/v2.4.x/site/es/integrations/integrate_with_vanna.md @@ -20,7 +20,9 @@ title: Escribir SQL con Vanna y Milvus d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

        Vanna es un framework Python RAG (Retrieval-Augmented Generation) de código abierto para la generación de SQL y funcionalidades relacionadas. Milvus es la base de datos vectorial de código abierto más avanzada del mundo, creada para potenciar la búsqueda de similitudes y las aplicaciones de IA.

        +

        Open In Colab +GitHub Repository

        +

        Vanna es un framework Python RAG (Retrieval-Augmented Generation) de código abierto para la generación de SQL y funcionalidades relacionadas. Milvus es la base de datos vectorial de código abierto más avanzada del mundo, creada para potenciar la búsqueda de similitudes y las aplicaciones de IA.

        Vanna funciona en dos sencillos pasos: entrena un "modelo" RAG en tus datos y, a continuación, formula preguntas que devolverán consultas SQL que pueden configurarse para ejecutarse en tu base de datos. Esta guía muestra cómo utilizar Vanna para generar y ejecutar consultas SQL basadas en sus datos almacenados en una base de datos.
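A modo de resumen, el flujo completo se reduce a unas pocas llamadas. El siguiente esbozo en Python es solo ilustrativo y condensa los pasos que se detallan más adelante en esta guía (la clave de API, el modelo y las preguntas son hipotéticos):

import os

from pymilvus import MilvusClient, model
from vanna.milvus import Milvus_VectorStore
from vanna.openai import OpenAI_Chat

# Clase combinada, tal y como se define más adelante en esta guía.
class VannaMilvus(Milvus_VectorStore, OpenAI_Chat):
    def __init__(self, config=None):
        Milvus_VectorStore.__init__(self, config=config)
        OpenAI_Chat.__init__(self, config=config)

vn = VannaMilvus(
    config={
        "api_key": os.getenv("OPENAI_API_KEY"),
        "model": "gpt-3.5-turbo",
        "milvus_client": MilvusClient(uri="./milvus_vanna.db"),
        "embedding_function": model.DefaultEmbeddingFunction(),
    }
)

# Paso 1: entrenar el "modelo" RAG con DDL, documentación o SQL de ejemplo.
vn.train(ddl="CREATE TABLE Customer (ID INTEGER PRIMARY KEY, Name TEXT)")

# Paso 2: preguntar en lenguaje natural y obtener la consulta SQL generada.
sql = vn.generate_sql("How many customers are there?")
# Para ejecutarla hace falta una conexión previa, p. ej. vn.connect_to_sqlite(...).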

        Requisitos previos

        Primero, necesitamos heredar de las clases Milvus_VectorStore y OpenAI_Chat de Vanna y definir una nueva clase VannaMilvus que combine las capacidades de ambas.

        +

        En primer lugar, necesitamos heredar de las clases Milvus_VectorStore y OpenAI_Chat de Vanna y definir una nueva clase VannaMilvus que combine las capacidades de ambas.

        from pymilvus import MilvusClient, model
         from vanna.milvus import Milvus_VectorStore
         from vanna.openai import OpenAI_Chat
        diff --git a/localization/v2.4.x/site/es/integrations/integrate_with_voyageai.json b/localization/v2.4.x/site/es/integrations/integrate_with_voyageai.json
        index db39a1feb..a8f9e4a2a 100644
        --- a/localization/v2.4.x/site/es/integrations/integrate_with_voyageai.json
        +++ b/localization/v2.4.x/site/es/integrations/integrate_with_voyageai.json
        @@ -1 +1 @@
        -{"codeList":["$ pip install --upgrade voyageai pymilvus\n","import voyageai\nfrom pymilvus import MilvusClient\n\nMODEL_NAME = \"voyage-law-2\"  # Which model to use, please check https://docs.voyageai.com/docs/embeddings for available models\nDIMENSION = 1024  # Dimension of vector embedding\n\n# Connect to VoyageAI with API Key.\nvoyage_client = voyageai.Client(api_key=\"\")\n\ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = voyage_client.embed(texts=docs, model=MODEL_NAME, truncation=False).embeddings\n\n# Prepare data to be stored in Milvus vector database.\n# We can store the id, vector representation, raw text and labels such as \"subject\" in this case in Milvus.\ndata = [\n    {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n    for i in range(len(docs))\n]\n\n\n# Connect to Milvus, all data is stored in a local file named \"milvus_voyage_demo.db\"\n# in current directory. You can also connect to a remote Milvus server following this\n# instruction: https://milvus.io/docs/install_standalone-docker.md.\nmilvus_client = MilvusClient(uri=\"milvus_voyage_demo.db\")\nCOLLECTION_NAME = \"demo_collection\"  # Milvus collection name\n# Create a collection to store the vectors and text.\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\n# Insert all data into Milvus vector database.\nres = milvus_client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res[\"insert_count\"])\n","queries = [\"When was artificial intelligence founded?\"]\n\nquery_vectors = voyage_client.embed(\n    texts=queries, model=MODEL_NAME, truncation=False\n).embeddings\n\nres = milvus_client.search(\n    collection_name=COLLECTION_NAME,  # target collection\n    data=query_vectors,  # query vectors\n    limit=2,  # number of returned entities\n    output_fields=[\"text\", \"subject\"],  # specifies fields to be returned\n)\n\nfor q in queries:\n    print(\"Query:\", q)\n    for result in res:\n        print(result)\n    print(\"\\n\")\n"],"headingContent":"","anchorList":[{"label":"Búsqueda semántica con Milvus y VoyageAI","href":"Semantic-Search-with-Milvus-and-VoyageAI","type":1,"isActive":false},{"label":"Para empezar","href":"Getting-started","type":2,"isActive":false},{"label":"Buscando títulos de libros con VoyageAI y Milvus","href":"Searching-book-titles-with-VoyageAI--Milvus","type":2,"isActive":false}]}
        \ No newline at end of file
        +{"codeList":["$ pip install --upgrade voyageai pymilvus\n","import voyageai\nfrom pymilvus import MilvusClient\n\nMODEL_NAME = \"voyage-law-2\"  # Which model to use, please check https://docs.voyageai.com/docs/embeddings for available models\nDIMENSION = 1024  # Dimension of vector embedding\n\n# Connect to VoyageAI with API Key.\nvoyage_client = voyageai.Client(api_key=\"\")\n\ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = voyage_client.embed(texts=docs, model=MODEL_NAME, truncation=False).embeddings\n\n# Prepare data to be stored in Milvus vector database.\n# We can store the id, vector representation, raw text and labels such as \"subject\" in this case in Milvus.\ndata = [\n    {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n    for i in range(len(docs))\n]\n\n\n# Connect to Milvus, all data is stored in a local file named \"milvus_voyage_demo.db\"\n# in current directory. You can also connect to a remote Milvus server following this\n# instruction: https://milvus.io/docs/install_standalone-docker.md.\nmilvus_client = MilvusClient(uri=\"milvus_voyage_demo.db\")\nCOLLECTION_NAME = \"demo_collection\"  # Milvus collection name\n# Create a collection to store the vectors and text.\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\n# Insert all data into Milvus vector database.\nres = milvus_client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res[\"insert_count\"])\n","queries = [\"When was artificial intelligence founded?\"]\n\nquery_vectors = voyage_client.embed(\n    texts=queries, model=MODEL_NAME, truncation=False\n).embeddings\n\nres = milvus_client.search(\n    collection_name=COLLECTION_NAME,  # target collection\n    data=query_vectors,  # query vectors\n    limit=2,  # number of returned entities\n    output_fields=[\"text\", \"subject\"],  # specifies fields to be returned\n)\n\nfor q in queries:\n    print(\"Query:\", q)\n    for result in res:\n        print(result)\n    print(\"\\n\")\n"],"headingContent":"Semantic Search with Milvus and VoyageAI","anchorList":[{"label":"Búsqueda semántica con Milvus y VoyageAI","href":"Semantic-Search-with-Milvus-and-VoyageAI","type":1,"isActive":false},{"label":"Para empezar","href":"Getting-started","type":2,"isActive":false},{"label":"Buscando títulos de libros con VoyageAI y Milvus","href":"Searching-book-titles-with-VoyageAI--Milvus","type":2,"isActive":false}]}
        \ No newline at end of file
        diff --git a/localization/v2.4.x/site/es/integrations/integrate_with_voyageai.md b/localization/v2.4.x/site/es/integrations/integrate_with_voyageai.md
        index 87cd13045..b5e4b7775 100644
        --- a/localization/v2.4.x/site/es/integrations/integrate_with_voyageai.md
        +++ b/localization/v2.4.x/site/es/integrations/integrate_with_voyageai.md
        @@ -20,8 +20,9 @@ summary: >-
                   d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
                 >
               
        -    

        Open In Colab

        -

        Esta guía muestra cómo la API de incrustación de VoyageAI puede utilizarse con la base de datos vectorial Milvus para realizar búsquedas semánticas en texto.

        +

        Open In Colab +GitHub Repository

        +

Esta guía muestra cómo la API de incrustación de VoyageAI puede utilizarse con la base de datos vectorial Milvus para realizar búsquedas semánticas en texto.

        Para empezar

        Open In Colab

        +

        Open In Colab +GitHub Repository

        Esta guía muestra cómo construir un sistema de Generación Aumentada por Recuperación (RAG) utilizando LangChain y Milvus.

        El sistema RAG combina un sistema de recuperación con un modelo generativo para generar nuevo texto basado en una petición dada. En primer lugar, el sistema recupera documentos relevantes de un corpus utilizando Milvus y, a continuación, utiliza un modelo generativo para generar un nuevo texto basado en los documentos recuperados.

        LangChain es un marco para el desarrollo de aplicaciones basadas en grandes modelos lingüísticos (LLM). Milvus es la base de datos vectorial de código abierto más avanzada del mundo, creada para potenciar la búsqueda de similitudes de incrustación y las aplicaciones de IA.

        @@ -210,7 +211,7 @@ res d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

        Podemos utilizar las reglas de filtrado escalar de Milvus para filtrar los documentos basándonos en los metadatos. Hemos cargado los documentos de dos fuentes diferentes, y podemos filtrar los documentos por los metadatos source.

        +

        Podemos utilizar las reglas de filtrado escalar de Milvus para filtrar los documentos basándonos en los metadatos. Hemos cargado los documentos de dos fuentes diferentes, y podemos filtrar los documentos por los metadatos source.

        vectorstore.similarity_search(
             "What is CoT?",
             k=1,
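Como referencia, la llamada completa con el filtro escalar podría tener este aspecto; el valor de source es hipotético, y expr es el parámetro de filtrado que admite similarity_search en langchain_milvus:

# Búsqueda con filtrado escalar por el campo de metadatos "source".
# El valor del filtro es hipotético; sustitúyalo por una de sus fuentes reales.
vectorstore.similarity_search(
    "What is CoT?",
    k=1,
    expr="source == 'mi_fuente_hipotetica'",
)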
        diff --git a/localization/v2.4.x/site/es/menuStructure/es.json b/localization/v2.4.x/site/es/menuStructure/es.json
        index c46d0a96e..0066fca12 100644
        --- a/localization/v2.4.x/site/es/menuStructure/es.json
        +++ b/localization/v2.4.x/site/es/menuStructure/es.json
        @@ -741,7 +741,7 @@
                         "children": []
                       },
                       {
        -                "label": "Despliegue en GCP",
        +                "label": "Implantación en GCP",
                         "id": "gcp.md",
                         "order": 2,
                         "children": []
        diff --git a/localization/v2.4.x/site/es/reference/architecture/architecture_overview.json b/localization/v2.4.x/site/es/reference/architecture/architecture_overview.json
        index a72ee747a..e4bb6e882 100644
        --- a/localization/v2.4.x/site/es/reference/architecture/architecture_overview.json
        +++ b/localization/v2.4.x/site/es/reference/architecture/architecture_overview.json
        @@ -1 +1 @@
        -{"codeList":[],"headingContent":"","anchorList":[{"label":"Visión general de la arquitectura de Milvus","href":"Milvus-Architecture-Overview","type":1,"isActive":false},{"label":"Más información","href":"Whats-next","type":2,"isActive":false}]}
        \ No newline at end of file
        +{"codeList":[],"headingContent":"Milvus Architecture Overview","anchorList":[{"label":"Visión general de la arquitectura de Milvus","href":"Milvus-Architecture-Overview","type":1,"isActive":false},{"label":"Más información","href":"Whats-next","type":2,"isActive":false}]}
        \ No newline at end of file
        diff --git a/localization/v2.4.x/site/es/reference/architecture/architecture_overview.md b/localization/v2.4.x/site/es/reference/architecture/architecture_overview.md
        index 5a1052d89..bb9999172 100644
        --- a/localization/v2.4.x/site/es/reference/architecture/architecture_overview.md
        +++ b/localization/v2.4.x/site/es/reference/architecture/architecture_overview.md
        @@ -28,6 +28,12 @@ title: Visión general de la arquitectura de Milvus
           
             Architecture_diagram
              Diagrama_de_arquitectura 

        +

Según la figura, las interfaces pueden clasificarse en las siguientes categorías (véase el esbozo de código tras la lista):

        +
          +
        • DDL / DCL: createCollection / createPartition / dropCollection / dropPartition / hasCollection / hasPartition
        • +
        • DML / Produce: insert / delete / upsert
        • +
• DQL: search / query
        • +

        Más información

        diff --git a/localization/v2.4.x/site/es/reference/disk_index.json b/localization/v2.4.x/site/es/reference/disk_index.json index 73bbfeb0f..eb66c2110 100644 --- a/localization/v2.4.x/site/es/reference/disk_index.json +++ b/localization/v2.4.x/site/es/reference/disk_index.json @@ -1 +1 @@ -{"codeList":["...\nDiskIndex:\n MaxDegree: 56\n SearchListSize: 100\n PQCodeBugetGBRatio: 0.125\n SearchCacheBudgetGBRatio: 0.125\n BeamWidthRatio: 4.0\n...\n"],"headingContent":"","anchorList":[{"label":"Índice en disco","href":"On-disk-Index","type":1,"isActive":false},{"label":"Requisitos previos","href":"Prerequisites","type":2,"isActive":false},{"label":"Límites","href":"Limits","type":2,"isActive":false},{"label":"Parámetros de índice y búsqueda","href":"Index-and-search-settings","type":2,"isActive":false},{"label":"Configuraciones de Milvus relacionadas con DiskANN","href":"DiskANN-related-Milvus-configurations","type":2,"isActive":false},{"label":"Solución de problemas","href":"Troubleshooting","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["...\nDiskIndex:\n MaxDegree: 56\n SearchListSize: 100\n PQCodeBugetGBRatio: 0.125\n SearchCacheBudgetGBRatio: 0.125\n BeamWidthRatio: 4.0\n...\n"],"headingContent":"On-disk Index","anchorList":[{"label":"Índice en disco","href":"On-disk-Index","type":1,"isActive":false},{"label":"Requisitos previos","href":"Prerequisites","type":2,"isActive":false},{"label":"Límites","href":"Limits","type":2,"isActive":false},{"label":"Parámetros de índice y búsqueda","href":"Index-and-search-settings","type":2,"isActive":false},{"label":"Configuraciones de Milvus relacionadas con DiskANN","href":"DiskANN-related-Milvus-configurations","type":2,"isActive":false},{"label":"Solución de problemas","href":"Troubleshooting","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/reference/disk_index.md b/localization/v2.4.x/site/es/reference/disk_index.md index 767615cc9..b05957446 100644 --- a/localization/v2.4.x/site/es/reference/disk_index.md +++ b/localization/v2.4.x/site/es/reference/disk_index.md @@ -68,7 +68,7 @@ Actualmente, un campo vectorial sólo admite un tipo de índice. Milvus elimina

Para utilizar DiskANN, asegúrese de lo siguiente (véase el ejemplo de creación de índice tras la lista):

        • Utilice sólo vectores flotantes con al menos 1 dimensión en sus datos.
        • -
        • Utilice únicamente la distancia euclidiana (L2) o el producto interior (IP) para medir la distancia entre vectores.
        • +
        • Utilice únicamente la distancia euclidiana (L2), el producto interior (IP) o COSINE para medir la distancia entre vectores.
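A título ilustrativo, así se crearía un índice DISKANN con la API ORM de pymilvus, suponiendo un servidor Milvus con DiskANN habilitado y una colección «demo» ya existente con un campo vectorial «vector»:

from pymilvus import Collection, connections

# Conexión hipotética a un servidor Milvus con DiskANN habilitado.
connections.connect(uri="http://localhost:19530")
collection = Collection("demo")  # colección hipotética ya existente

# DiskANN admite únicamente L2, IP o COSINE sobre vectores flotantes.
index_params = {
    "index_type": "DISKANN",
    "metric_type": "L2",
    "params": {},
}
collection.create_index(field_name="vector", index_params=index_params)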

        Parámetros de índice y búsqueda

        Las réplicas en memoria se organizan como grupos de réplicas. Cada grupo de réplica contiene réplicas de fragmentos. Cada réplica de fragmento tiene una réplica de flujo y una réplica histórica que corresponden a los segmentos en crecimiento y sellados en el fragmento (es decir, el canal DML).

        +

        Las réplicas en memoria se organizan como grupos de réplicas. Cada grupo de réplica contiene réplicas de fragmentos. Cada réplica de fragmento tiene una réplica de flujo y una réplica histórica que corresponden a los segmentos en crecimiento y sellados en el fragmento (es decir, el canal DML).

        - An illustration of how in-memory replica works + An illustration of how in-memory replica works Ilustración del funcionamiento de las réplicas en memoria

        Grupo de réplica

        Un grupo de réplica consiste en múltiples nodos de consulta que son responsables de manejar los datos históricos y las réplicas.

        Réplica de fragmentos

        Una réplica de fragmento consta de una réplica de flujo y una réplica histórica, ambas pertenecientes al mismo fragmento. El número de réplicas de fragmentos de un grupo de réplicas viene determinado por el número de fragmentos de una colección específica.

        -

        Réplica de flujo

        Una réplica de streaming contiene todos los segmentos crecientes del mismo canal DML. Técnicamente hablando, una réplica de streaming debe ser servida por un único nodo de consulta en una réplica.

        -

        Réplica histórica

        Una réplica histórica contiene todos los segmentos sellados del mismo canal DML. Los segmentos sellados de una réplica histórica pueden distribuirse en varios nodos de consulta dentro del mismo grupo de réplica.

        +

        Réplica de flujo

        Una réplica de streaming contiene todos los segmentos crecientes del mismo canal DML. Técnicamente hablando, una réplica de streaming debe ser servida por un solo nodo de consulta en una réplica.

        +

        Réplica histórica

        Una réplica histórica contiene todos los segmentos sellados del mismo canal DML. Los segmentos sellados de una réplica histórica pueden distribuirse en varios nodos de consulta dentro del mismo grupo de réplicas.

        Líder de fragmentos

        Un líder de fragmento es el nodo de consulta que sirve la réplica de flujo en una réplica de fragmento.

        Detalles de diseño

        Caché

        El proxy mantiene una caché que asigna segmentos a nodos de consulta y la actualiza periódicamente. Cuando el proxy recibe una petición, Milvus obtiene de la caché todos los segmentos sellados que necesitan ser buscados e intenta asignarlos a los nodos de consulta de manera uniforme.

        Para los segmentos en crecimiento, el proxy también mantiene una caché de canal a nodo de consulta y envía solicitudes a los nodos de consulta correspondientes.

        Conmutación por error

        Las cachés del proxy no siempre están actualizadas. Algunos segmentos o canales pueden haberse movido a otros nodos de consulta cuando llega una petición. En este caso, el proxy recibirá una respuesta de error, actualizará la caché e intentará asignarlo a otro nodo de consulta.

        -

        Se ignorará un segmento si el proxy sigue sin encontrarlo tras actualizar la caché. Esto puede ocurrir si el segmento ha sido compactado.

        +

        Un segmento se ignorará si el proxy sigue sin encontrarlo tras actualizar la caché. Esto puede ocurrir si el segmento ha sido compactado.

        Si la caché no es precisa, el proxy puede pasar por alto algunos segmentos. Los nodos de consulta con canales DML (segmentos crecientes) devuelven respuestas de búsqueda junto con una lista de segmentos fiables con los que el proxy puede comparar y actualizar la caché.
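Conceptualmente, la secuencia «buscar, fallar, refrescar la caché y reintentar» puede modelarse así. No es código de Milvus, solo un modelo ilustrativo del flujo descrito en los párrafos anteriores:

# Modelo conceptual (no la API real de Milvus) de la caché segmento→nodo
# del proxy y de su conmutación por error.
class SegmentNotFoundError(Exception):
    pass

class ProxyCacheModel:
    def __init__(self, coordinator):
        self.coordinator = coordinator
        # Caché que asigna segmentos sellados a nodos de consulta.
        self.segment_to_node = coordinator.snapshot()

    def search_segment(self, segment_id, request):
        node = self.segment_to_node.get(segment_id)
        try:
            if node is None:
                raise SegmentNotFoundError(segment_id)
            return node.search(request)
        except SegmentNotFoundError:
            # La caché estaba desactualizada: refrescarla y reintentar una vez.
            self.segment_to_node = self.coordinator.snapshot()
            node = self.segment_to_node.get(segment_id)
            if node is None:
                # El segmento pudo haber sido compactado: se ignora.
                return None
            return node.search(request)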

        Mejora

        El proxy no puede asignar las peticiones de búsqueda a los nodos de consulta de forma completamente equitativa y los nodos de consulta pueden tener diferentes recursos para servir las peticiones de búsqueda. Para evitar una distribución de recursos con colas largas, el proxy asignará segmentos activos en otros nodos de consulta a un nodo de consulta inactivo que también disponga de estos segmentos.

        diff --git a/localization/v2.4.x/site/es/release_notes.json b/localization/v2.4.x/site/es/release_notes.json index a140f7254..8ddd333d1 100644 --- a/localization/v2.4.x/site/es/release_notes.json +++ b/localization/v2.4.x/site/es/release_notes.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"Release Notes","anchorList":[{"label":"Notas de la versión","href":"Release-Notes","type":1,"isActive":false},{"label":"v2.4.11","href":"v2411","type":2,"isActive":false},{"label":"v2.4.10","href":"v2410","type":2,"isActive":false},{"label":"v2.4.9","href":"v249","type":2,"isActive":false},{"label":"v2.4.8","href":"v248","type":2,"isActive":false},{"label":"v2.4.6","href":"v246","type":2,"isActive":false},{"label":"v2.4.5","href":"v245","type":2,"isActive":false},{"label":"v2.4.4","href":"v244","type":2,"isActive":false},{"label":"v2.4.3","href":"v243","type":2,"isActive":false},{"label":"v2.4.1","href":"v241","type":2,"isActive":false},{"label":"v2.4.0","href":"v240","type":2,"isActive":false},{"label":"v2.4.0-rc.1","href":"v240-rc1","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Release Notes","anchorList":[{"label":"Notas de la versión","href":"Release-Notes","type":1,"isActive":false},{"label":"v2.4.13-hotfix","href":"v2413-hotfix","type":2,"isActive":false},{"label":"[Obsoleto] v2.4.13","href":"Deprecated-v2413","type":2,"isActive":false},{"label":"v2.4.12","href":"v2412","type":2,"isActive":false},{"label":"v2.4.11","href":"v2411","type":2,"isActive":false},{"label":"v2.4.10","href":"v2410","type":2,"isActive":false},{"label":"v2.4.9","href":"v249","type":2,"isActive":false},{"label":"v2.4.8","href":"v248","type":2,"isActive":false},{"label":"v2.4.6","href":"v246","type":2,"isActive":false},{"label":"v2.4.5","href":"v245","type":2,"isActive":false},{"label":"v2.4.4","href":"v244","type":2,"isActive":false},{"label":"v2.4.3","href":"v243","type":2,"isActive":false},{"label":"v2.4.1","href":"v241","type":2,"isActive":false},{"label":"v2.4.0","href":"v240","type":2,"isActive":false},{"label":"v2.4.0-rc.1","href":"v240-rc1","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/release_notes.md b/localization/v2.4.x/site/es/release_notes.md index 03468bb4f..84017995b 100644 --- a/localization/v2.4.x/site/es/release_notes.md +++ b/localization/v2.4.x/site/es/release_notes.md @@ -19,6 +19,163 @@ title: Notas de la versión >

        Descubra las novedades de Milvus. Esta página resume las nuevas características, mejoras, problemas conocidos y correcciones de errores de cada versión. Puede encontrar las notas de la versión para cada versión publicada después de la v2.4.0 en esta sección. Le sugerimos que visite regularmente esta página para conocer las actualizaciones.

        +

        v2.4.13-hotfix

        Fecha de publicación: 17 de octubre de 2024

+
| Versión de Milvus | Versión del SDK de Python | Versión del SDK de Java | Versión del SDK de Node.js |
| --- | --- | --- | --- |
| 2.4.13-hotfix | 2.4.8 | 2.4.5 | 2.4.9 |
        +

Milvus v2.4.13-hotfix soluciona un problema crítico específico de v2.4.13, donde Milvus puede fallar al recuperar la información de las colecciones después de un reinicio si todas las instantáneas de MetaKV fueron recolectadas como basura(#36933). Se recomienda a los usuarios que actualmente ejecutan la versión 2.4.13 que actualicen a la versión 2.4.13-hotfix lo antes posible para evitar posibles interrupciones.

        +

        Correcciones críticas

          +
        • Cargar la clave original si la marca de tiempo es MaxTimestamp(#36935)
        • +
        +

        [Obsoleto] v2.4.13

        Fecha de publicación: 12 de octubre de 2024

+
| Versión de Milvus | Versión del SDK de Python | Versión del SDK de Java | Versión del SDK de Node.js |
| --- | --- | --- | --- |
| 2.4.13 | 2.4.8 | 2.4.5 | 2.4.9 |
        +

        Milvus 2.4.13 introduce la carga dinámica de réplicas, permitiendo a los usuarios ajustar el número de réplicas de la colección sin necesidad de liberar y recargar la colección. Esta versión también soluciona varios errores críticos relacionados con la importación masiva, el análisis sintáctico de expresiones, el equilibrio de carga y la recuperación de fallos. Además, se han introducido mejoras significativas en el uso de los recursos MMAP y en el rendimiento de las importaciones, mejorando la eficiencia general del sistema. Recomendamos encarecidamente la actualización a esta versión para mejorar el rendimiento y la estabilidad.
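Según esta descripción, el ajuste se haría sin liberar la colección antes; el siguiente esbozo es una suposición ilustrativa de cómo se usaría desde pymilvus, no una API confirmada por estas notas:

from pymilvus import Collection, connections

connections.connect(uri="http://localhost:19530")  # endpoint hipotético
collection = Collection("demo")  # colección hipotética ya cargada con 1 réplica

# Suposición ilustrativa: en 2.4.13+, volver a llamar a load() con otro
# replica_number ajustaría las réplicas sin release() previo.
collection.load(replica_number=2)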

        +

        Características

          +
        • Ajuste dinámico de réplicas para colecciones cargadas(#36417)
        • +
        • MMAP de vectores dispersos en tipos de segmento crecientes(#36565)
        • +
        +

        Corrección de errores

          +
        • Corrección de un problema de rendimiento de descarga(#36741)
        • +
        • Corrección de un error con expresiones JSON en "[]"(#36722)
        • +
        • Eliminación de vecinos si el destino compacto no está indexado(#36694)
        • +
        • Mejora del rendimiento de Rocksmq cuando el canal está lleno(#36618)
        • +
        • Se ha solucionado un problema por el que no se diferían los errores durante el desempaquetado(#36665)
        • +
• Resuelta una fuga de memoria de los segmentos importados en el gestor de segmentos(#36631)
        • +
• Se han omitido comprobaciones de estado innecesarias para nodos de consulta en el proxy(#36553)
        • +
        • Solucionado un problema de desbordamiento con expresiones de términos(#36534)
        • +
        • Registro del ID de nodo antes de asignar tareas para evitar la asignación incorrecta de tareas(#36493)
        • +
        • Resueltos los problemas de carrera de datos en la compactación de clustering(#36499)
        • +
• Se ha añadido una comprobación de la longitud máxima de la matriz de cadenas tras la coincidencia de tipos(#36497)
        • +
        • Resueltas las condiciones de carrera en modo mixto o autónomo(#36459)
        • +
        • Corrección del desequilibrio de segmentos tras operaciones repetidas de carga y liberación(#36543)
        • +
        • Corregido un caso donde los segmentos no podían ser movidos desde un nodo de parada(#36475)
        • +
        • Actualizada la información de segmento correctamente incluso si faltaban algunos segmentos(#36729)
        • +
        • Se ha evitado que las transacciones etcd excedan el límite máximo en la instantánea KV(#36773)
        • +
        +

        Mejoras

          +
        • Mejorada la estimación de recursos MMAP:
            +
          • Mejorado el código relacionado con MMAP en column.h(#36521)
          • +
          • Mejora de la estimación de recursos al cargar colecciones(#36728)
          • +
        • +
        • Mejoras de rendimiento:
            +
          • Mejora de la eficiencia de análisis de expresiones mediante la conversión de Unicode a ASCII(#36676)
          • +
          • Habilitada la producción paralela de mensajes para múltiples temas(#36462)
          • +
          • Reducción de la sobrecarga de la CPU al calcular el tamaño del archivo de índice(#36580)
          • +
          • Obtención del tipo de mensaje de la cabecera para minimizar el desmarcado(#36454)
          • +
          • Optimizada la política de selección de réplicas basada en la carga de trabajo(#36384)
          • +
        • +
        • Dividir los mensajes de la tarea de eliminación para ajustarlos a los límites de tamaño máximo de los mensajes(#36574)
        • +
        • Añadida nueva URL RESTful para describir las tareas de importación(#36754)
        • +
        • Optimización de la programación de importaciones y adición de una métrica de coste temporal(#36684)
        • +
        • Añadido registro de informe de balance para el balanceador del coordinador de consultas(#36749)
        • +
        • Cambio al uso de la configuración común de GC(#36670)
        • +
        • Añadido el cambio de política de reenvío de flujo para el delegador(#36712)
        • +
        • Habilitada la compactación manual para colecciones sin índices(#36581)
        • +
        • Habilitado el equilibrio de carga en nodos de consulta con capacidades de memoria variables(#36625)
        • +
        • Caso unificado para etiquetas de entrada utilizando metrics.label(#36616)
        • +
        • Se han hecho idempotentes las operaciones de canal/segmento de transferencia(#36552)
        • +
        • Añadidas métricas para supervisar el rendimiento de importación y el recuento de filas importadas(#36588)
        • +
        • Prevención de la creación de múltiples objetos de temporizador en los objetivos(#36573)
        • +
        • Actualizada la versión de expresiones y el formato de respuesta HTTP para expresiones(#36467)
        • +
        • Mejorada la recogida de basura en la instantánea KV(#36793)
        • +
        • Añadido soporte para ejecutar métodos con parámetros de contexto(#36798)
        • +
        +

        v2.4.12

        Fecha de publicación: 26 de septiembre de 2024

+
| Versión Milvus | Versión del SDK de Python | Versión del SDK de Java | Versión del SDK de Node.js |
| --- | --- | --- | --- |
| 2.4.12 | 2.4.7 | 2.4.4 | 2.4.9 |
        +

        Milvus 2.4.12 introduce mejoras significativas y correcciones de errores críticos. Esta versión soluciona los problemas de duplicación de datos y mejora la velocidad de recuperación de fallos, especialmente cuando se manejan grandes cantidades de borrados. Sin embargo, persiste un problema conocido por el que la recuperación de fallos puede ser lenta cuando se borran grandes cantidades de datos. Estamos trabajando activamente para resolver este problema.

        +

        Mejoras

          +
• Implementación de la parada ordenada (graceful stop) para el gestor de flowgraph(#36358)
        • +
        • Desactivada la comprobación de índices para campos vectoriales no cargados(#36280)
        • +
        • Se han filtrado los registros borrados no exitosos durante la carga delta(#36272)
        • +
        • Mejorada la gestión de errores para excepciones std::stoi(#36296)
        • +
        • No se permiten palabras clave como nombres de campo o nombres de campo dinámicos(#36108)
        • +
        • Añadidas métricas para borrar entradas en segmentos L0(#36227)
        • +
        • Implementada la política de reenvío de L0 para soportar la carga remota(#36208)
        • +
• Añadida comprobación de carga de campo ANN en el proxy(#36194)
        • +
        • Habilitado el soporte de filas dispersas vacías(#36061)
        • +
        • Corregida una vulnerabilidad de seguridad(#36156)
        • +
        • Implementado un gestor de estadísticas para las métricas de tamaño de solicitud/respuesta(#36118)
        • +
        • Se ha corregido la estimación del tamaño de las matrices de datos codificadas(#36379)
        • +
        +

        Corrección de errores

          +
        • Resueltos errores de tipo de métrica para colecciones con dos campos vectoriales(#36473)
        • +
        • Corregidos los problemas de almacenamiento en búferes largos que provocaban fallos en la recepción de colas de mensajes(#36425)
        • +
• Se ha implementado la devolución adecuada de compactación a segmentos tras la división(#36429)
        • +
        • Resueltos los problemas de carrera de datos con la goroutina de comprobación de ID de nodo(#36377)
        • +
        • Eliminada la comprobación del tipo de elemento(#36324)
        • +
        • Solucionados los problemas de acceso concurrente para segmentos crecientes y sellados(#36288)
        • +
        • Implementación de bloqueo de estado futuro(#36333)
        • +
        • Corregido el uso de offset en HybridSearch(#36287, #36253)
        • +
• Resuelta la fuga de segmentos/canales sucios en QueryNode(#36259)
        • +
        • Corregida la duplicación de claves primarias(#36274)
        • +
        • Ajuste del tipo de métrica en las solicitudes de búsqueda(#36279)
        • +
        • Corregido el problema de eliminación de la métrica stored_index_files_size(#36161)
        • +
        • Se ha corregido el comportamiento del grupo de privilegios de lectura y escritura para el acceso global a la API(#36145)
        • +

        v2.4.11

        Fecha de lanzamiento: 11 de septiembre de 2024

        +

        Fecha de publicación: 11 de septiembre de 2024

        @@ -72,7 +229,7 @@ title: Notas de la versión
      • Asegurado el inicio único de los observadores querycoord en 2.4(#35817)
      • Mejoras

          -
        • Actualizada la versión de Milvus y proto a 2.4.11(#36069)
        • +
        • Actualizada la versión de Milvus & proto a 2.4.11(#36069)
        • Se ha corregido la pérdida de memoria en las pruebas unitarias y se ha habilitado la opción use_asan para las compilaciones unittest(#35857)
        • Ajustados los límites de l0segmentsrowcount a valores más apropiados(#36015)
        • Modificado el factor de estimación de memoria deltalog a uno(#36035)
        • @@ -117,7 +274,7 @@ title: Notas de la versión
        • Upsert con identificación automática: Soporte para operaciones upsert con generación automática de ID(#34633)
        • Carga parcial de campos de una colección [Beta Preview]: Permite cargar campos específicos de una colección(#35696)
        • Mejoras RBAC:
            -
          • Añadido soporte de mensajes RBAC para la Captura de Datos de Cambios(CDC)(#35562)
          • +
          • Añadido soporte de mensajes RBAC para la Captura de Datos de Cambios (CDC)(#35562)
          • Introducción de grupos de privilegios de sólo lectura/escritura/admin para simplificar el proceso de concesión de RBAC(#35543)
          • Nueva API para realizar copias de seguridad y restaurar configuraciones RBAC(#35513)
          • Actualizar caché de proxy después de restaurar metadatos RBAC(#35636)
          • @@ -151,7 +308,7 @@ title: Notas de la versión
          • Actualización a protobuf-go v2(#35555)
          • Rastreo mejorado con codificación de cadena hexadecimal para traceid y spanid(#35568)
          • Añadidas métricas de número de segmento para el gancho de consulta(#35619)
          • -
          • Mejora de la compatibilidad con el antiguo SDK para la función de configuración de parámetros de carga(#35573)
          • +
          • Mejora de la compatibilidad con el SDK antiguo para la función de configuración de parámetros de carga(#35573)
          • Añadido soporte para HTTP v1/v2 throttling(#35504)
          • Corregida la estimación de memoria de índice(#35670)
          • Posibilidad de escribir varios segmentos en el compactador de mezcla para evitar la generación de segmentos grandes(#35648)
          • @@ -217,7 +374,7 @@ title: Notas de la versión
| Versión de Milvus | Versión del SDK de Python | Versión del SDK de Java | Versión del SDK de Node.js |
| --- | --- | --- | --- |
| 2.4.8 | 2.4.5 | 2.4.3 | 2.4.4 |
        -

        Milvus 2.4.8 introdujo varias mejoras significativas en el rendimiento y la estabilidad del sistema. La característica más notable fue la implementación de la compactación por agrupación, un mecanismo que mejora la eficiencia de las búsquedas y consultas mediante la redistribución de los datos en grandes colecciones en función de una clave de agrupación designada, lo que reduce la cantidad de datos escaneados. La compactación también se desacopló del DataNode shard, permitiendo que cualquier DataNode realizara la compactación de forma independiente, lo que mejoró la tolerancia a fallos, la estabilidad, el rendimiento y la escalabilidad. Además, se ha refactorizado la interfaz entre los componentes Go y C++ para utilizar llamadas CGO asíncronas, lo que ha permitido resolver problemas como los tiempos de espera de sesión, y se han realizado otras optimizaciones de rendimiento basadas en la creación de perfiles. También se han actualizado las dependencias de la aplicación para solucionar vulnerabilidades de seguridad conocidas. Además, esta versión incluye numerosas optimizaciones de rendimiento y correcciones de errores críticos.

        +

        Milvus 2.4.8 introdujo varias mejoras significativas en el rendimiento y la estabilidad del sistema. La característica más notable fue la implementación de la compactación por agrupación, un mecanismo que mejora la eficiencia de las búsquedas y consultas mediante la redistribución de los datos en grandes colecciones en función de una clave de agrupación designada, lo que reduce la cantidad de datos escaneados. La compactación también se desacopló del DataNode shard, permitiendo que cualquier DataNode realizara la compactación de forma independiente, lo que mejoró la tolerancia a fallos, la estabilidad, el rendimiento y la escalabilidad. Además, se ha refactorizado la interfaz entre los componentes Go y C++ para utilizar llamadas CGO asíncronas, lo que ha resuelto problemas como los tiempos de espera de sesión, y se han realizado otras optimizaciones de rendimiento basadas en la creación de perfiles. También se han actualizado las dependencias de la aplicación para solucionar vulnerabilidades de seguridad conocidas. Además, esta versión incluye numerosas optimizaciones de rendimiento y correcciones de errores críticos.
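Para ilustrar la clave de agrupación descrita, un esquema puede marcar un campo con is_clustering_key y disparar la compactación manualmente; los nombres son hipotéticos y la llamada compact(is_clustering=True) se asume disponible en pymilvus para 2.4.8+:

from pymilvus import (
    Collection,
    CollectionSchema,
    DataType,
    FieldSchema,
    connections,
)

connections.connect(uri="http://localhost:19530")  # endpoint hipotético

fields = [
    FieldSchema(name="id", dtype=DataType.INT64, is_primary=True),
    # Clave de agrupación por la que se redistribuyen los datos.
    FieldSchema(name="key", dtype=DataType.INT64, is_clustering_key=True),
    FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=8),
]
collection = Collection("clustering_demo", CollectionSchema(fields))

# Compactación de agrupación disparada manualmente (parámetro asumido).
collection.compact(is_clustering=True)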

        Características

        • Se ha implementado la compactación de agrupaciones, que permite redistribuir los datos en función de una clave de agrupación designada para mejorar la eficacia de las consultas(#34326),(#34363).
        @@ -269,7 +426,7 @@ title: Notas de la versión

      Cambios en

      • Para los usuarios de código abierto, esta versión cambia los tipos de métrica en AutoIndex para FloatVector y BinaryVector a Cosine y Hamming, respectivamente.

      • -
      • Versiones corregidas de dependencias de terceros:

        +
• Versiones corregidas de dependencias de terceros:

        • Esta versión introduce versiones fijas para ciertas bibliotecas de dependencias de terceros, mejorando significativamente la gestión de la cadena de suministro de software de Milvus.
        • Al aislar el proyecto de los cambios aguas arriba, protege las compilaciones diarias de posibles interrupciones.
        • @@ -294,7 +451,7 @@ title: Notas de la versión
        • Se ha corregido la configuración de los parámetros de búsqueda al utilizar el iterador Knowhere.(#34732)
        • Revisada la lógica para comprobar el estado de la carga de particiones.(#34305)
        • Se corrigió un problema donde las actualizaciones de caché de privilegios fallaban debido a errores de solicitud no manejados.(#34697)
        • -
        • Resuelto un fallo en la recuperación de la colección cargada tras el reinicio de QueryCoord.(#35211)
        • +
        • Resuelto un fallo en la recuperación de colecciones cargadas tras el reinicio de QueryCoord.(#35211)
        • Solucionado un problema de idempotencia de carga eliminando la validación innecesaria de parámetros de índice.(#35179)
        • Asegurado compressBinlog se ejecuta para permitir reloadFromKV para llenar correctamente logID de binlog después de DataCoord reinicia.(#34062)
• Se ha corregido un problema por el que los metadatos de colección no se eliminaban después de la recogida de basura en DataCoord.(#34884)
        • @@ -355,7 +512,7 @@ title: Notas de la versión
        • Corregida una fuga de memoria de objetos oráculo de clave primaria cuando un trabajador está desconectado(#34020).
        • Corregido ChannelManagerImplV2 para notificar al Nodo correcto, solucionando problemas de captura de parámetros en el cierre de bucle(#34004).
        • Se ha corregido una carrera de lectura-escritura de datos en ImportTask segmentsInfo implementando una copia profunda(#34126).
        • -
        • Corregida la información de versión para la opción de configuración "legacyVersionWithoutRPCWatch" para evitar errores durante las actualizaciones continuas(#34185).
        • +
        • Se ha corregido la información de versión para la opción de configuración "legacyVersionWithoutRPCWatch" para evitar errores durante las actualizaciones continuas(#34185).
        • Corregida la métrica para el número de particiones cargadas(#34195).
        • Se ha pasado la configuración de otlpSecure al configurar el seguimiento de segcore(#34210).
        • Solucionado un problema por el que las propiedades de DataCoord se sobrescribían por error(#34240).
        • @@ -395,7 +552,7 @@ title: Notas de la versión

          El lanzamiento de Milvus 2.4.5 introduce varias mejoras y correcciones de errores para mejorar el rendimiento, la estabilidad y la funcionalidad. Milvus 2.4.5 simplifica la búsqueda de vectores dispersos, float16 y bfloat16 con autoindexación, acelera las búsquedas, eliminaciones y compactaciones con las optimizaciones del filtro Bloom y aborda la gestión de datos mediante tiempos de carga más rápidos y compatibilidad con la importación de segmentos L0. También introduce el índice HNSW disperso para la búsqueda eficiente de datos dispersos de alta dimensión, mejora la API RESTful con soporte de vectores flotantes dispersos y corrige errores críticos para mejorar la estabilidad.

          Novedades

            -
          • Se ha añadido compatibilidad con rbac a la API de descripción/alteración de bases de datos(nº 33804).
          • +
• Se ha añadido compatibilidad con rbac a la API de descripción/alteración de bases de datos(#33804)
          • Soporte de la creación del índice HNSW para vectores dispersos(#33653, #33662)
          • Posibilidad de crear el índice de disco para vectores binarios(#33575)
          • Soporte de vectores dispersos en RESTful v2(#33555)
          • @@ -430,7 +587,7 @@ title: Notas de la versión
          • Se ha corregido un error que podía provocar que indexnode volviera a intentar crear el índice en parámetros de índice no válidos de todos los vectores(#33878)
          • Corregido el error que cuando las cargas y liberaciones ocurren concurrentemente puede bloquear el Servidor(#33699)
          • Mejorada la consistencia de la caché para los valores de configuración(#33797)
          • -
          • Se ha evitado la posible pérdida de datos durante la eliminación(#33821)
          • +
          • Evitada la posible pérdida de datos durante la eliminación(#33821)
          • Se ha garantizado que el campo DroppedAt (fecha y hora probable de eliminación) se establezca tras la eliminación de colecciones(#33767)
          • Se ha solucionado un problema que podía hacer que Milvus manejara incorrectamente los tamaños de datos de vectores binarios(#33751)
          • Se ha evitado que las credenciales sensibles de Kafka se registren en texto sin formato(#33694, #33747)
          • @@ -439,7 +596,7 @@ title: Notas de la versión
          • Mejorado el manejo del índice HNSW disperso (funcionalidad interna)(#33714)
          • Se ha limpiado la memoria vectorial para evitar fugas de memoria(#33708)
          • Asegurado un calentamiento asíncrono más suave arreglando un problema de bloqueo de estado.(#33687)
          • -
          • Se ha corregido un error que podía provocar la pérdida de resultados en los iteradores de consulta.(#33506)
          • +
          • Se ha corregido un error que podía causar la pérdida de resultados en los iteradores de consulta.(#33506)
          • Se ha corregido un error que podía provocar que el tamaño del segmento de importación fuera desigual. (#33634)
          • Se ha corregido la gestión incorrecta del tamaño de los datos para los tipos bf16, fp16 y vector binario(#33488)
          • Se ha mejorado la estabilidad solucionando posibles problemas con el compactador L0(#33564)
          • @@ -450,7 +607,7 @@ title: Notas de la versión
          • Optimizada la canalización de consultas eliminando el comprobador de grupos innecesario(#33485)
          • Utilización de la ruta de almacenamiento local para una comprobación más precisa de la capacidad de disco en el nodo de índice.(#33505)
          • Se ha corregido la posibilidad de que hasMoreResult devuelva false cuando el número de aciertos supera el límite(#33642)
          • -
          • Retraso en la carga de bf en el delegador para evitar que los bfs se carguen una y otra vez cuando el trabajador no tiene más memoria(#33650)- Corregido un error por el que queryHook no podía reconocer el tipo de vector(#33911)
          • +
          • Retraso en la carga de bfs en el delegador para evitar que los bfs se carguen una y otra vez cuando el trabajador no tiene más memoria(#33650)- Corregido un error por el que queryHook no podía reconocer el tipo de vector(#33911)
          • Impedido el uso capturado iteración variable partitionID(#33970)
          • Corregido un error que puede provocar que Milvus sea incapaz de crear AutoIndex en vectores binarios y dispersos(#33867)
          • Se ha corregido un error que podía provocar que indexnode volviera a intentar crear el índice en parámetros de índice no válidos de todos los vectores(#33878)
          • @@ -465,7 +622,7 @@ title: Notas de la versión
          • Mejorado el manejo del índice HNSW disperso (funcionalidad interna)(#33714)
          • Se ha limpiado la memoria vectorial para evitar fugas de memoria(#33708)
          • Asegurado un calentamiento asíncrono más suave arreglando un problema de bloqueo de estado.(#33687)
          • -
          • Se ha corregido un error que podía provocar la pérdida de resultados en los iteradores de consulta.(#33506)
          • +
          • Se ha corregido un error que podía causar la pérdida de resultados en los iteradores de consulta.(#33506)
          • Se ha corregido un error que podía provocar que el tamaño del segmento de importación fuera desigual. (#33634)
          • Se ha corregido la gestión incorrecta del tamaño de los datos para los tipos bf16, fp16 y vector binario(#33488)
          • Se ha mejorado la estabilidad solucionando posibles problemas con el compactador L0(#33564)
          • @@ -512,7 +669,7 @@ title: Notas de la versión
          • Habilitada la compilación Milvus con GCC-13(#33441)

          Corrección de errores

            -
          • Visualización de colecciones vacías cuando se conceden todos los privilegios(#33454)
          • +
          • Se mostraban colecciones vacías cuando se concedían todos los privilegios(#33454)
          • Asegurado CMake descargas e instalaciones para la plataforma actual, no sólo x86_64(#33439)

          v2.4.3

          Open In Colab

          +

          Open In Colab +GitHub Repository

          En este tutorial, le mostraremos cómo construir un sistema RAG (Retrieval-Augmented Generation) con Milvus.

          El sistema RAG combina un sistema de recuperación con un modelo generativo para generar nuevo texto basado en una petición dada. En primer lugar, el sistema recupera documentos relevantes de un corpus utilizando Milvus y, a continuación, utiliza un modelo generativo para generar un nuevo texto basado en los documentos recuperados.
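En esencia, el canal se reduce a dos llamadas: recuperar con Milvus y generar con el LLM. Un esbozo mínimo en Python; la colección, los campos y los modelos son hipotéticos, y se asume que la colección ya está poblada con embeddings de OpenAI:

from openai import OpenAI
from pymilvus import MilvusClient

milvus_client = MilvusClient(uri="./milvus_demo.db")  # URI hipotética
openai_client = OpenAI()

question = "How is data stored in Milvus?"
emb = (
    openai_client.embeddings.create(
        input=[question], model="text-embedding-3-small"
    )
    .data[0]
    .embedding
)

# 1) Recuperación: los fragmentos más parecidos a la pregunta.
hits = milvus_client.search(
    collection_name="rag_collection",  # colección hipotética ya poblada
    data=[emb],
    limit=3,
    output_fields=["text"],
)[0]
context = "\n".join(hit["entity"]["text"] for hit in hits)

# 2) Generación: el LLM responde apoyándose en el contexto recuperado.
answer = openai_client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[
        {"role": "user", "content": f"Context:\n{context}\n\nQuestion: {question}"}
    ],
)
print(answer.choices[0].message.content)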

          @@ -85,7 +86,7 @@ embedding_dim = len(test_embedding)
          1536
           [0.00988506618887186, -0.005540902726352215, 0.0068014683201909065, -0.03810417652130127, -0.018254263326525688, -0.041231658309698105, -0.007651153020560741, 0.03220026567578316, 0.01892443746328354, 0.00010708322952268645]
           
          -

          Cargar los datos en Milvus

          Open In Colab

          +

          Open In Colab +GitHub Repository

          La aplicación generalizada de grandes modelos lingüísticos pone de relieve la importancia de mejorar la precisión y pertinencia de sus respuestas. La Generación Mejorada por Recuperación (RAG) mejora los modelos con bases de conocimiento externas, proporcionando más información contextual y mitigando problemas como la alucinación y el conocimiento insuficiente. Sin embargo, basarse únicamente en paradigmas RAG sencillos tiene sus limitaciones, sobre todo cuando se trata de relaciones complejas entre entidades y preguntas con varios saltos, en las que el modelo suele tener dificultades para ofrecer respuestas precisas.

La introducción de grafos de conocimiento (KG) en el sistema RAG ofrece una nueva solución. Los KG presentan las entidades y sus relaciones de forma estructurada, proporcionando información más precisa y ayudando a RAG a gestionar mejor las tareas de respuesta a preguntas complejas. KG-RAG se encuentra todavía en sus primeras fases, y no hay consenso sobre cómo recuperar eficazmente entidades y relaciones a partir de KGs o cómo integrar la búsqueda de similitud vectorial con estructuras de grafos.

          En este cuaderno, introducimos un enfoque sencillo pero potente para mejorar en gran medida el rendimiento de este escenario. Se trata de un simple paradigma RAG con recuperación multidireccional y posterior reordenación, pero implementa Graph RAG de forma lógica y consigue un rendimiento puntero en el manejo de preguntas multi-salto. Veamos cómo se implementa.

          @@ -212,7 +213,7 @@ create_milvus_collection(entity_col_name) create_milvus_collection(relation_col_name) create_milvus_collection(passage_col_name)
      -

      Inserte los datos con su información de metadatos en las colecciones Milvus, incluidas las colecciones de entidades, relaciones y pasajes. La información de metadatos incluye el id de pasaje y el id de entidad o relación de adyacencia.

      +

      Inserte los datos con su información de metadatos en las colecciones de Milvus, incluidas las colecciones de entidades, relaciones y pasajes. La información de metadatos incluye el id de pasaje y el id de entidad o relación de adyacencia.

      def milvus_insert(
           collection_name: str,
           text_list: list[str],
      diff --git a/localization/v2.4.x/site/es/tutorials/hybrid_search_with_milvus.json b/localization/v2.4.x/site/es/tutorials/hybrid_search_with_milvus.json
      index 4c094adf0..e863c7386 100644
      --- a/localization/v2.4.x/site/es/tutorials/hybrid_search_with_milvus.json
      +++ b/localization/v2.4.x/site/es/tutorials/hybrid_search_with_milvus.json
      @@ -1 +1 @@
      -{"codeList":["$ pip install --upgrade pymilvus \"pymilvus[model]\"\n","# Run this cell to download the dataset\n$ wget http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv\n","import pandas as pd\n\nfile_path = \"quora_duplicate_questions.tsv\"\ndf = pd.read_csv(file_path, sep=\"\\t\")\nquestions = set()\nfor _, row in df.iterrows():\n    obj = row.to_dict()\n    questions.add(obj[\"question1\"][:512])\n    questions.add(obj[\"question2\"][:512])\n    if len(questions) > 500:  # Skip this if you want to use the full dataset\n        break\n\ndocs = list(questions)\n\n# example question\nprint(docs[0])\n","from milvus_model.hybrid import BGEM3EmbeddingFunction\n\nef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\ndense_dim = ef.dim[\"dense\"]\n\n# Generate embeddings using BGE-M3 model\ndocs_embeddings = ef(docs)\n","from pymilvus import (\n    connections,\n    utility,\n    FieldSchema,\n    CollectionSchema,\n    DataType,\n    Collection,\n)\n\n# Connect to Milvus given URI\nconnections.connect(uri=\"./milvus.db\")\n\n# Specify the data schema for the new Collection\nfields = [\n    # Use auto generated id as primary key\n    FieldSchema(\n        name=\"pk\", dtype=DataType.VARCHAR, is_primary=True, auto_id=True, max_length=100\n    ),\n    # Store the original text to retrieve based on semantically distance\n    FieldSchema(name=\"text\", dtype=DataType.VARCHAR, max_length=512),\n    # Milvus now supports both sparse and dense vectors,\n    # we can store each in a separate field to conduct hybrid search on both vectors\n    FieldSchema(name=\"sparse_vector\", dtype=DataType.SPARSE_FLOAT_VECTOR),\n    FieldSchema(name=\"dense_vector\", dtype=DataType.FLOAT_VECTOR, dim=dense_dim),\n]\nschema = CollectionSchema(fields)\n\n# Create collection (drop the old one if exists)\ncol_name = \"hybrid_demo\"\nif utility.has_collection(col_name):\n    Collection(col_name).drop()\ncol = Collection(col_name, schema, consistency_level=\"Strong\")\n\n# To make vector search efficient, we need to create indices for the vector fields\nsparse_index = {\"index_type\": \"SPARSE_INVERTED_INDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"sparse_vector\", sparse_index)\ndense_index = {\"index_type\": \"AUTOINDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"dense_vector\", dense_index)\ncol.load()\n","# For efficiency, we insert 50 records in each small batch\nfor i in range(0, len(docs), 50):\n    batched_entities = [\n        docs[i : i + 50],\n        docs_embeddings[\"sparse\"][i : i + 50],\n        docs_embeddings[\"dense\"][i : i + 50],\n    ]\n    col.insert(batched_entities)\nprint(\"Number of entities inserted:\", col.num_entities)\n","# Enter your search query\nquery = input(\"Enter your search query: \")\nprint(query)\n\n# Generate embeddings for the query\nquery_embeddings = ef([query])\n# print(query_embeddings)\n","from pymilvus import (\n    AnnSearchRequest,\n    WeightedRanker,\n)\n\n\ndef dense_search(col, query_dense_embedding, limit=10):\n    search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    res = col.search(\n        [query_dense_embedding],\n        anns_field=\"dense_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef sparse_search(col, query_sparse_embedding, limit=10):\n    search_params = {\n        \"metric_type\": \"IP\",\n        \"params\": {},\n    }\n    res = col.search(\n        [query_sparse_embedding],\n        
anns_field=\"sparse_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef hybrid_search(\n    col,\n    query_dense_embedding,\n    query_sparse_embedding,\n    sparse_weight=1.0,\n    dense_weight=1.0,\n    limit=10,\n):\n    dense_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    dense_req = AnnSearchRequest(\n        [query_dense_embedding], \"dense_vector\", dense_search_params, limit=limit\n    )\n    sparse_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    sparse_req = AnnSearchRequest(\n        [query_sparse_embedding], \"sparse_vector\", sparse_search_params, limit=limit\n    )\n    rerank = WeightedRanker(sparse_weight, dense_weight)\n    res = col.hybrid_search(\n        [sparse_req, dense_req], rerank=rerank, limit=limit, output_fields=[\"text\"]\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n","dense_results = dense_search(col, query_embeddings[\"dense\"][0])\nsparse_results = sparse_search(col, query_embeddings[\"sparse\"][0])\nhybrid_results = hybrid_search(\n    col,\n    query_embeddings[\"dense\"][0],\n    query_embeddings[\"sparse\"][0],\n    sparse_weight=0.7,\n    dense_weight=1.0,\n)\n","def doc_text_formatting(ef, query, docs):\n    tokenizer = ef.model.tokenizer\n    query_tokens_ids = tokenizer.encode(query, return_offsets_mapping=True)\n    query_tokens = tokenizer.convert_ids_to_tokens(query_tokens_ids)\n    formatted_texts = []\n\n    for doc in docs:\n        ldx = 0\n        landmarks = []\n        encoding = tokenizer.encode_plus(doc, return_offsets_mapping=True)\n        tokens = tokenizer.convert_ids_to_tokens(encoding[\"input_ids\"])[1:-1]\n        offsets = encoding[\"offset_mapping\"][1:-1]\n        for token, (start, end) in zip(tokens, offsets):\n            if token in query_tokens:\n                if len(landmarks) != 0 and start == landmarks[-1]:\n                    landmarks[-1] = end\n                else:\n                    landmarks.append(start)\n                    landmarks.append(end)\n        close = False\n        formatted_text = \"\"\n        for i, c in enumerate(doc):\n            if ldx == len(landmarks):\n                pass\n            elif i == landmarks[ldx]:\n                if close:\n                    formatted_text += \"\"\n                else:\n                    formatted_text += \"\"\n                close = not close\n                ldx = ldx + 1\n            formatted_text += c\n        if close is True:\n            formatted_text += \"\"\n        formatted_texts.append(formatted_text)\n    return formatted_texts\n","from IPython.display import Markdown, display\n\n# Dense search results\ndisplay(Markdown(\"**Dense Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, dense_results)\nfor result in dense_results:\n    display(Markdown(result))\n\n# Sparse search results\ndisplay(Markdown(\"\\n**Sparse Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, sparse_results)\nfor result in formatted_results:\n    display(Markdown(result))\n\n# Hybrid search results\ndisplay(Markdown(\"\\n**Hybrid Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, hybrid_results)\nfor result in formatted_results:\n    display(Markdown(result))\n"],"headingContent":"","anchorList":[{"label":"Búsqueda híbrida con Milvus","href":"Hybrid-Search-with-Milvus","type":1,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":["$ pip install --upgrade pymilvus \"pymilvus[model]\"\n","# Run this cell to download the dataset\n$ wget http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv\n","import pandas as pd\n\nfile_path = \"quora_duplicate_questions.tsv\"\ndf = pd.read_csv(file_path, sep=\"\\t\")\nquestions = set()\nfor _, row in df.iterrows():\n    obj = row.to_dict()\n    questions.add(obj[\"question1\"][:512])\n    questions.add(obj[\"question2\"][:512])\n    if len(questions) > 500:  # Skip this if you want to use the full dataset\n        break\n\ndocs = list(questions)\n\n# example question\nprint(docs[0])\n","from milvus_model.hybrid import BGEM3EmbeddingFunction\n\nef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\ndense_dim = ef.dim[\"dense\"]\n\n# Generate embeddings using BGE-M3 model\ndocs_embeddings = ef(docs)\n","from pymilvus import (\n    connections,\n    utility,\n    FieldSchema,\n    CollectionSchema,\n    DataType,\n    Collection,\n)\n\n# Connect to Milvus given URI\nconnections.connect(uri=\"./milvus.db\")\n\n# Specify the data schema for the new Collection\nfields = [\n    # Use auto generated id as primary key\n    FieldSchema(\n        name=\"pk\", dtype=DataType.VARCHAR, is_primary=True, auto_id=True, max_length=100\n    ),\n    # Store the original text to retrieve based on semantically distance\n    FieldSchema(name=\"text\", dtype=DataType.VARCHAR, max_length=512),\n    # Milvus now supports both sparse and dense vectors,\n    # we can store each in a separate field to conduct hybrid search on both vectors\n    FieldSchema(name=\"sparse_vector\", dtype=DataType.SPARSE_FLOAT_VECTOR),\n    FieldSchema(name=\"dense_vector\", dtype=DataType.FLOAT_VECTOR, dim=dense_dim),\n]\nschema = CollectionSchema(fields)\n\n# Create collection (drop the old one if exists)\ncol_name = \"hybrid_demo\"\nif utility.has_collection(col_name):\n    Collection(col_name).drop()\ncol = Collection(col_name, schema, consistency_level=\"Strong\")\n\n# To make vector search efficient, we need to create indices for the vector fields\nsparse_index = {\"index_type\": \"SPARSE_INVERTED_INDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"sparse_vector\", sparse_index)\ndense_index = {\"index_type\": \"AUTOINDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"dense_vector\", dense_index)\ncol.load()\n","# For efficiency, we insert 50 records in each small batch\nfor i in range(0, len(docs), 50):\n    batched_entities = [\n        docs[i : i + 50],\n        docs_embeddings[\"sparse\"][i : i + 50],\n        docs_embeddings[\"dense\"][i : i + 50],\n    ]\n    col.insert(batched_entities)\nprint(\"Number of entities inserted:\", col.num_entities)\n","# Enter your search query\nquery = input(\"Enter your search query: \")\nprint(query)\n\n# Generate embeddings for the query\nquery_embeddings = ef([query])\n# print(query_embeddings)\n","from pymilvus import (\n    AnnSearchRequest,\n    WeightedRanker,\n)\n\n\ndef dense_search(col, query_dense_embedding, limit=10):\n    search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    res = col.search(\n        [query_dense_embedding],\n        anns_field=\"dense_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef sparse_search(col, query_sparse_embedding, limit=10):\n    search_params = {\n        \"metric_type\": \"IP\",\n        \"params\": {},\n    }\n    res = col.search(\n        [query_sparse_embedding],\n        
anns_field=\"sparse_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef hybrid_search(\n    col,\n    query_dense_embedding,\n    query_sparse_embedding,\n    sparse_weight=1.0,\n    dense_weight=1.0,\n    limit=10,\n):\n    dense_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    dense_req = AnnSearchRequest(\n        [query_dense_embedding], \"dense_vector\", dense_search_params, limit=limit\n    )\n    sparse_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    sparse_req = AnnSearchRequest(\n        [query_sparse_embedding], \"sparse_vector\", sparse_search_params, limit=limit\n    )\n    rerank = WeightedRanker(sparse_weight, dense_weight)\n    res = col.hybrid_search(\n        [sparse_req, dense_req], rerank=rerank, limit=limit, output_fields=[\"text\"]\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n","dense_results = dense_search(col, query_embeddings[\"dense\"][0])\nsparse_results = sparse_search(col, query_embeddings[\"sparse\"]._getrow(0))\nhybrid_results = hybrid_search(\n    col,\n    query_embeddings[\"dense\"][0],\n    query_embeddings[\"sparse\"]._getrow(0),\n    sparse_weight=0.7,\n    dense_weight=1.0,\n)\n","def doc_text_formatting(ef, query, docs):\n    tokenizer = ef.model.tokenizer\n    query_tokens_ids = tokenizer.encode(query, return_offsets_mapping=True)\n    query_tokens = tokenizer.convert_ids_to_tokens(query_tokens_ids)\n    formatted_texts = []\n\n    for doc in docs:\n        ldx = 0\n        landmarks = []\n        encoding = tokenizer.encode_plus(doc, return_offsets_mapping=True)\n        tokens = tokenizer.convert_ids_to_tokens(encoding[\"input_ids\"])[1:-1]\n        offsets = encoding[\"offset_mapping\"][1:-1]\n        for token, (start, end) in zip(tokens, offsets):\n            if token in query_tokens:\n                if len(landmarks) != 0 and start == landmarks[-1]:\n                    landmarks[-1] = end\n                else:\n                    landmarks.append(start)\n                    landmarks.append(end)\n        close = False\n        formatted_text = \"\"\n        for i, c in enumerate(doc):\n            if ldx == len(landmarks):\n                pass\n            elif i == landmarks[ldx]:\n                if close:\n                    formatted_text += \"\"\n                else:\n                    formatted_text += \"\"\n                close = not close\n                ldx = ldx + 1\n            formatted_text += c\n        if close is True:\n            formatted_text += \"\"\n        formatted_texts.append(formatted_text)\n    return formatted_texts\n","from IPython.display import Markdown, display\n\n# Dense search results\ndisplay(Markdown(\"**Dense Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, dense_results)\nfor result in dense_results:\n    display(Markdown(result))\n\n# Sparse search results\ndisplay(Markdown(\"\\n**Sparse Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, sparse_results)\nfor result in formatted_results:\n    display(Markdown(result))\n\n# Hybrid search results\ndisplay(Markdown(\"\\n**Hybrid Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, hybrid_results)\nfor result in formatted_results:\n    display(Markdown(result))\n"],"headingContent":"Hybrid Search with Milvus","anchorList":[{"label":"Búsqueda híbrida con Milvus","href":"Hybrid-Search-with-Milvus","type":1,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/es/tutorials/hybrid_search_with_milvus.md b/localization/v2.4.x/site/es/tutorials/hybrid_search_with_milvus.md
      index 6769cf5d6..bc77a0055 100644
      --- a/localization/v2.4.x/site/es/tutorials/hybrid_search_with_milvus.md
      +++ b/localization/v2.4.x/site/es/tutorials/hybrid_search_with_milvus.md
      @@ -18,9 +18,10 @@ title: Búsqueda híbrida con Milvus
                 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
               >
             
-Open In Colab
+Open In Colab
+GitHub Repository

      -

      En este tutorial, demostraremos cómo realizar una búsqueda híbrida con Milvus y el modelo BGE-M3. El modelo BGE-M3 puede convertir texto en vectores densos y dispersos. Milvus admite el almacenamiento de ambos tipos de vectores en una colección, lo que permite una búsqueda híbrida que mejora la relevancia de los resultados.

      +

      En este tutorial, demostraremos cómo realizar una búsqueda híbrida con Milvus y el modelo BGE-M3. El modelo BGE-M3 puede convertir texto en vectores densos y dispersos. Milvus admite el almacenamiento de ambos tipos de vectores en una colección, lo que permite una búsqueda híbrida que mejora la relevancia de los resultados.

      Milvus admite métodos de recuperación densos, dispersos e híbridos:

      • Recuperación densa: Utiliza el contexto semántico para comprender el significado de las consultas.
@@ -55,7 +56,7 @@ docs = list(questions)
      What is the strongest Kevlar cord?
       
      -

      Utilizar el modelo BGE-M3 para la incrustación

      El modelo BGE-M3 puede incrustar textos como vectores densos y dispersos.

      +

      Uso del modelo BGE-M3 para la incrustación

      El modelo BGE-M3 puede incrustar textos como vectores densos y dispersos.

      from milvus_model.hybrid import BGEM3EmbeddingFunction
       
       ef = BGEM3EmbeddingFunction(use_fp16=False, device="cpu")
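The hunk above truncates the embedding snippet. For orientation, a minimal sketch of what the call returns (assuming the pymilvus[model] extra installed at the top of this tutorial):

# Sketch: BGEM3EmbeddingFunction returns a dict with one entry per vector type.
docs_embeddings = ef(["What is Milvus?"])

print(ef.dim["dense"])                    # dense dimension (1024 for BGE-M3)
print(docs_embeddings["dense"][0].shape)  # one dense numpy vector per input text
print(docs_embeddings["sparse"])          # SciPy sparse matrix, one row per input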
      @@ -201,11 +202,11 @@ def dense_search(col,
       

      Vamos a ejecutar tres búsquedas diferentes con las funciones definidas:

      dense_results = dense_search(col, query_embeddings["dense"][0])
      -sparse_results = sparse_search(col, query_embeddings["sparse"][0])
      +sparse_results = sparse_search(col, query_embeddings["sparse"]._getrow(0))
       hybrid_results = hybrid_search(
           col,
           query_embeddings["dense"][0],
      -    query_embeddings["sparse"][0],
      +    query_embeddings["sparse"]._getrow(0),
           sparse_weight=0.7,
           dense_weight=1.0,
       )
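A note on the ._getrow(0) change above: the "sparse" entry is a SciPy sparse matrix, and _getrow(0) extracts the first row as a 1 x n sparse vector, the shape that sparse_search and AnnSearchRequest expect. A minimal sketch with made-up values (assumes scipy is installed):

from scipy.sparse import csr_matrix

# Stand-in for query_embeddings["sparse"]: one row per query text.
sparse = csr_matrix([[0.0, 0.31, 0.0, 0.72]])

row = sparse._getrow(0)  # row 0 as a 1 x n sparse matrix, mirroring the change above
print(row.shape)         # (1, 4)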
      @@ -277,26 +278,26 @@ formatted_results = doc_text_formatting(ef, query, hybrid_results)
       

      ¿Cómo puedo aprender seguridad informática?

      ¿Cuál es la mejor manera de iniciarse en la robótica? ¿Cuál es la mejor placa de desarrollo con la que puedo empezar a trabajar?

      ¿Cómo puedo aprender a hablar inglés con fluidez?

      -

      ¿Cuáles son las mejores maneras de aprender francés?

      +

      ¿Cuáles son las mejores formas de aprender francés?

      ¿Cómo se puede hacer que la física sea fácil de aprender?

      ¿Cómo nos preparamos para el UPSC?

      Resultados de la búsqueda dispersa:

      ¿Qué es la programación Java? ¿Cómo aprender el lenguaje de programación Java?

      -

      ¿Cuál es la mejor manera de empezar a aprender robótica?

      +

      ¿Cuál es la mejor manera de empezar a aprender robótica?

      ¿Cuál es la alternativa al aprendizaje automático?

      ¿Cómo creo un nuevo Terminal y un nuevo shell en Linux usando programación en C?

      -

      ¿Cómo puedo crear un nuevo shell en un nuevo terminal utilizando la programación C (terminal de Linux)?

      +

      ¿Cómo puedo crear un nuevo shell en un nuevo terminal utilizando la programación C (terminal de Linux)?

      ¿Qué negocio es mejor empezar en Hyderabad?

      ¿Qué negocio es mejor para empezar en Hyderabad?

      -

      ¿Cuál es la mejor forma de iniciarse en la robótica? ¿Cuál es la mejor placa de desarrollo con la que puedo empezar a trabajar?

      +

      ¿Cuál es la mejor forma de iniciarse en la robótica? ¿Cuál es la mejor placa de desarrollo con la que puedo empezar a trabajar?

      ¿Qué matemáticas necesita un novato para entender los algoritmos de programación informática? ¿Qué libros sobre algoritmos son adecuados para un completo principiante?

      ¿Cómo hacer que la vida se adapte a ti y que la vida deje de maltratarte mental y emocionalmente?

      Resultados de la búsqueda híbrida:

      -

      ¿Cuál es la mejor manera de iniciarse en la robótica? ¿Cuál es la mejor placa de desarrollo con la que puedo empezar a trabajar?

      +

      ¿Cuál es la mejor manera de iniciarse en la robótica? ¿Cuál es la mejor placa de desarrollo con la que puedo empezar a trabajar?

      ¿Qué es la programación Java? ¿Cómo aprender el lenguaje de programación Java?

      -

      ¿Cuál es la mejor manera de empezar a aprender robótica?

      +

      ¿Cuál es la mejor manera de empezar a aprender robótica?

      ¿Cómo nos preparamos para el UPSC?

      -

      ¿Cómo hacer que la física sea fácil de aprender?

      +

      ¿Cómo hacer que la física sea fácil de aprender?

      ¿Cuáles son las mejores maneras de aprender francés?

      ¿Cómo puedo aprender a hablar inglés con fluidez?

      ¿Cómo puedo aprender seguridad informática?

      diff --git a/localization/v2.4.x/site/es/tutorials/image_similarity_search.json b/localization/v2.4.x/site/es/tutorials/image_similarity_search.json index 186ff991e..e60b251bb 100644 --- a/localization/v2.4.x/site/es/tutorials/image_similarity_search.json +++ b/localization/v2.4.x/site/es/tutorials/image_similarity_search.json @@ -1 +1 @@ -{"codeList":["!wget https://github.com/milvus-io/pymilvus-assets/releases/download/imagedata/reverse_image_search.zip\n!unzip -q -o reverse_image_search.zip\n","$ pip install pymilvus --upgrade\n$ pip install timm\n","import torch\nfrom PIL import Image\nimport timm\nfrom sklearn.preprocessing import normalize\nfrom timm.data import resolve_data_config\nfrom timm.data.transforms_factory import create_transform\n\n\nclass FeatureExtractor:\n def __init__(self, modelname):\n # Load the pre-trained model\n self.model = timm.create_model(\n modelname, pretrained=True, num_classes=0, global_pool=\"avg\"\n )\n self.model.eval()\n\n # Get the input size required by the model\n self.input_size = self.model.default_cfg[\"input_size\"]\n\n config = resolve_data_config({}, model=modelname)\n # Get the preprocessing function provided by TIMM for the model\n self.preprocess = create_transform(**config)\n\n def __call__(self, imagepath):\n # Preprocess the input image\n input_image = Image.open(imagepath).convert(\"RGB\") # Convert to RGB if needed\n input_image = self.preprocess(input_image)\n\n # Convert the image to a PyTorch tensor and add a batch dimension\n input_tensor = input_image.unsqueeze(0)\n\n # Perform inference\n with torch.no_grad():\n output = self.model(input_tensor)\n\n # Extract the feature vector\n feature_vector = output.squeeze().numpy()\n\n return normalize(feature_vector.reshape(1, -1), norm=\"l2\").flatten()\n","from pymilvus import MilvusClient\n\n# Set up a Milvus client\nclient = MilvusClient(uri=\"example.db\")\n# Create a collection in quick setup mode\nif client.has_collection(collection_name=\"image_embeddings\"):\n client.drop_collection(collection_name=\"image_embeddings\")\nclient.create_collection(\n collection_name=\"image_embeddings\",\n vector_field_name=\"vector\",\n dimension=512,\n auto_id=True,\n enable_dynamic_field=True,\n metric_type=\"COSINE\",\n)\n","import os\n\nextractor = FeatureExtractor(\"resnet34\")\n\nroot = \"./train\"\ninsert = True\nif insert is True:\n for dirpath, foldername, filenames in os.walk(root):\n for filename in filenames:\n if filename.endswith(\".JPEG\"):\n filepath = dirpath + \"/\" + filename\n image_embedding = extractor(filepath)\n client.insert(\n \"image_embeddings\",\n {\"vector\": image_embedding, \"filename\": filepath},\n )\n","from IPython.display import display\n\nquery_image = \"./test/Afghan_hound/n02088094_4261.JPEG\"\n\nresults = client.search(\n \"image_embeddings\",\n data=[extractor(query_image)],\n output_fields=[\"filename\"],\n search_params={\"metric_type\": \"COSINE\"},\n)\nimages = []\nfor result in results:\n for hit in result[:10]:\n filename = hit[\"entity\"][\"filename\"]\n img = Image.open(filename)\n img = img.resize((150, 150))\n images.append(img)\n\nwidth = 150 * 5\nheight = 150 * 2\nconcatenated_image = Image.new(\"RGB\", (width, height))\n\nfor idx, img in enumerate(images):\n x = idx % 5\n y = idx // 5\n concatenated_image.paste(img, (x * 150, y * 150))\ndisplay(\"query\")\ndisplay(Image.open(query_image).resize((150, 150)))\ndisplay(\"results\")\ndisplay(concatenated_image)\n"],"headingContent":"","anchorList":[{"label":"Búsqueda de imágenes con 
Milvus","href":"Image-Search-with-Milvus","type":1,"isActive":false},{"label":"Preparación del conjunto de datos","href":"Dataset-Preparation","type":2,"isActive":false},{"label":"Requisitos previos","href":"Prequisites","type":2,"isActive":false},{"label":"Definir el extractor de características","href":"Define-the-Feature-Extractor","type":2,"isActive":false},{"label":"Crear una colección Milvus","href":"Create-a-Milvus-Collection","type":2,"isActive":false},{"label":"Insertar los Embeddings en Milvus","href":"Insert-the-Embeddings-to-Milvus","type":2,"isActive":false},{"label":"Despliegue rápido","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["!wget https://github.com/milvus-io/pymilvus-assets/releases/download/imagedata/reverse_image_search.zip\n!unzip -q -o reverse_image_search.zip\n","$ pip install pymilvus --upgrade\n$ pip install timm\n","import torch\nfrom PIL import Image\nimport timm\nfrom sklearn.preprocessing import normalize\nfrom timm.data import resolve_data_config\nfrom timm.data.transforms_factory import create_transform\n\n\nclass FeatureExtractor:\n def __init__(self, modelname):\n # Load the pre-trained model\n self.model = timm.create_model(\n modelname, pretrained=True, num_classes=0, global_pool=\"avg\"\n )\n self.model.eval()\n\n # Get the input size required by the model\n self.input_size = self.model.default_cfg[\"input_size\"]\n\n config = resolve_data_config({}, model=modelname)\n # Get the preprocessing function provided by TIMM for the model\n self.preprocess = create_transform(**config)\n\n def __call__(self, imagepath):\n # Preprocess the input image\n input_image = Image.open(imagepath).convert(\"RGB\") # Convert to RGB if needed\n input_image = self.preprocess(input_image)\n\n # Convert the image to a PyTorch tensor and add a batch dimension\n input_tensor = input_image.unsqueeze(0)\n\n # Perform inference\n with torch.no_grad():\n output = self.model(input_tensor)\n\n # Extract the feature vector\n feature_vector = output.squeeze().numpy()\n\n return normalize(feature_vector.reshape(1, -1), norm=\"l2\").flatten()\n","from pymilvus import MilvusClient\n\n# Set up a Milvus client\nclient = MilvusClient(uri=\"example.db\")\n# Create a collection in quick setup mode\nif client.has_collection(collection_name=\"image_embeddings\"):\n client.drop_collection(collection_name=\"image_embeddings\")\nclient.create_collection(\n collection_name=\"image_embeddings\",\n vector_field_name=\"vector\",\n dimension=512,\n auto_id=True,\n enable_dynamic_field=True,\n metric_type=\"COSINE\",\n)\n","import os\n\nextractor = FeatureExtractor(\"resnet34\")\n\nroot = \"./train\"\ninsert = True\nif insert is True:\n for dirpath, foldername, filenames in os.walk(root):\n for filename in filenames:\n if filename.endswith(\".JPEG\"):\n filepath = dirpath + \"/\" + filename\n image_embedding = extractor(filepath)\n client.insert(\n \"image_embeddings\",\n {\"vector\": image_embedding, \"filename\": filepath},\n )\n","from IPython.display import display\n\nquery_image = \"./test/Afghan_hound/n02088094_4261.JPEG\"\n\nresults = client.search(\n \"image_embeddings\",\n data=[extractor(query_image)],\n output_fields=[\"filename\"],\n search_params={\"metric_type\": \"COSINE\"},\n)\nimages = []\nfor result in results:\n for hit in result[:10]:\n filename = hit[\"entity\"][\"filename\"]\n img = Image.open(filename)\n img = img.resize((150, 150))\n images.append(img)\n\nwidth = 150 * 5\nheight = 150 * 2\nconcatenated_image = Image.new(\"RGB\", 
(width, height))\n\nfor idx, img in enumerate(images):\n x = idx % 5\n y = idx // 5\n concatenated_image.paste(img, (x * 150, y * 150))\ndisplay(\"query\")\ndisplay(Image.open(query_image).resize((150, 150)))\ndisplay(\"results\")\ndisplay(concatenated_image)\n"],"headingContent":"Image Search with Milvus","anchorList":[{"label":"Búsqueda de imágenes con Milvus","href":"Image-Search-with-Milvus","type":1,"isActive":false},{"label":"Preparación del conjunto de datos","href":"Dataset-Preparation","type":2,"isActive":false},{"label":"Requisitos previos","href":"Prequisites","type":2,"isActive":false},{"label":"Definir el extractor de características","href":"Define-the-Feature-Extractor","type":2,"isActive":false},{"label":"Crear una colección Milvus","href":"Create-a-Milvus-Collection","type":2,"isActive":false},{"label":"Insertar los Embeddings en Milvus","href":"Insert-the-Embeddings-to-Milvus","type":2,"isActive":false},{"label":"Despliegue rápido","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/tutorials/image_similarity_search.md b/localization/v2.4.x/site/es/tutorials/image_similarity_search.md index 757ad4a48..b5b9f79a9 100644 --- a/localization/v2.4.x/site/es/tutorials/image_similarity_search.md +++ b/localization/v2.4.x/site/es/tutorials/image_similarity_search.md @@ -18,7 +18,8 @@ title: Búsqueda de imágenes con Milvus d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

-Open In Colab
+Open In Colab
+GitHub Repository

      En este cuaderno, le mostraremos cómo utilizar Milvus para buscar imágenes similares en un conjunto de datos. Utilizaremos un subconjunto del conjunto de datos ImageNet y buscaremos una imagen de un sabueso afgano para demostrarlo.

      Preparación del conjunto de datos

      -

      Si utilizas Google Colab, para activar las dependencias que acabas de instalar, es posible que tengas que reiniciar el tiempo de ejecución. (Haga clic en el menú "Tiempo de ejecución" en la parte superior de la pantalla y seleccione "Reiniciar sesión" en el menú desplegable).

      +

      Si utilizas Google Colab, para activar las dependencias recién instaladas, es posible que tengas que reiniciar el tiempo de ejecución. (Haga clic en el menú "Tiempo de ejecución" en la parte superior de la pantalla y seleccione "Reiniciar sesión" en el menú desplegable).

      Definir el extractor de características

-Open In Colab
+Open In Colab
+GitHub Repository

      Este tutorial muestra el RAG multimodal con Milvus, el modelo BGE visualizado y GPT-4o. Con este sistema, los usuarios pueden cargar una imagen y editar instrucciones de texto, que son procesadas por el modelo de recuperación compuesto de BGE para buscar imágenes candidatas. A continuación, GPT-4o actúa como reranker, seleccionando la imagen más adecuada y proporcionando la justificación de la elección. Esta potente combinación permite una experiencia de búsqueda de imágenes intuitiva y sin fisuras, aprovechando Milvus para una recuperación eficaz, el modelo BGE para un procesamiento y una correspondencia precisos de las imágenes, y GPT-4o para una reordenación avanzada.

      @@ -130,7 +131,7 @@ Number of encoded images: 900
      • Establecer el uri como un archivo local, por ejemplo ./milvus_demo.db, es el método más conveniente, ya que utiliza automáticamente Milvus Lite para almacenar todos los datos en este archivo.
      • Si tiene una gran escala de datos, puede configurar un servidor Milvus más eficiente en docker o kubernetes. En esta configuración, por favor utilice la uri del servidor, por ejemplohttp://localhost:19530, como su uri.
-• Si desea utilizar Zilliz Cloud, el servicio en la nube totalmente gestionado para Milvus, ajuste uri y token, que corresponden al punto final público y a la clave Api en Zilliz Cloud.
+• Si desea utilizar Zilliz Cloud, el servicio en la nube totalmente gestionado para Milvus, ajuste uri y token, que se corresponden con el punto final público y la clave Api en Zilliz Cloud.
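To illustrate the three uri options from the bullets above, a sketch (the Zilliz Cloud endpoint and token are placeholders):

from pymilvus import MilvusClient

# Milvus Lite: store everything in a local file.
client = MilvusClient(uri="./milvus_demo.db")

# Standalone or cluster server deployed on Docker or Kubernetes.
client = MilvusClient(uri="http://localhost:19530")

# Zilliz Cloud: public endpoint plus API key of your cluster.
client = MilvusClient(
    uri="https://<your-public-endpoint>",  # placeholder
    token="<your-api-key>",                # placeholder
)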
      from pymilvus import MilvusClient
      @@ -177,7 +178,7 @@ milvus_client.insert(
               >
             
           

      En esta sección, en primer lugar, buscaremos imágenes relevantes mediante una consulta multimodal y, a continuación, utilizaremos el servicio LLM para jerarquizar los resultados y encontrar el mejor con una explicación.

      -

      Ahora estamos preparados para realizar la búsqueda avanzada de imágenes con datos de consulta compuestos tanto por imágenes como por instrucciones de texto.

      +

      Ahora estamos listos para realizar la búsqueda avanzada de imágenes con datos de consulta compuestos tanto por imágenes como por instrucciones de texto.

      query_image = os.path.join(
           data_dir, "leopard.jpg"
       )  # Change to your own query image path
      diff --git a/localization/v2.4.x/site/es/tutorials/tutorials-overview.json b/localization/v2.4.x/site/es/tutorials/tutorials-overview.json
      index 5cc70d12e..0da6cb876 100644
      --- a/localization/v2.4.x/site/es/tutorials/tutorials-overview.json
      +++ b/localization/v2.4.x/site/es/tutorials/tutorials-overview.json
      @@ -1 +1 @@
      -{"codeList":[],"headingContent":"","anchorList":[{"label":"Visión general de los tutoriales","href":"Tutorials-Overview","type":1,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":[],"headingContent":"Tutorials Overview","anchorList":[{"label":"Visión general de los tutoriales","href":"Tutorials-Overview","type":1,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/es/tutorials/tutorials-overview.md b/localization/v2.4.x/site/es/tutorials/tutorials-overview.md
      index 1ca5eea8d..52a875ff2 100644
      --- a/localization/v2.4.x/site/es/tutorials/tutorials-overview.md
      +++ b/localization/v2.4.x/site/es/tutorials/tutorials-overview.md
      @@ -1,7 +1,7 @@
       ---
       id: tutorials-overview.md
       summary: Esta página le ofrece una lista de tutoriales para interactuar con Milvus.
      -title: Tutoriales
      +title: Visión general de los tutoriales
       ---
       

      Visión general de los tutoriales

      Esta página ofrece una lista de tutoriales para que pueda interactuar con Milvus.

      +

      Esta página proporciona una lista de tutoriales para que pueda interactuar con Milvus.

@@ -29,7 +29,6 @@ title: Tutoriales
@@ -38,5 +37,7 @@ title: Tutoriales

| Tutorial | Caso práctico | Características de Milvus relacionadas |
| Búsqueda de imágenes con Milvus | Búsqueda semántica | búsqueda vectorial, campo dinámico |
| Búsqueda híbrida con Milvus | Búsqueda híbrida | búsqueda híbrida, multivectorial, incrustación densa, incrustación dispersa |
| Búsqueda multimodal con multivectores | Búsqueda semántica | multivectorial, búsqueda híbrida |
| Sistema de respuesta a preguntas | Respuesta a preguntas | búsqueda vectorial |
| Sistema de recomendación | Sistema de recomendación | búsqueda vectorial |
| Búsqueda por similitud de vídeos | Búsqueda semántica | búsqueda vectorial |
| Buscador de texto | Búsqueda semántica | búsqueda vectorial |
| Búsqueda de imágenes por texto | Búsqueda semántica | búsqueda vectorial |
| Deduplicación de imágenes | Deduplicación | búsqueda vectorial |
| Graph RAG con Milvus | RAG | búsqueda gráfica |
| Recuperación contextual con Milvus | Inicio rápido | búsqueda vectorial |
      diff --git a/localization/v2.4.x/site/es/userGuide/clustering-compaction.json b/localization/v2.4.x/site/es/userGuide/clustering-compaction.json index b28e87edd..32cff786b 100644 --- a/localization/v2.4.x/site/es/userGuide/clustering-compaction.json +++ b/localization/v2.4.x/site/es/userGuide/clustering-compaction.json @@ -1 +1 @@ -{"codeList":["dataCoord:\n compaction:\n clustering:\n enable: true \n autoEnable: false \n triggerInterval: 600 \n minInterval: 3600 \n maxInterval: 259200 \n newDataSizeThreshold: 512m \n timeout: 7200\n \nqueryNode:\n enableSegmentPrune: true \n\ndatanode:\n clusteringCompaction:\n memoryBufferRatio: 0.1 \n workPoolSize: 8 \ncommon:\n usePartitionKeyAsClusteringKey: true \n","default_fields = [\n FieldSchema(name=\"id\", dtype=DataType.INT64, is_primary=True),\n FieldSchema(name=\"key\", dtype=DataType.INT64, is_clustering_key=True),\n FieldSchema(name=\"var\", dtype=DataType.VARCHAR, max_length=1000, is_primary=False),\n FieldSchema(name=\"embeddings\", dtype=DataType.FLOAT_VECTOR, dim=dim)\n]\n\ndefault_schema = CollectionSchema(\n fields=default_fields, \n description=\"test clustering-key collection\"\n)\n\ncoll1 = Collection(name=\"clustering_test\", schema=default_schema)\n","coll1.compact(is_clustering=True)\ncoll1.get_compaction_state(is_clustering=True)\ncoll1.wait_for_compaction_completed(is_clustering=True)\n"],"headingContent":"","anchorList":[{"label":"Compactación en clústeres","href":"Clustering-Compaction","type":1,"isActive":false},{"label":"Visión general","href":"Overview","type":2,"isActive":false},{"label":"Utilizar la compactación de clústeres","href":"Use-Clustering-Compaction","type":2,"isActive":false},{"label":"Configuración de la colección","href":"Collection-Configuration","type":2,"isActive":false},{"label":"Activar la compactación en clúster","href":"Trigger-Clustering-Compaction","type":2,"isActive":false},{"label":"Buenas prácticas","href":"Best-practices","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["dataCoord:\n compaction:\n clustering:\n enable: true \n autoEnable: false \n triggerInterval: 600 \n minInterval: 3600 \n maxInterval: 259200 \n newDataSizeThreshold: 512m \n timeout: 7200\n \nqueryNode:\n enableSegmentPrune: true \n\ndatanode:\n clusteringCompaction:\n memoryBufferRatio: 0.1 \n workPoolSize: 8 \ncommon:\n usePartitionKeyAsClusteringKey: true \n","default_fields = [\n FieldSchema(name=\"id\", dtype=DataType.INT64, is_primary=True),\n FieldSchema(name=\"key\", dtype=DataType.INT64, is_clustering_key=True),\n FieldSchema(name=\"var\", dtype=DataType.VARCHAR, max_length=1000, is_primary=False),\n FieldSchema(name=\"embeddings\", dtype=DataType.FLOAT_VECTOR, dim=dim)\n]\n\ndefault_schema = CollectionSchema(\n fields=default_fields, \n description=\"test clustering-key collection\"\n)\n\ncoll1 = Collection(name=\"clustering_test\", schema=default_schema)\n","coll1.compact(is_clustering=True)\ncoll1.get_compaction_state(is_clustering=True)\ncoll1.wait_for_compaction_completed(is_clustering=True)\n"],"headingContent":"Clustering Compaction","anchorList":[{"label":"Compactación en clústeres","href":"Clustering-Compaction","type":1,"isActive":false},{"label":"Visión general","href":"Overview","type":2,"isActive":false},{"label":"Utilizar la compactación de clústeres","href":"Use-Clustering-Compaction","type":2,"isActive":false},{"label":"Activar la compactación en clúster","href":"Trigger-Clustering-Compaction","type":2,"isActive":false},{"label":"Buenas 
prácticas","href":"Best-practices","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/userGuide/clustering-compaction.md b/localization/v2.4.x/site/es/userGuide/clustering-compaction.md index dfb594cbb..8d5be25a4 100644 --- a/localization/v2.4.x/site/es/userGuide/clustering-compaction.md +++ b/localization/v2.4.x/site/es/userGuide/clustering-compaction.md @@ -1,6 +1,6 @@ --- id: clustering-compaction.md -title: Agrupación Compactación +title: Compactación en clústeres related_key: 'clustering, compaction' summary: >- La compactación en clústeres está diseñada para mejorar el rendimiento de la @@ -45,7 +45,7 @@ summary: >- Without clustering Compaction Compactación sin agrupación

      Si Milvus puede distribuir entidades entre segmentos basándose en los valores de un campo específico, el alcance de la búsqueda puede restringirse dentro de un segmento, mejorando así el rendimiento de la búsqueda.

      -

      La compactación por agrupamiento es una función de Milvus que redistribuye las entidades entre los segmentos de una colección basándose en los valores de un campo escalar. Para activar esta función, primero debe seleccionar un campo escalar como clave de agrupación. Esto permite a Milvus redistribuir entidades en un segmento cuando sus valores de clave de agrupación caen dentro de un rango específico. Cuando usted activa una compactación de clustering, Milvus genera/actualiza un índice global llamado PartitionStats, que registra la relación de mapeo entre segmentos y valores de clave de clustering.

      +

La compactación por agrupamiento es una función de Milvus que redistribuye las entidades entre los segmentos de una colección basándose en los valores de un campo escalar. Para activar esta función, primero debe seleccionar un campo escalar como clave de agrupación. Esto permite a Milvus redistribuir entidades en un segmento cuando sus valores de clave de agrupación caen dentro de un rango específico. Cuando usted activa una compactación de clustering, Milvus genera/actualiza un índice global llamado PartitionStats, que registra la relación de mapeo entre segmentos y valores de clave de clustering.

With Clustering Compaction

@@ -99,8 +99,8 @@ common:

| Elemento de configuración | Descripción | Valor por defecto |
| enable | Especifica si se habilita la compactación en clúster. Configure esta opción en true si necesita habilitar esta función para cada colección que tenga una clave de agrupación. | false |
| autoEnable | Especifica si se activa la compactación automática. Si se establece en true, Milvus compactará las colecciones que tengan una clave de agrupación en los intervalos especificados. | false |
| triggerInterval | Especifica el intervalo en milisegundos en el que Milvus inicia la compactación de agrupaciones. Este parámetro sólo es válido cuando autoEnable se establece en true. | - |
-| minInterval | Especifica el intervalo mínimo en milisegundos. Este parámetro sólo es válido si autoEnable está configurado como true. Establecerlo en un número entero mayor que triggerInterval ayuda a evitar compactaciones repetidas en un período corto. | - |
-| maxInterval | Especifica el intervalo máximo en milisegundos. Este parámetro sólo es válido cuando autoEnable está configurado como true. Una vez que Milvus detecta que una colección no ha sido compactada en clúster durante un periodo superior a este valor, fuerza una compactación en clúster. | - |
+| minInterval | Especifica el intervalo mínimo en segundos. Este parámetro sólo es válido cuando autoEnable está configurado como true. Establecerlo en un número entero mayor que triggerInterval ayuda a evitar compactaciones repetidas en un período corto. | - |
+| maxInterval | Especifica el intervalo máximo en segundos. Este parámetro sólo es válido cuando autoEnable está configurado como true. Una vez que Milvus detecta que una colección no ha sido compactada en clúster durante un periodo superior a este valor, fuerza una compactación en clúster. | - |
| newDataSizeThreshold | Especifica el umbral superior para activar una compactación en clúster. Este parámetro sólo es válido cuando autoEnable está configurado como true. Una vez que Milvus detecta que el volumen de datos de una colección supera este valor, inicia un proceso de compactación en clúster. | - |
| timeout | Especifica la duración del tiempo de espera para una compactación en clúster. Una compactación en clúster falla si su tiempo de ejecución supera este valor. | - |

@@ -119,7 +119,7 @@ common:

    • dataNode.clusteringCompaction

@@ -139,22 +139,7 @@ common:

      Para aplicar los cambios anteriores a su cluster Milvus, por favor siga los pasos en Configurar Milvus con Helm y Configurar Milvus con Milvus Operators.

      -

      Configuración de la colección

      Para la compactación en cluster en una colección específica, debe seleccionar un campo escalar de la colección como clave de cluster.

      +

      Configuración de la colección

      Para la compactación en cluster en una colección específica, debe seleccionar un campo escalar de la colección como clave de cluster.

      default_fields = [
           FieldSchema(name="id", dtype=DataType.INT64, is_primary=True),
           FieldSchema(name="key", dtype=DataType.INT64, is_clustering_key=True),
      @@ -170,7 +155,7 @@ default_schema = CollectionSchema(
       coll1 = Collection(name="clustering_test", schema=default_schema)
       
      -

      Puede utilizar los campos escalares de los siguientes tipos de datos como clave de agrupación: Int8, Int16, Int32, Int64, Float, Double, y VarChar.

      +

      Puede utilizar los campos escalares de los siguientes tipos de datos como clave de agrupación: Int8, Int16, Int32, Int64, Float, Double y VarChar.

      Activar la compactación en clúster
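The Markdown hunk for this section is truncated in this diff, but the JSON payload above carries the trigger calls; reproduced here as a sketch (assuming coll1 is the collection created in the previous step):

# Manually trigger clustering compaction, check its state, and block until it finishes.
coll1.compact(is_clustering=True)
coll1.get_compaction_state(is_clustering=True)
coll1.wait_for_compaction_completed(is_clustering=True)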

      Una entidad, en el contexto de las colecciones Milvus, es una instancia singular e identificable dentro de una colección. Representa un miembro distinto de una clase particular, ya sea un libro en una biblioteca, un gen en un genoma o cualquier otra entidad identificable.

      Las entidades de una colección comparten un conjunto común de atributos, denominado esquema, que define la estructura que debe seguir cada entidad, incluidos los nombres de los campos, los tipos de datos y cualquier otra restricción.

      -

      Para insertar con éxito entidades en una colección es necesario que los datos proporcionados contengan todos los campos definidos por el esquema de la colección de destino. Además, también puede incluir campos no definidos por el esquema sólo si ha habilitado el campo dinámico. Para obtener más información, consulte Activación del campo dinámico.

      +

      Para insertar con éxito entidades en una colección es necesario que los datos proporcionados contengan todos los campos definidos por el esquema de la colección de destino. Además, también puede incluir campos no definidos por el esquema sólo si ha habilitado el campo dinámico. Para obtener más información, consulte Activación del campo dinámico.
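As a sketch of that rule (hypothetical collection and field names; assumes a collection with an INT64 id primary key, a 5-dimensional vector field, and the dynamic field enabled):

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")

client.insert(
    collection_name="quick_setup",  # hypothetical collection
    data=[{
        "id": 0,                              # schema-defined field
        "vector": [0.1, 0.2, 0.3, 0.4, 0.5],  # schema-defined field (dim=5)
        "color": "red",                       # non-schema key; stored in the dynamic field
    }],
)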

      Preparativos

-| Elemento de configuración | Descripción | Valor por defecto |
+| Configuración Elemento | Descripción | Valor por defecto |
| memoryBufferRatio | Especifica la proporción de memoria intermedia para las tareas de compactación en clúster. Milvus vacía los datos cuando el tamaño de los datos excede el tamaño del búfer asignado calculado utilizando este ratio. | |

-| dim | La dimensionalidad del campo de la colección que contiene las incrustaciones vectoriales. El valor debe ser un número entero mayor que 1 y suele estar determinado por el modelo que se utiliza para generar incrustaciones vectoriales. |
+| dim | Dimensionalidad del campo de la colección que contiene las incrustaciones vectoriales. El valor debe ser un número entero mayor que 1 y suele estar determinado por el modelo que se utiliza para generar incrustaciones vectoriales. |
@@ -527,7 +527,7 @@ indexParams.add(indexParamForVectorField);

 params
-Los parámetros de ajuste fino para el tipo de índice especificado. Para más información sobre posibles claves y rangos de valores, consulte Índice en memoria.
+Los parámetros de ajuste para el tipo de índice especificado. Para más información sobre posibles claves y rangos de valores, consulte Índice en memoria.

@@ -579,7 +579,7 @@ indexParams.add(indexParamForVectorField);

 params
-Los parámetros de ajuste fino para el tipo de índice especificado. Para más información sobre posibles claves y rangos de valores, consulte Índice en memoria.
+Los parámetros de ajuste para el tipo de índice especificado. Para más información sobre posibles claves y rangos de valores, consulte Índice en memoria.

@@ -620,7 +620,7 @@ indexParams.add(indexParamForVectorField);

      El fragmento de código anterior muestra cómo configurar los parámetros de índice para un campo vectorial y un campo escalar, respectivamente. Para el campo vectorial, establezca tanto el tipo métrico como el tipo de índice. Para un campo escalar, establezca sólo el tipo de índice. Se recomienda crear un índice para el campo vectorial y cualquier campo escalar que se utilice con frecuencia para filtrar.
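A minimal sketch of what that paragraph describes, following the prepare_index_params flow from the JSON payload above (the scalar field name my_id and its STL_SORT index type are illustrative assumptions):

from pymilvus import MilvusClient

index_params = MilvusClient.prepare_index_params()

# Scalar field: set only the index type.
index_params.add_index(
    field_name="my_id",     # assumed scalar field
    index_type="STL_SORT",  # assumed scalar index type
)

# Vector field: set both the metric type and the index type.
index_params.add_index(
    field_name="vector",
    metric_type="COSINE",
    index_type="IVF_FLAT",
    params={"nlist": 128},
)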

      Paso 3: Crear la colección

      Tiene la opción de crear una colección y un archivo de índice por separado o crear una colección con el índice cargado simultáneamente en el momento de la creación.

      -

      Utilice create_collection() para crear una colección con los parámetros de esquema e índice especificados y get_load_state() para comprobar el estado de carga de la colección.

      +

Utilice create_collection() para crear una colección con los parámetros de esquema e índice especificados y get_load_state() para comprobar el estado de carga de la colección.

      Utilice createCollection() para crear una colección con los parámetros de esquema e índice especificados y getLoadState() para comprobar el estado de carga de la colección.
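A sketch of the combined path in Python (the collection name is assumed; schema and index_params come from the preceding steps, and client is the MilvusClient created earlier):

# Create the collection with index parameters so it is built and loaded in one step.
client.create_collection(
    collection_name="customized_setup_1",  # assumed name
    schema=schema,
    index_params=index_params,
)

# Check that the collection came up loaded.
res = client.get_load_state(collection_name="customized_setup_1")
print(res)  # e.g. {"state": "<LoadState: Loaded>"}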

@@ -1593,6 +1593,33 @@ $ curl -X POST "http://
# }
# }
+

      Cargar una colección parcialmente (Vista previa pública)

      +

      Esta función se encuentra actualmente en fase de vista previa pública. La API y la funcionalidad pueden cambiar en el futuro.

      +
      +

      Al recibir su solicitud de carga, Milvus carga en memoria todos los índices de los campos vectoriales y todos los datos de los campos escalares. Si algunos campos no van a estar implicados en búsquedas y consultas, puede excluirlos de la carga para reducir el uso de memoria, mejorando el rendimiento de la búsqueda.

      +
      +
      # 7. Load the collection
      +client.load_collection(
      +    collection_name="customized_setup_2",
+    load_fields=["my_id", "my_vector"], # Load only the specified fields
      +    skip_load_dynamic_field=True # Skip loading the dynamic field
      +)
      +
      +res = client.get_load_state(
      +    collection_name="customized_setup_2"
      +)
      +
      +print(res)
      +
      +# Output
      +#
      +# {
      +#     "state": "<LoadState: Loaded>"
      +# }
      +
      +

      Tenga en cuenta que sólo los campos enumerados en load_fields pueden utilizarse como condiciones de filtrado y campos de salida en búsquedas y consultas. Siempre debe incluir la clave primaria en la lista. Los nombres de campo excluidos de la carga no estarán disponibles para el filtrado o la salida.

      +

      Puede utilizar skip_load_dynamic_field=True para omitir la carga del campo dinámico. Milvus trata el campo dinámico como un único campo, por lo que todas las claves del campo dinámico se incluirán o excluirán juntas.

      +

      Liberar una colección

      Para liberar una colección, utilice el método release_collection() especificando el nombre de la colección.
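For instance, a sketch reusing the collection from the partial-load example above:

# Release the collection and confirm it is no longer loaded.
client.release_collection(collection_name="customized_setup_2")

res = client.get_load_state(collection_name="customized_setup_2")
print(res)  # expected: {"state": "<LoadState: NotLoad>"}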

@@ -2323,7 +2350,7 @@ collection.set_properties(
 }
 )
      -

      Establecer MMAP

      Configure la propiedad de asignación de memoria (MMAP) para la colección, que determina si los datos se asignan a la memoria para mejorar el rendimiento de la consulta. Para obtener más información, consulte Configurar la asignación de memoria .

      +

      Establecer MMAP

      Configure la propiedad de asignación de memoria (MMAP) para la colección, que determina si los datos se asignan a la memoria para mejorar el rendimiento de la consulta. Para obtener más información, consulte Configurar la asignación de memoria.

      Antes de configurar la propiedad MMAP, libere primero la colección. De lo contrario, se producirá un error.
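A sketch of that sequence with the ORM-style Collection object this page uses (connection URI and collection name are assumptions):

from pymilvus import Collection, connections

connections.connect(uri="http://localhost:19530")  # assumed local server
collection = Collection("quick_setup")             # assumed collection name

collection.release()                               # release first; otherwise set_properties errors out
collection.set_properties({"mmap.enabled": True})  # enable memory mapping
collection.load()                                  # reload with MMAP in effect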

      diff --git a/localization/v2.4.x/site/es/userGuide/manage-indexes/index-vector-fields.json b/localization/v2.4.x/site/es/userGuide/manage-indexes/index-vector-fields.json index 46099c025..66a10c314 100644 --- a/localization/v2.4.x/site/es/userGuide/manage-indexes/index-vector-fields.json +++ b/localization/v2.4.x/site/es/userGuide/manage-indexes/index-vector-fields.json @@ -1 +1 @@ -{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create schema\n# 2.1. Create schema\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n)\n\n# 2.2. Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\n\n# 3. Create collection\nclient.create_collection(\n collection_name=\"customized_setup\", \n schema=schema, \n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder().fieldName(\"id\").dataType(DataType.Int64).isPrimaryKey(true).autoID(false).build());\nschema.addField(AddFieldReq.builder().fieldName(\"vector\").dataType(DataType.FloatVector).dimension(5).build());\n\n// 3 Create a collection without schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n.collectionName(\"customized_setup\")\n.collectionSchema(schema)\n.build();\n\nclient.createCollection(customizedSetupReq);\n","// 1. Set up a Milvus Client\nclient = new MilvusClient({address, token});\n\n// 2. Define fields for the collection\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n autoID: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n]\n\n// 3. Create a collection\nres = await client.createCollection({\n collection_name: \"customized_setup\",\n fields: fields,\n})\n\nconsole.log(res.error_code) \n\n// Output\n// \n// Success\n// \n","# 4.1. Set up the index parameters\nindex_params = MilvusClient.prepare_index_params()\n\n# 4.2. Add an index on the vector field.\nindex_params.add_index(\n field_name=\"vector\",\n metric_type=\"COSINE\",\n index_type=\"IVF_FLAT\",\n index_name=\"vector_index\",\n params={ \"nlist\": 128 }\n)\n\n# 4.3. 
Create an index file\nclient.create_index(\n collection_name=\"customized_setup\",\n index_params=index_params\n)\n","import io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.index.request.CreateIndexReq;\n\n// 4 Prepare index parameters\n\n// 4.2 Add an index for the vector field \"vector\"\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexName(\"vector_index\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.COSINE)\n .extraParams(Map.of(\"nlist\", 128))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n\n// 4.3 Crate an index file\nCreateIndexReq createIndexReq = CreateIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexParams(indexParams)\n .build();\n\nclient.createIndex(createIndexReq);\n","// 4. Set up index for the collection\n// 4.1. Set up the index parameters\nres = await client.createIndex({\n collection_name: \"customized_setup\",\n field_name: \"vector\",\n index_type: \"AUTOINDEX\",\n metric_type: \"COSINE\", \n index_name: \"vector_index\",\n params: { \"nlist\": 128 }\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","# 5. Describe index\nres = client.list_indexes(\n collection_name=\"customized_setup\"\n)\n\nprint(res)\n\n# Output\n#\n# [\n# \"vector_index\",\n# ]\n\nres = client.describe_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"index_type\": ,\n# \"metric_type\": \"COSINE\",\n# \"field_name\": \"vector\",\n# \"index_name\": \"vector_index\"\n# }\n","import io.milvus.v2.service.index.request.DescribeIndexReq;\nimport io.milvus.v2.service.index.response.DescribeIndexResp;\n\n// 5. Describe index\n// 5.1 List the index names\nListIndexesReq listIndexesReq = ListIndexesReq.builder()\n .collectionName(\"customized_setup\")\n .build();\n\nList indexNames = client.listIndexes(listIndexesReq);\n\nSystem.out.println(indexNames);\n\n// Output:\n// [\n// \"vector_index\"\n// ]\n\n// 5.2 Describe an index\nDescribeIndexReq describeIndexReq = DescribeIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nDescribeIndexResp describeIndexResp = client.describeIndex(describeIndexReq);\n\nSystem.out.println(JSONObject.toJSON(describeIndexResp));\n\n// Output:\n// {\n// \"metricType\": \"COSINE\",\n// \"indexType\": \"AUTOINDEX\",\n// \"fieldName\": \"vector\",\n// \"indexName\": \"vector_index\"\n// }\n","// 5. Describe the index\nres = await client.describeIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(JSON.stringify(res.index_descriptions, null, 2))\n\n// Output\n// \n// [\n// {\n// \"params\": [\n// {\n// \"key\": \"index_type\",\n// \"value\": \"AUTOINDEX\"\n// },\n// {\n// \"key\": \"metric_type\",\n// \"value\": \"COSINE\"\n// }\n// ],\n// \"index_name\": \"vector_index\",\n// \"indexID\": \"449007919953063141\",\n// \"field_name\": \"vector\",\n// \"indexed_rows\": \"0\",\n// \"total_rows\": \"0\",\n// \"state\": \"Finished\",\n// \"index_state_fail_reason\": \"\",\n// \"pending_index_rows\": \"0\"\n// }\n// ]\n// \n","# 6. Drop index\nclient.drop_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n","// 6. Drop index\n\nDropIndexReq dropIndexReq = DropIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nclient.dropIndex(dropIndexReq);\n","// 6. 
Drop the index\nres = await client.dropIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n"],"headingContent":"","anchorList":[{"label":"Indexar campos vectoriales","href":"Index-Vector-Fields","type":1,"isActive":false},{"label":"Visión general","href":"Overview","type":2,"isActive":false},{"label":"Preparativos","href":"Preparations","type":2,"isActive":false},{"label":"Indexar una colección","href":"Index-a-Collection","type":2,"isActive":false},{"label":"Comprobar los detalles del índice","href":"Check-Index-Details","type":2,"isActive":false},{"label":"Eliminar un índice","href":"Drop-an-Index","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create schema\n# 2.1. Create schema\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n)\n\n# 2.2. Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\n\n# 3. Create collection\nclient.create_collection(\n collection_name=\"customized_setup\", \n schema=schema, \n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder().fieldName(\"id\").dataType(DataType.Int64).isPrimaryKey(true).autoID(false).build());\nschema.addField(AddFieldReq.builder().fieldName(\"vector\").dataType(DataType.FloatVector).dimension(5).build());\n\n// 3 Create a collection without schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n.collectionName(\"customized_setup\")\n.collectionSchema(schema)\n.build();\n\nclient.createCollection(customizedSetupReq);\n","// 1. Set up a Milvus Client\nclient = new MilvusClient({address, token});\n\n// 2. Define fields for the collection\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n autoID: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n]\n\n// 3. Create a collection\nres = await client.createCollection({\n collection_name: \"customized_setup\",\n fields: fields,\n})\n\nconsole.log(res.error_code) \n\n// Output\n// \n// Success\n// \n","# 4.1. Set up the index parameters\nindex_params = MilvusClient.prepare_index_params()\n\n# 4.2. Add an index on the vector field.\nindex_params.add_index(\n field_name=\"vector\",\n metric_type=\"COSINE\",\n index_type=\"IVF_FLAT\",\n index_name=\"vector_index\",\n params={ \"nlist\": 128 }\n)\n\n# 4.3. Create an index file\nclient.create_index(\n collection_name=\"customized_setup\",\n index_params=index_params,\n sync=False # Whether to wait for index creation to complete before returning. 
Defaults to True.\n)\n","import io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.index.request.CreateIndexReq;\n\n// 4 Prepare index parameters\n\n// 4.2 Add an index for the vector field \"vector\"\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexName(\"vector_index\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.COSINE)\n .extraParams(Map.of(\"nlist\", 128))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n\n// 4.3 Crate an index file\nCreateIndexReq createIndexReq = CreateIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexParams(indexParams)\n .build();\n\nclient.createIndex(createIndexReq);\n","// 4. Set up index for the collection\n// 4.1. Set up the index parameters\nres = await client.createIndex({\n collection_name: \"customized_setup\",\n field_name: \"vector\",\n index_type: \"AUTOINDEX\",\n metric_type: \"COSINE\", \n index_name: \"vector_index\",\n params: { \"nlist\": 128 }\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","# 5. Describe index\nres = client.list_indexes(\n collection_name=\"customized_setup\"\n)\n\nprint(res)\n\n# Output\n#\n# [\n# \"vector_index\",\n# ]\n\nres = client.describe_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"index_type\": ,\n# \"metric_type\": \"COSINE\",\n# \"field_name\": \"vector\",\n# \"index_name\": \"vector_index\"\n# }\n","import io.milvus.v2.service.index.request.DescribeIndexReq;\nimport io.milvus.v2.service.index.response.DescribeIndexResp;\n\n// 5. Describe index\n// 5.1 List the index names\nListIndexesReq listIndexesReq = ListIndexesReq.builder()\n .collectionName(\"customized_setup\")\n .build();\n\nList indexNames = client.listIndexes(listIndexesReq);\n\nSystem.out.println(indexNames);\n\n// Output:\n// [\n// \"vector_index\"\n// ]\n\n// 5.2 Describe an index\nDescribeIndexReq describeIndexReq = DescribeIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nDescribeIndexResp describeIndexResp = client.describeIndex(describeIndexReq);\n\nSystem.out.println(JSONObject.toJSON(describeIndexResp));\n\n// Output:\n// {\n// \"metricType\": \"COSINE\",\n// \"indexType\": \"AUTOINDEX\",\n// \"fieldName\": \"vector\",\n// \"indexName\": \"vector_index\"\n// }\n","// 5. Describe the index\nres = await client.describeIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(JSON.stringify(res.index_descriptions, null, 2))\n\n// Output\n// \n// [\n// {\n// \"params\": [\n// {\n// \"key\": \"index_type\",\n// \"value\": \"AUTOINDEX\"\n// },\n// {\n// \"key\": \"metric_type\",\n// \"value\": \"COSINE\"\n// }\n// ],\n// \"index_name\": \"vector_index\",\n// \"indexID\": \"449007919953063141\",\n// \"field_name\": \"vector\",\n// \"indexed_rows\": \"0\",\n// \"total_rows\": \"0\",\n// \"state\": \"Finished\",\n// \"index_state_fail_reason\": \"\",\n// \"pending_index_rows\": \"0\"\n// }\n// ]\n// \n","# 6. Drop index\nclient.drop_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n","// 6. Drop index\n\nDropIndexReq dropIndexReq = DropIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nclient.dropIndex(dropIndexReq);\n","// 6. 
Drop the index\nres = await client.dropIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n"],"headingContent":"Index Vector Fields","anchorList":[{"label":"Indexar campos vectoriales","href":"Index-Vector-Fields","type":1,"isActive":false},{"label":"Visión general","href":"Overview","type":2,"isActive":false},{"label":"Preparativos","href":"Preparations","type":2,"isActive":false},{"label":"Indexar una colección","href":"Index-a-Collection","type":2,"isActive":false},{"label":"Comprobar los detalles del índice","href":"Check-Index-Details","type":2,"isActive":false},{"label":"Eliminar un índice","href":"Drop-an-Index","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/userGuide/manage-indexes/index-vector-fields.md b/localization/v2.4.x/site/es/userGuide/manage-indexes/index-vector-fields.md index c453ce104..2143ad444 100644 --- a/localization/v2.4.x/site/es/userGuide/manage-indexes/index-vector-fields.md +++ b/localization/v2.4.x/site/es/userGuide/manage-indexes/index-vector-fields.md @@ -4,7 +4,7 @@ order: 1 summary: >- Esta guía le guiará a través de las operaciones básicas de creación y gestión de índices en campos vectoriales de una colección. -title: Campos vectoriales de índice +title: Indexar campos vectoriales ---

      Index Vector Fields

      Leveraging the metadata stored in an index file, Milvus organizes your data in a specialized structure, making it easy to quickly retrieve the requested information during searches or queries.

      -

      Milvus provides several index types and metrics for sorting field values so that similarity searches run efficiently. The following table lists the index types and metrics supported for different types of vector fields. For more details, refer to In-memory Index and Similarity Metrics.

      +

      Milvus provides several index types and metrics for sorting field values so that similarity searches run efficiently. The following table lists the index types and metrics supported for different types of vector fields. For more details, refer to In-memory Index and Similarity Metrics.

      @@ -78,7 +78,7 @@ title: Campos vectoriales de índice Tipos métricos - Tipos de índices + Tipos de índice @@ -249,7 +249,8 @@ index_params.add_index( # 4.3. Create an index file client.create_index( collection_name="customized_setup", - index_params=index_params + index_params=index_params, + sync=False # Whether to wait for index creation to complete before returning. Defaults to True. )
    import io.milvus.v2.common.IndexParam;
    @@ -331,6 +332,10 @@ res = await client.index_params
           An IndexParams object containing a list of IndexParam objects.
         
    +    
    +      sync
    +      Controls how the index is built in relation to the client's request. Valid values:
    • True (default): The client waits until the index is fully built before the call returns. This means you will not get a response until the process is complete.
    • False: The client returns immediately after the request is received and the index is being built in the background. To check whether index creation has completed, use the describe_index() method; a minimal sketch follows below.
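To make the sync behavior concrete, here is a minimal pymilvus sketch, assuming a local Milvus instance plus the customized_setup collection and vector_index from the examples above. It creates the index with sync=False and then polls describe_index() until the build reports completion; the exact keys in the returned dictionary (such as "state") vary across Milvus and pymilvus versions, so treat the completion check as illustrative.

```python
import time

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")

# Prepare the same index parameters as in the example above.
index_params = MilvusClient.prepare_index_params()
index_params.add_index(
    field_name="vector",
    metric_type="COSINE",
    index_type="IVF_FLAT",
    index_name="vector_index",
    params={"nlist": 128},
)

# With sync=False the call returns immediately and the build
# continues in the background.
client.create_index(
    collection_name="customized_setup",
    index_params=index_params,
    sync=False,
)

# Poll for completion for up to roughly a minute.
for _ in range(60):
    desc = client.describe_index(
        collection_name="customized_setup",
        index_name="vector_index",
    )
    if desc.get("state") == "Finished":  # key availability is version-dependent
        break
    time.sleep(1)
```

Polling keeps the client responsive while large collections are being indexed; with sync=True the same create_index() call would simply block until the build is done.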
    + diff --git a/localization/v2.4.x/site/es/userGuide/manage-partitions.json b/localization/v2.4.x/site/es/userGuide/manage-partitions.json index 0e1ea3045..742755f94 100644 --- a/localization/v2.4.x/site/es/userGuide/manage-partitions.json +++ b/localization/v2.4.x/site/es/userGuide/manage-partitions.json @@ -1 +1 @@ -{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n)\n\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .build();\n\nclient.createCollection(quickSetupReq);\n","const address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n}); \n","# 3. List partitions\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\"]\n","import io.milvus.v2.service.partition.request.ListPartitionsReq;\n\n// 3. List all partitions in the collection\nListPartitionsReq listPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nList partitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\"_default\"]\n","// 3. List partitions\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default' ]\n// \n","# 4. Create more partitions\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\", \"partitionB\"]\n","import io.milvus.v2.service.partition.request.CreatePartitionReq;\n\n// 4. Create more partitions\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\ncreatePartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\nlistPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\npartitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\n// \"_default\",\n// \"partitionA\",\n// \"partitionB\"\n// ]\n","// 4. 
Create more partitions\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default', 'partitionA', 'partitionB' ]\n// \n","# 5. Check whether a partition exists\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\nprint(res)\n\n# Output\n#\n# True\n\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionC\"\n)\nprint(res)\n\n# Output\n#\n# False\n","import io.milvus.v2.service.partition.request.HasPartitionReq;\n\n// 5. Check whether a partition exists\nHasPartitionReq hasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nboolean exists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// true\n\nhasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionC\")\n .build();\n\nexists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// false\n","// 5. Check whether a partition exists\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// true\n// \n\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionC\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// false\n// \n","# Release the collection\nclient.release_collection(collection_name=\"quick_setup\")\n\n# Check the load status\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionB\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.collection.request.ReleaseCollectionReq;\nimport io.milvus.v2.service.partition.request.LoadPartitionsReq;\nimport io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 6. 
Load a partition independantly\n// 6.1 Release the collection\nReleaseCollectionReq releaseCollectionReq = ReleaseCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nclient.releaseCollection(releaseCollectionReq);\n\n// 6.2 Load partitionA\nLoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\nThread.sleep(3000);\n\n// 6.3 Check the load status of the collection and its partitions\nGetLoadStateReq getLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 6. Load a partition indenpendantly\nawait client.releaseCollection({\n collection_name: \"quick_setup\"\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n\nawait client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nawait sleep(3000)\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n//\n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n 
.collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\", \"partitionB\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n","# 7. Release a partition\nclient.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 7. Release a partition\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 7. Release a partition\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n","client.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"_default\", \"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","# 8. 
Drop a partition\nclient.drop_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\"]\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"_default\", \"partitionA\", \"partitionB\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"_default\", \"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// {\n// status: {\n// error_code: 'Success',\n// reason: '',\n// code: 0,\n// retriable: false,\n// detail: ''\n// },\n// state: 'LoadStateNotLoad'\n// }\n// \n"],"headingContent":"","anchorList":[{"label":"Gestionar particiones","href":"Manage-Partitions","type":1,"isActive":false},{"label":"Visión general","href":"Overview","type":2,"isActive":false},{"label":"Preparativos","href":"Preparations","type":2,"isActive":false},{"label":"Listar particiones","href":"List-Partitions","type":2,"isActive":false},{"label":"Crear particiones","href":"Create-Partitions","type":2,"isActive":false},{"label":"Comprobar la existencia de una partición específica","href":"Check-for-a-Specific-Partition","type":2,"isActive":false},{"label":"Cargar y liberar particiones","href":"Load--Release-Partitions","type":2,"isActive":false},{"label":"Liberar particiones","href":"Drop-Partitions","type":2,"isActive":false},{"label":"PREGUNTAS FRECUENTES","href":"FAQ","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n)\n\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .build();\n\nclient.createCollection(quickSetupReq);\n","const address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n}); \n","# 3. List partitions\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\"]\n","import io.milvus.v2.service.partition.request.ListPartitionsReq;\n\n// 3. 
List all partitions in the collection\nListPartitionsReq listPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nList partitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\"_default\"]\n","// 3. List partitions\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default' ]\n// \n","# 4. Create more partitions\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\", \"partitionB\"]\n","import io.milvus.v2.service.partition.request.CreatePartitionReq;\n\n// 4. Create more partitions\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\ncreatePartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\nlistPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\npartitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\n// \"_default\",\n// \"partitionA\",\n// \"partitionB\"\n// ]\n","// 4. Create more partitions\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default', 'partitionA', 'partitionB' ]\n// \n","# 5. Check whether a partition exists\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\nprint(res)\n\n# Output\n#\n# True\n\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionC\"\n)\nprint(res)\n\n# Output\n#\n# False\n","import io.milvus.v2.service.partition.request.HasPartitionReq;\n\n// 5. Check whether a partition exists\nHasPartitionReq hasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nboolean exists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// true\n\nhasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionC\")\n .build();\n\nexists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// false\n","// 5. 
Check whether a partition exists\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// true\n// \n\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionC\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// false\n// \n","# Release the collection\nclient.release_collection(collection_name=\"quick_setup\")\n\n# Check the load status\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionB\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.collection.request.ReleaseCollectionReq;\nimport io.milvus.v2.service.partition.request.LoadPartitionsReq;\nimport io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 6. Load a partition independantly\n// 6.1 Release the collection\nReleaseCollectionReq releaseCollectionReq = ReleaseCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nclient.releaseCollection(releaseCollectionReq);\n\n// 6.2 Load partitionA\nLoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\nThread.sleep(3000);\n\n// 6.3 Check the load status of the collection and its partitions\nGetLoadStateReq getLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 6. 
Load a partition indenpendantly\nawait client.releaseCollection({\n collection_name: \"quick_setup\"\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n\nawait client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nawait sleep(3000)\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n//\n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\", \"partitionB\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"],\n load_fields=[\"id\", \"vector\"],\n 
skip_load_dynamic_field=True\n)\n","# 7. Release a partition\nclient.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 7. Release a partition\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 7. Release a partition\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n","client.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"_default\", \"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","# 8. Drop a partition\nclient.drop_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\"]\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"_default\", \"partitionA\", \"partitionB\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"_default\", \"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// {\n// status: {\n// error_code: 'Success',\n// reason: '',\n// code: 0,\n// retriable: false,\n// detail: ''\n// },\n// state: 'LoadStateNotLoad'\n// }\n// \n"],"headingContent":"Manage Partitions","anchorList":[{"label":"Gestionar particiones","href":"Manage-Partitions","type":1,"isActive":false},{"label":"Visión general","href":"Overview","type":2,"isActive":false},{"label":"Preparativos","href":"Preparations","type":2,"isActive":false},{"label":"Listar particiones","href":"List-Partitions","type":2,"isActive":false},{"label":"Crear particiones","href":"Create-Partitions","type":2,"isActive":false},{"label":"Comprobar la existencia de una partición específica","href":"Check-for-a-Specific-Partition","type":2,"isActive":false},{"label":"Cargar y liberar particiones","href":"Load--Release-Partitions","type":2,"isActive":false},{"label":"Liberar particiones","href":"Drop-Partitions","type":2,"isActive":false},{"label":"PREGUNTAS FRECUENTES","href":"FAQ","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/userGuide/manage-partitions.md 
b/localization/v2.4.x/site/es/userGuide/manage-partitions.md index 860ab30ea..06e0c9854 100644 --- a/localization/v2.4.x/site/es/userGuide/manage-partitions.md +++ b/localization/v2.4.x/site/es/userGuide/manage-partitions.md @@ -1,7 +1,6 @@ --- id: manage-partitions.md title: Gestionar particiones -summary: '' ---

    Manage Partitions +

    To load specific fields in one or more partitions, do the following:

    +
    client.load_partitions(
    +    collection_name="quick_setup",
    +    partition_names=["partitionA"],
    +    load_fields=["id", "vector"],
    +    skip_load_dynamic_field=True
    +)
    +
    +

    Note that only the fields listed in load_fields can be used as filter conditions and output fields in searches and queries. You should always include the primary key in the list. Field names excluded from loading will not be available for filtering or output.

    +

    You can use skip_load_dynamic_field=True to skip loading the dynamic field. Milvus treats the dynamic field as a single field, so all keys in the dynamic field are included or excluded together.
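As a hedged illustration of these restrictions, the sketch below reuses the quick_setup collection and the partial load above; the 5-dimensional query vector is a placeholder. A search against partitionA may only filter on and return the fields that were actually loaded.

```python
res = client.search(
    collection_name="quick_setup",
    partition_names=["partitionA"],
    data=[[0.1, 0.2, 0.3, 0.4, 0.5]],  # placeholder query vector
    limit=3,
    filter="id > 0",       # "id" is in load_fields, so it can be filtered on
    output_fields=["id"],  # a field excluded from load_fields would fail here
)

print(res)
```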

    Release Partitions

    -

    To release all partitions of a collection, simply call release_collection(). To release specific partitions of a collection, use release_partitions().

    +

    To release all partitions of a collection, simply call release_collection(). To release specific partitions of a collection, use release_partitions().

    To release all partitions of a collection, simply call releaseCollection(). To release specific partitions of a collection, use releasePartitions().
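For instance, a minimal Python sketch reusing the quick_setup collection from this page releases a single partition and then confirms its load state:

```python
client.release_partitions(
    collection_name="quick_setup",
    partition_names=["partitionA"],
)

res = client.get_load_state(
    collection_name="quick_setup",
    partition_name="partitionA",
)

print(res)  # expected to report a NotLoad state once the release completes
```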

    @@ -930,7 +939,7 @@ res = await client.rootCoord.maxPartitionNum. For more details, refer to System Configurations.

    +

    By default, Milvus allows a maximum of 1,024 partitions to be created. You can adjust the maximum number of partitions by configuring rootCoord.maxPartitionNum. For more details, refer to System Configurations.

  • How can I differentiate between partitions and partition keys?

    Partitions are physical storage units, whereas partition keys are logical concepts that automatically assign data to specific partitions based on a designated column.

    For example, in Milvus, if you have a collection with a partition key defined as the color field, the system automatically assigns data to partitions based on the hashed values of the color field for each entity. This automated process relieves the user of the responsibility of manually specifying the partition when inserting or searching data.
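A short pymilvus sketch of such a setup follows; the collection name, dimension, and partition count here are illustrative assumptions, not values from this guide. Marking a scalar field as the partition key makes Milvus route every entity by the hash of that field's value.

```python
from pymilvus import MilvusClient, DataType

client = MilvusClient(uri="http://localhost:19530")

schema = MilvusClient.create_schema(auto_id=False)
schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
schema.add_field(field_name="vector", datatype=DataType.FLOAT_VECTOR, dim=5)
schema.add_field(
    field_name="color",
    datatype=DataType.VARCHAR,
    max_length=64,
    is_partition_key=True,  # entities are routed by the hash of this value
)

client.create_collection(
    collection_name="partition_key_demo",  # hypothetical name
    schema=schema,
    num_partitions=16,  # number of physical partitions backing the key
)
```

When inserting into such a collection, you do not pass partition_name; Milvus derives the target partition from the partition key value itself.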

    diff --git a/localization/v2.4.x/site/es/userGuide/search-query-get/single-vector-search.json b/localization/v2.4.x/site/es/userGuide/search-query-get/single-vector-search.json index 67ce6089a..2345e33c6 100644 --- a/localization/v2.4.x/site/es/userGuide/search-query-get/single-vector-search.json +++ b/localization/v2.4.x/site/es/userGuide/search-query-get/single-vector-search.json @@ -1 +1 @@ -{"codeList":["# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=CLUSTER_ENDPOINT,\n token=TOKEN \n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n metric_type=\"IP\"\n)\n\n# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"color_tag\": f\"{current_color}_{str(random.randint(1000, 9999))}\"\n })\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n\n# 6.1 Create partitions \nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"red\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"blue\"\n)\n\n# 6.1 Insert data into partitions\nred_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"red\", \"color_tag\": f\"red_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\nblue_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"blue\", \"color_tag\": f\"blue_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=red_data,\n partition_name=\"red\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=blue_data,\n partition_name=\"blue\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Random;\n\nimport com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp; \n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig); \n\n// 2. 
Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .metricType(\"IP\")\n .build();\n\nclient.createCollection(quickSetupReq);\n\nGetLoadStateReq loadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(loadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\n// 3. Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n\n// 6.1. Create a partition\nCreatePartitionReq partitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"red\")\n .build();\n\nclient.createPartition(partitionReq);\n\npartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"blue\")\n .build();\n\nclient.createPartition(partitionReq);\n\n// 6.2 Insert data into the partition\ndata = new ArrayList<>();\n\nfor (int i=1000; i<1500; i++) {\n Random rand = new Random();\n String current_color = \"red\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n} \n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"red\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n\ndata = new ArrayList<>();\n\nfor (int i=1500; i<2000; i++) {\n Random rand = new Random();\n String current_color = \"blue\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"blue\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. 
Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n metric_type: \"IP\"\n}); \n\n// 3. Insert randomly generated vectors\nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor (let i = 0; i < 1000; i++) {\n current_color = colors[Math.floor(Math.random() * colors.length)]\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n color_tag: `${current_color}_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nvar res = await client.insert({\n collection_name: \"quick_setup\",\n data: data\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"red\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"blue\"\n})\n\n// 6.1 Insert data into partitions\nvar red_data = []\nvar blue_data = []\n\nfor (let i = 1000; i < 1500; i++) {\n red_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"red\",\n color_tag: `red_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nfor (let i = 1500; i < 2000; i++) {\n blue_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"blue\",\n color_tag: `blue_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: red_data,\n partition_name: \"red\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: blue_data,\n partition_name: \"blue\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n","# Single vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n # Replace with your query vector\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\n# Convert the output to a formatted JSON string\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 4. Single vector search\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(3) // The number of results to return\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 4. 
Single vector search\nvar query_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 3, // The number of results to return\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {}\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {}\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {}\n },\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {}\n },\n {\n \"id\": 2,\n \"distance\": 0.5928734540939331,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [[\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\n \"vector\": [\n 0.87928146,\n 0.05324632,\n 0.6312755,\n 0.28005534,\n 0.9542448\n ],\n \"id\": 455\n }\n }\n]]}\n","[\n { score: 1.7463608980178833, id: '854' },\n { score: 1.744946002960205, id: '425' },\n { score: 1.7258622646331787, id: '718' }\n]\n","# Bulk-vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104],\n [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345]\n ], # Replace with your query vectors\n limit=2, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 5. Batch vector search\nquery_vectors = Arrays.asList(\n Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f),\n Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f)\n);\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(2)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 5. 
Batch vector search\nvar query_vectors = [\n [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104]\n]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: query_vectors,\n limit: 2,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 1,\n \"distance\": 1.3017789125442505,\n \"entity\": {}\n },\n {\n \"id\": 7,\n \"distance\": 1.2419954538345337,\n \"entity\": {}\n }\n ], # Result set 1\n [\n {\n \"id\": 3,\n \"distance\": 2.3358664512634277,\n \"entity\": {}\n },\n {\n \"id\": 8,\n \"distance\": 0.5642921924591064,\n \"entity\": {}\n }\n ] # Result set 2\n]\n","// Two sets of vectors are returned as expected\n\n{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n }\n ],\n [\n {\n \"score\": 1.8654699,\n \"fields\": {\n \"vector\": [\n 0.4671427,\n 0.8378432,\n 0.98844475,\n 0.82763994,\n 0.9729997\n ],\n \"id\": 638\n }\n },\n {\n \"score\": 1.8581753,\n \"fields\": {\n \"vector\": [\n 0.735541,\n 0.60140246,\n 0.86730254,\n 0.93152493,\n 0.98603314\n ],\n \"id\": 855\n }\n }\n ]\n]}\n","[\n [\n { score: 2.3590476512908936, id: '854' },\n { score: 2.2896690368652344, id: '59' }\n [\n { score: 2.664059638977051, id: '59' },\n { score: 2.59483003616333, id: '854' }\n ]\n]\n","# 6.2 Search within a partition\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"red\"]\n)\n\nprint(res)\n","// 6.3 Search within partitions\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"red\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 6.2 Search within partitions\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"red\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 16,\n \"distance\": 0.9200337529182434,\n \"entity\": {}\n },\n {\n \"id\": 14,\n \"distance\": 0.4505271911621094,\n \"entity\": {}\n },\n {\n \"id\": 15,\n \"distance\": 0.19924677908420563,\n \"entity\": {}\n },\n {\n \"id\": 17,\n \"distance\": 0.0075093843042850494,\n \"entity\": {}\n },\n {\n \"id\": 13,\n \"distance\": -0.14609718322753906,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1677284,\n \"fields\": {\n \"vector\": [\n 0.9986977,\n 0.17964739,\n 0.49086612,\n 0.23155272,\n 0.98438674\n ],\n \"id\": 1435\n }\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\n \"vector\": [\n 0.6952647,\n 0.13417172,\n 0.91045254,\n 0.119336545,\n 0.9338931\n ],\n \"id\": 1291\n }\n },\n {\n \"score\": 
1.0969629,\n \"fields\": {\n \"vector\": [\n 0.3363194,\n 0.028906643,\n 0.6675426,\n 0.030419827,\n 0.9735209\n ],\n \"id\": 1168\n }\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\n \"vector\": [\n 0.9980543,\n 0.36063594,\n 0.66427994,\n 0.17359233,\n 0.94954175\n ],\n \"id\": 1164\n }\n },\n {\n \"score\": 1.0584627,\n \"fields\": {\n \"vector\": [\n 0.7187005,\n 0.12674773,\n 0.987718,\n 0.3110777,\n 0.86093885\n ],\n \"id\": 1085\n }\n }\n ],\n [\n {\n \"score\": 1.8030131,\n \"fields\": {\n \"vector\": [\n 0.59726167,\n 0.7054632,\n 0.9573117,\n 0.94529945,\n 0.8664103\n ],\n \"id\": 1203\n }\n },\n {\n \"score\": 1.7728865,\n \"fields\": {\n \"vector\": [\n 0.6672442,\n 0.60448086,\n 0.9325822,\n 0.80272985,\n 0.8861626\n ],\n \"id\": 1448\n }\n },\n {\n \"score\": 1.7536311,\n \"fields\": {\n \"vector\": [\n 0.59663296,\n 0.77831805,\n 0.8578314,\n 0.88818026,\n 0.9030075\n ],\n \"id\": 1010\n }\n },\n {\n \"score\": 1.7520742,\n \"fields\": {\n \"vector\": [\n 0.854198,\n 0.72294194,\n 0.9245805,\n 0.86126596,\n 0.7969224\n ],\n \"id\": 1219\n }\n },\n {\n \"score\": 1.7452049,\n \"fields\": {\n \"vector\": [\n 0.96419,\n 0.943535,\n 0.87611496,\n 0.8268136,\n 0.79786557\n ],\n \"id\": 1149\n }\n }\n ]\n]}\n","[\n { score: 3.0258803367614746, id: '1201' },\n { score: 3.004319190979004, id: '1458' },\n { score: 2.880324363708496, id: '1187' },\n { score: 2.8246407508850098, id: '1347' },\n { score: 2.797295093536377, id: '1406' }\n]\n","res = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"blue\"]\n)\n\nprint(res)\n","searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"blue\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","res = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"blue\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 20,\n \"distance\": 2.363696813583374,\n \"entity\": {}\n },\n {\n \"id\": 26,\n \"distance\": 1.0665391683578491,\n \"entity\": {}\n },\n {\n \"id\": 23,\n \"distance\": 1.066049575805664,\n \"entity\": {}\n },\n {\n \"id\": 29,\n \"distance\": 0.8353596925735474,\n \"entity\": {}\n },\n {\n \"id\": 28,\n \"distance\": 0.7484277486801147,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1628494,\n \"fields\": {\n \"vector\": [\n 0.7442872,\n 0.046407282,\n 0.71031404,\n 0.3544345,\n 0.9819991\n ],\n \"id\": 1992\n }\n },\n {\n \"score\": 1.1470042,\n \"fields\": {\n \"vector\": [\n 0.5505825,\n 0.04367262,\n 0.9985836,\n 0.18922359,\n 0.93255126\n ],\n \"id\": 1977\n }\n },\n {\n \"score\": 1.1450152,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.1439825,\n \"fields\": {\n \"vector\": [\n 0.9253267,\n 0.15890503,\n 0.7999555,\n 0.19126713,\n 0.898583\n ],\n \"id\": 1552\n }\n },\n {\n \"score\": 1.1029172,\n \"fields\": {\n \"vector\": [\n 0.95661926,\n 0.18777144,\n 0.38115507,\n 0.14323527,\n 0.93137646\n ],\n \"id\": 1823\n }\n }\n ],\n [\n {\n \"score\": 1.8005109,\n \"fields\": {\n \"vector\": [\n 0.5953582,\n 0.7794224,\n 0.9388869,\n 0.79825854,\n 0.9197286\n ],\n \"id\": 1888\n }\n },\n {\n \"score\": 1.7714822,\n \"fields\": {\n \"vector\": [\n 0.56805456,\n 
0.89422905,\n 0.88187534,\n 0.914824,\n 0.8944365\n ],\n \"id\": 1648\n }\n },\n {\n \"score\": 1.7561421,\n \"fields\": {\n \"vector\": [\n 0.83421993,\n 0.39865613,\n 0.92319834,\n 0.42695504,\n 0.96633124\n ],\n \"id\": 1688\n }\n },\n {\n \"score\": 1.7553532,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.7543385,\n \"fields\": {\n \"vector\": [\n 0.16542226,\n 0.38248396,\n 0.9888778,\n 0.80913955,\n 0.9501492\n ],\n \"id\": 1544\n }\n }\n ]\n]}\n","[\n { score: 2.8421106338500977, id: '1745' },\n { score: 2.838560104370117, id: '1782' },\n { score: 2.8134000301361084, id: '1511' },\n { score: 2.718268871307373, id: '1679' },\n { score: 2.7014894485473633, id: '1597' }\n]\n","# Search with output fields\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"] # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 7. Search with output fields\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 7. Search with output fields\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n output_fields: [\"color\"],\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {\n \"color\": \"pink_8682\"\n }\n },\n {\n \"id\": 16,\n \"distance\": 1.0159327983856201,\n \"entity\": {\n \"color\": \"yellow_1496\"\n }\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {}\n }\n ]\n]}\n","\n[\n { score: 3.036271572113037, id: '59', color: 'orange' },\n { score: 3.0267879962921143, id: '1745', color: 'blue' },\n { score: 3.0069446563720703, id: '854', color: 'black' },\n { score: 2.984386682510376, id: '718', color: 'black' },\n { score: 2.916019916534424, id: '425', color: 'purple' }\n]\n","# Search with filter\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. 
number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"red%\"'\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color_tag like \\\"red%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"red%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n },\n {\n \"id\": 6,\n \"distance\": -0.4113418459892273,\n \"entity\": {\n \"color\": \"red_9392\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1677284,\n \"fields\": {\"color_tag\": \"red_9030\"}\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\"color_tag\": \"red_3744\"}\n },\n {\n \"score\": 1.0969629,\n \"fields\": {\"color_tag\": \"red_4168\"}\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\"color_tag\": \"red_9678\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'red_8904' },\n { score: 2.491129159927368, id: '425', color_tag: 'purple_8212' },\n { score: 2.4889798164367676, id: '1458', color_tag: 'red_6891' },\n { score: 2.42964243888855, id: '724', color_tag: 'black_9885' },\n { score: 2.4004223346710205, id: '854', color_tag: 'black_5990' }\n]\n","# Infix match on color field\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"%ll%\"' # Filter on color field, infix match on \"ll\"\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color like \\\"%ll%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. 
Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"%ll%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {\n \"color\": \"yellow_4222\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"yellow_4222\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'yellow_4222' }\n]\n","# Conduct a range search\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\n \"radius\": 0.8, # Radius of the search circle\n \"range_filter\": 1.0 # Range filter to filter out vectors that are not within the search circle\n }\n}\n\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=3, # Max. number of search results to return\n search_params=search_params, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 9. Range search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .searchParams(Map.of(\"radius\", 0.1, \"range\", 1.0))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 9. 
Range search\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n params: {\n radius: 0.1,\n range: 1.0\n },\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\"color_tag\": \"green_2052\"}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\"color_tag\": \"purple_3709\"}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {\"color_tag\": \"black_1646\"}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {\"color_tag\": \"green_4853\"}\n }\n ]\n]}\n","[\n { score: 2.3387961387634277, id: '718', color_tag: 'black_7154' },\n { score: 2.3352415561676025, id: '1745', color_tag: 'blue_8741' },\n { score: 2.290485382080078, id: '1408', color_tag: 'red_2324' },\n { score: 2.285870313644409, id: '854', color_tag: 'black_5990' },\n { score: 2.2593345642089844, id: '1309', color_tag: 'red_8458' }\n]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Group search results\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=[[0.14529211512077012, 0.9147257273453546, 0.7965055218724449, 0.7009258593102812, 0.5605206522382088]], # Query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=10, # Max. number of search results to return\n group_by_field=\"doc_id\", # Group results by document ID\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\n\nprint(doc_ids)\n","[5, 10, 1, 7, 9, 6, 3, 4, 8, 2]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Search without `group_by_field`\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=query_passage_vector, # Replace with your query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=10, # Max. 
number of search results to return\n # group_by_field=\"doc_id\", # Group results by document ID\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\n\nprint(doc_ids)\n","[1, 10, 3, 10, 1, 9, 4, 4, 8, 6]\n","# In normal cases, you do not need to set search parameters manually\n# Except for range searches.\nsearch_parameters = {\n 'metric_type': 'L2',\n 'params': {\n 'nprobe': 10,\n 'level': 1,\n 'radius': 1.0\n 'range_filter': 0.8\n }\n}\n"],"headingContent":"","anchorList":[{"label":"Búsqueda de un solo vector","href":"Single-Vector-Search","type":1,"isActive":false},{"label":"Visión general","href":"Overview","type":2,"isActive":false},{"label":"Preparativos","href":"Preparations","type":2,"isActive":false},{"label":"Búsqueda básica","href":"Basic-search","type":2,"isActive":false},{"label":"Búsqueda filtrada","href":"Filtered-search","type":2,"isActive":false},{"label":"Búsqueda por rango","href":"Range-search","type":2,"isActive":false},{"label":"Búsqueda por agrupación","href":"Grouping-search","type":2,"isActive":false},{"label":"Parámetros de búsqueda","href":"Search-parameters","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=CLUSTER_ENDPOINT,\n token=TOKEN \n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n metric_type=\"IP\"\n)\n\n# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"color_tag\": f\"{current_color}_{str(random.randint(1000, 9999))}\"\n })\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n\n# 6.1 Create partitions \nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"red\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"blue\"\n)\n\n# 6.1 Insert data into partitions\nred_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"red\", \"color_tag\": f\"red_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\nblue_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"blue\", \"color_tag\": f\"blue_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=red_data,\n partition_name=\"red\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=blue_data,\n partition_name=\"blue\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Random;\n\nimport 
com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp; \n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig); \n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .metricType(\"IP\")\n .build();\n\nclient.createCollection(quickSetupReq);\n\nGetLoadStateReq loadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(loadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\n// 3. Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n\n// 6.1. 
Create a partition\nCreatePartitionReq partitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"red\")\n .build();\n\nclient.createPartition(partitionReq);\n\npartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"blue\")\n .build();\n\nclient.createPartition(partitionReq);\n\n// 6.2 Insert data into the partition\ndata = new ArrayList<>();\n\nfor (int i=1000; i<1500; i++) {\n Random rand = new Random();\n String current_color = \"red\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n} \n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"red\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n\ndata = new ArrayList<>();\n\nfor (int i=1500; i<2000; i++) {\n Random rand = new Random();\n String current_color = \"blue\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"blue\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n metric_type: \"IP\"\n}); \n\n// 3. 
Insert randomly generated vectors\nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor (let i = 0; i < 1000; i++) {\n current_color = colors[Math.floor(Math.random() * colors.length)]\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n color_tag: `${current_color}_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nvar res = await client.insert({\n collection_name: \"quick_setup\",\n data: data\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"red\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"blue\"\n})\n\n// 6.1 Insert data into partitions\nvar red_data = []\nvar blue_data = []\n\nfor (let i = 1000; i < 1500; i++) {\n red_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"red\",\n color_tag: `red_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nfor (let i = 1500; i < 2000; i++) {\n blue_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"blue\",\n color_tag: `blue_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: red_data,\n partition_name: \"red\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: blue_data,\n partition_name: \"blue\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n","# Single vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n # Replace with your query vector\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\n# Convert the output to a formatted JSON string\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 4. Single vector search\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(3) // The number of results to return\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 4. 
Single vector search\nvar query_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 3, // The number of results to return\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {}\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {}\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {}\n },\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {}\n },\n {\n \"id\": 2,\n \"distance\": 0.5928734540939331,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [[\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\n \"vector\": [\n 0.87928146,\n 0.05324632,\n 0.6312755,\n 0.28005534,\n 0.9542448\n ],\n \"id\": 455\n }\n }\n]]}\n","[\n { score: 1.7463608980178833, id: '854' },\n { score: 1.744946002960205, id: '425' },\n { score: 1.7258622646331787, id: '718' }\n]\n","# Bulk-vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104],\n [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345]\n ], # Replace with your query vectors\n limit=2, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 5. Batch vector search\nquery_vectors = Arrays.asList(\n Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f),\n Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f)\n);\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(2)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 5. 
Batch vector search\nvar query_vectors = [\n [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104]\n]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: query_vectors,\n limit: 2,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 1,\n \"distance\": 1.3017789125442505,\n \"entity\": {}\n },\n {\n \"id\": 7,\n \"distance\": 1.2419954538345337,\n \"entity\": {}\n }\n ], # Result set 1\n [\n {\n \"id\": 3,\n \"distance\": 2.3358664512634277,\n \"entity\": {}\n },\n {\n \"id\": 8,\n \"distance\": 0.5642921924591064,\n \"entity\": {}\n }\n ] # Result set 2\n]\n","// Two sets of vectors are returned as expected\n\n{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n }\n ],\n [\n {\n \"score\": 1.8654699,\n \"fields\": {\n \"vector\": [\n 0.4671427,\n 0.8378432,\n 0.98844475,\n 0.82763994,\n 0.9729997\n ],\n \"id\": 638\n }\n },\n {\n \"score\": 1.8581753,\n \"fields\": {\n \"vector\": [\n 0.735541,\n 0.60140246,\n 0.86730254,\n 0.93152493,\n 0.98603314\n ],\n \"id\": 855\n }\n }\n ]\n]}\n","[\n [\n { score: 2.3590476512908936, id: '854' },\n { score: 2.2896690368652344, id: '59' }\n [\n { score: 2.664059638977051, id: '59' },\n { score: 2.59483003616333, id: '854' }\n ]\n]\n","# 6.2 Search within a partition\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"red\"]\n)\n\nprint(res)\n","// 6.3 Search within partitions\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"red\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 6.2 Search within partitions\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"red\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 16,\n \"distance\": 0.9200337529182434,\n \"entity\": {}\n },\n {\n \"id\": 14,\n \"distance\": 0.4505271911621094,\n \"entity\": {}\n },\n {\n \"id\": 15,\n \"distance\": 0.19924677908420563,\n \"entity\": {}\n },\n {\n \"id\": 17,\n \"distance\": 0.0075093843042850494,\n \"entity\": {}\n },\n {\n \"id\": 13,\n \"distance\": -0.14609718322753906,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1677284,\n \"fields\": {\n \"vector\": [\n 0.9986977,\n 0.17964739,\n 0.49086612,\n 0.23155272,\n 0.98438674\n ],\n \"id\": 1435\n }\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\n \"vector\": [\n 0.6952647,\n 0.13417172,\n 0.91045254,\n 0.119336545,\n 0.9338931\n ],\n \"id\": 1291\n }\n },\n {\n \"score\": 
1.0969629,\n \"fields\": {\n \"vector\": [\n 0.3363194,\n 0.028906643,\n 0.6675426,\n 0.030419827,\n 0.9735209\n ],\n \"id\": 1168\n }\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\n \"vector\": [\n 0.9980543,\n 0.36063594,\n 0.66427994,\n 0.17359233,\n 0.94954175\n ],\n \"id\": 1164\n }\n },\n {\n \"score\": 1.0584627,\n \"fields\": {\n \"vector\": [\n 0.7187005,\n 0.12674773,\n 0.987718,\n 0.3110777,\n 0.86093885\n ],\n \"id\": 1085\n }\n }\n ],\n [\n {\n \"score\": 1.8030131,\n \"fields\": {\n \"vector\": [\n 0.59726167,\n 0.7054632,\n 0.9573117,\n 0.94529945,\n 0.8664103\n ],\n \"id\": 1203\n }\n },\n {\n \"score\": 1.7728865,\n \"fields\": {\n \"vector\": [\n 0.6672442,\n 0.60448086,\n 0.9325822,\n 0.80272985,\n 0.8861626\n ],\n \"id\": 1448\n }\n },\n {\n \"score\": 1.7536311,\n \"fields\": {\n \"vector\": [\n 0.59663296,\n 0.77831805,\n 0.8578314,\n 0.88818026,\n 0.9030075\n ],\n \"id\": 1010\n }\n },\n {\n \"score\": 1.7520742,\n \"fields\": {\n \"vector\": [\n 0.854198,\n 0.72294194,\n 0.9245805,\n 0.86126596,\n 0.7969224\n ],\n \"id\": 1219\n }\n },\n {\n \"score\": 1.7452049,\n \"fields\": {\n \"vector\": [\n 0.96419,\n 0.943535,\n 0.87611496,\n 0.8268136,\n 0.79786557\n ],\n \"id\": 1149\n }\n }\n ]\n]}\n","[\n { score: 3.0258803367614746, id: '1201' },\n { score: 3.004319190979004, id: '1458' },\n { score: 2.880324363708496, id: '1187' },\n { score: 2.8246407508850098, id: '1347' },\n { score: 2.797295093536377, id: '1406' }\n]\n","res = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"blue\"]\n)\n\nprint(res)\n","searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"blue\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","res = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"blue\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 20,\n \"distance\": 2.363696813583374,\n \"entity\": {}\n },\n {\n \"id\": 26,\n \"distance\": 1.0665391683578491,\n \"entity\": {}\n },\n {\n \"id\": 23,\n \"distance\": 1.066049575805664,\n \"entity\": {}\n },\n {\n \"id\": 29,\n \"distance\": 0.8353596925735474,\n \"entity\": {}\n },\n {\n \"id\": 28,\n \"distance\": 0.7484277486801147,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1628494,\n \"fields\": {\n \"vector\": [\n 0.7442872,\n 0.046407282,\n 0.71031404,\n 0.3544345,\n 0.9819991\n ],\n \"id\": 1992\n }\n },\n {\n \"score\": 1.1470042,\n \"fields\": {\n \"vector\": [\n 0.5505825,\n 0.04367262,\n 0.9985836,\n 0.18922359,\n 0.93255126\n ],\n \"id\": 1977\n }\n },\n {\n \"score\": 1.1450152,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.1439825,\n \"fields\": {\n \"vector\": [\n 0.9253267,\n 0.15890503,\n 0.7999555,\n 0.19126713,\n 0.898583\n ],\n \"id\": 1552\n }\n },\n {\n \"score\": 1.1029172,\n \"fields\": {\n \"vector\": [\n 0.95661926,\n 0.18777144,\n 0.38115507,\n 0.14323527,\n 0.93137646\n ],\n \"id\": 1823\n }\n }\n ],\n [\n {\n \"score\": 1.8005109,\n \"fields\": {\n \"vector\": [\n 0.5953582,\n 0.7794224,\n 0.9388869,\n 0.79825854,\n 0.9197286\n ],\n \"id\": 1888\n }\n },\n {\n \"score\": 1.7714822,\n \"fields\": {\n \"vector\": [\n 0.56805456,\n 
0.89422905,\n 0.88187534,\n 0.914824,\n 0.8944365\n ],\n \"id\": 1648\n }\n },\n {\n \"score\": 1.7561421,\n \"fields\": {\n \"vector\": [\n 0.83421993,\n 0.39865613,\n 0.92319834,\n 0.42695504,\n 0.96633124\n ],\n \"id\": 1688\n }\n },\n {\n \"score\": 1.7553532,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.7543385,\n \"fields\": {\n \"vector\": [\n 0.16542226,\n 0.38248396,\n 0.9888778,\n 0.80913955,\n 0.9501492\n ],\n \"id\": 1544\n }\n }\n ]\n]}\n","[\n { score: 2.8421106338500977, id: '1745' },\n { score: 2.838560104370117, id: '1782' },\n { score: 2.8134000301361084, id: '1511' },\n { score: 2.718268871307373, id: '1679' },\n { score: 2.7014894485473633, id: '1597' }\n]\n","# Search with output fields\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"] # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 7. Search with output fields\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 7. Search with output fields\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n output_fields: [\"color\"],\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {\n \"color\": \"pink_8682\"\n }\n },\n {\n \"id\": 16,\n \"distance\": 1.0159327983856201,\n \"entity\": {\n \"color\": \"yellow_1496\"\n }\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {}\n }\n ]\n]}\n","\n[\n { score: 3.036271572113037, id: '59', color: 'orange' },\n { score: 3.0267879962921143, id: '1745', color: 'blue' },\n { score: 3.0069446563720703, id: '854', color: 'black' },\n { score: 2.984386682510376, id: '718', color: 'black' },\n { score: 2.916019916534424, id: '425', color: 'purple' }\n]\n","# Search with filter\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. 
number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"red%\"'\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color_tag like \\\"red%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"red%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n },\n {\n \"id\": 6,\n \"distance\": -0.4113418459892273,\n \"entity\": {\n \"color\": \"red_9392\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1677284,\n \"fields\": {\"color_tag\": \"red_9030\"}\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\"color_tag\": \"red_3744\"}\n },\n {\n \"score\": 1.0969629,\n \"fields\": {\"color_tag\": \"red_4168\"}\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\"color_tag\": \"red_9678\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'red_8904' },\n { score: 2.491129159927368, id: '425', color_tag: 'purple_8212' },\n { score: 2.4889798164367676, id: '1458', color_tag: 'red_6891' },\n { score: 2.42964243888855, id: '724', color_tag: 'black_9885' },\n { score: 2.4004223346710205, id: '854', color_tag: 'black_5990' }\n]\n","# Infix match on color field\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"%ll%\"' # Filter on color field, infix match on \"ll\"\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color like \\\"%ll%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. 
Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"%ll%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {\n \"color\": \"yellow_4222\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"yellow_4222\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'yellow_4222' }\n]\n","# Conduct a range search\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\n \"radius\": 0.8, # Radius of the search circle\n \"range_filter\": 1.0 # Range filter to filter out vectors that are not within the search circle\n }\n}\n\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=3, # Max. number of search results to return\n search_params=search_params, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 9. Range search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .searchParams(Map.of(\"radius\", 0.1, \"range\", 1.0))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 9. 
Range search\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n params: {\n radius: 0.1,\n range: 1.0\n },\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\"color_tag\": \"green_2052\"}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\"color_tag\": \"purple_3709\"}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {\"color_tag\": \"black_1646\"}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {\"color_tag\": \"green_4853\"}\n }\n ]\n]}\n","[\n { score: 2.3387961387634277, id: '718', color_tag: 'black_7154' },\n { score: 2.3352415561676025, id: '1745', color_tag: 'blue_8741' },\n { score: 2.290485382080078, id: '1408', color_tag: 'red_2324' },\n { score: 2.285870313644409, id: '854', color_tag: 'black_5990' },\n { score: 2.2593345642089844, id: '1309', color_tag: 'red_8458' }\n]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Group search results\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=[[0.14529211512077012, 0.9147257273453546, 0.7965055218724449, 0.7009258593102812, 0.5605206522382088]], # Query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=5, # Max. number of groups to return\n group_by_field=\"doc_id\", # Group results by document ID\n group_size=2, # returned at most 2 passages per document, the default value is 1\n group_strict_size=True, # ensure every group contains exactly 3 passages\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\npassage_ids = [result['entity']['passage_id'] for result in res[0]]\n\nprint(doc_ids)\nprint(passage_ids)\n","[\"doc_11\", \"doc_11\", \"doc_7\", \"doc_7\", \"doc_3\", \"doc_3\", \"doc_2\", \"doc_2\", \"doc_8\", \"doc_8\"]\n[5, 10, 11, 10, 9, 6, 5, 4, 9, 2]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Search without `group_by_field`\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=query_passage_vector, # Replace with your query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=5, # Max. 
number of search results to return\n # group_by_field=\"doc_id\", # Group results by document ID\n # group_size=2, \n # group_strict_size=True,\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\npassage_ids = [result['entity']['passage_id'] for result in res[0]]\n\nprint(doc_ids)\nprint(passage_ids)\n","[\"doc_11\", \"doc_11\", \"doc_11\", \"doc_11\", \"doc_11\"]\n[1, 10, 3, 12, 9]\n","# In normal cases, you do not need to set search parameters manually\n# Except for range searches.\nsearch_parameters = {\n 'metric_type': 'L2',\n 'params': {\n 'nprobe': 10,\n 'level': 1,\n 'radius': 1.0\n 'range_filter': 0.8\n }\n}\n"],"headingContent":"Single-Vector Search","anchorList":[{"label":"Búsqueda de un solo vector","href":"Single-Vector-Search","type":1,"isActive":false},{"label":"Visión general","href":"Overview","type":2,"isActive":false},{"label":"Preparativos","href":"Preparations","type":2,"isActive":false},{"label":"Búsqueda básica","href":"Basic-search","type":2,"isActive":false},{"label":"Búsqueda filtrada","href":"Filtered-search","type":2,"isActive":false},{"label":"Búsqueda por rango","href":"Range-search","type":2,"isActive":false},{"label":"Búsqueda por agrupación","href":"Grouping-search","type":2,"isActive":false},{"label":"Parámetros de búsqueda","href":"Search-parameters","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/userGuide/search-query-get/single-vector-search.md b/localization/v2.4.x/site/es/userGuide/search-query-get/single-vector-search.md index a79b15c0f..e23d822b6 100644 --- a/localization/v2.4.x/site/es/userGuide/search-query-get/single-vector-search.md +++ b/localization/v2.4.x/site/es/userGuide/search-query-get/single-vector-search.md @@ -4,7 +4,7 @@ order: 1 summary: >- Este artículo describe cómo buscar vectores en una colección Milvus utilizando un único vector de consulta. -title: Búsqueda monovectorial +title: Búsqueda de un solo vector ---

    Single-Vector Search

    Once you have inserted your data, the next step is to perform similarity searches on your collection in Milvus.

    Milvus lets you perform two types of searches, depending on the number of vector fields in your collection:

      -
    • Single-vector search: If your collection has only one vector field, use the search() method to find the most similar entities. This method compares your query vector with the vectors already in your collection and returns the IDs of the closest matches along with the distances between them. Optionally, it can also return the vectors' values and the metadata of the results.
    • +
    • Single-vector search: If your collection has only one vector field, use the search() method to find the most similar entities. This method compares your query vector with the vectors already in your collection and returns the IDs of the closest matches along with the distances between them. Optionally, it can also return the vector values and the metadata of the results.
    • Hybrid search: For collections with two or more vector fields, use the hybrid_search() method. This method performs multiple Approximate Nearest Neighbor (ANN) search requests and combines the results to return the most relevant matches after re-ranking.

    This guide focuses on how to perform a single-vector search in Milvus. For more information on hybrid search, refer to Hybrid Search.

    @@ -441,7 +441,7 @@ res = await client.search
    In a search request, you can provide one or more vector values that represent your query embeddings, plus a limit value indicating the number of results to return.

    -

    Depending on the data and the query vector, it is possible to get fewer results than limit. This happens when limit is larger than the number of vectors that match the query.

    +

    Depending on your data and your query vector, you may get fewer results than limit. This happens when limit is larger than the number of vectors that match the query.

    Single-vector search is the simplest form of the search operations in Milvus, designed to find the vectors most similar to a given query vector.

    To perform a single-vector search, specify the target collection name, the query vector, and the desired number of results (limit). This operation returns a result set comprising the most similar vectors, their IDs, and their distances from the query vector.

    Here is an example of searching for the 5 entities that are most similar to the query vector:
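    As a minimal Python sketch (the test_collection name, the 5-dimensional vector field, the IP metric, and the localhost URI are assumptions carried over from the earlier snippets):

    # Single-vector search (minimal sketch)
    from pymilvus import MilvusClient
    import json

    client = MilvusClient(uri="http://localhost:19530")  # adjust to your deployment

    res = client.search(
        collection_name="test_collection",  # replace with the actual name of your collection
        data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],
        limit=5,  # max. number of search results to return
        search_params={"metric_type": "IP", "params": {}}  # metric must match the collection's index
    )

    print(json.dumps(res, indent=4))  # each hit carries an id and a distance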

    @@ -1717,9 +1717,10 @@ res = await client.
    -

    In Milvus, grouping a search by a specific field can avoid redundancy of the same field value in the results, giving you a more varied set of results for that field.

    -

    Consider a collection of documents, where each document is split into several passages. Each passage is represented by one vector embedding and belongs to one document. To find relevant documents instead of similar passages, you can include the group_by_field argument in the search() operation to group results by the document ID. This helps return the most relevant and unique documents, rather than separate passages from the same document.

    -

    Here is the sample code for grouping search results by field:

    +

    In Milvus, grouping search is designed to improve the comprehensiveness and accuracy of search results.

    +

    Consider a scenario in RAG, where a corpus of documents is split into passages, and each passage is represented by one vector embedding. Users want to find the most relevant passages so that the LLMs' answers stay accurate. Milvus's ordinary search can meet this requirement, but it may yield heavily skewed, one-sided results: most of the passages come from only a few documents, and the comprehensiveness of the search results is poor. This can seriously hurt the accuracy, or even the correctness, of the answers the LLM produces and degrade the experience of the LLM's users.

    +

    Grouping search can solve this problem effectively. By passing a group_by_field and a group_size, Milvus users can split the search results into several groups and ensure that the number of entities in each group does not exceed the given group_size. This feature can significantly improve the comprehensiveness and fairness of search results, noticeably raising the quality of the LLM's output.

    +

    Here is the sample code for grouping search results by field:

    # Connect to Milvus
     client = MilvusClient(uri='http://localhost:19530') # Milvus server address
     
    @@ -1734,21 +1735,26 @@ res = client.search(
         "metric_type": "L2",
         "params": {"nprobe": 10},
         }, # Search parameters
    -    limit=10, # Max. number of search results to return
    +    limit=5, # Max. number of groups to return
         group_by_field="doc_id", # Group results by document ID
    +    group_size=2, # return at most 2 passages per document; the default value is 1
    +    group_strict_size=True, # ensure every group contains exactly 2 passages
         output_fields=["doc_id", "passage_id"]
     )
     
     # Retrieve the values in the `doc_id` column
     doc_ids = [result['entity']['doc_id'] for result in res[0]]
    +passage_ids = [result['entity']['passage_id'] for result in res[0]]
     
     print(doc_ids)
    +print(passage_ids)
     

    The output is similar to the following:

    -
    [5, 10, 1, 7, 9, 6, 3, 4, 8, 2]
    +
    ["doc_11", "doc_11", "doc_7", "doc_7", "doc_3", "doc_3", "doc_2", "doc_2", "doc_8", "doc_8"]
    +[5, 10, 11, 10, 9, 6, 5, 4, 9, 2]
     
    -

    From the obtained results, you can observe that the returned entities contain no duplicate doc_id values.

    -

    For comparison, let's comment out group_by_field and run a regular search:

    +

    In the given output, you can observe that exactly two passages are retrieved for each document, and a total of 5 documents collectively make up the results.

    +

    For comparison, let's comment out the group-related parameters and run a regular search:

    # Connect to Milvus
     client = MilvusClient(uri='http://localhost:19530') # Milvus server address
     
    @@ -1763,27 +1769,33 @@ res = client.search(
         "metric_type": "L2",
         "params": {"nprobe": 10},
         }, # Search parameters
    -    limit=10, # Max. number of search results to return
    +    limit=5, # Max. number of search results to return
         # group_by_field="doc_id", # Group results by document ID
    +    # group_size=2, 
    +    # group_strict_size=True,
         output_fields=["doc_id", "passage_id"]
     )
     
     # Retrieve the values in the `doc_id` column
     doc_ids = [result['entity']['doc_id'] for result in res[0]]
    +passage_ids = [result['entity']['passage_id'] for result in res[0]]
     
     print(doc_ids)
    +print(passage_ids)
     

    The output is similar to the following:

    -
    [1, 10, 3, 10, 1, 9, 4, 4, 8, 6]
    +
    ["doc_11", "doc_11", "doc_11", "doc_11", "doc_11"]
    +[1, 10, 3, 12, 9]
     
    -

    In the given output, you can observe that the returned entities contain duplicate doc_id values.

    +

    En la salida dada, puede observarse que "doc_11" dominó completamente los resultados de la búsqueda, eclipsando los párrafos de alta calidad de otros documentos, lo que puede ser una mala indicación para LLM.

    +

    One more thing to note: by default, grouping search returns results as soon as it has enough groups, which may leave some groups with fewer results than group_size. If you care about the number of results in each group, set group_strict_size=True as shown in the code above. This makes Milvus strive to fetch enough results for every group, at a slight cost to performance.
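    As a minimal sketch of that trade-off (reusing the group_search collection and a placeholder query_vector from the snippets above), only the strictness flag differs between the two calls:

    # Best-effort grouping (default): returns as soon as enough groups exist,
    # so some groups may hold fewer than group_size results
    res_fast = client.search(
        collection_name="group_search",
        data=[query_vector],  # placeholder query vector
        search_params={"metric_type": "L2", "params": {"nprobe": 10}},
        limit=5,  # max. number of groups to return
        group_by_field="doc_id",
        group_size=2,
        output_fields=["doc_id", "passage_id"]
    )

    # Strict grouping: Milvus strives to fill every group up to group_size,
    # at a slight cost to performance
    res_strict = client.search(
        collection_name="group_search",
        data=[query_vector],
        search_params={"metric_type": "L2", "params": {"nprobe": 10}},
        limit=5,
        group_by_field="doc_id",
        group_size=2,
        group_strict_size=True,
        output_fields=["doc_id", "passage_id"]
    )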

    Limitations

    • Indexing: This grouping feature works only for collections indexed with the HNSW, IVF_FLAT, or FLAT type. For more information, refer to In-memory Index.

    • Vector: Currently, grouping search does not support a vector field of the BINARY_VECTOR type. For more information on data types, refer to Supported data types.

    • Field: Currently, grouping search allows only a single column. You cannot specify multiple field names in the group_by_field configuration. Additionally, grouping search is incompatible with the JSON, FLOAT, DOUBLE, and ARRAY data types, as well as vector fields.

    • Performance impact: Be aware that performance degrades as the number of query vectors increases. Using a cluster with 2 CPU cores and 8 GB of memory as an example, the execution time of grouping search grows proportionally with the number of input query vectors.

    • -
    • Functionality: Currently, grouping search is not supported by range search, search iterators, or hybrid search.

    • +
    • Functionality: Currently, grouping search is not supported by range search or search iterators.

    Search parameters

    metric_type           How to measure similarity between vector embeddings.
                          Possible values are IP, L2, COSINE, JACCARD, and HAMMING, and the default is the one of the loaded index file.
    params.nprobe         Number of units to query during the search.
                          The value falls in the range [1, nlist[1]].
    params.level          Search precision level.
                          Possible values are 1, 2, and 3, and the default is 1. Higher values yield more accurate results but slower performance.
    -params.radius        Defines the outer boundary of your search space. Only vectors within this distance from the query vector are considered potential matches.
    -                     The value interval is determined by the metric_type parameter. For example, if metric_type is set to L2, the valid value interval is [0, ∞]. If metric_type is set to COSINE, the valid value interval is [-1, 1]. For more information, refer to Similarity Metrics.
    +params.radius        Defines the outer boundary of your search space. Only vectors within this distance from the query vector are considered potential matches.
    +                     The value range is determined by the metric_type parameter. For example, if metric_type is set to L2, the valid value range is [0, ∞]. If metric_type is set to COSINE, the valid value range is [-1, 1]. For more information, refer to Similarity Metrics.
    params.range_filter   While radius sets the outer limit of the search, range_filter can optionally be used to define an inner boundary, creating a distance range within which vectors must fall to be considered matches.
                          The value range is determined by the metric_type parameter. For example, if metric_type is set to L2, the valid value range is [0, ∞]. If metric_type is set to COSINE, the valid value range is [-1, 1]. For more information, refer to Similarity Metrics.
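    Putting radius and range_filter together, here is a minimal range-search sketch mirroring the earlier examples (the test_collection name and the IP metric are assumptions; with similarity metrics such as IP, higher scores mean more similar, so radius acts as the lower score bound and range_filter as the upper one):

    # Range search: keep only hits whose IP score falls within (0.8, 1.0]
    search_params = {
        "metric_type": "IP",
        "params": {
            "radius": 0.8,       # outer boundary of the search space
            "range_filter": 1.0  # optional inner boundary
        }
    }

    res = client.search(
        collection_name="test_collection",  # replace with the actual name of your collection
        data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],
        limit=3,
        search_params=search_params,
        output_fields=["color"]
    )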
    diff --git a/localization/v2.4.x/site/es/userGuide/search-query-get/with-iterators.json b/localization/v2.4.x/site/es/userGuide/search-query-get/with-iterators.json index 0e1b134ed..62504f1b7 100644 --- a/localization/v2.4.x/site/es/userGuide/search-query-get/with-iterators.json +++ b/localization/v2.4.x/site/es/userGuide/search-query-get/with-iterators.json @@ -1 +1 @@ -{"codeList":["from pymilvus import MilvusClient\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n)\n","import io.milvus.client.MilvusServiceClient;\nimport io.milvus.param.ConnectParam;\nimport io.milvus.param.highlevel.collection.CreateSimpleCollectionParam;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectParam connectParam = ConnectParam.newBuilder()\n .withUri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusServiceClient client = new MilvusServiceClient(connectParam);\n\n// 2. Create a collection\nCreateSimpleCollectionParam createCollectionParam = CreateSimpleCollectionParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withDimension(5)\n .build();\n\nclient.createCollection(createCollectionParam);\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(10000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n\n# Output\n#\n# {\n# \"id\": 0,\n# \"vector\": [\n# -0.5705990742218152,\n# 0.39844925120642083,\n# -0.8791287928610869,\n# 0.024163154953680932,\n# 0.6837669917169638\n# ],\n# \"color\": \"purple\",\n# \"tag\": 7774,\n# \"color_tag\": \"purple_7774\"\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data,\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 10000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(9990 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Random;\n\nimport com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.param.R;\nimport io.milvus.param.dml.InsertParam;\nimport io.milvus.response.MutationResultWrapper;\nimport io.milvus.grpc.MutationResult;\n\n\n// 3. 
Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<10000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertParam insertParam = InsertParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withRows(data)\n .build();\n\nR insertRes = client.insert(insertParam);\n\nif (insertRes.getStatus() != R.Status.Success.getCode()) {\n System.err.println(insertRes.getMessage());\n}\n\nMutationResultWrapper wrapper = new MutationResultWrapper(insertRes.getData());\nSystem.out.println(wrapper.getInsertCount());\n","from pymilvus import Collection\n\n# 4. Search with iterator\nconnections.connect(host=\"127.0.0.1\", port=19530)\ncollection = Collection(\"quick_setup\")\n\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\"nprobe\": 10}\n}\n\niterator = collection.search_iterator(\n data=query_vectors,\n anns_field=\"vector\",\n batch_size=10,\n param=search_params,\n output_fields=[\"color_tag\"],\n limit=3\n)\n\nresults = []\n\nwhile True:\n result = iterator.next()\n if not result:\n iterator.close()\n break\n \n results.extend(result)\n \n for hit in result:\n results.append(hit.to_dict())\n\nprint(results)\n\n# Output\n#\n# [\n# {\n# \"id\": 1756,\n# \"distance\": 2.0642056465148926,\n# \"entity\": {\n# \"color_tag\": \"black_9109\"\n# }\n# },\n# {\n# \"id\": 6488,\n# \"distance\": 1.9437453746795654,\n# \"entity\": {\n# \"color_tag\": \"purple_8164\"\n# }\n# },\n# {\n# \"id\": 3338,\n# \"distance\": 1.9107104539871216,\n# \"entity\": {\n# \"color_tag\": \"brown_8121\"\n# }\n# }\n# ]\n","import io.milvus.param.dml.QueryIteratorParam;\nimport io.milvus.param.dml.SearchIteratorParam;\nimport io.milvus.response.QueryResultsWrapper;\nimport io.milvus.orm.iterator.SearchIterator;\n\n// 4. Search with iterators\nSearchIteratorParam iteratorParam = SearchIteratorParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withVectorFieldName(\"vector\")\n // Use withFloatVectors() in clusters compatible with Milvus 2.4.x\n .withVectors(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f))\n .withBatchSize(10L)\n .withParams(\"{\\\"metric_type\\\": \\\"COSINE\\\", \\\"params\\\": {\\\"level\\\": 1}}\")\n .build();\n \n\nR searchIteratorRes = client.searchIterator(iteratorParam);\n\nif (searchIteratorRes.getStatus() != R.Status.Success.getCode()) {\n System.err.println(searchIteratorRes.getMessage());\n}\n\nSearchIterator searchIterator = searchIteratorRes.getData();\nList results = new ArrayList<>();\n\nwhile (true) {\n List batchResults = searchIterator.next();\n if (batchResults.isEmpty()) {\n searchIterator.close();\n break;\n }\n for (QueryResultsWrapper.RowRecord rowRecord : batchResults) {\n results.add(rowRecord);\n }\n}\n\nSystem.out.println(results.size());\n","# 6. 
Query with iterator\niterator = collection.query_iterator(\n batch_size=10, # Controls the size of the return each time you call next()\n expr=\"color_tag like \\\"brown_8\\\"\",\n output_fields=[\"color_tag\"]\n)\n\nresults = []\n\nwhile True:\n result = iterator.next()\n if not result:\n iterator.close()\n break\n \n results.extend(result)\n \n# 8. Check the search results\nprint(len(results))\n\nprint(results[:3])\n\n# Output\n#\n# [\n# {\n# \"color_tag\": \"brown_8785\",\n# \"id\": 94\n# },\n# {\n# \"color_tag\": \"brown_8568\",\n# \"id\": 176\n# },\n# {\n# \"color_tag\": \"brown_8721\",\n# \"id\": 289\n# }\n# ]\n","import io.milvus.param.dml.QueryIteratorParam;\nimport io.milvus.orm.iterator.QueryIterator;\n\n// 5. Query with iterators\n\ntry {\n Files.write(Path.of(\"results.json\"), JSON.toJSONString(new ArrayList<>()).getBytes(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);\n} catch (Exception e) {\n // TODO: handle exception\n e.printStackTrace();\n}\n\nQueryIteratorParam queryIteratorParam = QueryIteratorParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withExpr(\"color_tag like \\\"brown_8%\\\"\")\n .withBatchSize(50L)\n .addOutField(\"vector\")\n .addOutField(\"color_tag\")\n .build();\n\nR queryIteratRes = client.queryIterator(queryIteratorParam);\n\nif (queryIteratRes.getStatus() != R.Status.Success.getCode()) {\n System.err.println(queryIteratRes.getMessage());\n}\n\nQueryIterator queryIterator = queryIteratRes.getData();\n\nwhile (true) {\n List batchResults = queryIterator.next();\n if (batchResults.isEmpty()) {\n queryIterator.close();\n break;\n }\n\n String jsonString = \"\";\n List jsonObject = new ArrayList<>();\n try {\n jsonString = Files.readString(Path.of(\"results.json\"));\n jsonObject = JSON.parseArray(jsonString).toJavaList(null);\n } catch (IOException e) {\n // TODO Auto-generated catch block\n e.printStackTrace();\n }\n\n for (QueryResultsWrapper.RowRecord queryResult : batchResults) {\n JSONObject row = new JSONObject();\n row.put(\"id\", queryResult.get(\"id\"));\n row.put(\"vector\", queryResult.get(\"vector\"));\n row.put(\"color_tag\", queryResult.get(\"color_tag\"));\n jsonObject.add(row);\n }\n\n try {\n Files.write(Path.of(\"results.json\"), JSON.toJSONString(jsonObject).getBytes(), StandardOpenOption.WRITE);\n } catch (IOException e) {\n // TODO Auto-generated catch block\n e.printStackTrace();\n }\n}\n"],"headingContent":"","anchorList":[{"label":"Con iteradores","href":"With-Iterators","type":1,"isActive":false},{"label":"Visión general","href":"Overview","type":2,"isActive":false},{"label":"Preparativos","href":"Preparations","type":2,"isActive":false},{"label":"Búsqueda con iterador","href":"Search-with-iterator","type":2,"isActive":false},{"label":"Consulta con un iterador","href":"Query-with-an-iterator","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["from pymilvus import MilvusClient\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n)\n","import io.milvus.client.MilvusServiceClient;\nimport io.milvus.param.ConnectParam;\nimport io.milvus.param.highlevel.collection.CreateSimpleCollectionParam;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. 
Connect to Milvus server\nConnectParam connectParam = ConnectParam.newBuilder()\n .withUri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusServiceClient client = new MilvusServiceClient(connectParam);\n\n// 2. Create a collection\nCreateSimpleCollectionParam createCollectionParam = CreateSimpleCollectionParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withDimension(5)\n .build();\n\nclient.createCollection(createCollectionParam);\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(10000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n\n# Output\n#\n# {\n# \"id\": 0,\n# \"vector\": [\n# -0.5705990742218152,\n# 0.39844925120642083,\n# -0.8791287928610869,\n# 0.024163154953680932,\n# 0.6837669917169638\n# ],\n# \"color\": \"purple\",\n# \"tag\": 7774,\n# \"color_tag\": \"purple_7774\"\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data,\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 10000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(9990 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Random;\n\nimport com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.param.R;\nimport io.milvus.param.dml.InsertParam;\nimport io.milvus.response.MutationResultWrapper;\nimport io.milvus.grpc.MutationResult;\n\n\n// 3. Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<10000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertParam insertParam = InsertParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withRows(data)\n .build();\n\nR insertRes = client.insert(insertParam);\n\nif (insertRes.getStatus() != R.Status.Success.getCode()) {\n System.err.println(insertRes.getMessage());\n}\n\nMutationResultWrapper wrapper = new MutationResultWrapper(insertRes.getData());\nSystem.out.println(wrapper.getInsertCount());\n","from pymilvus import Collection\n\n# 4. 
Search with iterator\nconnections.connect(host=\"127.0.0.1\", port=19530)\ncollection = Collection(\"quick_setup\")\n\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\"nprobe\": 10}\n}\n\niterator = collection.search_iterator(\n data=query_vectors,\n anns_field=\"vector\",\n batch_size=10,\n param=search_params,\n output_fields=[\"color_tag\"],\n limit=300\n)\n# search 300 entities totally with 10 entities per page\n\nresults = []\n\nwhile True:\n result = iterator.next()\n if not result:\n iterator.close()\n break\n \n results.extend(result)\n \n for hit in result:\n results.append(hit.to_dict())\n\nprint(results)\n\n# Output\n#\n# [\n# {\n# \"id\": 1756,\n# \"distance\": 2.0642056465148926,\n# \"entity\": {\n# \"color_tag\": \"black_9109\"\n# }\n# },\n# {\n# \"id\": 6488,\n# \"distance\": 1.9437453746795654,\n# \"entity\": {\n# \"color_tag\": \"purple_8164\"\n# }\n# },\n# {\n# \"id\": 3338,\n# \"distance\": 1.9107104539871216,\n# \"entity\": {\n# \"color_tag\": \"brown_8121\"\n# }\n# }\n# ]\n","import io.milvus.param.dml.QueryIteratorParam;\nimport io.milvus.param.dml.SearchIteratorParam;\nimport io.milvus.response.QueryResultsWrapper;\nimport io.milvus.orm.iterator.SearchIterator;\n\n// 4. Search with iterators\nSearchIteratorParam iteratorParam = SearchIteratorParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withVectorFieldName(\"vector\")\n // Use withFloatVectors() in clusters compatible with Milvus 2.4.x\n .withVectors(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f))\n .withBatchSize(10L)\n .withParams(\"{\\\"metric_type\\\": \\\"COSINE\\\", \\\"params\\\": {\\\"level\\\": 1}}\")\n .build();\n \n\nR searchIteratorRes = client.searchIterator(iteratorParam);\n\nif (searchIteratorRes.getStatus() != R.Status.Success.getCode()) {\n System.err.println(searchIteratorRes.getMessage());\n}\n\nSearchIterator searchIterator = searchIteratorRes.getData();\nList results = new ArrayList<>();\n\nwhile (true) {\n List batchResults = searchIterator.next();\n if (batchResults.isEmpty()) {\n searchIterator.close();\n break;\n }\n for (QueryResultsWrapper.RowRecord rowRecord : batchResults) {\n results.add(rowRecord);\n }\n}\n\nSystem.out.println(results.size());\n","# 6. Query with iterator\niterator = collection.query_iterator(\n batch_size=10, # Controls the size of the return each time you call next()\n expr=\"color_tag like \\\"brown_8\\\"\",\n output_fields=[\"color_tag\"]\n)\n\nresults = []\n\nwhile True:\n result = iterator.next()\n if not result:\n iterator.close()\n break\n \n results.extend(result)\n \n# 8. Check the search results\nprint(len(results))\n\nprint(results[:3])\n\n# Output\n#\n# [\n# {\n# \"color_tag\": \"brown_8785\",\n# \"id\": 94\n# },\n# {\n# \"color_tag\": \"brown_8568\",\n# \"id\": 176\n# },\n# {\n# \"color_tag\": \"brown_8721\",\n# \"id\": 289\n# }\n# ]\n","import io.milvus.param.dml.QueryIteratorParam;\nimport io.milvus.orm.iterator.QueryIterator;\n\n// 5. 
Query with iterators\n\ntry {\n Files.write(Path.of(\"results.json\"), JSON.toJSONString(new ArrayList<>()).getBytes(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);\n} catch (Exception e) {\n // TODO: handle exception\n e.printStackTrace();\n}\n\nQueryIteratorParam queryIteratorParam = QueryIteratorParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withExpr(\"color_tag like \\\"brown_8%\\\"\")\n .withBatchSize(50L)\n .addOutField(\"vector\")\n .addOutField(\"color_tag\")\n .build();\n\nR queryIteratRes = client.queryIterator(queryIteratorParam);\n\nif (queryIteratRes.getStatus() != R.Status.Success.getCode()) {\n System.err.println(queryIteratRes.getMessage());\n}\n\nQueryIterator queryIterator = queryIteratRes.getData();\n\nwhile (true) {\n List batchResults = queryIterator.next();\n if (batchResults.isEmpty()) {\n queryIterator.close();\n break;\n }\n\n String jsonString = \"\";\n List jsonObject = new ArrayList<>();\n try {\n jsonString = Files.readString(Path.of(\"results.json\"));\n jsonObject = JSON.parseArray(jsonString).toJavaList(null);\n } catch (IOException e) {\n // TODO Auto-generated catch block\n e.printStackTrace();\n }\n\n for (QueryResultsWrapper.RowRecord queryResult : batchResults) {\n JSONObject row = new JSONObject();\n row.put(\"id\", queryResult.get(\"id\"));\n row.put(\"vector\", queryResult.get(\"vector\"));\n row.put(\"color_tag\", queryResult.get(\"color_tag\"));\n jsonObject.add(row);\n }\n\n try {\n Files.write(Path.of(\"results.json\"), JSON.toJSONString(jsonObject).getBytes(), StandardOpenOption.WRITE);\n } catch (IOException e) {\n // TODO Auto-generated catch block\n e.printStackTrace();\n }\n}\n"],"headingContent":"With Iterators","anchorList":[{"label":"Con iteradores","href":"With-Iterators","type":1,"isActive":false},{"label":"Visión general","href":"Overview","type":2,"isActive":false},{"label":"Preparativos","href":"Preparations","type":2,"isActive":false},{"label":"Búsqueda con iterador","href":"Search-with-iterator","type":2,"isActive":false},{"label":"Consulta con un iterador","href":"Query-with-an-iterator","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/userGuide/search-query-get/with-iterators.md b/localization/v2.4.x/site/es/userGuide/search-query-get/with-iterators.md index 6d61eec8a..172f38197 100644 --- a/localization/v2.4.x/site/es/userGuide/search-query-get/with-iterators.md +++ b/localization/v2.4.x/site/es/userGuide/search-query-get/with-iterators.md @@ -4,7 +4,7 @@ order: 4 summary: >- Milvus proporciona iteradores de búsqueda y consulta para iterar resultados con un gran volumen de entidades. -title: Con Iteradores +title: Con iteradores ---

Con iteradores

-Milvus proporciona iteradores de búsqueda y consulta para iterar resultados con un gran volumen de entidades. Dado que Milvus limita TopK a 16384, los usuarios pueden utilizar iteradores para devolver grandes números o incluso entidades enteras en una colección en modo por lotes.
+Milvus proporciona iteradores de búsqueda y consulta para iterar a través de un gran volumen de entidades. Dado que Milvus limita TopK a 16384, los usuarios pueden utilizar iteradores para devolver grandes números o incluso entidades enteras en una colección en modo por lotes.

Visión general

-Los iteradores son potentes herramientas que le ayudan a iterar a través de un gran volumen de datos o de todos los datos de una colección utilizando valores de clave primaria y expresiones booleanas. Esto puede mejorar significativamente la forma de recuperar datos. A diferencia del uso tradicional de parámetros de desplazamiento y límite, que pueden perder eficacia con el tiempo, los iteradores ofrecen una solución más escalable.
+Los iteradores son una herramienta eficaz para escanear una colección completa o iterar a través de un gran volumen de entidades especificando valores de clave primaria o una expresión de filtro. En comparación con una llamada de búsqueda o consulta con parámetros de desplazamiento y límite, el uso de iteradores es más eficiente y escalable.

Ventajas del uso de iteradores

-• Simplicidad: Elimina los complejos parámetros offset y limit.
+• Simplicidad: Elimina los complejos parámetros de desplazamiento y límite.

• Eficacia: Proporciona una recuperación de datos escalable al obtener sólo los datos necesarios.

• Coherencia: Garantiza un tamaño coherente del conjunto de datos con filtros booleanos.
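To make the drained-iterator loop concrete, here is a minimal PyMilvus sketch of the search-iterator pattern these hunks document. It assumes Milvus 2.4.x, a loaded collection named quick_setup with a 5-dimensional vector field (as built in the preparation step below), and the placeholder query values are illustrative; it is a sketch of the documented flow, not part of the localized text.

from pymilvus import connections, Collection

# Connect and load the target collection (names taken from the examples in this diff).
connections.connect(host="127.0.0.1", port=19530)
collection = Collection("quick_setup")
collection.load()

# Page through at most 300 hits, 10 per next() call.
iterator = collection.search_iterator(
    data=[[0.36, -0.60, 0.18, -0.26, 0.90]],  # one 5-d query vector (placeholder values)
    anns_field="vector",
    batch_size=10,
    param={"metric_type": "IP", "params": {"nprobe": 10}},
    output_fields=["color_tag"],
    limit=300,
)

results = []
while True:
    batch = iterator.next()
    if not batch:  # an empty page means the iterator is exhausted
        iterator.close()
        break
    results.extend(hit.to_dict() for hit in batch)

print(len(results))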

@@ -64,7 +64,7 @@ title: Con Iteradores
-Los siguientes pasos reutilizan el código para conectarse a Milvus, configurar rápidamente una colección e insertar más de 10.000 entidades generadas aleatoriamente en la colección.
+El siguiente paso de preparación se conecta a Milvus e inserta entidades generadas aleatoriamente en una colección.

    Paso 1: Crear una colección

    Utilice MilvusClient para conectarse al servidor Milvus y create_collection() para crear una colección.
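As a quick cross-check of that step, the client-and-collection setup from the code list earlier in this diff condenses to the sketch below (a local endpoint is assumed):

from pymilvus import MilvusClient

# Set up a Milvus client against a local deployment.
client = MilvusClient(uri="http://localhost:19530")

# Quick-setup collection: Milvus derives a default schema with a
# 5-dimensional float-vector field and an INT64 primary key.
client.create_collection(
    collection_name="quick_setup",
    dimension=5,
)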

@@ -266,8 +266,9 @@ iterator = collection.search_iterator(
     batch_size=10,
     param=search_params,
     output_fields=["color_tag"],
-    limit=3
+    limit=300
 )
+# search 300 entities totally with 10 entities per page

 results = []

@@ -366,7 +367,7 @@ System.out.println(results.size());
 batch_size
-El número de entidades a devolver cada vez que se llama a next() sobre el iterador actual. El valor por defecto es 1000. Ajústelo a un valor adecuado para controlar el número de entidades a devolver por iteración.
+El número de entidades a devolver cada vez que se llama a next() en el iterador actual.
    El valor por defecto es 1000. Ajústelo a un valor adecuado para controlar el número de entidades a devolver por iteración. param diff --git a/localization/v2.4.x/site/es/userGuide/tools/cli_commands.json b/localization/v2.4.x/site/es/userGuide/tools/cli_commands.json index a40884fbc..58a9c8895 100644 --- a/localization/v2.4.x/site/es/userGuide/tools/cli_commands.json +++ b/localization/v2.4.x/site/es/userGuide/tools/cli_commands.json @@ -1 +1 @@ -{"codeList":["clear\n","connect [-uri (text)] [-t (text)]\n","milvus_cli > connect -uri http://127.0.0.1:19530 \n","create database -db (text) \n","milvus_cli > create database -db testdb\n","use database -db (text) \n","milvus_cli > use database -db testdb\n","list databases \n","milvus_cli > list databases\n","delete database -db (text) \n","milvus_cli > delete database -db testdb\n","create user -u (text) -p (text)\n","milvus_cli > create user -u zilliz -p zilliz\n","create alias -c (text) -a (text) [-A] \n","milvus_cli > create alias -c car -a carAlias1\n","milvus_cli > create alias -c car2 -A -a carAlias1\n","create collection -c (text) -f (text) -p (text) [-a] [-d (text)]\n","## For array field: --schema-field support :::(:if Varchar)\n\nmilvus_cli > create collection -c car -f id:INT64:primary_field -f vector:FLOAT_VECTOR:128 -f color:INT64:color -f brand:ARRAY:64:VARCHAR:128 -p id -A -d 'car_collection'\n","create partition -c (text) -p (text) [-d (text)]\n","milvus_cli > create partition -c car -p new_partition -d test_add_partition\n","create index\n","milvus_cli > create index\n\nCollection name (car, car2): car2\n\nThe name of the field to create an index for (vector): vector\n\nIndex name: vectorIndex\n\n# Default is ''\nIndex type FLAT, IVF_FLAT, IVF_SQ8, IVF_PQ, RNSG, HNSW, ANNOY, AUTOINDEX, DISKANN, GPU_IVF_FLAT, GPU_IVF_PQ, SPARSE_INVERTED_INDEX, SPARSE_WAND, SCANN, STL_SORT, Trie, INVERTED, ) []: IVF_FLAT \n\n# Default is ''\nIndex metric type (L2, IP, HAMMING, TANIMOTO, COSINE, ) []: \n\nTimeout []:\n","delete user -u (text)\n","milvus_cli > delete user -u zilliz\n","delete alias -a (text) \n","delete collection -c (text) \n","milvus_cli > delete collection -c car\n","delete entities -c (text) -p (text) \n","milvus_cli > delete entities -c car\n\nThe expression to specify entities to be deleted, such as \"film_id in [ 0, 1 ]\": film_id in [ 0, 1 ]\n\nYou are trying to delete the entities of collection. This action cannot be undone!\n\nDo you want to continue? [y/N]: y\n","delete partition -c (text) -p (text)\n","milvus_cli > delete partition -c car -p new_partition\n","delete index -c (text) -in (text)\n","milvus_cli > delete index -c car -in indexName\n","show collection -c (text)\n","milvus_cli > show collection -c test_collection_insert\n","show partition -c (text) -p (text)\n","milvus_cli > show partition -c test_collection_insert -p _default\n","show index -c (text) -in (text)\n","milvus_cli > show index -c test_collection -in index_name\n","exit\n","help \n","import -c (text)[-p (text)] \n","milvus_cli > import -c car 'examples/import_csv/vectors.csv'\n\nReading csv file... 
[####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","milvus_cli > import -c car 'https://raw.githubusercontent.com/milvus-\nio/milvus_cli/main/examples/import_csv/vectors.csv'\n\nReading file from remote URL.\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","list users\n","list collections\n","list indexes -c (text)\n","list partitions -c (text)\n","load -c (text) [-p (text)]\n","query\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id in [ 428960801420883491, 428960801420883492,\n428960801420883493 ]\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: color, brand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id > 428960801420883491\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: id, color,\nbrand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","release -c (text) [-p (text)]\n","search\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file\nout headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. 
If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers):\n [[0.71, 0.76, 0.17, 0.13, 0.42, 0.07, 0.15, 0.67, 0.58, 0.02, 0.39, 0.47, 0.58, 0.88, 0.73, 0.31, 0.23, 0.57, 0.33, 0.2, 0.03, 0.43, 0.78, 0.49, 0.17, 0.56, 0.76, 0.54, 0.45, 0.46, 0.05, 0.1, 0.43, 0.63, 0.29, 0.44, 0.65, 0.01, 0.35, 0.46, 0.66, 0.7, 0.88, 0.07, 0.49, 0.92, 0.57, 0.5, 0.16, 0.77, 0.98, 0.1, 0.44, 0.88, 0.82, 0.16, 0.67, 0.63, 0.57, 0.55, 0.95, 0.13, 0.64, 0.43, 0.71, 0.81, 0.43, 0.65, 0.76, 0.7, 0.05, 0.24, 0.03, 0.9, 0.46, 0.28, 0.92, 0.25, 0.97, 0.79, 0.73, 0.97, 0.49, 0.28, 0.64, 0.19, 0.23, 0.51, 0.09, 0.1, 0.53, 0.03, 0.23, 0.94, 0.87, 0.14, 0.42, 0.82, 0.91, 0.11, 0.91, 0.37, 0.26, 0.6, 0.89, 0.6, 0.32, 0.11, 0.98, 0.67, 0.12, 0.66, 0.47, 0.02, 0.15, 0.6, 0.64, 0.57, 0.14, 0.81, 0.75, 0.11, 0.49, 0.78, 0.16, 0.63, 0.57, 0.18]]\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, car2): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []:\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []:\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. 
If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","list connections \n","show index_progress -c (text) [-i (text)]\n","show loading_progress -c (text) [-p (text)]\n","version\n","$ milvus_cli --version\nMilvus_CLI v0.4.0\n"],"headingContent":"","anchorList":[{"label":"Referencia de comandos Milvus_CLI","href":"MilvusCLI-Command-Reference","type":1,"isActive":false},{"label":"borrar","href":"clear","type":2,"isActive":false},{"label":"conectar","href":"connect","type":2,"isActive":false},{"label":"crear Base de Datos","href":"create-Database","type":2,"isActive":false},{"label":"use Base de datos","href":"use-Database","type":2,"isActive":false},{"label":"Listar Bases de Datos","href":"List-Databases","type":2,"isActive":false},{"label":"Borrar Base de Datos","href":"Delete-Database","type":2,"isActive":false},{"label":"crear usuario","href":"create-user","type":2,"isActive":false},{"label":"crear alias","href":"create-alias","type":2,"isActive":false},{"label":"crear colección","href":"create-collection","type":2,"isActive":false},{"label":"crear partición","href":"create-partition","type":2,"isActive":false},{"label":"crear índice","href":"create-index","type":2,"isActive":false},{"label":"delete user","href":"delete-user","type":2,"isActive":false},{"label":"borrar alias","href":"delete-alias","type":2,"isActive":false},{"label":"eliminar colección","href":"delete-collection","type":2,"isActive":false},{"label":"borrar entidades","href":"delete-entities","type":2,"isActive":false},{"label":"borrar partición","href":"delete-partition","type":2,"isActive":false},{"label":"borrar índice","href":"delete-index","type":2,"isActive":false},{"label":"mostrar colección","href":"show-collection","type":2,"isActive":false},{"label":"mostrar partición","href":"show-partition","type":2,"isActive":false},{"label":"mostrar índice","href":"show-index","type":2,"isActive":false},{"label":"salir","href":"exit","type":2,"isActive":false},{"label":"ayuda","href":"help","type":2,"isActive":false},{"label":"importar","href":"import","type":2,"isActive":false},{"label":"listar usuarios","href":"list-users","type":2,"isActive":false},{"label":"Listar colecciones","href":"list-collections","type":2,"isActive":false},{"label":"listar índices","href":"list-indexes","type":2,"isActive":false},{"label":"listar particiones","href":"list-partitions","type":2,"isActive":false},{"label":"cargar","href":"load","type":2,"isActive":false},{"label":"consulta","href":"query","type":2,"isActive":false},{"label":"liberar","href":"release","type":2,"isActive":false},{"label":"búsqueda","href":"search","type":2,"isActive":false},{"label":"Lista de conexiones","href":"List-connection","type":2,"isActive":false},{"label":"show index_progress","href":"show-indexprogress","type":2,"isActive":false},{"label":"mostrar_progreso_carga","href":"show-loadingprogress","type":2,"isActive":false},{"label":"versión","href":"version","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["clear\n","connect [-uri (text)] [-t (text)]\n","milvus_cli > connect -uri http://127.0.0.1:19530 \n","create database -db (text) \n","milvus_cli > create database -db testdb\n","use database -db (text) \n","milvus_cli > use database -db testdb\n","list databases \n","milvus_cli > list databases\n","delete database -db (text) \n","milvus_cli > delete database -db testdb\n","create user -u (text) -p (text)\n","milvus_cli > create user -u zilliz -p zilliz\n","create alias -c (text) -a (text) 
[-A] \n","milvus_cli > create alias -c car -a carAlias1\n","milvus_cli > create alias -c car2 -A -a carAlias1\n","create collection -c (text) -f (text) -p (text) [-a] [-d (text)]\n","## For array field: --schema-field support :::(:if Varchar)\n\nmilvus_cli > create collection -c car -f id:INT64:primary_field -f vector:FLOAT_VECTOR:128 -f color:INT64:color -f brand:ARRAY:64:VARCHAR:128 -p id -A -d 'car_collection'\n","create partition -c (text) -p (text) [-d (text)]\n","milvus_cli > create partition -c car -p new_partition -d test_add_partition\n","create index\n","milvus_cli > create index\n\nCollection name (car, car2): car2\n\nThe name of the field to create an index for (vector): vector\n\nIndex name: vectorIndex\n\n# Default is ''\nIndex type FLAT, IVF_FLAT, IVF_SQ8, IVF_PQ, RNSG, HNSW, ANNOY, AUTOINDEX, DISKANN, GPU_IVF_FLAT, GPU_IVF_PQ, SPARSE_INVERTED_INDEX, SPARSE_WAND, SCANN, STL_SORT, Trie, INVERTED, ) []: IVF_FLAT \n\n# Default is ''\nIndex metric type (L2, IP, HAMMING, TANIMOTO, COSINE, ) []: \n\nTimeout []:\n","delete user -u (text)\n","milvus_cli > delete user -u zilliz\n","delete alias -a (text) \n","delete collection -c (text) \n","milvus_cli > delete collection -c car\n","delete entities -c (text) -p (text) \n","milvus_cli > delete entities -c car\n\nThe expression to specify entities to be deleted, such as \"film_id in [ 0, 1 ]\": film_id in [ 0, 1 ]\n\nYou are trying to delete the entities of collection. This action cannot be undone!\n\nDo you want to continue? [y/N]: y\n","delete partition -c (text) -p (text)\n","milvus_cli > delete partition -c car -p new_partition\n","delete index -c (text) -in (text)\n","milvus_cli > delete index -c car -in indexName\n","show collection -c (text)\n","milvus_cli > show collection -c test_collection_insert\n","show partition -c (text) -p (text)\n","milvus_cli > show partition -c test_collection_insert -p _default\n","show index -c (text) -in (text)\n","milvus_cli > show index -c test_collection -in index_name\n","exit\n","help \n","import -c (text)[-p (text)] \n","milvus_cli > import -c car 'examples/import_csv/vectors.csv'\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","milvus_cli > import -c car 'https://raw.githubusercontent.com/milvus-\nio/milvus_cli/main/examples/import_csv/vectors.csv'\n\nReading file from remote URL.\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","list users\n","list collections\n","list indexes -c (text)\n","list partitions -c (text)\n","load -c (text) [-p (text)]\n","query\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id in [ 428960801420883491, 428960801420883492,\n428960801420883493 ]\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: color, brand\n\ntimeout []:\n\nGuarantee timestamp. 
This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id > 428960801420883491\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: id, color,\nbrand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","release -c (text) [-p (text)]\n","search\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file\nout headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers):\n [[0.71, 0.76, 0.17, 0.13, 0.42, 0.07, 0.15, 0.67, 0.58, 0.02, 0.39, 0.47, 0.58, 0.88, 0.73, 0.31, 0.23, 0.57, 0.33, 0.2, 0.03, 0.43, 0.78, 0.49, 0.17, 0.56, 0.76, 0.54, 0.45, 0.46, 0.05, 0.1, 0.43, 0.63, 0.29, 0.44, 0.65, 0.01, 0.35, 0.46, 0.66, 0.7, 0.88, 0.07, 0.49, 0.92, 0.57, 0.5, 0.16, 0.77, 0.98, 0.1, 0.44, 0.88, 0.82, 0.16, 0.67, 0.63, 0.57, 0.55, 0.95, 0.13, 0.64, 0.43, 0.71, 0.81, 0.43, 0.65, 0.76, 0.7, 0.05, 0.24, 0.03, 0.9, 0.46, 0.28, 0.92, 0.25, 0.97, 0.79, 0.73, 0.97, 0.49, 0.28, 0.64, 0.19, 0.23, 0.51, 0.09, 0.1, 0.53, 0.03, 0.23, 0.94, 0.87, 0.14, 0.42, 0.82, 0.91, 0.11, 0.91, 0.37, 0.26, 0.6, 0.89, 0.6, 0.32, 0.11, 0.98, 0.67, 0.12, 0.66, 0.47, 0.02, 0.15, 0.6, 0.64, 0.57, 0.14, 0.81, 0.75, 0.11, 0.49, 0.78, 0.16, 0.63, 0.57, 0.18]]\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. 
If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, car2): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []:\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []:\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","list connections \n","show index_progress -c (text) [-i (text)]\n","show loading_progress -c (text) [-p (text)]\n","version\n","$ milvus_cli --version\nMilvus_CLI v0.4.0\n"],"headingContent":"Milvus_CLI Command Reference","anchorList":[{"label":"Referencia de comandos Milvus_CLI","href":"MilvusCLI-Command-Reference","type":1,"isActive":false},{"label":"borrar","href":"clear","type":2,"isActive":false},{"label":"conectar","href":"connect","type":2,"isActive":false},{"label":"crear Base de Datos","href":"create-Database","type":2,"isActive":false},{"label":"use Base de datos","href":"use-Database","type":2,"isActive":false},{"label":"list Bases de datos","href":"list-Databases","type":2,"isActive":false},{"label":"delete Base de Datos","href":"delete-Database","type":2,"isActive":false},{"label":"crear usuario","href":"create-user","type":2,"isActive":false},{"label":"crear alias","href":"create-alias","type":2,"isActive":false},{"label":"crear colección","href":"create-collection","type":2,"isActive":false},{"label":"crear partición","href":"create-partition","type":2,"isActive":false},{"label":"crear índice","href":"create-index","type":2,"isActive":false},{"label":"delete user","href":"delete-user","type":2,"isActive":false},{"label":"borrar alias","href":"delete-alias","type":2,"isActive":false},{"label":"eliminar colección","href":"delete-collection","type":2,"isActive":false},{"label":"borrar entidades","href":"delete-entities","type":2,"isActive":false},{"label":"borrar partición","href":"delete-partition","type":2,"isActive":false},{"label":"borrar índice","href":"delete-index","type":2,"isActive":false},{"label":"mostrar colección","href":"show-collection","type":2,"isActive":false},{"label":"mostrar partición","href":"show-partition","type":2,"isActive":false},{"label":"mostrar índice","href":"show-index","type":2,"isActive":false},{"label":"salir","href":"exit","type":2,"isActive":false},{"label":"ayuda","href":"help","type":2,"isActive":false},{"label":"importar","href":"import","type":2,"isActive":false},{"label":"listar usuarios","href":"list-users","type":2,"isActive":false},{"label":"Listar colecciones","href":"list-collections","type":2,"isActive":false},{"label":"listar índices","href":"list-indexes","type":2,"isActive":false},{"label":"listar 
particiones","href":"list-partitions","type":2,"isActive":false},{"label":"cargar","href":"load","type":2,"isActive":false},{"label":"consulta","href":"query","type":2,"isActive":false},{"label":"liberar","href":"release","type":2,"isActive":false},{"label":"búsqueda","href":"search","type":2,"isActive":false},{"label":"listar conexiones","href":"list-connection","type":2,"isActive":false},{"label":"show index_progress","href":"show-indexprogress","type":2,"isActive":false},{"label":"mostrar_progreso_carga","href":"show-loadingprogress","type":2,"isActive":false},{"label":"versión","href":"version","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/userGuide/tools/cli_commands.md b/localization/v2.4.x/site/es/userGuide/tools/cli_commands.md index b97676d21..ceb0f4dc3 100644 --- a/localization/v2.4.x/site/es/userGuide/tools/cli_commands.md +++ b/localization/v2.4.x/site/es/userGuide/tools/cli_commands.md @@ -143,7 +143,7 @@ title: Referencia de comandos Milvus_CLI

    Ejemplos

    Ejemplo 1

    El siguiente ejemplo utiliza la base de datos testdb en milvus.

    milvus_cli > use database -db testdb
     
    -

    Listar Bases de Datos

    Ejemplo 1

    El siguiente ejemplo lista las bases de datos en milvus.

    milvus_cli > list databases
     
    -

    Borrar Base de Datos

    -

    Lista de conexiones

-Muestra el progreso de la carga de entidades.
+Muestra el progreso de carga de una colección.

    Sintaxis

    show loading_progress -c (text) [-p (text)]
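A hypothetical invocation following that syntax (the collection name is illustrative):

milvus_cli > show loading_progress -c test_collection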
     
    diff --git a/localization/v2.4.x/site/es/userGuide/tools/milvus_backup_overview.json b/localization/v2.4.x/site/es/userGuide/tools/milvus_backup_overview.json index efa443a0e..c3ea9aec7 100644 --- a/localization/v2.4.x/site/es/userGuide/tools/milvus_backup_overview.json +++ b/localization/v2.4.x/site/es/userGuide/tools/milvus_backup_overview.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"","anchorList":[{"label":"Copia de seguridad de Milvus","href":"Milvus-Backup","type":1,"isActive":false},{"label":"Requisitos previos","href":"Prerequisites","type":2,"isActive":false},{"label":"Arquitectura","href":"Architecture","type":2,"isActive":false},{"label":"Última versión","href":"Latest-release","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Milvus Backup","anchorList":[{"label":"Copia de seguridad de Milvus","href":"Milvus-Backup","type":1,"isActive":false},{"label":"Requisitos previos","href":"Prerequisites","type":2,"isActive":false},{"label":"Arquitectura","href":"Architecture","type":2,"isActive":false},{"label":"Última versión","href":"Latest-release","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/userGuide/tools/milvus_backup_overview.md b/localization/v2.4.x/site/es/userGuide/tools/milvus_backup_overview.md index 0ab0c124f..7371aab05 100644 --- a/localization/v2.4.x/site/es/userGuide/tools/milvus_backup_overview.md +++ b/localization/v2.4.x/site/es/userGuide/tools/milvus_backup_overview.md @@ -3,7 +3,7 @@ id: milvus_backup_overview.md summary: >- Milvus-Backup es una herramienta que permite a los usuarios realizar copias de seguridad y restaurar los datos de Milvus. -title: Milvus Backup +title: Copia de seguridad de Milvus ---

    Copia de seguridad de Milvus

    diff --git a/localization/v2.4.x/site/es/userGuide/use-partition-key.json b/localization/v2.4.x/site/es/userGuide/use-partition-key.json index e7630a3f7..8c9e3c335 100644 --- a/localization/v2.4.x/site/es/userGuide/use-partition-key.json +++ b/localization/v2.4.x/site/es/userGuide/use-partition-key.json @@ -1 +1 @@ -{"codeList":["import random, time\nfrom pymilvus import connections, MilvusClient, DataType\n\nSERVER_ADDR = \"http://localhost:19530\"\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=SERVER_ADDR\n)\n\n# 2. Create a collection\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n partition_key_field=\"color\",\n num_partitions=16 # Number of partitions. Defaults to 16.\n)\n\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\nschema.add_field(field_name=\"color\", datatype=DataType.VARCHAR, max_length=512)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.collection.request.AddFieldReq;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in customized setup mode\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder()\n .fieldName(\"id\")\n .dataType(DataType.Int64)\n .isPrimaryKey(true)\n .autoID(false)\n .build());\n\nschema.addField(AddFieldReq.builder()\n .fieldName(\"vector\")\n .dataType(DataType.FloatVector)\n .dimension(5)\n .build());\n \nschema.addField(AddFieldReq.builder()\n .fieldName(\"color\")\n .dataType(DataType.VarChar)\n .maxLength(512)\n .isPartitionKey(true)\n .build());\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\nasync function main() {\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address}); \n\n// 2. 
Create a collection\n// 2.1 Define fields\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n auto_id: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n {\n name: \"color\",\n data_type: DataType.VarChar,\n max_length: 512,\n is_partition_key: true\n }\n]\n","index_params = MilvusClient.prepare_index_params()\n\nindex_params.add_index(\n field_name=\"id\",\n index_type=\"STL_SORT\"\n)\n\nindex_params.add_index(\n field_name=\"color\",\n index_type=\"Trie\"\n)\n\nindex_params.add_index(\n field_name=\"vector\",\n index_type=\"IVF_FLAT\",\n metric_type=\"L2\",\n params={\"nlist\": 1024}\n)\n","// 2.3 Prepare index parameters\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.IP)\n .extraParams(Map.of(\"nlist\", 1024))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n","// 2.2 Prepare index parameters\nconst index_params = [{\n field_name: \"color\",\n index_type: \"Trie\"\n},{\n field_name: \"id\",\n index_type: \"STL_SORT\"\n},{\n field_name: \"vector\",\n index_type: \"IVF_FLAT\",\n metric_type: \"IP\",\n params: { nlist: 1024}\n}]\n","client.create_collection(\n collection_name=\"test_collection\",\n schema=schema,\n index_params=index_params\n)\n","// 2.4 Create a collection with schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n .collectionName(\"test_collection\")\n .collectionSchema(schema)\n .indexParams(indexParams) \n .build();\n\nclient.createCollection(customizedSetupReq);\n","// 2.3 Create a collection with fields and index parameters\nres = await client.createCollection({\n collection_name: \"test_collection\",\n fields: fields, \n index_params: index_params,\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n//\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n","// 3. Insert randomly generated vectors\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n int current_tag = rand.nextInt(8999) + 1000;\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"tag\", current_tag);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nSystem.out.println(JSONObject.toJSON(data.get(0))); \n","// 3. 
Insert randomly generated vectors \nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\nvar data = []\n\nfor (let i = 0; i < 1000; i++) {\n const current_color = colors[Math.floor(Math.random() * colors.length)]\n const current_tag = Math.floor(Math.random() * 8999 + 1000)\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n tag: current_tag,\n color_tag: `${current_color}_${current_tag}`\n })\n}\n\nconsole.log(data[0])\n","{\n id: 0,\n vector: [\n 0.1275656405044483,\n 0.47417858592773277,\n 0.13858264437643286,\n 0.2390904907020377,\n 0.8447862593689635\n ],\n color: 'blue',\n tag: 2064,\n color_tag: 'blue_2064'\n}\n","res = client.insert(\n collection_name=\"test_collection\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n","// 3.1 Insert data into the collection\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"test_collection\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n","res = await client.insert({\n collection_name: \"test_collection\",\n data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n","# 4. Search with partition key\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\n\nres = client.search(\n collection_name=\"test_collection\",\n data=query_vectors,\n filter=\"color == 'green'\",\n search_params={\"metric_type\": \"L2\", \"params\": {\"nprobe\": 10}},\n output_fields=[\"id\", \"color_tag\"],\n limit=3\n)\n\nprint(res)\n\n# Output\n#\n# [\n# [\n# {\n# \"id\": 970,\n# \"distance\": 0.5770174264907837,\n# \"entity\": {\n# \"id\": 970,\n# \"color_tag\": \"green_9828\"\n# }\n# },\n# {\n# \"id\": 115,\n# \"distance\": 0.6898155808448792,\n# \"entity\": {\n# \"id\": 115,\n# \"color_tag\": \"green_4073\"\n# }\n# },\n# {\n# \"id\": 899,\n# \"distance\": 0.7028976678848267,\n# \"entity\": {\n# \"id\": 899,\n# \"color_tag\": \"green_9897\"\n# }\n# }\n# ]\n# ]\n","// 4. Search with partition key\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"test_collection\")\n .data(query_vectors)\n .filter(\"color == \\\"green\\\"\")\n .topK(3)\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp)); \n\n// Output:\n// {\"searchResults\": [[\n// {\n// \"distance\": 1.0586997,\n// \"id\": 414,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.981384,\n// \"id\": 293,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.9548756,\n// \"id\": 325,\n// \"entity\": {}\n// }\n// ]]}\n","// 4. 
Search with partition key\nconst query_vectors = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"test_collection\",\n data: query_vectors,\n filter: \"color == 'green'\",\n output_fields: [\"color_tag\"],\n limit: 3\n})\n\nconsole.log(res.results)\n\n// Output\n// \n// [\n// { score: 2.402090549468994, id: '135', color_tag: 'green_2694' },\n// { score: 2.3938629627227783, id: '326', color_tag: 'green_7104' },\n// { score: 2.3235254287719727, id: '801', color_tag: 'green_3162' }\n// ]\n// \n"],"headingContent":"","anchorList":[{"label":"Use Partition Key","href":"Use-Partition-Key","type":1,"isActive":false},{"label":"Overview","href":"Overview","type":2,"isActive":false},{"label":"Enable partition key","href":"Enable-partition-key","type":2,"isActive":false},{"label":"List partitions","href":"List-partitions","type":2,"isActive":false},{"label":"Insert data","href":"Insert-data","type":2,"isActive":false},{"label":"Use partition key","href":"Use-partition-key","type":2,"isActive":false},{"label":"Typical use cases","href":"Typical-use-cases","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["import random, time\nfrom pymilvus import connections, MilvusClient, DataType\n\nSERVER_ADDR = \"http://localhost:19530\"\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=SERVER_ADDR\n)\n\n# 2. Create a collection\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n partition_key_field=\"color\",\n num_partitions=64 # Number of partitions. Defaults to 64.\n)\n\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\nschema.add_field(field_name=\"color\", datatype=DataType.VARCHAR, max_length=512)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.collection.request.AddFieldReq;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in customized setup mode\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder()\n .fieldName(\"id\")\n .dataType(DataType.Int64)\n .isPrimaryKey(true)\n .autoID(false)\n .build());\n\nschema.addField(AddFieldReq.builder()\n .fieldName(\"vector\")\n .dataType(DataType.FloatVector)\n .dimension(5)\n .build());\n \nschema.addField(AddFieldReq.builder()\n .fieldName(\"color\")\n .dataType(DataType.VarChar)\n .maxLength(512)\n .isPartitionKey(true)\n .build());\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\nasync function main() {\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address}); \n\n// 2. 
Create a collection\n// 2.1 Define fields\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n auto_id: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n {\n name: \"color\",\n data_type: DataType.VarChar,\n max_length: 512,\n is_partition_key: true\n }\n]\n","index_params = MilvusClient.prepare_index_params()\n\nindex_params.add_index(\n field_name=\"id\",\n index_type=\"STL_SORT\"\n)\n\nindex_params.add_index(\n field_name=\"color\",\n index_type=\"Trie\"\n)\n\nindex_params.add_index(\n field_name=\"vector\",\n index_type=\"IVF_FLAT\",\n metric_type=\"L2\",\n params={\"nlist\": 1024}\n)\n","// 2.3 Prepare index parameters\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.IP)\n .extraParams(Map.of(\"nlist\", 1024))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n","// 2.2 Prepare index parameters\nconst index_params = [{\n field_name: \"color\",\n index_type: \"Trie\"\n},{\n field_name: \"id\",\n index_type: \"STL_SORT\"\n},{\n field_name: \"vector\",\n index_type: \"IVF_FLAT\",\n metric_type: \"IP\",\n params: { nlist: 1024}\n}]\n","client.create_collection(\n collection_name=\"test_collection\",\n schema=schema,\n index_params=index_params\n)\n","// 2.4 Create a collection with schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n .collectionName(\"test_collection\")\n .collectionSchema(schema)\n .indexParams(indexParams) \n .build();\n\nclient.createCollection(customizedSetupReq);\n","// 2.3 Create a collection with fields and index parameters\nres = await client.createCollection({\n collection_name: \"test_collection\",\n fields: fields, \n index_params: index_params,\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n//\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n","// 3. Insert randomly generated vectors\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n int current_tag = rand.nextInt(8999) + 1000;\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"tag\", current_tag);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nSystem.out.println(JSONObject.toJSON(data.get(0))); \n","// 3. 
Insert randomly generated vectors \nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\nvar data = []\n\nfor (let i = 0; i < 1000; i++) {\n const current_color = colors[Math.floor(Math.random() * colors.length)]\n const current_tag = Math.floor(Math.random() * 8999 + 1000)\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n tag: current_tag,\n color_tag: `${current_color}_${current_tag}`\n })\n}\n\nconsole.log(data[0])\n","{\n id: 0,\n vector: [\n 0.1275656405044483,\n 0.47417858592773277,\n 0.13858264437643286,\n 0.2390904907020377,\n 0.8447862593689635\n ],\n color: 'blue',\n tag: 2064,\n color_tag: 'blue_2064'\n}\n","res = client.insert(\n collection_name=\"test_collection\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n","// 3.1 Insert data into the collection\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"test_collection\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n","res = await client.insert({\n collection_name: \"test_collection\",\n data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n","# 4. Search with partition key\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\n\nres = client.search(\n collection_name=\"test_collection\",\n data=query_vectors,\n filter=\"color == 'green'\",\n search_params={\"metric_type\": \"L2\", \"params\": {\"nprobe\": 10}},\n output_fields=[\"id\", \"color_tag\"],\n limit=3\n)\n\nprint(res)\n\n# Output\n#\n# [\n# [\n# {\n# \"id\": 970,\n# \"distance\": 0.5770174264907837,\n# \"entity\": {\n# \"id\": 970,\n# \"color_tag\": \"green_9828\"\n# }\n# },\n# {\n# \"id\": 115,\n# \"distance\": 0.6898155808448792,\n# \"entity\": {\n# \"id\": 115,\n# \"color_tag\": \"green_4073\"\n# }\n# },\n# {\n# \"id\": 899,\n# \"distance\": 0.7028976678848267,\n# \"entity\": {\n# \"id\": 899,\n# \"color_tag\": \"green_9897\"\n# }\n# }\n# ]\n# ]\n","// 4. Search with partition key\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"test_collection\")\n .data(query_vectors)\n .filter(\"color == \\\"green\\\"\")\n .topK(3)\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp)); \n\n// Output:\n// {\"searchResults\": [[\n// {\n// \"distance\": 1.0586997,\n// \"id\": 414,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.981384,\n// \"id\": 293,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.9548756,\n// \"id\": 325,\n// \"entity\": {}\n// }\n// ]]}\n","// 4. 
Search with partition key\nconst query_vectors = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"test_collection\",\n data: query_vectors,\n filter: \"color == 'green'\",\n output_fields: [\"color_tag\"],\n limit: 3\n})\n\nconsole.log(res.results)\n\n// Output\n// \n// [\n// { score: 2.402090549468994, id: '135', color_tag: 'green_2694' },\n// { score: 2.3938629627227783, id: '326', color_tag: 'green_7104' },\n// { score: 2.3235254287719727, id: '801', color_tag: 'green_3162' }\n// ]\n// \n"],"headingContent":"Use Partition Key","anchorList":[{"label":"Utilizar la clave de partición","href":"Use-Partition-Key","type":1,"isActive":false},{"label":"Visión general","href":"Overview","type":2,"isActive":false},{"label":"Activar la clave de partición","href":"Enable-partition-key","type":2,"isActive":false},{"label":"Listar particiones","href":"List-partitions","type":2,"isActive":false},{"label":"Insertar datos","href":"Insert-data","type":2,"isActive":false},{"label":"Utilizar la clave de partición","href":"Use-partition-key","type":2,"isActive":false},{"label":"Casos de uso típicos","href":"Typical-use-cases","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/es/userGuide/use-partition-key.md b/localization/v2.4.x/site/es/userGuide/use-partition-key.md index 990c59b03..8540ab57f 100644 --- a/localization/v2.4.x/site/es/userGuide/use-partition-key.md +++ b/localization/v2.4.x/site/es/userGuide/use-partition-key.md @@ -1,9 +1,8 @@ --- id: use-partition-key.md -title: Use Partition Key -summary: '' +title: Utilizar la clave de partición --- -

    Use Partition Key

    This guide walks you through using the partition key to accelerate data retrieval from your collection.

    -

    Overview

    Esta guía le explica cómo utilizar la clave de partición para acelerar la recuperación de datos de su colección.

    +

    Visión general

    You can set a particular field in a collection as the partition key so that Milvus distributes incoming entities into different partitions according to their respective partition values in this field. This allows entities with the same key value to be grouped in a partition, accelerating search performance by avoiding the need to scan irrelevant partitions when filtering by the key field. When compared to traditional filtering methods, the partition key can greatly enhance query performance.

    -

    You can use the partition key to implement multi-tenancy. For details, read Multi-tenancy.

    -

    Enable partition key

    Puede establecer un campo particular en una colección como clave de partición para que Milvus distribuya las entidades entrantes en diferentes particiones según sus respectivos valores de partición en este campo. Esto permite que las entidades con el mismo valor de clave se agrupen en una partición, acelerando el rendimiento de la búsqueda al evitar la necesidad de escanear particiones irrelevantes al filtrar por el campo clave. En comparación con los métodos de filtrado tradicionales, la clave de partición puede mejorar enormemente el rendimiento de la consulta.

    +

    Puede utilizar la clave de partición para implementar multi-tenancy. Para más detalles, lea Multi-tenancy.

    +

    Activar la clave de partición

    To set a field as the partition key, specify partition_key_field when creating a collection schema.

    -

    In the example code below, num_partitions determines the number of partitions that will be created. By default, it is set to 16. We recommend you retain the default value.

    +

    Para establecer un campo como clave de partición, especifique partition_key_field al crear un esquema de colección.

    +

    En el código de ejemplo siguiente, num_partitions determina el número de particiones que se crearán. Por defecto, se establece en 64. Le recomendamos que mantenga el valor por defecto.

    -

    For more information on parameters, refer to MilvusClient, create_schema(), and add_field() in the SDK reference.

    +

    Para obtener más información sobre los parámetros, consulte MilvusClient, create_schema() y add_field() en la referencia del SDK.

    -

    For more information on parameters, refer to MilvusClientV2, createSchema(), and addField() in the SDK reference.

    +

    Para más información sobre los parámetros, consulte MilvusClientV2, createSchema() y addField() en la referencia del SDK.

    -

    For more information on parameters, refer to MilvusClient and createCollection() in the SDK reference.

    +

    Para obtener más información sobre los parámetros, consulte MilvusClient y createCollection() en la referencia del SDK.

    + Python Java Node.js
    import random, time
     from pymilvus import connections, MilvusClient, DataType
     
    @@ -82,7 +78,7 @@ schema = MilvusClient.create_schema(
         auto_id=False,
         enable_dynamic_field=True,
         partition_key_field="color",
    -    num_partitions=16 # Number of partitions. Defaults to 16.
    +    num_partitions=64 # Number of partitions. Defaults to 64.
     )
     
     schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
    @@ -161,12 +157,9 @@ client = new M
         }
     ]
     
    -

    After you have defined the fields, set up the index parameters.

    +

    Una vez definidos los campos, configure los parámetros de índice.

    + Python Java Node.js
    index_params = MilvusClient.prepare_index_params()
     
     index_params.add_index(
    @@ -211,12 +204,9 @@ indexParams.add(indexParamForVectorFie
         params: { nlist: 1024}
     }]
     
    -

    Finally, you can create a collection.

    +

    Finalmente, puede crear una colección.

    + Python Java Node.js
    client.create_collection(
         collection_name="test_collection",
         schema=schema,
    @@ -246,7 +236,7 @@ res = await client.// Success
     //
     
    -

    List partitions

    Once a field of a collection is used as the partition key, Milvus creates the specified number of partitions and manages them on your behalf. Therefore, you cannot manipulate the partitions in this collection anymore.

    -

    The following snippet demonstrates that 64 partitions are created in a collection once one of its fields is used as the partition key.

    -

    Insert data

    Una vez que un campo de una colección se utiliza como clave de partición, Milvus crea el número especificado de particiones y las gestiona en su nombre. Por lo tanto, ya no puede manipular las particiones de esta colección.

    +

    El siguiente fragmento demuestra que se crean 64 particiones en una colección una vez que uno de sus campos se utiliza como clave de partición.
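As a minimal sketch of that check, assuming a local Milvus instance and the test_collection created above (the URI is an assumption):

```python
from pymilvus import MilvusClient

# Assumes a local Milvus instance and the test_collection created above.
client = MilvusClient(uri="http://localhost:19530")

# Milvus manages these partitions itself; listing them should show 64 entries.
partitions = client.list_partitions(collection_name="test_collection")
print(len(partitions))
# Expected: 64
```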

    +

    Insertar datos

    Once the collection is ready, start inserting data as follows:

    -

    Prepare data

    +

    Una vez que la colección esté lista, comience a insertar datos como se indica a continuación:

    +

    Preparar datos

    # 3. Insert randomly generated vectors 
     colors = ["green", "blue", "yellow", "red", "black", "white", "purple", "pink", "orange", "brown", "grey"]
     data = []
    @@ -338,7 +325,7 @@ data = []
     
     console.log(data[0])
     
    -

    You can view the structure of the generated data by checking its first entry.

    +

    Puede ver la estructura de los datos generados comprobando su primera entrada.

    {
         id: 0,
         vector: [
    @@ -353,20 +340,17 @@ data = []
         color_tag: 'blue_2064'
     }
     
    -

    Insert data

    -

    Use the insert() method to insert the data into the collection.

    +

    Insertar datos

    +

    Utilice el método insert() para insertar los datos en la colección.

    -

    Use the insert() method to insert the data into the collection.

    +

    Utilice el método insert() para insertar los datos en la colección.

    -

    Use the insert() method to insert the data into the collection.

    +

    Utilice el método insert() para insertar los datos en la colección.

    + Python Java Node.js
    res = client.insert(
         collection_name="test_collection",
         data=data
    @@ -418,7 +402,7 @@ data = []
     // 1000
     // 
     
    -

    Use partition key

    Once you have indexed and loaded the collection as well as inserted data, you can conduct a similarity search using the partition key.

    +

    Una vez que haya indexado y cargado la colección e insertado los datos, puede realizar una búsqueda por similitud utilizando la clave de partición.

    -

    For more information on parameters, refer to search() in the SDK reference.

    +

    Para obtener más información sobre los parámetros, consulte search() en la referencia del SDK.

    -

    For more information on parameters, refer to search() in the SDK reference.

    +

    Para obtener más información sobre los parámetros, consulte search() en la referencia del SDK.

    -

    For more information on parameters, refer to search() in the SDK reference.

    +

    Para más información sobre parámetros, consulte search() en la referencia del SDK.

    -

    Notes

    -

    To conduct a similarity search using the partition key, you should include either of the following in the boolean expression of the search request:

    +

    Notas

    +

    Para realizar una búsqueda por similitud utilizando la clave de partición, debe incluir cualquiera de los siguientes elementos en la expresión booleana de la petición de búsqueda:

    • expr='<partition_key>=="xxxx"'

    • expr='<partition_key> in ["xxx", "xxx"]'

    -

    Be sure to replace <partition_key> with the name of the field that is designated as the partition key.

    +

    Sustituya <partition_key> por el nombre del campo designado como clave de partición.

    + Python Java Node.js
    # 4. Search with partition key
     query_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]
     
    @@ -557,7 +538,7 @@ res = await client.// ]
     // 
     
    -

    Typical use cases

    You can utilize the partition key feature to achieve better search performance and enable multi-tenancy. This can be done by assigning a tenant-specific value as the partition key field for each entity. When searching or querying the collection, you can filter entities by the tenant-specific value by including the partition key field in the boolean expression. This approach ensures data isolation by tenants and avoids scanning unnecessary partitions.

    +

    Puede utilizar la función de clave de partición para mejorar el rendimiento de la búsqueda y permitir la multitenencia. Esto puede hacerse asignando un valor específico del inquilino como campo de clave de partición para cada entidad. Al buscar o consultar la colección, puede filtrar entidades por el valor específico del inquilino incluyendo el campo de clave de partición en la expresión booleana. Este enfoque garantiza el aislamiento de datos por inquilinos y evita escanear particiones innecesarias.
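To make this pattern concrete, here is a hedged Python sketch; the collection name, the tenant_id partition-key field, and the query vector are hypothetical stand-ins:

```python
# Hypothetical multi-tenant search: tenant_id is the partition-key field, so the
# filter confines the search to the partitions holding that tenant's entities.
res = client.search(
    collection_name="multi_tenant_collection",  # hypothetical collection
    data=[[0.1, 0.2, 0.3, 0.4, 0.5]],           # hypothetical query vector
    filter="tenant_id == 'tenant_42'",          # partition-key filter isolates the tenant
    output_fields=["id"],
    limit=3,
)
print(res)
```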

    diff --git a/localization/v2.4.x/site/fr/adminGuide/clouds/aws/s3.json b/localization/v2.4.x/site/fr/adminGuide/clouds/aws/s3.json index 4fdf004ec..24d9c1dc6 100644 --- a/localization/v2.4.x/site/fr/adminGuide/clouds/aws/s3.json +++ b/localization/v2.4.x/site/fr/adminGuide/clouds/aws/s3.json @@ -1 +1 @@ -{"codeList":["milvus_bucket_name=\"milvus-bucket-$(openssl rand -hex 12)\"\n\naws s3api create-bucket --bucket \"$milvus_bucket_name\" --region 'us-east-2' --acl private --object-ownership ObjectWriter --create-bucket-configuration LocationConstraint='us-east-2'\n\n\n# Output\n#\n# \"Location\": \"http://milvus-bucket-039dd013c0712f085d60e21f.s3.amazonaws.com/\"\n","echo '{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n}' > milvus-s3-policy.json\n\naws iam create-policy --policy-name MilvusS3ReadWrite --policy-document file://milvus-s3-policy.json\n\n\n# Get the ARN from the command output as follows:\n# {\n# \"Policy\": {\n# \"PolicyName\": \"MilvusS3ReadWrite\",\n# \"PolicyId\": \"AN5QQVVPM1BVTFlBNkdZT\",\n# \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n# \"Path\": \"/\",\n# \"DefaultVersionId\": \"v1\",\n# \"AttachmentCount\": 0,\n# \"PermissionsBoundaryUsageCount\": 0,\n# \"IsAttachable\": true,\n# \"CreateDate\": \"2023-11-16T06:00:01+00:00\",\n# \"UpdateDate\": \"2023-11-16T06:00:01+00:00\"\n# }\n# } \n","eksctl create iamserviceaccount --name milvus-s3-access-sa --namespace milvus --cluster milvus-eks-cluster --role-name milvus-s3-access-sa \\\n --attach-policy-arn arn:aws:iam:::policy/MilvusS3ReadWrite --approve\n","aws iam get-role --role-name milvus-s3-access-sa --query Role.AssumeRolePolicyDocument\n# An example output is as follows\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Federated\": \"arn:aws:iam::111122223333:oidc-provider/oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE\"\n },\n \"Action\": \"sts:AssumeRoleWithWebIdentity\",\n \"Condition\": {\n \"StringEquals\": {\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:sub\": \"system:serviceaccount:default:my-service-account\",\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:aud\": \"sts.amazonaws.com\"\n }\n }\n }\n ]\n}\n","aws iam list-attached-role-policies --role-name milvus-s3-access-sa --query 'AttachedPolicies[].PolicyArn' --output text\n# An example output is as follows\narn:aws:iam::12345678901:policy/MilvusS3ReadWrite\n","export policy_arn='arn:aws:iam::12345678901:policy/MilvusS3ReadWrite'\naws iam get-policy --policy-arn $policy_arn\n# An example output is as follows\n{\n \"Policy\": {\n \"PolicyName\": \"MilvusS3ReadWrite\",\n \"PolicyId\": \"EXAMPLEBIOWGLDEXAMPLE\",\n \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n \"Path\": \"/\",\n \"DefaultVersionId\": \"v2\",\n [...]\n }\n}\n","aws iam get-policy-version --policy-arn $policy_arn --version-id v2\n# An example output is as follows\n{\n \"PolicyVersion\": {\n \"Document\": {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n },\n [...]\n }\n}\n","kubectl describe 
serviceaccount milvus-s3-access-sa -n milvus\n# An example output is as follows\nName: milvus-s3-access-sa\nNamespace: milvus\nLabels: app.kubernetes.io/managed-by=eksctl\nAnnotations: eks.amazonaws.com/role-arn: arn:aws:iam::12345678901:role/milvus-s3-access-sa\n[...]\n","helm repo add milvus https://zilliztech.github.io/milvus-helm/\nhelm repo update\n","cluster:\n enabled: true\n\nservice:\n type: LoadBalancer\n port: 19530\n annotations: \n service.beta.kubernetes.io/aws-load-balancer-type: external\n service.beta.kubernetes.io/aws-load-balancer-name: milvus-service\n service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing\n service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip\n\nserviceAccount:\n create: false\n name: milvus-s3-access-sa\n\nminio:\n enabled: false\n\nexternalS3:\n enabled: true\n host: \"s3.us-east-2.amazonaws.com\"\n port: \"443\"\n useSSL: true\n bucketName: \"\"\n useIAM: true\n cloudProvider: \"aws\"\n iamEndpoint: \"\"\n\nrootCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: 1\n memory: 2Gi\n\nindexCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nqueryCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\ndataCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nproxy:\n replicas: 2\n resources: \n limits:\n cpu: 1\n memory: 2Gi \n","helm upgrade --install milvus-demo milvus/milvus -n milvus -f milvus.yaml\n"],"headingContent":"","anchorList":[{"label":"Configuration de l'accès S3 par rôle IAM","href":"Configure-S3-Access-by-IAM-Role","type":1,"isActive":false},{"label":"Avant de commencer","href":"Before-you-start","type":2,"isActive":false},{"label":"Associer un rôle IAM à un compte de service Kubernetes","href":"Associate-an-IAM-role-with-a-Kubernetes-service-account","type":2,"isActive":false},{"label":"Vérifiez la configuration du rôle et du compte de service","href":"Verify-the-role-and-service-account-setup","type":2,"isActive":false},{"label":"Déployer Milvus","href":"Deploy-Milvus","type":2,"isActive":false},{"label":"Vérifier l'installation","href":"Verify-the-installation","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["milvus_bucket_name=\"milvus-bucket-$(openssl rand -hex 12)\"\n\naws s3api create-bucket --bucket \"$milvus_bucket_name\" --region 'us-east-2' --acl private --object-ownership ObjectWriter --create-bucket-configuration LocationConstraint='us-east-2'\n\n\n# Output\n#\n# \"Location\": \"http://milvus-bucket-039dd013c0712f085d60e21f.s3.amazonaws.com/\"\n","echo '{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:ListBucket\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\"\n ]\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:DeleteObject\",\n \"s3:GetObject\",\n \"s3:PutObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n}' > milvus-s3-policy.json\n\naws iam create-policy --policy-name MilvusS3ReadWrite --policy-document file://milvus-s3-policy.json\n\n\n# Get the ARN from the command output as follows:\n# {\n# \"Policy\": {\n# \"PolicyName\": \"MilvusS3ReadWrite\",\n# \"PolicyId\": \"AN5QQVVPM1BVTFlBNkdZT\",\n# \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n# \"Path\": \"/\",\n# \"DefaultVersionId\": \"v1\",\n# \"AttachmentCount\": 0,\n# \"PermissionsBoundaryUsageCount\": 
0,\n# \"IsAttachable\": true,\n# \"CreateDate\": \"2023-11-16T06:00:01+00:00\",\n# \"UpdateDate\": \"2023-11-16T06:00:01+00:00\"\n# }\n# } \n","eksctl create iamserviceaccount --name milvus-s3-access-sa --namespace milvus --cluster milvus-eks-cluster --role-name milvus-s3-access-sa \\\n --attach-policy-arn arn:aws:iam:::policy/MilvusS3ReadWrite --approve\n","aws iam get-role --role-name milvus-s3-access-sa --query Role.AssumeRolePolicyDocument\n# An example output is as follows\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Federated\": \"arn:aws:iam::111122223333:oidc-provider/oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE\"\n },\n \"Action\": \"sts:AssumeRoleWithWebIdentity\",\n \"Condition\": {\n \"StringEquals\": {\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:sub\": \"system:serviceaccount:default:my-service-account\",\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:aud\": \"sts.amazonaws.com\"\n }\n }\n }\n ]\n}\n","aws iam list-attached-role-policies --role-name milvus-s3-access-sa --query 'AttachedPolicies[].PolicyArn' --output text\n# An example output is as follows\narn:aws:iam::12345678901:policy/MilvusS3ReadWrite\n","export policy_arn='arn:aws:iam::12345678901:policy/MilvusS3ReadWrite'\naws iam get-policy --policy-arn $policy_arn\n# An example output is as follows\n{\n \"Policy\": {\n \"PolicyName\": \"MilvusS3ReadWrite\",\n \"PolicyId\": \"EXAMPLEBIOWGLDEXAMPLE\",\n \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n \"Path\": \"/\",\n \"DefaultVersionId\": \"v2\",\n [...]\n }\n}\n","aws iam get-policy-version --policy-arn $policy_arn --version-id v2\n# An example output is as follows\n{\n \"PolicyVersion\": {\n \"Document\": {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n },\n [...]\n }\n}\n","kubectl describe serviceaccount milvus-s3-access-sa -n milvus\n# An example output is as follows\nName: milvus-s3-access-sa\nNamespace: milvus\nLabels: app.kubernetes.io/managed-by=eksctl\nAnnotations: eks.amazonaws.com/role-arn: arn:aws:iam::12345678901:role/milvus-s3-access-sa\n[...]\n","helm repo add milvus https://zilliztech.github.io/milvus-helm/\nhelm repo update\n","cluster:\n enabled: true\n\nservice:\n type: LoadBalancer\n port: 19530\n annotations: \n service.beta.kubernetes.io/aws-load-balancer-type: external\n service.beta.kubernetes.io/aws-load-balancer-name: milvus-service\n service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing\n service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip\n\nserviceAccount:\n create: false\n name: milvus-s3-access-sa\n\nminio:\n enabled: false\n\nexternalS3:\n enabled: true\n host: \"s3.us-east-2.amazonaws.com\"\n port: \"443\"\n useSSL: true\n bucketName: \"\"\n useIAM: true\n cloudProvider: \"aws\"\n iamEndpoint: \"\"\n\nrootCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: 1\n memory: 2Gi\n\nindexCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nqueryCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\ndataCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n 
limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nproxy:\n replicas: 2\n resources: \n limits:\n cpu: 1\n memory: 2Gi \n","helm upgrade --install milvus-demo milvus/milvus -n milvus -f milvus.yaml\n"],"headingContent":"Configure S3 Access by IAM Role","anchorList":[{"label":"Configuration de l'accès S3 par rôle IAM","href":"Configure-S3-Access-by-IAM-Role","type":1,"isActive":false},{"label":"Avant de commencer","href":"Before-you-start","type":2,"isActive":false},{"label":"Associer un rôle IAM à un compte de service Kubernetes","href":"Associate-an-IAM-role-with-a-Kubernetes-service-account","type":2,"isActive":false},{"label":"Vérifiez la configuration du rôle et du compte de service","href":"Verify-the-role-and-service-account-setup","type":2,"isActive":false},{"label":"Déployer Milvus","href":"Deploy-Milvus","type":2,"isActive":false},{"label":"Vérifier l'installation","href":"Verify-the-installation","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/adminGuide/clouds/aws/s3.md b/localization/v2.4.x/site/fr/adminGuide/clouds/aws/s3.md index 00d8cc2c4..b18007a47 100644 --- a/localization/v2.4.x/site/fr/adminGuide/clouds/aws/s3.md +++ b/localization/v2.4.x/site/fr/adminGuide/clouds/aws/s3.md @@ -1,8 +1,8 @@ --- id: s3.md -title: Configurer l'accès S3 par rôle IAM +title: Configuration de l'accès S3 par rôle IAM related_key: 's3, storage, iam' -summary: Apprenez à configurer s3 avec un rôle IAM. +summary: Apprenez à configurer s3 avec le rôle IAM. ---

    Configuration de l'accès S3 par rôle IAM

    Cette rubrique explique comment configurer l'accès S3 par rôle IAM lorsque vous installez Milvus avec helm. Pour plus de détails, voir Rôles IAM.

    +

    Cette rubrique explique comment configurer l'accès S3 par rôle IAM lorsque vous installez Milvus avec helm. Pour plus de détails, voir Rôles IAM.

    Avant de commencer

      -
    • Veuillez activer OIDC lors de la création d'un cluster EKS à l'aide d'eksctl. Pour plus de détails, reportez-vous à IAM OIDC.

    • +
    • Veuillez activer OIDC lors de la création d'un cluster EKS à l'aide d'eksctl. Pour plus de détails, reportez-vous à IAM OIDC.

    • Ce guide suppose que vous avez déjà déployé un cluster Milvus sur AWS avec Kubernetes.

    Associer un rôle IAM à un compte de service Kubernetes

  • Appliquer le fichier :

    kubectl apply -f milvus-operator-certificate.yaml
    @@ -160,7 +160,7 @@ helm repo update milvus-operator

    Suivre le reste du guide sur le site de documentation de Milvus : Déployer Milvus.

    -

    Prochaines étapes

    Téléchargez milvus.yaml directement ou à l'aide de la commande suivante.

    -
    $ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.9/configs/milvus.yaml
    +    

    Téléchargez milvus.yaml directement ou à l'aide de la commande suivante.

    +
    $ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.13-hotfix/configs/milvus.yaml
     

    Modifier le fichier de configuration

    Téléchargez le fichier d'installation de Milvus standalone et enregistrez-le sous docker-compose.yml.

    +

    Téléchargez le fichier d'installation de Milvus standalone et enregistrez-le sous docker-compose.yml.

    Vous pouvez également exécuter la commande suivante.

    # For Milvus standalone
    -$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml
    +$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml
     

    Modifier le fichier d'installation

    rootCoord:
    -    maxGeneralCapacity: 1024
    +    maxGeneralCapacity: 65536
     
    -

    Le paramètre maxGeneralCapacity définit le nombre maximum de collections que l'instance Milvus actuelle peut contenir. La valeur par défaut est 1024.

    +

    Le paramètre maxGeneralCapacity définit le nombre maximum de collections que l'instance Milvus actuelle peut contenir. La valeur par défaut est 65536.

    Calcul du nombre de collections
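As a rough back-of-the-envelope sketch of that calculation (assumption: the general capacity is counted as collections × shards × partitions; the per-collection numbers below are hypothetical):

```python
# Assumption: general capacity = collections x shards x partitions.
max_general_capacity = 65536    # default value shown above
shards_per_collection = 2       # hypothetical
partitions_per_collection = 16  # hypothetical

max_collections = max_general_capacity // (shards_per_collection * partitions_per_collection)
print(max_collections)  # 2048 collections under these assumptions
```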

    L'exemple suivant montre comment accorder l'autorisation de rechercher toutes les collections au rôle nommé roleA.

    -

    object_type spécifie le type d'objet, qui peut également être considéré comme le type de ressource. Actuellement, les valeurs valables sont Collection/User/Global, etc., où Global signifie qu'il n'y a pas de type de ressource spécifique. object_name est le nom de la ressource. Si object_type est Collection, le nom de l'objet peut faire référence à un nom de collection spécifique, ou vous pouvez utiliser * pour spécifier toutes les collections. Si object_type est Global, le nom de l'objet ne peut être spécifié que par *. Voir Utilisateurs et rôles pour connaître les autres types de privilèges que vous pouvez accorder.

    +

    object_type spécifie le type d'objet, qui peut également être considéré comme le type de ressource. Actuellement, les valeurs valables sont Collection/User/Global, etc., où Global signifie qu'il n'y a pas de type de ressource spécifique. object_name est le nom de la ressource. Si object_type est Collection, le nom de l'objet peut faire référence à un nom de collection spécifique, ou vous pouvez utiliser * pour spécifier toutes les collections. Si object_type est Global, le nom de l'objet ne peut être spécifié que par *. Voir Utilisateurs et rôles pour connaître les autres types de privilèges que vous pouvez accorder.

    Avant de gérer les privilèges des rôles, assurez-vous d'avoir activé l'authentification des utilisateurs. Dans le cas contraire, une erreur risque de se produire. Pour plus d'informations sur l'activation de l'authentification des utilisateurs, reportez-vous à la section Authentifier l'accès des utilisateurs.
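Since the snippets below assume an authenticated client, a minimal hedged setup might look like this (the localhost URI is an assumption; root:Milvus is the documented default credential and should be changed in production):

```python
from pymilvus import MilvusClient

# Assumes user authentication is enabled on the server (see the note above).
client = MilvusClient(
    uri="http://localhost:19530",  # hypothetical endpoint
    token="root:Milvus",           # default root credential; change it in production
)
```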

    # grant privilege to a role
     
    @@ -182,7 +182,7 @@ client.grant_privilege(
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

    Accorder le rôle à un utilisateur pour qu'il hérite de tous les privilèges du rôle.

    +

    Accorder le rôle à un utilisateur afin que celui-ci puisse hériter de tous les privilèges du rôle.

    # grant a role to a user
     
     client.grant_role(
    @@ -190,7 +190,7 @@ client.grant_role(
         role_name='roleA'
     )
     
    -

    Après avoir accordé le rôle, vérifier qu'il a bien été accordé :

    +

    Après avoir accordé le rôle, vérifiez qu'il a bien été accordé :

    client.describe_user(
         user_name='user_1'
     )
    diff --git a/localization/v2.4.x/site/fr/adminGuide/resource_group.json b/localization/v2.4.x/site/fr/adminGuide/resource_group.json
    index 988ee007f..fc0ed0f10 100644
    --- a/localization/v2.4.x/site/fr/adminGuide/resource_group.json
    +++ b/localization/v2.4.x/site/fr/adminGuide/resource_group.json
    @@ -1 +1 @@
    -{"codeList":["{\n    \"requests\": { \"nodeNum\": 1 },\n    \"limits\": { \"nodeNum\": 1 },\n    \"transfer_from\": [{ \"resource_group\": \"rg1\" }],\n    \"transfer_to\": [{ \"resource_group\": \"rg2\" }]\n}\n","import pymilvus\n\n# A resource group name should be a string of 1 to 255 characters, starting with a letter or an underscore (_) and containing only numbers, letters, and underscores (_).\nname = \"rg\"\nnode_num = 0\n\n# create a resource group that exactly hold no query node.\ntry:\n    utility.create_resource_group(name, config=utility.ResourceGroupConfig(\n        requests={\"node_num\": node_num},\n        limits={\"node_num\": node_num},\n    ), using='default')\n    print(f\"Succeeded in creating resource group {name}.\")\nexcept Exception:\n    print(\"Failed to create the resource group.\")\n","rgs = utility.list_resource_groups(using='default')\nprint(f\"Resource group list: {rgs}\")\n\n# Resource group list: ['__default_resource_group', 'rg']\n","info = utility.describe_resource_group(name, using=\"default\")\nprint(f\"Resource group description: {info}\")\n\n# Resource group description: \n#        ,           // string, rg name\n#        ,            // int, num_node which has been transfer to this rg\n#        ,  // int, available node_num, some node may shutdown\n#        , // map[string]int, from collection_name to loaded replica of each collecion in this rg\n#        ,  // map[string]int, from collection_name to outgoging accessed node num by replica loaded in this rg \n#        .  // map[string]int, from collection_name to incoming accessed node num by replica loaded in other rg\n","source = '__default_resource_group'\ntarget = 'rg'\nexpected_num_nodes_in_default = 0\nexpected_num_nodes_in_rg = 1\n\ntry:\n    utility.update_resource_groups({\n        source: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_default},\n            limits={\"node_num\": expected_num_nodes_in_default},\n        ),\n        target: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_rg},\n            limits={\"node_num\": expected_num_nodes_in_rg},\n        )\n    }, using=\"default\")\n    print(f\"Succeeded in move 1 node(s) from {source} to {target}.\")\nexcept Exception:\n    print(\"Something went wrong while moving nodes.\")\n\n# After a while, succeeded in moving 1 node(s) from __default_resource_group to rg.\n","from pymilvus import Collection\n\ncollection = Collection('demo')\n\n# Milvus loads the collection to the default resource group.\ncollection.load(replica_number=2)\n\n# Or, you can ask Milvus load the collection to the desired resource group.\n# make sure that query nodes num should be greater or equal to replica_number\nresource_groups = ['rg']\ncollection.load(replica_number=2, _resource_groups=resource_groups) \n","collection = Collection(\"Books\")\n\n# Use the load method of a collection to load one of its partition\ncollection.load([\"Novels\"], replica_number=2, _resource_groups=resource_groups)\n\n# Or, you can use the load method of a partition directly\npartition = Partition(collection, \"Novels\")\npartition.load(replica_number=2, _resource_groups=resource_groups)\n","source = '__default_resource_group'\ntarget = 'rg'\ncollection_name = 'c'\nnum_replicas = 1\n\ntry:\n    utility.transfer_replica(source, target, collection_name, num_replicas, using=\"default\")\n    print(f\"Succeeded in moving {num_node} replica(s) of {collection_name} from {source} to {target}.\")\nexcept Exception:\n    
print(\"Something went wrong while moving replicas.\")\n\n# Succeeded in moving 1 replica(s) of c from __default_resource_group to rg.\n","try:\n    utility.update_resource_groups({\n        \"rg\": utility.ResourceGroupConfig(\n            requests={\"node_num\": 0},\n            limits={\"node_num\": 0},\n        ),\n    }, using=\"default\")\n    utility.drop_resource_group(\"rg\", using=\"default\")\n    print(f\"Succeeded in dropping {source}.\")\nexcept Exception:\n    print(f\"Something went wrong while dropping {source}.\")\n","from pymilvus import utility\nfrom pymilvus.client.types import ResourceGroupConfig\n\n_PENDING_NODES_RESOURCE_GROUP=\"__pending_nodes\"\n\ndef init_cluster(node_num: int):\n    print(f\"Init cluster with {node_num} nodes, all nodes will be put in default resource group\")\n    # create a pending resource group, which can used to hold the pending nodes that do not hold any data.\n    utility.create_resource_group(name=_PENDING_NODES_RESOURCE_GROUP, config=ResourceGroupConfig(\n        requests={\"node_num\": 0}, # this resource group can hold 0 nodes, no data will be load on it.\n        limits={\"node_num\": 10000}, # this resource group can hold at most 10000 nodes \n    ))\n\n    # update default resource group, which can used to hold the nodes that all initial node in it.\n    utility.update_resource_groups({\n        \"__default_resource_group\": ResourceGroupConfig(\n            requests={\"node_num\": node_num},\n            limits={\"node_num\": node_num},\n            transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover missing node from pending resource group at high priority.\n            transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover redundant node to pending resource group at low priority.\n        )})\n    utility.create_resource_group(name=\"rg1\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n    utility.create_resource_group(name=\"rg2\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n\ninit_cluster(1)\n","\ndef scale_to(node_num: int):\n    # scale the querynode number in Milvus into node_num.\n    pass\n","# scale rg1 into 3 nodes, rg2 into 1 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 3},\n        limits={\"node_num\": 3},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n    \"rg2\": ResourceGroupConfig(\n        requests={\"node_num\": 1},\n        limits={\"node_num\": 1},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\nscale_to(5)\n# rg1 has 3 nodes, rg2 has 1 node, __default_resource_group has 1 node.\n","# scale rg1 from 3 nodes into 2 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 2},\n        limits={\"node_num\": 2},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    
    transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\n\n# rg1 has 2 nodes, rg2 has 1 node, __default_resource_group has 1 node, __pending_nodes has 1 node.\nscale_to(4)\n# scale the node in __pending_nodes\n"],"headingContent":"","anchorList":[{"label":"Gestion des groupes de ressources","href":"Manage-Resource-Groups","type":1,"isActive":false},{"label":"Qu'est-ce qu'un groupe de ressources ?","href":"What-is-a-resource-group","type":2,"isActive":false},{"label":"Concepts de groupe de ressources","href":"Concepts-of-resource-group","type":2,"isActive":false},{"label":"Utiliser l'API déclarative pour gérer le groupe de ressources","href":"Use-declarative-api-to-manage-resource-group","type":2,"isActive":false},{"label":"Une bonne pratique pour gérer la mise à l'échelle des clusters","href":"A-good-practice-to-manage-cluster-scaling","type":2,"isActive":false},{"label":"Comment les groupes de ressources interagissent avec plusieurs répliques","href":"How-resource-groups-interacts-with-multiple-replicas","type":2,"isActive":false},{"label":"Prochaines étapes","href":"Whats-next","type":1,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["{\n    \"requests\": { \"nodeNum\": 1 },\n    \"limits\": { \"nodeNum\": 1 },\n    \"transfer_from\": [{ \"resource_group\": \"rg1\" }],\n    \"transfer_to\": [{ \"resource_group\": \"rg2\" }]\n}\n","import pymilvus\n\n# A resource group name should be a string of 1 to 255 characters, starting with a letter or an underscore (_) and containing only numbers, letters, and underscores (_).\nname = \"rg\"\nnode_num = 0\n\n# create a resource group that exactly hold no query node.\ntry:\n    utility.create_resource_group(name, config=utility.ResourceGroupConfig(\n        requests={\"node_num\": node_num},\n        limits={\"node_num\": node_num},\n    ), using='default')\n    print(f\"Succeeded in creating resource group {name}.\")\nexcept Exception:\n    print(\"Failed to create the resource group.\")\n","rgs = utility.list_resource_groups(using='default')\nprint(f\"Resource group list: {rgs}\")\n\n# Resource group list: ['__default_resource_group', 'rg']\n","info = utility.describe_resource_group(name, using=\"default\")\nprint(f\"Resource group description: {info}\")\n\n# Resource group description: \n#        ,           // string, rg name\n#        ,            // int, num_node which has been transfer to this rg\n#        ,  // int, available node_num, some node may shutdown\n#        , // map[string]int, from collection_name to loaded replica of each collecion in this rg\n#        ,  // map[string]int, from collection_name to outgoging accessed node num by replica loaded in this rg \n#        .  // map[string]int, from collection_name to incoming accessed node num by replica loaded in other rg\n","source = '__default_resource_group'\ntarget = 'rg'\nexpected_num_nodes_in_default = 0\nexpected_num_nodes_in_rg = 1\n\ntry:\n    utility.update_resource_groups({\n        source: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_default},\n            limits={\"node_num\": expected_num_nodes_in_default},\n        ),\n        target: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_rg},\n            limits={\"node_num\": expected_num_nodes_in_rg},\n        )\n    }, using=\"default\")\n    print(f\"Succeeded in move 1 node(s) from {source} to {target}.\")\nexcept Exception:\n    print(\"Something went wrong while moving nodes.\")\n\n# After a while, succeeded in moving 1 node(s) from __default_resource_group to rg.\n","from pymilvus import Collection\n\ncollection = Collection('demo')\n\n# Milvus loads the collection to the default resource group.\ncollection.load(replica_number=2)\n\n# Or, you can ask Milvus load the collection to the desired resource group.\n# make sure that query nodes num should be greater or equal to replica_number\nresource_groups = ['rg']\ncollection.load(replica_number=2, _resource_groups=resource_groups) \n","collection = Collection(\"Books\")\n\n# Use the load method of a collection to load one of its partition\ncollection.load([\"Novels\"], replica_number=2, _resource_groups=resource_groups)\n\n# Or, you can use the load method of a partition directly\npartition = Partition(collection, \"Novels\")\npartition.load(replica_number=2, _resource_groups=resource_groups)\n","source = '__default_resource_group'\ntarget = 'rg'\ncollection_name = 'c'\nnum_replicas = 1\n\ntry:\n    utility.transfer_replica(source, target, collection_name, num_replicas, using=\"default\")\n    print(f\"Succeeded in moving {num_node} replica(s) of {collection_name} from {source} to {target}.\")\nexcept Exception:\n    
print(\"Something went wrong while moving replicas.\")\n\n# Succeeded in moving 1 replica(s) of c from __default_resource_group to rg.\n","try:\n    utility.update_resource_groups({\n        \"rg\": utility.ResourceGroupConfig(\n            requests={\"node_num\": 0},\n            limits={\"node_num\": 0},\n        ),\n    }, using=\"default\")\n    utility.drop_resource_group(\"rg\", using=\"default\")\n    print(f\"Succeeded in dropping {source}.\")\nexcept Exception:\n    print(f\"Something went wrong while dropping {source}.\")\n","from pymilvus import utility\nfrom pymilvus.client.types import ResourceGroupConfig\n\n_PENDING_NODES_RESOURCE_GROUP=\"__pending_nodes\"\n\ndef init_cluster(node_num: int):\n    print(f\"Init cluster with {node_num} nodes, all nodes will be put in default resource group\")\n    # create a pending resource group, which can used to hold the pending nodes that do not hold any data.\n    utility.create_resource_group(name=_PENDING_NODES_RESOURCE_GROUP, config=ResourceGroupConfig(\n        requests={\"node_num\": 0}, # this resource group can hold 0 nodes, no data will be load on it.\n        limits={\"node_num\": 10000}, # this resource group can hold at most 10000 nodes \n    ))\n\n    # update default resource group, which can used to hold the nodes that all initial node in it.\n    utility.update_resource_groups({\n        \"__default_resource_group\": ResourceGroupConfig(\n            requests={\"node_num\": node_num},\n            limits={\"node_num\": node_num},\n            transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover missing node from pending resource group at high priority.\n            transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover redundant node to pending resource group at low priority.\n        )})\n    utility.create_resource_group(name=\"rg1\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n    utility.create_resource_group(name=\"rg2\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n\ninit_cluster(1)\n","\ndef scale_to(node_num: int):\n    # scale the querynode number in Milvus into node_num.\n    pass\n","# scale rg1 into 3 nodes, rg2 into 1 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 3},\n        limits={\"node_num\": 3},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n    \"rg2\": ResourceGroupConfig(\n        requests={\"node_num\": 1},\n        limits={\"node_num\": 1},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\nscale_to(5)\n# rg1 has 3 nodes, rg2 has 1 node, __default_resource_group has 1 node.\n","# scale rg1 from 3 nodes into 2 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 2},\n        limits={\"node_num\": 2},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    
    transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\n\n# rg1 has 2 nodes, rg2 has 1 node, __default_resource_group has 1 node, __pending_nodes has 1 node.\nscale_to(4)\n# scale the node in __pending_nodes\n"],"headingContent":"Manage Resource Groups","anchorList":[{"label":"Gestion des groupes de ressources","href":"Manage-Resource-Groups","type":1,"isActive":false},{"label":"Qu'est-ce qu'un groupe de ressources ?","href":"What-is-a-resource-group","type":2,"isActive":false},{"label":"Concepts du groupe de ressources","href":"Concepts-of-resource-group","type":2,"isActive":false},{"label":"Utiliser l'API déclarative pour gérer le groupe de ressources","href":"Use-declarative-api-to-manage-resource-group","type":2,"isActive":false},{"label":"Une bonne pratique pour gérer la mise à l'échelle des clusters","href":"A-good-practice-to-manage-cluster-scaling","type":2,"isActive":false},{"label":"Comment les groupes de ressources interagissent avec plusieurs répliques","href":"How-resource-groups-interacts-with-multiple-replicas","type":2,"isActive":false},{"label":"Prochaines étapes","href":"Whats-next","type":1,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/fr/adminGuide/resource_group.md b/localization/v2.4.x/site/fr/adminGuide/resource_group.md
    index b7b5d2596..3aa901d62 100644
    --- a/localization/v2.4.x/site/fr/adminGuide/resource_group.md
    +++ b/localization/v2.4.x/site/fr/adminGuide/resource_group.md
    @@ -2,7 +2,7 @@
     id: resource_group.md
     related_key: Manage Resource Groups
     summary: Apprenez à gérer les groupes de ressources.
    -title: Gérer les groupes de ressources
    +title: Gestion des groupes de ressources
     ---
     

    Gestion des groupes de ressources

    Un groupe de ressources peut contenir plusieurs ou tous les nœuds de requête d'un cluster Milvus. Vous décidez de la manière dont vous souhaitez répartir les nœuds de requête entre les groupes de ressources en fonction de ce qui vous semble le plus judicieux. Par exemple, dans un scénario à plusieurs collections, vous pouvez allouer un nombre approprié de nœuds de requête à chaque groupe de ressources et charger les collections dans différents groupes de ressources, de sorte que les opérations au sein de chaque collection soient physiquement indépendantes de celles des autres collections.
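The full snippets are in the code list above; as a short extract (PyMilvus 2.4.x; the local URI is an assumption), creating a resource group that holds exactly one query node looks like this:

```python
from pymilvus import connections, utility
from pymilvus.client.types import ResourceGroupConfig

# Assumes a local Milvus cluster; adapt the URI to your deployment.
connections.connect("default", uri="http://localhost:19530")

# Create a resource group that holds exactly one query node.
utility.create_resource_group(
    "rg",
    config=ResourceGroupConfig(
        requests={"node_num": 1},
        limits={"node_num": 1},
    ),
    using="default",
)
```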

    -

    Notez qu'une instance Milvus maintient un groupe de ressources par défaut pour contenir tous les nœuds de requête au démarrage et le nomme __default_resource_group.

    +

    Notez qu'une instance Milvus maintient un groupe de ressources par défaut pour contenir tous les nœuds de requête au démarrage et le nomme __default_resource_group.

    À partir de la version 2.4.1, Milvus fournit une API de groupe de ressources déclarative, tandis que l'ancienne API de groupe de ressources a été supprimée. La nouvelle API déclarative permet aux utilisateurs d'atteindre l'idempotence, afin de faciliter le développement secondaire dans les environnements "cloud-native".
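Idempotence here means the caller declares target node counts instead of issuing one-off transfer commands, so re-running the same declaration leaves the cluster unchanged. A short extract from the snippets above, reusing the imports from the previous sketch:

```python
# Declarative, idempotent update: state the target node counts and let Milvus
# converge to them; issuing the same declaration twice is safe.
utility.update_resource_groups({
    "rg": ResourceGroupConfig(
        requests={"node_num": 1},
        limits={"node_num": 1},
    ),
}, using="default")
```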

    -

    Concepts de groupe de ressources

    -

    Tous les exemples de code sur cette page sont dans PyMilvus 2.4.5. Mettez à jour votre installation PyMilvus avant de les exécuter.

    +

    Tous les exemples de code sur cette page sont dans PyMilvus 2.4.8. Mettez à jour votre installation PyMilvus avant de les exécuter.

    1. Créer un groupe de ressources.

      @@ -230,7 +230,7 @@ num_replicas = 1

      Actuellement, Milvus ne peut pas être mis à l'échelle de manière indépendante dans les environnements cloud-native. Toutefois, en utilisant l'API Declarative Resource Group conjointement avec l'orchestration de conteneurs, Milvus peut facilement isoler et gérer les ressources pour les QueryNodes. Voici une bonne pratique pour gérer les QueryNodes dans un environnement en nuage :

        -
      1. Par défaut, Milvus crée un __default_resource_group. Ce groupe de ressources ne peut pas être supprimé et sert également de groupe de ressources de chargement par défaut pour toutes les collections et les QueryNodes redondants lui sont toujours affectés. Par conséquent, nous pouvons créer un groupe de ressources en attente pour contenir les ressources QueryNode inutilisées, empêchant ainsi les ressources QueryNode d'être occupées par le __default_resource_group.

        +
      2. Par défaut, Milvus crée un __default_resource_group. Ce groupe de ressources ne peut pas être supprimé et sert également de groupe de ressources de chargement par défaut pour toutes les collections et les QueryNodes redondants lui sont toujours affectés. Par conséquent, nous pouvons créer un groupe de ressources en attente pour contenir les ressources QueryNode inutilisées, empêchant ainsi les ressources QueryNode d'être occupées par le __default_resource_group.

        De plus, si nous appliquons strictement la contrainte sum(.requests.nodeNum) <= queryNodeNum, nous pouvons contrôler précisément l'affectation des QueryNodes dans le cluster. Supposons qu'il n'y ait actuellement qu'un seul QueryNode dans le cluster et initialisons le cluster. Voici un exemple de configuration :

        from pymilvus import utility
         from pymilvus.client.types import ResourceGroupConfig
        @@ -296,7 +296,7 @@ scale_to(5)
         # rg1 has 3 nodes, rg2 has 1 node, __default_resource_group has 1 node.
         
      3. Mise à l'échelle du cluster

        -

        De même, nous pouvons établir des règles de mise à l'échelle qui donnent la priorité à la sélection des QueryNodes dans le groupe de ressources __pending_nodes. Ces informations peuvent être obtenues via l'API describe_resource_group, ce qui permet d'atteindre l'objectif de mise à l'échelle du groupe de ressources spécifié.

        +

        De même, nous pouvons établir des règles de mise à l'échelle qui donnent la priorité à la sélection des QueryNodes dans le groupe de ressources __pending_nodes. Ces informations peuvent être obtenues via l'API describe_resource_group, ce qui permet d'atteindre l'objectif de mise à l'échelle du groupe de ressources spécifié.

        # scale rg1 from 3 nodes into 2 nodes
         utility.update_resource_groups({
             "rg1": ResourceGroupConfig(
        @@ -348,6 +348,6 @@ scale_to(4)
               
             

        Pour déployer une instance Milvus multi-tenant, lisez ce qui suit :

        diff --git a/localization/v2.4.x/site/fr/adminGuide/tls.json b/localization/v2.4.x/site/fr/adminGuide/tls.json index 062585231..bc83ad75d 100644 --- a/localization/v2.4.x/site/fr/adminGuide/tls.json +++ b/localization/v2.4.x/site/fr/adminGuide/tls.json @@ -1 +1 @@ -{"codeList":["openssl version\n","sudo apt install openssl\n","mkdir cert && cd cert\ntouch openssl.cnf gen.sh\n","#\n# OpenSSL example configuration file.\n# This is mostly being used for generation of certificate requests.\n#\n\n# This definition stops the following lines choking if HOME isn't\n# defined.\nHOME = .\nRANDFILE = $ENV::HOME/.rnd\n\n# Extra OBJECT IDENTIFIER info:\n#oid_file = $ENV::HOME/.oid\noid_section = new_oids\n\n# To use this configuration file with the \"-extfile\" option of the\n# \"openssl x509\" utility, name here the section containing the\n# X.509v3 extensions to use:\n# extensions = \n# (Alternatively, use a configuration file that has only\n# X.509v3 extensions in its main [= default] section.)\n\n[ new_oids ]\n\n# We can add new OIDs in here for use by 'ca', 'req' and 'ts'.\n# Add a simple OID like this:\n# testoid1=1.2.3.4\n# Or use config file substitution like this:\n# testoid2=${testoid1}.5.6\n\n# Policies used by the TSA examples.\ntsa_policy1 = 1.2.3.4.1\ntsa_policy2 = 1.2.3.4.5.6\ntsa_policy3 = 1.2.3.4.5.7\n\n####################################################################\n[ ca ]\ndefault_ca = CA_default # The default ca section\n\n####################################################################\n[ CA_default ]\n\ndir = ./demoCA # Where everything is kept\ncerts = $dir/certs # Where the issued certs are kept\ncrl_dir = $dir/crl # Where the issued crl are kept\ndatabase = $dir/index.txt # database index file.\n#unique_subject = no # Set to 'no' to allow creation of\n # several ctificates with same subject.\nnew_certs_dir = $dir/newcerts # default place for new certs.\n\ncertificate = $dir/cacert.pem # The CA certificate\nserial = $dir/serial # The current serial number\ncrlnumber = $dir/crlnumber # the current crl number\n # must be commented out to leave a V1 CRL\ncrl = $dir/crl.pem # The current CRL\nprivate_key = $dir/private/cakey.pem# The private key\nRANDFILE = $dir/private/.rand # private random number file\n\nx509_extensions = usr_cert # The extentions to add to the cert\n\n# Comment out the following two lines for the \"traditional\"\n# (and highly broken) format.\nname_opt = ca_default # Subject Name options\ncert_opt = ca_default # Certificate field options\n\n# Extension copying option: use with caution.\ncopy_extensions = copy\n\n# Extensions to add to a CRL. 
Note: Netscape communicator chokes on V2 CRLs\n# so this is commented out by default to leave a V1 CRL.\n# crlnumber must also be commented out to leave a V1 CRL.\n# crl_extensions = crl_ext\n\ndefault_days = 365 # how long to certify for\ndefault_crl_days= 30 # how long before next CRL\ndefault_md = default # use public key default MD\npreserve = no # keep passed DN ordering\n\n# A few difference way of specifying how similar the request should look\n# For type CA, the listed attributes must be the same, and the optional\n# and supplied fields are just that :-)\npolicy = policy_match\n\n# For the CA policy\n[ policy_match ]\ncountryName = match\nstateOrProvinceName = match\norganizationName = match\norganizationalUnitName = optional\ncommonName = supplied\nemailAddress = optional\n\n# For the 'anything' policy\n# At this point in time, you must list all acceptable 'object'\n# types.\n[ policy_anything ]\ncountryName = optional\nstateOrProvinceName = optional\nlocalityName = optional\norganizationName = optional\norganizationalUnitName = optional\ncommonName = supplied\nemailAddress = optional\n\n####################################################################\n[ req ]\ndefault_bits = 2048\ndefault_keyfile = privkey.pem\ndistinguished_name = req_distinguished_name\nattributes = req_attributes\nx509_extensions = v3_ca # The extentions to add to the self signed cert\n\n# Passwords for private keys if not present they will be prompted for\n# input_password = secret\n# output_password = secret\n\n# This sets a mask for permitted string types. There are several options. \n# default: PrintableString, T61String, BMPString.\n# pkix : PrintableString, BMPString (PKIX recommendation before 2004)\n# utf8only: only UTF8Strings (PKIX recommendation after 2004).\n# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).\n# MASK:XXXX a literal mask value.\n# WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings.\nstring_mask = utf8only\n\nreq_extensions = v3_req # The extensions to add to a certificate request\n\n[ req_distinguished_name ]\ncountryName = Country Name (2 letter code)\ncountryName_default = AU\ncountryName_min = 2\ncountryName_max = 2\n\nstateOrProvinceName = State or Province Name (full name)\nstateOrProvinceName_default = Some-State\n\nlocalityName = Locality Name (eg, city)\n\n0.organizationName = Organization Name (eg, company)\n0.organizationName_default = Internet Widgits Pty Ltd\n\n# we can do this but it is not needed normally :-)\n#1.organizationName = Second Organization Name (eg, company)\n#1.organizationName_default = World Wide Web Pty Ltd\n\norganizationalUnitName = Organizational Unit Name (eg, section)\n#organizationalUnitName_default =\n\ncommonName = Common Name (e.g. server FQDN or YOUR name)\ncommonName_max = 64\n\nemailAddress = Email Address\nemailAddress_max = 64\n\n# SET-ex3 = SET extension number 3\n\n[ req_attributes ]\nchallengePassword = A challenge password\nchallengePassword_min = 4\nchallengePassword_max = 20\n\nunstructuredName = An optional company name\n\n[ usr_cert ]\n\n# These extensions are added when 'ca' signs a request.\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. 
If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This is required for TSA certificates.\n# extendedKeyUsage = critical,timeStamping\n\n[ v3_req ]\n\n# Extensions to add to a certificate request\n\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n\n[ v3_ca ]\n\n\n# Extensions for a typical CA\n\n\n# PKIX recommendation.\n\nsubjectKeyIdentifier=hash\n\nauthorityKeyIdentifier=keyid:always,issuer\n\n# This is what PKIX recommends but some broken software chokes on critical\n# extensions.\n#basicConstraints = critical,CA:true\n# So we do this instead.\nbasicConstraints = CA:true\n\n# Key usage: this is typical for a CA certificate. However since it will\n# prevent it being used as an test self-signed certificate it is best\n# left out by default.\n# keyUsage = cRLSign, keyCertSign\n\n# Some might want this also\n# nsCertType = sslCA, emailCA\n\n# Include email address in subject alt name: another PKIX recommendation\n# subjectAltName=email:copy\n# Copy issuer details\n# issuerAltName=issuer:copy\n\n# DER hex encoding of an extension: beware experts only!\n# obj=DER:02:03\n# Where 'obj' is a standard or added object\n# You can even override a supported extension:\n# basicConstraints= critical, DER:30:03:01:01:FF\n\n[ crl_ext ]\n\n# CRL extensions.\n# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.\n\n# issuerAltName=issuer:copy\nauthorityKeyIdentifier=keyid:always\n\n[ proxy_cert_ext ]\n# These extensions should be added when creating a proxy certificate\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. 
If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This really needs to be in place for it to be a proxy certificate.\nproxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo\n\n####################################################################\n[ tsa ]\n\ndefault_tsa = tsa_config1 # the default TSA section\n\n[ tsa_config1 ]\n\n# These are used by the TSA reply generation only.\ndir = ./demoCA # TSA root directory\nserial = $dir/tsaserial # The current serial number (mandatory)\ncrypto_device = builtin # OpenSSL engine to use for signing\nsigner_cert = $dir/tsacert.pem # The TSA signing certificate\n # (optional)\ncerts = $dir/cacert.pem # Certificate chain to include in reply\n # (optional)\nsigner_key = $dir/private/tsakey.pem # The TSA private key (optional)\n\ndefault_policy = tsa_policy1 # Policy if request did not specify it\n # (optional)\nother_policies = tsa_policy2, tsa_policy3 # acceptable policies (optional)\ndigests = md5, sha1 # Acceptable message digests (mandatory)\naccuracy = secs:1, millisecs:500, microsecs:100 # (optional)\nclock_precision_digits = 0 # number of digits after dot. 
(optional)\nordering = yes # Is ordering defined for timestamps?\n # (optional, default: no)\ntsa_name = yes # Must the TSA name be included in the reply?\n # (optional, default: no)\ness_cert_id_chain = no # Must the ESS cert id chain be included?\n # (optional, default: no)\n","#!/usr/bin/env sh\n# your variables\nCountry=\"CN\"\nState=\"Shanghai\"\nLocation=\"Shanghai\"\nOrganization=\"milvus\"\nOrganizational=\"milvus\"\nCommonName=\"localhost\"\n\necho \"generate ca.key\"\nopenssl genrsa -out ca.key 2048\n\necho \"generate ca.pem\"\nopenssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n\necho \"generate server SAN certificate\"\nopenssl genpkey -algorithm RSA -out server.key\nopenssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\necho \"generate client SAN certificate\"\nopenssl genpkey -algorithm RSA -out client.key\nopenssl req -new -nodes -key client.key -out client.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in client.csr -out client.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\n","chmod +x gen.sh\n./gen.sh\n","openssl genpkey -algorithm RSA -out ca.key\n","openssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n","openssl genpkey -algorithm RSA -out server.key\n","openssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\n","openssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n","tls:\n serverPemPath: /milvus/tls/server.pem\n serverKeyPath: /milvus/tls/server.key\n caPemPath: /milvus/tls/ca.pem\n\ncommon:\n security:\n tlsMode: 1\n","├── docker-compose.yml\n├── milvus.yaml\n└── tls\n ├── server.pem\n ├── server.key\n └── ca.pem\n"," standalone:\n container_name: milvus-standalone\n image: milvusdb/milvus:latest\n command: [\"milvus\", \"run\", \"standalone\"]\n security_opt:\n - seccomp:unconfined\n environment:\n ETCD_ENDPOINTS: etcd:2379\n MINIO_ADDRESS: minio:9000\n volumes:\n - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n - ${DOCKER_VOLUME_DIRECTORY:-.}/tls:/milvus/tls\n - ${DOCKER_VOLUME_DIRECTORY:-.}/milvus.yaml:/milvus/configs/milvus.yaml\n","sudo docker compose up -d\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri=\"http://localhost:19530\",\n secure=True,\n server_pem_path=\"path_to/server.pem\",\n server_name=\"localhost\"\n)\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri=\"http://localhost:19530\",\n secure=True,\n client_pem_path=\"path_to/client.pem\",\n client_key_path=\"path_to/client.key\",\n ca_pem_path=\"path_to/ca.pem\",\n server_name=\"localhost\"\n)\n"],"headingContent":"","anchorList":[{"label":"Cryptage en transit","href":"Encryption-in-Transit","type":1,"isActive":false},{"label":"Créer votre propre 
certificat","href":"Create-your-own-certificate","type":2,"isActive":false},{"label":"Configuration d'un serveur Milvus avec TLS","href":"Set-up-a-Milvus-server-with-TLS","type":2,"isActive":false},{"label":"Se connecter au serveur Milvus avec TLS","href":"Connect-to-the-Milvus-server-with-TLS","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["openssl version\n","sudo apt install openssl\n","mkdir cert && cd cert\ntouch openssl.cnf gen.sh\n","#\n# OpenSSL example configuration file.\n# This is mostly being used for generation of certificate requests.\n#\n\n# This definition stops the following lines choking if HOME isn't\n# defined.\nHOME = .\nRANDFILE = $ENV::HOME/.rnd\n\n# Extra OBJECT IDENTIFIER info:\n#oid_file = $ENV::HOME/.oid\noid_section = new_oids\n\n# To use this configuration file with the \"-extfile\" option of the\n# \"openssl x509\" utility, name here the section containing the\n# X.509v3 extensions to use:\n# extensions = \n# (Alternatively, use a configuration file that has only\n# X.509v3 extensions in its main [= default] section.)\n\n[ new_oids ]\n\n# We can add new OIDs in here for use by 'ca', 'req' and 'ts'.\n# Add a simple OID like this:\n# testoid1=1.2.3.4\n# Or use config file substitution like this:\n# testoid2=${testoid1}.5.6\n\n# Policies used by the TSA examples.\ntsa_policy1 = 1.2.3.4.1\ntsa_policy2 = 1.2.3.4.5.6\ntsa_policy3 = 1.2.3.4.5.7\n\n####################################################################\n[ ca ]\ndefault_ca = CA_default # The default ca section\n\n####################################################################\n[ CA_default ]\n\ndir = ./demoCA # Where everything is kept\ncerts = $dir/certs # Where the issued certs are kept\ncrl_dir = $dir/crl # Where the issued crl are kept\ndatabase = $dir/index.txt # database index file.\n#unique_subject = no # Set to 'no' to allow creation of\n # several ctificates with same subject.\nnew_certs_dir = $dir/newcerts # default place for new certs.\n\ncertificate = $dir/cacert.pem # The CA certificate\nserial = $dir/serial # The current serial number\ncrlnumber = $dir/crlnumber # the current crl number\n # must be commented out to leave a V1 CRL\ncrl = $dir/crl.pem # The current CRL\nprivate_key = $dir/private/cakey.pem# The private key\nRANDFILE = $dir/private/.rand # private random number file\n\nx509_extensions = usr_cert # The extentions to add to the cert\n\n# Comment out the following two lines for the \"traditional\"\n# (and highly broken) format.\nname_opt = ca_default # Subject Name options\ncert_opt = ca_default # Certificate field options\n\n# Extension copying option: use with caution.\ncopy_extensions = copy\n\n# Extensions to add to a CRL. 
Note: Netscape communicator chokes on V2 CRLs\n# so this is commented out by default to leave a V1 CRL.\n# crlnumber must also be commented out to leave a V1 CRL.\n# crl_extensions = crl_ext\n\ndefault_days = 365 # how long to certify for\ndefault_crl_days= 30 # how long before next CRL\ndefault_md = default # use public key default MD\npreserve = no # keep passed DN ordering\n\n# A few difference way of specifying how similar the request should look\n# For type CA, the listed attributes must be the same, and the optional\n# and supplied fields are just that :-)\npolicy = policy_match\n\n# For the CA policy\n[ policy_match ]\ncountryName = match\nstateOrProvinceName = match\norganizationName = match\norganizationalUnitName = optional\ncommonName = supplied\nemailAddress = optional\n\n# For the 'anything' policy\n# At this point in time, you must list all acceptable 'object'\n# types.\n[ policy_anything ]\ncountryName = optional\nstateOrProvinceName = optional\nlocalityName = optional\norganizationName = optional\norganizationalUnitName = optional\ncommonName = supplied\nemailAddress = optional\n\n####################################################################\n[ req ]\ndefault_bits = 2048\ndefault_keyfile = privkey.pem\ndistinguished_name = req_distinguished_name\nattributes = req_attributes\nx509_extensions = v3_ca # The extentions to add to the self signed cert\n\n# Passwords for private keys if not present they will be prompted for\n# input_password = secret\n# output_password = secret\n\n# This sets a mask for permitted string types. There are several options. \n# default: PrintableString, T61String, BMPString.\n# pkix : PrintableString, BMPString (PKIX recommendation before 2004)\n# utf8only: only UTF8Strings (PKIX recommendation after 2004).\n# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).\n# MASK:XXXX a literal mask value.\n# WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings.\nstring_mask = utf8only\n\nreq_extensions = v3_req # The extensions to add to a certificate request\n\n[ req_distinguished_name ]\ncountryName = Country Name (2 letter code)\ncountryName_default = AU\ncountryName_min = 2\ncountryName_max = 2\n\nstateOrProvinceName = State or Province Name (full name)\nstateOrProvinceName_default = Some-State\n\nlocalityName = Locality Name (eg, city)\n\n0.organizationName = Organization Name (eg, company)\n0.organizationName_default = Internet Widgits Pty Ltd\n\n# we can do this but it is not needed normally :-)\n#1.organizationName = Second Organization Name (eg, company)\n#1.organizationName_default = World Wide Web Pty Ltd\n\norganizationalUnitName = Organizational Unit Name (eg, section)\n#organizationalUnitName_default =\n\ncommonName = Common Name (e.g. server FQDN or YOUR name)\ncommonName_max = 64\n\nemailAddress = Email Address\nemailAddress_max = 64\n\n# SET-ex3 = SET extension number 3\n\n[ req_attributes ]\nchallengePassword = A challenge password\nchallengePassword_min = 4\nchallengePassword_max = 20\n\nunstructuredName = An optional company name\n\n[ usr_cert ]\n\n# These extensions are added when 'ca' signs a request.\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. 
If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This is required for TSA certificates.\n# extendedKeyUsage = critical,timeStamping\n\n[ v3_req ]\n\n# Extensions to add to a certificate request\n\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n\n[ v3_ca ]\n\n\n# Extensions for a typical CA\n\n\n# PKIX recommendation.\n\nsubjectKeyIdentifier=hash\n\nauthorityKeyIdentifier=keyid:always,issuer\n\n# This is what PKIX recommends but some broken software chokes on critical\n# extensions.\n#basicConstraints = critical,CA:true\n# So we do this instead.\nbasicConstraints = CA:true\n\n# Key usage: this is typical for a CA certificate. However since it will\n# prevent it being used as an test self-signed certificate it is best\n# left out by default.\n# keyUsage = cRLSign, keyCertSign\n\n# Some might want this also\n# nsCertType = sslCA, emailCA\n\n# Include email address in subject alt name: another PKIX recommendation\n# subjectAltName=email:copy\n# Copy issuer details\n# issuerAltName=issuer:copy\n\n# DER hex encoding of an extension: beware experts only!\n# obj=DER:02:03\n# Where 'obj' is a standard or added object\n# You can even override a supported extension:\n# basicConstraints= critical, DER:30:03:01:01:FF\n\n[ crl_ext ]\n\n# CRL extensions.\n# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.\n\n# issuerAltName=issuer:copy\nauthorityKeyIdentifier=keyid:always\n\n[ proxy_cert_ext ]\n# These extensions should be added when creating a proxy certificate\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. 
If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This really needs to be in place for it to be a proxy certificate.\nproxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo\n\n####################################################################\n[ tsa ]\n\ndefault_tsa = tsa_config1 # the default TSA section\n\n[ tsa_config1 ]\n\n# These are used by the TSA reply generation only.\ndir = ./demoCA # TSA root directory\nserial = $dir/tsaserial # The current serial number (mandatory)\ncrypto_device = builtin # OpenSSL engine to use for signing\nsigner_cert = $dir/tsacert.pem # The TSA signing certificate\n # (optional)\ncerts = $dir/cacert.pem # Certificate chain to include in reply\n # (optional)\nsigner_key = $dir/private/tsakey.pem # The TSA private key (optional)\n\ndefault_policy = tsa_policy1 # Policy if request did not specify it\n # (optional)\nother_policies = tsa_policy2, tsa_policy3 # acceptable policies (optional)\ndigests = md5, sha1 # Acceptable message digests (mandatory)\naccuracy = secs:1, millisecs:500, microsecs:100 # (optional)\nclock_precision_digits = 0 # number of digits after dot. 
(optional)\nordering = yes # Is ordering defined for timestamps?\n # (optional, default: no)\ntsa_name = yes # Must the TSA name be included in the reply?\n # (optional, default: no)\ness_cert_id_chain = no # Must the ESS cert id chain be included?\n # (optional, default: no)\n","#!/usr/bin/env sh\n# your variables\nCountry=\"CN\"\nState=\"Shanghai\"\nLocation=\"Shanghai\"\nOrganization=\"milvus\"\nOrganizational=\"milvus\"\nCommonName=\"localhost\"\n\necho \"generate ca.key\"\nopenssl genrsa -out ca.key 2048\n\necho \"generate ca.pem\"\nopenssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n\necho \"generate server SAN certificate\"\nopenssl genpkey -algorithm RSA -out server.key\nopenssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\necho \"generate client SAN certificate\"\nopenssl genpkey -algorithm RSA -out client.key\nopenssl req -new -nodes -key client.key -out client.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in client.csr -out client.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\n","chmod +x gen.sh\n./gen.sh\n","openssl genpkey -algorithm RSA -out ca.key\n","openssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n","openssl genpkey -algorithm RSA -out server.key\n","openssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\n","openssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n","tls:\n serverPemPath: /milvus/tls/server.pem\n serverKeyPath: /milvus/tls/server.key\n caPemPath: /milvus/tls/ca.pem\n\ncommon:\n security:\n tlsMode: 1\n","├── docker-compose.yml\n├── milvus.yaml\n└── tls\n ├── server.pem\n ├── server.key\n └── ca.pem\n"," standalone:\n container_name: milvus-standalone\n image: milvusdb/milvus:latest\n command: [\"milvus\", \"run\", \"standalone\"]\n security_opt:\n - seccomp:unconfined\n environment:\n ETCD_ENDPOINTS: etcd:2379\n MINIO_ADDRESS: minio:9000\n volumes:\n - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n - ${DOCKER_VOLUME_DIRECTORY:-.}/tls:/milvus/tls\n - ${DOCKER_VOLUME_DIRECTORY:-.}/milvus.yaml:/milvus/configs/milvus.yaml\n","sudo docker compose up -d\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri=\"https://localhost:19530\",\n secure=True,\n server_pem_path=\"path_to/server.pem\",\n server_name=\"localhost\"\n)\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri=\"https://localhost:19530\",\n secure=True,\n client_pem_path=\"path_to/client.pem\",\n client_key_path=\"path_to/client.key\",\n ca_pem_path=\"path_to/ca.pem\",\n server_name=\"localhost\"\n)\n","curl --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list\n","curl --cert path_to/client.pem --key path_to/client.key --cacert path_to/ca.pem 
https://localhost:19530/v2/vectordb/collections/list\n"],"headingContent":"Encryption in Transit","anchorList":[{"label":"Cryptage en transit","href":"Encryption-in-Transit","type":1,"isActive":false},{"label":"Créer votre propre certificat","href":"Create-your-own-certificate","type":2,"isActive":false},{"label":"Configuration d'un serveur Milvus avec TLS","href":"Set-up-a-Milvus-server-with-TLS","type":2,"isActive":false},{"label":"Se connecter au serveur Milvus avec TLS","href":"Connect-to-the-Milvus-server-with-TLS","type":2,"isActive":false},{"label":"Connexion au serveur RESTful Milvus avec TLS","href":"Connect-to-the-Milvus-RESTful-server-with-TLS","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/adminGuide/tls.md b/localization/v2.4.x/site/fr/adminGuide/tls.md index f9e72cf1d..0e5a0f6df 100644 --- a/localization/v2.4.x/site/fr/adminGuide/tls.md +++ b/localization/v2.4.x/site/fr/adminGuide/tls.md @@ -1,6 +1,6 @@ --- id: tls.md -title: Chiffrement en transit +title: Cryptage en transit summary: Découvrez comment activer le proxy TLS dans Milvus. ---

Encryption in Transit

TLS (Transport Layer Security) is an encryption protocol that guarantees communication security. The Milvus proxy uses one-way and two-way TLS authentication.

        -

This topic describes how to enable TLS proxy in Milvus.

        +

This topic describes how to enable TLS in the Milvus proxy for both gRPC and RESTful traffic.

TLS and user authentication are two distinct security approaches. If you have enabled both user authentication and TLS in your Milvus system, you must provide a username, a password, and the certificate file paths. For information on how to enable user authentication, see Authenticate User Access.
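For example, with both user authentication and one-way TLS enabled, a RESTful call has to carry the credentials in addition to the CA certificate. A minimal sketch, assuming the default root:Milvus credentials and the certificate paths used later in this topic:

curl --cacert path_to/ca.pem \
  -H 'Authorization: Bearer root:Milvus' \
  https://localhost:19530/v2/vectordb/collections/list
# The Bearer token is "username:password"; replace root:Milvus with your
# own credentials.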

Create your own certificate

2. Map the certificate files into the container

Prepare the certificate files

Create a new folder named tls in the same directory as your docker-compose.yaml. Copy server.pem, server.key, and ca.pem into the tls folder. Lay them out in the following directory structure (a short shell sketch for this step follows the tree):

├── docker-compose.yml
├── milvus.yaml
└── tls
     ├── server.pem
     ├── server.key
     └── ca.pem
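A minimal shell sketch for this step, assuming gen.sh was run in the directory that also contains docker-compose.yaml:

# Create the tls folder and copy in the files produced by gen.sh.
mkdir -p tls
cp server.pem server.key ca.pem tls/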
         
Update the Docker Compose configuration

Edit the docker-compose.yaml file to map the certificate file paths into the container, as shown below:

          standalone:
             container_name: milvus-standalone
             image: milvusdb/milvus:latest
        @@ -562,7 +562,7 @@ openssl x509 -req -days 3650 -in client.csr -o
         
        from pymilvus import MilvusClient
         
         client = MilvusClient(
        -    uri="http://localhost:19530",
        +    uri="https://localhost:19530",
             secure=True,
             server_pem_path="path_to/server.pem",
             server_name="localhost"
        @@ -572,7 +572,7 @@ client = MilvusClient(
         
        from pymilvus import MilvusClient
         
         client = MilvusClient(
        -    uri="http://localhost:19530",
        +    uri="https://localhost:19530",
             secure=True,
             client_pem_path="path_to/client.pem",
             client_key_path="path_to/client.key",
        @@ -581,3 +581,23 @@ client = MilvusClient(
         )
         

See example_tls1.py and example_tls2.py for more information.

        +

Connect to the Milvus RESTful server with TLS

For RESTful APIs, you can verify TLS using the curl command.

        +

One-way TLS connection

        curl --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list
        +
        +

Two-way TLS connection

        curl --cert path_to/client.pem --key path_to/client.key --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list
        +
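Beyond curl, you can confirm that the server is actually terminating TLS by inspecting the handshake directly. A minimal sketch, assuming the server from this guide listens on localhost:19530:

openssl s_client -connect localhost:19530 -CAfile path_to/ca.pem </dev/null
# A "Verify return code: 0 (ok)" line in the output means the certificate
# chain validates against ca.pem.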
        diff --git a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-docker.json b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-docker.json index d66ff1f63..435bed1c1 100644 --- a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-docker.json +++ b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-docker.json @@ -1 +1 @@ -{"codeList":["...\nrootcoord:\n container_name: milvus-rootcoord\n image: milvusdb/milvus:v2.4.9\n...\nproxy:\n container_name: milvus-proxy\n image: milvusdb/milvus:v2.4.9\n...\nquerycoord:\n container_name: milvus-querycoord\n image: milvusdb/milvus:v2.4.9 \n...\nquerynode:\n container_name: milvus-querynode\n image: milvusdb/milvus:v2.4.9\n...\nindexcoord:\n container_name: milvus-indexcoord\n image: milvusdb/milvus:v2.4.9\n...\nindexnode:\n container_name: milvus-indexnode\n image: milvusdb/milvus:v2.4.9 \n...\ndatacoord:\n container_name: milvus-datacoord\n image: milvusdb/milvus:v2.4.9 \n...\ndatanode:\n container_name: milvus-datanode\n image: milvusdb/milvus:v2.4.9\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n # Option: run/backup/rollback\n type: run\n runWithBackup: true\nconfig:\n sourceVersion: 2.1.4 # Specify your milvus version\n targetVersion: 2.4.9\n backupFilePath: /tmp/migration.bak\nmetastore:\n type: etcd\netcd:\n endpoints:\n - milvus-etcd:2379 # Use the etcd container name\n rootPath: by-dev # The root path where data is stored in etcd\n metaSubPath: meta\n kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvus/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","Update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"","anchorList":[{"label":"Mise à niveau du cluster Milvus avec Docker Compose","href":"Upgrade-Milvus-Cluster-with-Docker-Compose","type":1,"isActive":false},{"label":"Mise à niveau de Milvus en modifiant son image","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migration des métadonnées","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"Suite de l'article","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["...\nrootcoord:\n container_name: milvus-rootcoord\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nproxy:\n container_name: milvus-proxy\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nquerycoord:\n container_name: milvus-querycoord\n image: milvusdb/milvus:v2.4.13-hotfix \n...\nquerynode:\n container_name: milvus-querynode\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nindexcoord:\n container_name: milvus-indexcoord\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nindexnode:\n container_name: milvus-indexnode\n image: milvusdb/milvus:v2.4.13-hotfix \n...\ndatacoord:\n container_name: milvus-datacoord\n image: milvusdb/milvus:v2.4.13-hotfix \n...\ndatanode:\n container_name: milvus-datanode\n image: milvusdb/milvus:v2.4.13-hotfix\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n # Option: run/backup/rollback\n type: run\n runWithBackup: true\nconfig:\n sourceVersion: 2.1.4 # Specify your milvus version\n targetVersion: 2.4.13-hotfix\n backupFilePath: /tmp/migration.bak\nmetastore:\n type: etcd\netcd:\n 
endpoints:\n - milvus-etcd:2379 # Use the etcd container name\n rootPath: by-dev # The root path where data is stored in etcd\n metaSubPath: meta\n kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvus/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","Update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"Upgrade Milvus Cluster with Docker Compose","anchorList":[{"label":"Mise à niveau du cluster Milvus avec Docker Compose","href":"Upgrade-Milvus-Cluster-with-Docker-Compose","type":1,"isActive":false},{"label":"Mise à niveau de Milvus en modifiant son image","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migration des métadonnées","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"Suite de l'article","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-docker.md b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-docker.md index 1ff65e847..2900511a8 100644 --- a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-docker.md +++ b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-docker.md @@ -20,7 +20,7 @@ title: Mise à niveau du cluster Milvus avec Docker Compose >

This topic describes how to upgrade your Milvus using Docker Compose.

        -

In normal cases, you can upgrade Milvus by changing its image. However, you must migrate the metadata before any upgrade from v2.1.x to v2.4.9.

        +

In normal cases, you can upgrade Milvus by changing its image. However, you must migrate the metadata before any upgrade from v2.1.x to v2.4.13-hotfix.
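Before picking a path, it helps to confirm which Milvus version is currently running. A minimal sketch, assuming the container names from this guide's docker-compose.yaml:

docker ps --filter name=milvus --format '{{.Names}}\t{{.Image}}'
# The image tag of each Milvus container (e.g. milvusdb/milvus:v2.1.4)
# tells you the running version.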

Upgrade Milvus by changing its image

In normal cases, you can upgrade Milvus as follows:

1. Change the Milvus image tags in docker-compose.yaml.

Note that you must change the image tags for the proxy, all coordinators, and all worker nodes; see the bulk-edit sketch after the diff below.

          ...
           rootcoord:
             container_name: milvus-rootcoord
          -  image: milvusdb/milvus:v2.4.9
          +  image: milvusdb/milvus:v2.4.13-hotfix
           ...
           proxy:
             container_name: milvus-proxy
          -  image: milvusdb/milvus:v2.4.9
          +  image: milvusdb/milvus:v2.4.13-hotfix
           ...
           querycoord:
             container_name: milvus-querycoord
          -  image: milvusdb/milvus:v2.4.9  
          +  image: milvusdb/milvus:v2.4.13-hotfix  
           ...
           querynode:
             container_name: milvus-querynode
          -  image: milvusdb/milvus:v2.4.9
          +  image: milvusdb/milvus:v2.4.13-hotfix
           ...
           indexcoord:
             container_name: milvus-indexcoord
          -  image: milvusdb/milvus:v2.4.9
          +  image: milvusdb/milvus:v2.4.13-hotfix
           ...
           indexnode:
             container_name: milvus-indexnode
          -  image: milvusdb/milvus:v2.4.9 
          +  image: milvusdb/milvus:v2.4.13-hotfix 
           ...
           datacoord:
             container_name: milvus-datacoord
          -  image: milvusdb/milvus:v2.4.9   
          +  image: milvusdb/milvus:v2.4.13-hotfix   
           ...
           datanode:
             container_name: milvus-datanode
          -  image: milvusdb/milvus:v2.4.9
          +  image: milvusdb/milvus:v2.4.13-hotfix
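Editing the tag in eight places by hand is error-prone; a minimal sketch that rewrites all of them in one pass, assuming the old and new tags shown in the diff above (GNU sed syntax):

sed -i 's|milvusdb/milvus:v2.4.9|milvusdb/milvus:v2.4.13-hotfix|g' docker-compose.yaml
# Confirm every component now points at the new tag.
grep 'image: milvusdb/milvus' docker-compose.yaml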
           
2. Run the following commands to perform the upgrade.

          docker compose down
          @@ -105,7 +105,7 @@ cmd:
             runWithBackup: true
           config:
             sourceVersion: 2.1.4   # Specify your milvus version
          -  targetVersion: 2.4.9
          +  targetVersion: 2.4.13-hotfix
             backupFilePath: /tmp/migration.bak
           metastore:
             type: etcd
          @@ -146,7 +146,7 @@ docker compose up -d
           
3. You can also learn how to:

  • If you are ready to deploy your cluster on clouds:
    • Learn how to deploy Milvus on Amazon EKS with Terraform
    • Learn how to deploy a Milvus cluster on GCP with Kubernetes
    • Learn how to deploy Milvus on Microsoft Azure with Kubernetes
            • diff --git a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-helm.json b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-helm.json index a4794c5e2..dcfe2b042 100644 --- a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-helm.json +++ b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-helm.json @@ -1 +1 @@ -{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME CHART VERSION APP VERSION DESCRIPTION \nzilliztech/milvus 4.1.34 2.4.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.33 2.4.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.32 2.4.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.31 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.30 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.29 2.4.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.24 2.3.11 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.23 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.22 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.21 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.20 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.18 2.3.10 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.18 2.3.9 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.17 2.3.8 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.16 2.3.7 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.15 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.14 2.3.6 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.13 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.12 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.11 2.3.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.10 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.9 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.8 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.7 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.6 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.5 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.4 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.3 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.2 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.1 2.3.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.0 2.3.0 Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'\n","helm repo update\nhelm upgrade my-release zilliztech/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION \nnew-release default 1 
2022-11-21 15:41:25.51539 +0800 CST deployed milvus-3.2.18 2.1.4 \n","NAME READY STATUS RESTARTS AGE\nmy-release-etcd-0 1/1 Running 0 21m\nmy-release-etcd-1 1/1 Running 0 21m\nmy-release-etcd-2 1/1 Running 0 21m\nmy-release-milvus-datacoord-664c58798d-fl75s 1/1 Running 0 21m\nmy-release-milvus-datanode-5f75686c55-xfg2r 1/1 Running 0 21m\nmy-release-milvus-indexcoord-5f98b97589-2l48r 1/1 Running 0 21m\nmy-release-milvus-indexnode-857b4ddf98-vmd75 1/1 Running 0 21m\nmy-release-milvus-proxy-6c548f787f-scspp 1/1 Running 0 21m\nmy-release-milvus-querycoord-c454f44cd-dwmwq 1/1 Running 0 21m\nmy-release-milvus-querynode-76bb4946d-lbrz6 1/1 Running 0 21m\nmy-release-milvus-rootcoord-7764c5b686-62msm 1/1 Running 0 21m\nmy-release-minio-0 1/1 Running 0 21m\nmy-release-minio-1 1/1 Running 0 21m\nmy-release-minio-2 1/1 Running 0 21m\nmy-release-minio-3 1/1 Running 0 21m\nmy-release-pulsar-bookie-0 1/1 Running 0 21m\nmy-release-pulsar-bookie-1 1/1 Running 0 21m\nmy-release-pulsar-bookie-2 1/1 Running 0 21m\nmy-release-pulsar-bookie-init-tjxpj 0/1 Completed 0 21m\nmy-release-pulsar-broker-0 1/1 Running 0 21m\nmy-release-pulsar-proxy-0 1/1 Running 0 21m\nmy-release-pulsar-pulsar-init-c8vvc 0/1 Completed 0 21m\nmy-release-pulsar-recovery-0 1/1 Running 0 21m\nmy-release-pulsar-zookeeper-0 1/1 Running 0 21m\nmy-release-pulsar-zookeeper-1 1/1 Running 0 20m\nmy-release-pulsar-zookeeper-2 1/1 Running 0 20m\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9\n"],"headingContent":"","anchorList":[{"label":"Mise à niveau du cluster Milvus avec les cartes Helm","href":"Upgrade-Milvus-Cluster-with-Helm-Chart","type":1,"isActive":false},{"label":"Vérification de la carte Milvus Helm","href":"Check-Milvus-Helm-Chart","type":2,"isActive":false},{"label":"Effectuer une mise à niveau continue","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Mise à niveau de Milvus à l'aide de Helm","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"Migrer les métadonnées","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME CHART VERSION APP VERSION DESCRIPTION \nzilliztech/milvus 4.1.34 2.4.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.33 2.4.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.32 2.4.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.31 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.30 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.29 2.4.0 Milvus is an open-source vector database built 
...\nzilliztech/milvus 4.1.24 2.3.11 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.23 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.22 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.21 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.20 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.18 2.3.10 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.18 2.3.9 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.17 2.3.8 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.16 2.3.7 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.15 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.14 2.3.6 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.13 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.12 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.11 2.3.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.10 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.9 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.8 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.7 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.6 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.5 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.4 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.3 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.2 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.1 2.3.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.0 2.3.0 Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'\n","helm repo update\nhelm upgrade my-release zilliztech/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION \nnew-release default 1 2022-11-21 15:41:25.51539 +0800 CST deployed milvus-3.2.18 2.1.4 \n","NAME READY STATUS RESTARTS AGE\nmy-release-etcd-0 1/1 Running 0 21m\nmy-release-etcd-1 1/1 Running 0 21m\nmy-release-etcd-2 1/1 Running 0 21m\nmy-release-milvus-datacoord-664c58798d-fl75s 1/1 Running 0 21m\nmy-release-milvus-datanode-5f75686c55-xfg2r 1/1 Running 0 21m\nmy-release-milvus-indexcoord-5f98b97589-2l48r 1/1 Running 0 21m\nmy-release-milvus-indexnode-857b4ddf98-vmd75 1/1 Running 0 21m\nmy-release-milvus-proxy-6c548f787f-scspp 1/1 Running 0 21m\nmy-release-milvus-querycoord-c454f44cd-dwmwq 1/1 Running 0 21m\nmy-release-milvus-querynode-76bb4946d-lbrz6 1/1 Running 0 21m\nmy-release-milvus-rootcoord-7764c5b686-62msm 1/1 Running 0 21m\nmy-release-minio-0 1/1 Running 0 21m\nmy-release-minio-1 1/1 Running 0 21m\nmy-release-minio-2 1/1 Running 0 21m\nmy-release-minio-3 1/1 Running 0 21m\nmy-release-pulsar-bookie-0 1/1 Running 0 21m\nmy-release-pulsar-bookie-1 1/1 Running 0 21m\nmy-release-pulsar-bookie-2 1/1 Running 0 21m\nmy-release-pulsar-bookie-init-tjxpj 0/1 Completed 0 21m\nmy-release-pulsar-broker-0 1/1 Running 0 21m\nmy-release-pulsar-proxy-0 1/1 Running 
0 21m\nmy-release-pulsar-pulsar-init-c8vvc 0/1 Completed 0 21m\nmy-release-pulsar-recovery-0 1/1 Running 0 21m\nmy-release-pulsar-zookeeper-0 1/1 Running 0 21m\nmy-release-pulsar-zookeeper-1 1/1 Running 0 20m\nmy-release-pulsar-zookeeper-2 1/1 Running 0 20m\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix\n"],"headingContent":"Upgrade Milvus Cluster with Helm Chart","anchorList":[{"label":"Mise à niveau du cluster Milvus avec les cartes Helm","href":"Upgrade-Milvus-Cluster-with-Helm-Chart","type":1,"isActive":false},{"label":"Vérification de la carte Milvus Helm","href":"Check-Milvus-Helm-Chart","type":2,"isActive":false},{"label":"Effectuer une mise à niveau continue","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Mise à niveau de Milvus à l'aide de Helm","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"Migrer les métadonnées","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-helm.md b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-helm.md index 406c36716..94db8b585 100644 --- a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-helm.md +++ b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-helm.md @@ -5,7 +5,7 @@ order: 1 group: upgrade_milvus_cluster-operator.md related_key: upgrade Milvus Cluster summary: Découvrez comment mettre à niveau le cluster Milvus avec Helm Chart. -title: Mise à niveau de l'ensemble Milvus avec le tableau de bord +title: Mise à niveau du cluster Milvus avec les cartes Helm ---

Upgrade Milvus Cluster with Helm Chart

You can choose the upgrade path for your Milvus as follows:

          -
- [Conduct a rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.9.
          +
- [Conduct a rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.13-hotfix.

Conduct a rolling upgrade

          @@ -235,25 +235,25 @@ my-release-pulsar-zookeeper-2
1. Migrate the Milvus metadata.
2. Start the Milvus components with a new image.
        -

2. Upgrade Milvus from v2.1.x to v2.4.9

The following commands assume that you are upgrading Milvus from v2.1.4 to v2.4.9. Change them to the versions that fit your needs.

        +

2. Upgrade Milvus from v2.1.x to v2.4.13-hotfix

The following commands assume that you are upgrading Milvus from v2.1.4 to v2.4.13-hotfix. Change them to the versions that fit your needs (a verification sketch follows the list below).

  1. Specify the Milvus instance name, the source Milvus version, and the target Milvus version.

  -
  ./migrate.sh -i my-release -s 2.1.4 -t 2.4.9
  +
  ./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix
           
2. Specify the namespace with -n if your Milvus is not installed in the default K8s namespace.

          -
          ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9
          +
          ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix
           
3. Specify the root path with -r if your Milvus is installed with the custom rootpath.

          -
          ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev
          +
          ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev
           
4. Specify the image tag with -w if your Milvus is installed with a custom image.

          -
          ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9
          +
          ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix
           
5. Set -d true if you want to automatically remove the migration pod once the migration is complete.

          -
          ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true
          +
          ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true
           
6. Roll back and migrate again if the migration fails.

          -
          ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
          -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9
          +
          ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
          +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix
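Once the migration has finished and the release has been upgraded, it is worth confirming that every pod runs the target image. A minimal sketch; the app.kubernetes.io/instance label is an assumption based on common Helm chart conventions:

kubectl -n milvus get pods -l app.kubernetes.io/instance=my-release \
  -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[0].image}{"\n"}{end}'
# Every Milvus component should now report milvusdb/milvus:v2.4.13-hotfix.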
           
        diff --git a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-operator.json b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-operator.json index b6287d004..941289c2c 100644 --- a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-operator.json +++ b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-operator.json @@ -1 +1 @@ -{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.9\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.9\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.9\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://github.com/zilliztech/milvus-operator/blob/main/config/samples/beta/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"","anchorList":[{"label":"Mise à niveau du cluster Milvus avec Milvus Operator","href":"Upgrade-Milvus-Cluster-with-Milvus-Operator","type":1,"isActive":false},{"label":"Mise à niveau de l'opérateur Milvus","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"Effectuer une mise à niveau continue","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Mettre à niveau Milvus en changeant son image","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrer les métadonnées","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: 
milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.13-hotfix\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.13-hotfix\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.13-hotfix\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://github.com/zilliztech/milvus-operator/blob/main/config/samples/beta/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"Upgrade Milvus Cluster with Milvus Operator","anchorList":[{"label":"Mise à niveau du cluster Milvus avec Milvus Operator","href":"Upgrade-Milvus-Cluster-with-Milvus-Operator","type":1,"isActive":false},{"label":"Mise à niveau de l'opérateur Milvus","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"Effectuer une mise à niveau continue","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Mettre à niveau Milvus en changeant son image","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrer les métadonnées","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-operator.md b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-operator.md index 071a0bcc9..dfb14f25c 100644 --- a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-operator.md +++ b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_cluster-operator.md @@ -46,9 +46,9 @@ helm -n milvus-operator upgrade milvus-

Once you have upgraded your Milvus operator to the latest version, you have the following choices:

Conduct a rolling upgrade

In the configuration file above, set spec.components.enableRollingUpdate to true and set spec.components.image to your desired Milvus version.

By default, Milvus performs a rolling upgrade of the coordinators in an ordered way, replacing the coordinator pod images one after another. To reduce the upgrade time, consider setting spec.components.imageUpdateMode to all so that Milvus replaces all pod images at the same time.

@@ -88,7 +88,7 @@ spec:
   components:
     enableRollingUpdate: true
     imageUpdateMode: all
-    image: milvusdb/milvus:v2.4.9
+    image: milvusdb/milvus:v2.4.13-hotfix

You can set spec.components.imageUpdateMode to rollingDowngrade to make Milvus replace the coordinator pod images with a lower version.

    apiVersion: milvus.io/v1beta1
    @@ -128,7 +128,7 @@ metadata:
     spec:
       # Omit other fields ...
       components:
    -   image: milvusdb/milvus:v2.4.9
    +   image: milvusdb/milvus:v2.4.13-hotfix
     

Then run the following to perform the upgrade:

    kubectl apply -f milvusupgrade.yaml
    @@ -148,8 +148,8 @@ spec:
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

Since Milvus 2.2.0, the metadata is incompatible with that in previous releases. The following examples assume an upgrade from Milvus 2.1.4 to Milvus 2.4.9.

    -

1. Create a .yaml file for the metadata migration

Create a metadata migration file. The following is an example. You need to specify the name, sourceVersion, and targetVersion fields in the configuration file. The following example sets name to my-release-upgrade, sourceVersion to v2.1.4, and targetVersion to v2.4.9. This means that your Milvus cluster will be upgraded from v2.1.4 to v2.4.9.

    +

Since Milvus 2.2.0, the metadata is incompatible with that in previous releases. The following examples assume an upgrade from Milvus 2.1.4 to Milvus 2.4.13-hotfix.

    +

1. Create a .yaml file for the metadata migration

Create a metadata migration file. The following is an example. You need to specify the name, sourceVersion, and targetVersion fields in the configuration file. The following example sets name to my-release-upgrade, sourceVersion to v2.1.4, and targetVersion to v2.4.13-hotfix. This means that your Milvus cluster will be upgraded from v2.1.4 to v2.4.13-hotfix.

    apiVersion: milvus.io/v1beta1
     kind: MilvusUpgrade
     metadata:
    @@ -159,9 +159,9 @@ spec:
         namespace: default
         name: my-release
       sourceVersion: "v2.1.4"
    -  targetVersion: "v2.4.9"
    +  targetVersion: "v2.4.13-hotfix"
       # below are some omit default values:
    -  # targetImage: "milvusdb/milvus:v2.4.9"
    +  # targetImage: "milvusdb/milvus:v2.4.13-hotfix"
       # toolImage: "milvusdb/meta-migration:v2.2.0"
       # operation: upgrade
       # rollbackIfFailed: true
    diff --git a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-docker.json b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-docker.json
    index 4737f3aa4..803e23559 100644
    --- a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-docker.json
    +++ b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-docker.json
    @@ -1 +1 @@
    -{"codeList":["...\nstandalone:\n  container_name: milvus-standalone\n  image: milvusdb/milvus:v2.4.9\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n  # Option: run/backup/rollback\n  type: run\n  runWithBackup: true\nconfig:\n  sourceVersion: 2.1.4   # Specify your milvus version\n  targetVersion: 2.4.9\n  backupFilePath: /tmp/migration.bak\nmetastore:\n  type: etcd\netcd:\n  endpoints:\n    - milvus-etcd:2379  # Use the etcd container name\n  rootPath: by-dev # The root path where data is stored in etcd\n  metaSubPath: meta\n  kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvusdb/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","// Run the following only after update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"","anchorList":[{"label":"Mise à niveau de Milvus Standalone avec Docker Compose","href":"Upgrade-Milvus-Standalone-with-Docker-Compose","type":1,"isActive":false},{"label":"Mettre à niveau Milvus en modifiant son image","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migration des métadonnées","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"Suite de l'article","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["...\nstandalone:\n  container_name: milvus-standalone\n  image: milvusdb/milvus:v2.4.13-hotfix\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n  # Option: run/backup/rollback\n  type: run\n  runWithBackup: true\nconfig:\n  sourceVersion: 2.1.4   # Specify your milvus version\n  targetVersion: 2.4.13-hotfix\n  backupFilePath: /tmp/migration.bak\nmetastore:\n  type: etcd\netcd:\n  endpoints:\n    - milvus-etcd:2379  # Use the etcd container name\n  rootPath: by-dev # The root path where data is stored in etcd\n  metaSubPath: meta\n  kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvusdb/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","// Run the following only after update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"Upgrade Milvus Standalone with Docker Compose","anchorList":[{"label":"Mise à niveau de Milvus Standalone avec Docker Compose","href":"Upgrade-Milvus-Standalone-with-Docker-Compose","type":1,"isActive":false},{"label":"Mettre à niveau Milvus en modifiant son image","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migration des métadonnées","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"Suite de l'article","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-docker.md b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-docker.md
    index 1dad1e1fb..60c360f43 100644
    --- a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-docker.md
    +++ b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-docker.md
@@ -24,7 +24,7 @@ title: Upgrade Milvus Standalone with Docker Compose
           
         

This topic describes how to upgrade your Milvus using Docker Compose.

    -

In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before any upgrade from v2.1.x to v2.4.9.

    +

In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before any upgrade from v2.1.x to v2.4.13-hotfix.

For security concerns, Milvus upgrades its MinIO to RELEASE.2023-03-20T20-16-18Z with the release of v2.2.5. Before any upgrade from previous Milvus Standalone releases installed with Docker Compose, you need to create a Single-Node Single-Drive MinIO deployment and migrate the existing MinIO settings and content to the new deployment. For details, refer to this guide.
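The guide linked above is the authoritative procedure. As a rough sketch of the content-migration step only (not from the original page), existing objects can be mirrored into the new deployment with the MinIO client; every endpoint, credential, and bucket name below is a placeholder:

```bash
# All values here are placeholders — substitute the endpoints, credentials,
# and bucket that your Milvus deployment actually uses.
mc alias set minio-old http://localhost:9000 minioadmin minioadmin
mc alias set minio-new http://localhost:9001 minioadmin minioadmin

# Copy every object from the old deployment's bucket into the new one.
mc mirror minio-old/milvus-bucket minio-new/milvus-bucket
```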

@@ -49,7 +49,7 @@ title: Upgrade Milvus Standalone with Docker Compose
    ...
     standalone:
       container_name: milvus-standalone
    -  image: milvusdb/milvus:v2.4.9
    +  image: milvusdb/milvus:v2.4.13-hotfix
     
• Run the following commands to perform the upgrade.

    docker compose down
    @@ -83,7 +83,7 @@ cmd:
       runWithBackup: true
     config:
       sourceVersion: 2.1.4   # Specify your milvus version
    -  targetVersion: 2.4.9
    +  targetVersion: 2.4.13-hotfix
       backupFilePath: /tmp/migration.bak
     metastore:
       type: etcd
    diff --git a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-helm.json b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-helm.json
    index 77f36ce98..49b537d40 100644
    --- a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-helm.json
    +++ b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-helm.json
    @@ -1 +1 @@
    -{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME                    CHART VERSION   APP VERSION             DESCRIPTION                                       \nzilliztech/milvus       4.1.34          2.4.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.33          2.4.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.32          2.4.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.31          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.30          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.29          2.4.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.24          2.3.11                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.23          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.22          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.21          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.20          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.18          2.3.10                  Milvus is an open-source vector database built ... \nzilliztech/milvus       4.1.18          2.3.9                   Milvus is an open-source vector database built ...                                       
\nzilliztech/milvus       4.1.17          2.3.8                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.16          2.3.7                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.15          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.14          2.3.6                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.13          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.12          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.11          2.3.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.10          2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.9           2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.8           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.7           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.6           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.5           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.4           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.3           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.2           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.1           2.3.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'\n","helm repo update\nhelm upgrade my-release milvus/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME                NAMESPACE   REVISION    UPDATED                                 STATUS      CHART           APP VERSION     \nmy-release          default     1           2022-11-21 15:41:25.51539 +0800 CST     deployed    milvus-3.2.18   2.1.4\n","NAME                                            READY   STATUS    RESTARTS   AGE\nmy-release-etcd-0                               1/1     Running   0          84s\nmy-release-milvus-standalone-75c599fffc-6rwlj   1/1     Running   0          84s\nmy-release-minio-744dd9586f-qngzv               1/1     Running   0          84s\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 
-r by-dev -o migrate -w milvusdb/milvus:v2.4.9\n"],"headingContent":"","anchorList":[{"label":"Mise à niveau de Milvus Standalone avec les cartes Helm","href":"Upgrade-Milvus-Standalone-with-Helm-Chart","type":1,"isActive":false},{"label":"Vérifier la version de Milvus","href":"Check-the-Milvus-version","type":2,"isActive":false},{"label":"Effectuer une mise à niveau continue","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Mise à niveau de Milvus à l'aide de Helm","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"Migrer les métadonnées","href":"Migrate-the-metadata","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME                    CHART VERSION   APP VERSION             DESCRIPTION                                       \nzilliztech/milvus       4.1.34          2.4.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.33          2.4.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.32          2.4.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.31          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.30          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.29          2.4.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.24          2.3.11                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.23          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.22          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.21          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.20          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.18          2.3.10                  Milvus is an open-source vector database built ... \nzilliztech/milvus       4.1.18          2.3.9                   Milvus is an open-source vector database built ...                                       
\nzilliztech/milvus       4.1.17          2.3.8                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.16          2.3.7                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.15          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.14          2.3.6                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.13          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.12          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.11          2.3.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.10          2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.9           2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.8           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.7           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.6           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.5           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.4           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.3           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.2           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.1           2.3.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'\n","helm repo update\nhelm upgrade my-release milvus/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME                NAMESPACE   REVISION    UPDATED                                 STATUS      CHART           APP VERSION     \nmy-release          default     1           2022-11-21 15:41:25.51539 +0800 CST     deployed    milvus-3.2.18   2.1.4\n","NAME                                            READY   STATUS    RESTARTS   AGE\nmy-release-etcd-0                               1/1     Running   0          84s\nmy-release-milvus-standalone-75c599fffc-6rwlj   1/1     Running   0          84s\nmy-release-minio-744dd9586f-qngzv               1/1     Running   0          84s\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback 
-w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix\n"],"headingContent":"Upgrade Milvus Standalone with Helm Chart","anchorList":[{"label":"Mise à niveau de Milvus Standalone avec la carte Helm","href":"Upgrade-Milvus-Standalone-with-Helm-Chart","type":1,"isActive":false},{"label":"Vérifier la version de Milvus","href":"Check-the-Milvus-version","type":2,"isActive":false},{"label":"Effectuer une mise à niveau continue","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Mise à niveau de Milvus à l'aide de Helm","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"Migrer les métadonnées","href":"Migrate-the-metadata","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-helm.md b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-helm.md
    index 539cdedcb..eca210b5f 100644
    --- a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-helm.md
    +++ b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-helm.md
    @@ -5,10 +5,10 @@ order: 1
     group: upgrade_milvus_standalone-operator.md
     related_key: upgrade Milvus Standalone
summary: Learn how to upgrade Milvus standalone with Helm Chart.
-title: Upgrade the Milvus Standalone chart with the Helm chart
+title: Upgrade Milvus Standalone with Helm Chart
     ---
     
    -

Upgrade Milvus Standalone with Helm Chart

You can choose the upgrade path for your Milvus as follows:

    -
- [Conduct a rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.9.
+
- [Conduct a rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.13-hotfix.

Conduct a rolling upgrade

Since Milvus 2.2.3, you can configure Milvus coordinators to work in active-standby mode and enable the rolling upgrade feature for them, so that Milvus can respond to incoming requests during the coordinator upgrades. In previous releases, coordinators are to be removed and then created during an upgrade, which may introduce certain downtime of the service.

    -

Rolling upgrades require coordinators to work in active-standby mode. You can use the script we provide to configure the coordinators to work in active-standby mode and start the rolling upgrade.

    +

Rolling upgrades require coordinators to work in active-standby mode. You can use the script we provide to configure the coordinators to work in active-standby mode and start the rolling upgrade.

Based on the rolling update capabilities provided by Kubernetes, the script above enforces an ordered update of the deployments according to their dependencies. In addition, Milvus implements a mechanism to ensure that its components remain compatible with those depending on them during the upgrade, which considerably reduces potential service downtime.

The script applies only to the upgrade of Milvus installed with Helm. The following table lists the command flags available in the scripts.

    @@ -123,8 +123,8 @@ zilliztech/milvus 4.1.0 2.3.0 Milvus is an ope
o | Operation | update | False
    -

Once you have made sure that all the deployments in your Milvus instance are in their normal status, you can run the following command to upgrade the Milvus instance to 2.4.9.

    -
    sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'
    +

Once you have made sure that all the deployments in your Milvus instance are in their normal status, you can run the following command to upgrade the Milvus instance to 2.4.13-hotfix.

    +
    sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'
     
      @@ -213,25 +213,25 @@ my-release-minio-744dd9586f-qngzv 1/1 Running 0 84s
1. Migrate the Milvus metadata.
2. Start the Milvus components with a new image.
    -

2. Upgrade Milvus from v2.1.x to v2.4.9

The following commands assume that you upgrade Milvus from v2.1.4 to v2.4.9. Change them to the versions that fit your needs.

    +

2. Upgrade Milvus from v2.1.x to v2.4.13-hotfix

The following commands assume that you upgrade Milvus from v2.1.4 to v2.4.13-hotfix. Change them to the versions that fit your needs.

      -
1. Specify the Milvus instance name, the source Milvus version, and the target Milvus version.

      -
      ./migrate.sh -i my-release -s 2.1.4 -t 2.4.9
      +
2. Specify the Milvus instance name, the source Milvus version, and the target Milvus version.

      +
      ./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix
       
3. Specify the namespace with -n if your Milvus is not installed in the default K8s namespace.

      -
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9
      +
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix
       
4. Specify the root path with -r if your Milvus is installed with the custom rootpath.

      -
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev
      +
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev
       
5. Specify the image tag with -w if your Milvus is installed with a custom image.

      -
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9
      +
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix
       
6. Set -d true if you want to automatically remove the migration pod after the migration is completed.

      -
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true
      +
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true
       
7. Roll back and migrate again if the migration fails.

      -
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
      -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9
      +
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
      +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix
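After the migration completes and the components restart, a quick sanity check is to read a pod's image tag, mirroring the jsonpath command shown earlier on this page; the pod name below is hypothetical and will differ in your cluster:

```bash
# List the pods first, then substitute your own pod name.
kubectl -n milvus get pods
kubectl -n milvus get pod my-release-milvus-proxy-6c548f787f-scspp \
  -o=jsonpath='{.spec.containers[0].image}'
# Expected: milvusdb/milvus:v2.4.13-hotfix
```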
       
    diff --git a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-operator.json b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-operator.json index 796a1ca16..7b2c76c79 100644 --- a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-operator.json +++ b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-operator.json @@ -1 +1 @@ -{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nlabels:\n app: milvus\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.9\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.9\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.9\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://raw.githubusercontent.com/zilliztech/milvus-operator/main/config/samples/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"","anchorList":[{"label":"Mise à niveau de Milvus Standalone avec Milvus Operator","href":"Upgrade-Milvus-Standalone-with-Milvus-Operator","type":1,"isActive":false},{"label":"Mise à niveau de l'opérateur Milvus","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"Effectuer une mise à niveau continue","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Mettre à niveau Milvus en changeant son image","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrer les métadonnées","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: 
rollingDowngrade\n image: milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nlabels:\n app: milvus\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.13-hotfix\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.13-hotfix\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.13-hotfix\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://raw.githubusercontent.com/zilliztech/milvus-operator/main/config/samples/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"Upgrade Milvus Standalone with Milvus Operator","anchorList":[{"label":"Mise à niveau de Milvus Standalone avec Milvus Operator","href":"Upgrade-Milvus-Standalone-with-Milvus-Operator","type":1,"isActive":false},{"label":"Mise à niveau de l'opérateur Milvus","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"Effectuer une mise à niveau continue","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Mettre à niveau Milvus en changeant son image","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrer les métadonnées","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-operator.md b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-operator.md index 3f5b83a64..07676ba0f 100644 --- a/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-operator.md +++ b/localization/v2.4.x/site/fr/adminGuide/upgrade_milvus_standalone-operator.md @@ -46,9 +46,9 @@ helm -n milvus-operator upgrade milvus-

Once you have upgraded your Milvus operator to the latest version, you have the following choices:

Conduct a rolling upgrade

• In the configuration file above, set spec.components.enableRollingUpdate to true and set spec.components.image to your desired Milvus version.

By default, Milvus conducts a rolling upgrade for coordinators in an ordered way, replacing the coordinator pod images one after another. To reduce the upgrade time, consider setting spec.components.imageUpdateMode to all so that Milvus replaces all pod images at the same time.

@@ -88,7 +88,7 @@ spec:
   components:
     enableRollingUpdate: true
     imageUpdateMode: all
-    image: milvusdb/milvus:v2.4.9
+    image: milvusdb/milvus:v2.4.13-hotfix

You can set spec.components.imageUpdateMode to rollingDowngrade to make Milvus replace the coordinator pod images with a lower version.

    apiVersion: milvus.io/v1beta1
    @@ -130,7 +130,7 @@ labels:
     spec:
       # Omit other fields ...
       components:
    -   image: milvusdb/milvus:v2.4.9
    +   image: milvusdb/milvus:v2.4.13-hotfix
     

Then run the following to perform the upgrade:

    kubectl apply -f milvusupgrade.yaml
    @@ -150,8 +150,8 @@ spec:
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

Since Milvus 2.2.0, the metadata is incompatible with that in previous releases. The following examples assume an upgrade from Milvus 2.1.4 to Milvus v2.4.9.

    -

1. Create a .yaml file for the metadata migration

Create a metadata migration file. The following is an example. You need to specify the name, sourceVersion, and targetVersion fields in the configuration file. The following example sets name to my-release-upgrade, sourceVersion to v2.1.4, and targetVersion to v2.4.9. This means that your Milvus instance will be upgraded from v2.1.4 to v2.4.9.

    +

Since Milvus 2.2.0, the metadata is incompatible with that in previous releases. The following examples assume an upgrade from Milvus 2.1.4 to Milvus v2.4.13-hotfix.

    +

1. Create a .yaml file for the metadata migration

Create a metadata migration file. The following is an example. You need to specify the name, sourceVersion, and targetVersion fields in the configuration file. The following example sets name to my-release-upgrade, sourceVersion to v2.1.4, and targetVersion to v2.4.13-hotfix. This means that your Milvus instance will be upgraded from v2.1.4 to v2.4.13-hotfix.

    apiVersion: milvus.io/v1beta1
     kind: MilvusUpgrade
     metadata:
    @@ -161,9 +161,9 @@ spec:
         namespace: default
         name: my-release
       sourceVersion: "v2.1.4"
    -  targetVersion: "v2.4.9"
    +  targetVersion: "v2.4.13-hotfix"
       # below are some omit default values:
    -  # targetImage: "milvusdb/milvus:v2.4.9"
    +  # targetImage: "milvusdb/milvus:v2.4.13-hotfix"
       # toolImage: "milvusdb/meta-migration:v2.2.0"
       # operation: upgrade
       # rollbackIfFailed: true
    diff --git a/localization/v2.4.x/site/fr/embeddings/embed-with-cohere.json b/localization/v2.4.x/site/fr/embeddings/embed-with-cohere.json
    index 40401b602..3ce35a476 100644
    --- a/localization/v2.4.x/site/fr/embeddings/embed-with-cohere.json
    +++ b/localization/v2.4.x/site/fr/embeddings/embed-with-cohere.json
    @@ -1 +1 @@
    -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","cohere_ef = CohereEmbeddingFunction(\n    model_name=\"embed-english-light-v3.0\",\n    api_key=\"YOUR_COHERE_API_KEY\",\n    input_type=\"search_document\",\n    embedding_types=[\"float\"]\n)\n","docs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = cohere_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", cohere_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 3.43322754e-02,  1.16252899e-03, -5.25207520e-02,  1.32846832e-03,\n       -6.80541992e-02,  6.10961914e-02, -7.06176758e-02,  1.48925781e-01,\n        1.54174805e-01,  1.98516846e-02,  2.43835449e-02,  3.55224609e-02,\n        1.82952881e-02,  7.57446289e-02, -2.40783691e-02,  4.40063477e-02,\n...\n        0.06359863, -0.01971436, -0.02253723,  0.00354195,  0.00222015,\n        0.00184727,  0.03408813, -0.00777817,  0.04919434,  0.01519775,\n       -0.02862549,  0.04760742, -0.07891846,  0.0124054 ], dtype=float32)]\nDim: 384 (384,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = cohere_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", cohere_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-1.33361816e-02,  9.79423523e-04, -7.28759766e-02, -1.93786621e-02,\n       -9.71679688e-02,  4.34875488e-02, -9.81445312e-02,  1.16882324e-01,\n        5.89904785e-02, -4.19921875e-02,  4.95910645e-02,  5.83496094e-02,\n        3.47595215e-02, -5.87463379e-03, -7.30514526e-03,  2.92816162e-02,\n...\n        0.00749969, -0.01192474,  0.02719116,  0.03347778,  0.07696533,\n        0.01409149,  0.00964355, -0.01681519, -0.0073204 ,  0.00043154,\n       -0.04577637,  0.03591919, -0.02807617, -0.04812622], dtype=float32)]\nDim 384 (384,)\n"],"headingContent":"","anchorList":[{"label":"Cohere","href":"Cohere","type":1,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import CohereEmbeddingFunction\n\ncohere_ef = CohereEmbeddingFunction(\n    model_name=\"embed-english-light-v3.0\",\n    api_key=\"YOUR_COHERE_API_KEY\",\n    input_type=\"search_document\",\n    embedding_types=[\"float\"]\n)\n","docs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = cohere_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", cohere_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 3.43322754e-02,  1.16252899e-03, -5.25207520e-02,  1.32846832e-03,\n       -6.80541992e-02,  6.10961914e-02, -7.06176758e-02,  1.48925781e-01,\n        1.54174805e-01,  1.98516846e-02,  2.43835449e-02,  3.55224609e-02,\n        1.82952881e-02,  7.57446289e-02, -2.40783691e-02,  4.40063477e-02,\n...\n        0.06359863, -0.01971436, -0.02253723,  0.00354195,  0.00222015,\n        0.00184727,  0.03408813, -0.00777817,  0.04919434,  0.01519775,\n       -0.02862549,  0.04760742, -0.07891846,  0.0124054 ], dtype=float32)]\nDim: 384 (384,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = cohere_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", cohere_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-1.33361816e-02,  9.79423523e-04, -7.28759766e-02, -1.93786621e-02,\n       -9.71679688e-02,  4.34875488e-02, -9.81445312e-02,  1.16882324e-01,\n        5.89904785e-02, -4.19921875e-02,  4.95910645e-02,  5.83496094e-02,\n        3.47595215e-02, -5.87463379e-03, -7.30514526e-03,  2.92816162e-02,\n...\n        0.00749969, -0.01192474,  0.02719116,  0.03347778,  0.07696533,\n        0.01409149,  0.00964355, -0.01681519, -0.0073204 ,  0.00043154,\n       -0.04577637,  0.03591919, -0.02807617, -0.04812622], dtype=float32)]\nDim 384 (384,)\n"],"headingContent":"Cohere","anchorList":[{"label":"Cohere","href":"Cohere","type":1,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/fr/embeddings/embed-with-cohere.md b/localization/v2.4.x/site/fr/embeddings/embed-with-cohere.md
    index eb701a884..9d488c771 100644
    --- a/localization/v2.4.x/site/fr/embeddings/embed-with-cohere.md
    +++ b/localization/v2.4.x/site/fr/embeddings/embed-with-cohere.md
@@ -29,7 +29,9 @@ title: Integrate Cohere
     pip install "pymilvus[model]"
     

Then, instantiate the CohereEmbeddingFunction class:

    -
    cohere_ef = CohereEmbeddingFunction(
    +
    from pymilvus.model.dense import CohereEmbeddingFunction
    +
    +cohere_ef = CohereEmbeddingFunction(
         model_name="embed-english-light-v3.0",
         api_key="YOUR_COHERE_API_KEY",
         input_type="search_document",
    @@ -39,7 +41,7 @@ pip install "pymilvus[model]"
     

Parameters:

• model_name (string)

      -

  The name of the Cohere embedding model to use for encoding. You can specify any available Cohere embedding model name, for example, embed-english-v3.0, embed-multilingual-v3.0, etc. If you leave this parameter unspecified, embed-english-light-v3.0 will be used. For a list of available models, refer to Embed.

    • +

  The name of the Cohere embedding model to use for encoding. You can specify any Cohere embedding model name, for example, embed-english-v3.0, embed-multilingual-v3.0, etc. If you leave this parameter unspecified, embed-english-light-v3.0 will be used. For a list of available models, refer to Embed.

• api_key (string)

  The API key for accessing the Cohere API.

• input_type (string)

      diff --git a/localization/v2.4.x/site/fr/embeddings/embed-with-jina.json b/localization/v2.4.x/site/fr/embeddings/embed-with-jina.json index 3ba04b590..4cee744ff 100644 --- a/localization/v2.4.x/site/fr/embeddings/embed-with-jina.json +++ b/localization/v2.4.x/site/fr/embeddings/embed-with-jina.json @@ -1 +1 @@ -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n model_name=\"jina-embeddings-v2-base-en\", # Defaults to `jina-embeddings-v2-base-en`\n api_key=JINAAI_API_KEY # Provide your Jina AI API key\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = jina_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", jina_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([-4.88487840e-01, -4.28095880e-01, 4.90086500e-01, -1.63274320e-01,\n 3.43437800e-01, 3.21476880e-01, 2.83173790e-02, -3.10403670e-01,\n 4.76985040e-01, -1.77410420e-01, -3.84803180e-01, -2.19224200e-01,\n -2.52898000e-01, 6.62411900e-02, -8.58173100e-01, 1.05221800e+00,\n...\n -2.04462400e-01, 7.14229800e-01, -1.66823000e-01, 8.72551440e-01,\n 5.53560140e-01, 8.92506300e-01, -2.39408610e-01, -4.22413560e-01,\n -3.19551350e-01, 5.59153850e-01, 2.44338100e-01, -8.60452100e-01])]\nDim: 768 (768,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = jina_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", jina_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-5.99164660e-01, -3.49827350e-01, 8.22405160e-01, -1.18632730e-01,\n 5.78107540e-01, 1.09789170e-01, 2.91604200e-01, -3.29306450e-01,\n 2.93779640e-01, -2.17880800e-01, -6.84535440e-01, -3.79752000e-01,\n -3.47541800e-01, 9.20846100e-02, -6.13804400e-01, 6.31312800e-01,\n...\n -1.84993740e-02, 9.38629150e-01, 2.74858470e-02, 1.09396360e+00,\n 3.96270750e-01, 7.44445800e-01, -1.95404050e-01, -6.08383200e-01,\n -3.75076300e-01, 3.87512200e-01, 8.11889650e-01, -3.76407620e-01])]\nDim 768 (768,)\n"],"headingContent":"","anchorList":[{"label":"Jina AI","href":"Jina-AI","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n model_name=\"jina-embeddings-v3\", # Defaults to `jina-embeddings-v3`\n api_key=JINAAI_API_KEY, # Provide your Jina AI API key\n task=\"retrieval.passage\", # Specify the task\n dimensions=1024, # Defaults to 1024\n)\n","\n```python\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = jina_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", jina_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([9.80641991e-02, -8.51697400e-02, 7.36531913e-02, 1.42558888e-02,\n -2.23589484e-02, 1.68494112e-03, -3.50753777e-02, -3.11530549e-02,\n 
-3.26012149e-02, 5.04568312e-03, 3.69836427e-02, 3.48948985e-02,\n 8.19722563e-03, 5.88679723e-02, -6.71099266e-03, -1.82369724e-02,\n...\n 2.48654783e-02, 3.43279652e-02, -1.66154150e-02, -9.90478322e-03,\n -2.96043139e-03, -8.57473817e-03, -7.39028037e-04, 6.25024503e-03,\n -1.08831357e-02, -4.00776342e-02, 3.25369164e-02, -1.42691191e-03])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = jina_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", jina_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([8.79201014e-03, 1.47551354e-02, 4.02722731e-02, -2.52991207e-02,\n 1.12719582e-02, 3.75947170e-02, 3.97946090e-02, -7.36681819e-02,\n -2.17952449e-02, -1.16298944e-02, -6.83426252e-03, -5.12507409e-02,\n 5.26071340e-02, 6.75181448e-02, 3.92445624e-02, -1.40817231e-02,\n...\n 8.81703943e-03, 4.24629413e-02, -2.32944116e-02, -2.05193572e-02,\n -3.22035812e-02, 2.81896023e-03, 3.85326855e-02, 3.64372656e-02,\n -1.65050142e-02, -4.26847413e-02, 2.02664156e-02, -1.72684863e-02])]\nDim 1024 (1024,)\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n model_name=\"jina-embeddings-v3\", # Defaults to `jina-embeddings-v3`\n api_key=JINA_API_KEY, # Provide your Jina AI API key\n task=\"text-matching\",\n dimensions=1024, # Defaults to 1024\n)\n\ntexts = [\n \"Follow the white rabbit.\", # English\n \"Sigue al conejo blanco.\", # Spanish\n \"Suis le lapin blanc.\", # French\n \"跟着白兔走。\", # Chinese\n \"اتبع الأرنب الأبيض.\", # Arabic\n \"Folge dem weißen Kaninchen.\", # German\n]\n\nembeddings = jina_ef(texts)\n\n# Compute similarities\nprint(embeddings[0] @ embeddings[1].T)\n"],"headingContent":"Jina AI","anchorList":[{"label":"Jina AI","href":"Jina-AI","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/embeddings/embed-with-jina.md b/localization/v2.4.x/site/fr/embeddings/embed-with-jina.md index 16b270748..be4bbab97 100644 --- a/localization/v2.4.x/site/fr/embeddings/embed-with-jina.md +++ b/localization/v2.4.x/site/fr/embeddings/embed-with-jina.md @@ -32,19 +32,36 @@ pip install "pymilvus[model]"
      from pymilvus.model.dense import JinaEmbeddingFunction
       
       jina_ef = JinaEmbeddingFunction(
      -    model_name="jina-embeddings-v2-base-en", # Defaults to `jina-embeddings-v2-base-en`
      -    api_key=JINAAI_API_KEY # Provide your Jina AI API key
      +    model_name="jina-embeddings-v3", # Defaults to `jina-embeddings-v3`
      +    api_key=JINAAI_API_KEY, # Provide your Jina AI API key
      +    task="retrieval.passage", # Specify the task
      +    dimensions=1024, # Defaults to 1024
       )
       

Parameters:

• model_name (string)

        -

  The name of the Jina AI embedding model to use for encoding. You can specify any available Jina AI embedding model name, for example, jina-embeddings-v2-base-en, jina-embeddings-v2-small-en, etc. If you leave this parameter unspecified, jina-embeddings-v2-base-en will be used. For a list of available models, refer to Jina Embeddings.

      • +

  The name of the Jina AI embedding model to use for encoding. You can specify any available Jina AI embedding model name, for example, jina-embeddings-v3, jina-embeddings-v2-base-en, etc. If you leave this parameter unspecified, jina-embeddings-v3 will be used. For a list of available models, refer to Jina Embeddings.

• api_key (string)

        -

  The API key for accessing the Jina AI API.

      • +

  The API key for accessing the Jina AI API.

        +
• task (string)

        +

  The type of input passed to the model. Required for embedding models v3 and higher.

        +
          +
        • "retrieval.passage": Utilisé pour encoder des documents volumineux dans les tâches de recherche au moment de l'indexation.
        • +
        • "retrieval.query": Utilisé pour coder les requêtes ou les questions des utilisateurs dans les tâches de recherche.
        • +
        • "classification": Utilisé pour coder des textes dans le cadre de tâches de classification de textes.
        • +
        • "text-matching": Utilisé pour coder du texte pour la mise en correspondance de similarités, comme la mesure de la similarité entre deux phrases.
        • +
        • "clustering": Utilisé pour les tâches de regroupement ou de reclassement.
        • +
      • +
      • dimensions (int)

        +

  The number of dimensions the resulting output embeddings should have. Defaults to 1024. Supported only for embedding models v3 and higher.

      • +
      • late_chunking (bool)

        +

  This parameter controls whether to use the new chunking method that Jina AI introduced last month for encoding a batch of sentences. Defaults to False. When set to True, the Jina AI API concatenates all sentences in the input field and feeds them to the model as a single string. Internally, the model embeds this long concatenated string and then performs late chunking, returning a list of embeddings that matches the size of the input list (a combined sketch of dimensions and late_chunking follows this list).
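The two optional parameters above can be combined. The following is a minimal sketch, not from the original page: it assumes JINAAI_API_KEY is defined as in the earlier examples, and dimensions=512 is a hypothetical reduced size — check the Jina AI model documentation for the values the model actually supports:

```python
from pymilvus.model.dense import JinaEmbeddingFunction

jina_ef = JinaEmbeddingFunction(
    model_name="jina-embeddings-v3",
    api_key=JINAAI_API_KEY,      # assumed to be defined, as above
    task="retrieval.passage",
    dimensions=512,              # hypothetical reduced output size
    late_chunking=True,          # embed the batch as one concatenated string
)

# Chunks of a single document, so late chunking can use cross-chunk context.
chunks = [
    "Milvus is an open-source vector database.",
    "It stores and searches embeddings at scale.",
]

embeddings = jina_ef(chunks)
print(jina_ef.dim, embeddings[0].shape)  # expected: 512 (512,)
```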

      -

To create embeddings for documents, use the encode_documents() method:

      -
      docs = [
      +

To create embeddings for documents, use the encode_documents() method. This method is designed for document embeddings in asymmetric retrieval tasks, such as indexing documents for search or recommendation tasks. This method uses retrieval.passage as the task.

      +
      
      +docs = [
           "Artificial intelligence was founded as an academic discipline in 1956.",
           "Alan Turing was the first person to conduct substantial research in AI.",
           "Born in Maida Vale, London, Turing was raised in southern England.",
      @@ -58,17 +75,17 @@ docs_embeddings = jina_ef.encode_documents(docs)
       print("Dim:", jina_ef.dim, docs_embeddings[0].shape)
       

The expected output is similar to the following:

      -
      Embeddings: [array([-4.88487840e-01, -4.28095880e-01,  4.90086500e-01, -1.63274320e-01,
      -        3.43437800e-01,  3.21476880e-01,  2.83173790e-02, -3.10403670e-01,
      -        4.76985040e-01, -1.77410420e-01, -3.84803180e-01, -2.19224200e-01,
      -       -2.52898000e-01,  6.62411900e-02, -8.58173100e-01,  1.05221800e+00,
      +
      Embeddings: [array([9.80641991e-02, -8.51697400e-02,  7.36531913e-02,  1.42558888e-02,
      +       -2.23589484e-02,  1.68494112e-03, -3.50753777e-02, -3.11530549e-02,
      +       -3.26012149e-02,  5.04568312e-03,  3.69836427e-02,  3.48948985e-02,
      +        8.19722563e-03,  5.88679723e-02, -6.71099266e-03, -1.82369724e-02,
       ...
      -       -2.04462400e-01,  7.14229800e-01, -1.66823000e-01,  8.72551440e-01,
      -        5.53560140e-01,  8.92506300e-01, -2.39408610e-01, -4.22413560e-01,
      -       -3.19551350e-01,  5.59153850e-01,  2.44338100e-01, -8.60452100e-01])]
      -Dim: 768 (768,)
      +        2.48654783e-02,  3.43279652e-02, -1.66154150e-02, -9.90478322e-03,
      +       -2.96043139e-03, -8.57473817e-03, -7.39028037e-04,  6.25024503e-03,
      +       -1.08831357e-02, -4.00776342e-02,  3.25369164e-02, -1.42691191e-03])]
      +Dim: 1024 (1024,)
       
      -

To create embeddings for queries, use the encode_queries() method:

      +

To create embeddings for queries, use the encode_queries() method. This method is designed for query embeddings in asymmetric retrieval tasks, such as search queries or questions. This method uses retrieval.query as the task.

      queries = ["When was artificial intelligence founded", 
                  "Where was Alan Turing born?"]
       
      @@ -78,13 +95,37 @@ query_embeddings = jina_ef.encode_queries(queries)
       print("Dim", jina_ef.dim, query_embeddings[0].shape)
       

The expected output is similar to the following:

      -
      Embeddings: [array([-5.99164660e-01, -3.49827350e-01,  8.22405160e-01, -1.18632730e-01,
      -        5.78107540e-01,  1.09789170e-01,  2.91604200e-01, -3.29306450e-01,
      -        2.93779640e-01, -2.17880800e-01, -6.84535440e-01, -3.79752000e-01,
      -       -3.47541800e-01,  9.20846100e-02, -6.13804400e-01,  6.31312800e-01,
      +
      Embeddings: [array([8.79201014e-03,  1.47551354e-02,  4.02722731e-02, -2.52991207e-02,
      +        1.12719582e-02,  3.75947170e-02,  3.97946090e-02, -7.36681819e-02,
      +       -2.17952449e-02, -1.16298944e-02, -6.83426252e-03, -5.12507409e-02,
      +        5.26071340e-02,  6.75181448e-02,  3.92445624e-02, -1.40817231e-02,
       ...
      -       -1.84993740e-02,  9.38629150e-01,  2.74858470e-02,  1.09396360e+00,
      -        3.96270750e-01,  7.44445800e-01, -1.95404050e-01, -6.08383200e-01,
      -       -3.75076300e-01,  3.87512200e-01,  8.11889650e-01, -3.76407620e-01])]
      -Dim 768 (768,)
      +        8.81703943e-03,  4.24629413e-02, -2.32944116e-02, -2.05193572e-02,
      +       -3.22035812e-02,  2.81896023e-03,  3.85326855e-02,  3.64372656e-02,
      +       -1.65050142e-02, -4.26847413e-02,  2.02664156e-02, -1.72684863e-02])]
      +Dim 1024 (1024,)
      +
      +

To create embeddings of inputs for similarity matching tasks (such as STS or symmetric retrieval tasks), text classification, clustering, or reranking tasks, use the appropriate task parameter value when instantiating the JinaEmbeddingFunction class.

      +
      from pymilvus.model.dense import JinaEmbeddingFunction
      +
      +jina_ef = JinaEmbeddingFunction(
      +    model_name="jina-embeddings-v3", # Defaults to `jina-embeddings-v3`
      +    api_key=JINA_API_KEY, # Provide your Jina AI API key
      +    task="text-matching",
      +    dimensions=1024, # Defaults to 1024
      +)
      +
      +texts = [
      +    "Follow the white rabbit.",  # English
      +    "Sigue al conejo blanco.",  # Spanish
      +    "Suis le lapin blanc.",  # French
      +    "跟着白兔走。",  # Chinese
      +    "اتبع الأرنب الأبيض.",  # Arabic
      +    "Folge dem weißen Kaninchen.",  # German
      +]
      +
      +embeddings = jina_ef(texts)
      +
      +# Compute similarities
      +print(embeddings[0] @ embeddings[1].T)
       
      diff --git a/localization/v2.4.x/site/fr/embeddings/embed-with-voyage.json b/localization/v2.4.x/site/fr/embeddings/embed-with-voyage.json index c5fe18415..ba7547304 100644 --- a/localization/v2.4.x/site/fr/embeddings/embed-with-voyage.json +++ b/localization/v2.4.x/site/fr/embeddings/embed-with-voyage.json @@ -1 +1 @@ -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import VoyageEmbeddingFunction\n\nvoyage_ef = VoyageEmbeddingFunction(\n model_name=\"voyage-lite-02-instruct\", # Defaults to `voyage-2`\n api_key=VOYAGE_API_KEY # Provide your Voyage API key\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = voyage_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", voyage_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 0.02582654, -0.00907086, -0.04604037, ..., -0.01227521,\n 0.04420955, -0.00038829]), array([ 0.03844212, -0.01597065, -0.03728884, ..., -0.02118733,\n 0.03349845, 0.0065346 ]), array([ 0.05143557, -0.01096631, -0.02690451, ..., -0.02416254,\n 0.07658645, 0.03064499])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = voyage_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", voyage_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([ 0.01733501, -0.0230672 , -0.05208827, ..., -0.00957995,\n 0.04493361, 0.01485138]), array([ 0.05937521, -0.00729363, -0.02184347, ..., -0.02107683,\n 0.05706626, 0.0263358 ])]\nDim 1024 (1024,)\n"],"headingContent":"","anchorList":[{"label":"Voyage","href":"Voyage","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import VoyageEmbeddingFunction\n\nvoyage_ef = VoyageEmbeddingFunction(\n model_name=\"voyage-3\", # Defaults to `voyage-3`\n api_key=VOYAGE_API_KEY # Provide your Voyage API key\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = voyage_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", voyage_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 0.02582654, -0.00907086, -0.04604037, ..., -0.01227521,\n 0.04420955, -0.00038829]), array([ 0.03844212, -0.01597065, -0.03728884, ..., -0.02118733,\n 0.03349845, 0.0065346 ]), array([ 0.05143557, -0.01096631, -0.02690451, ..., -0.02416254,\n 0.07658645, 0.03064499])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = voyage_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", voyage_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([ 0.01733501, -0.0230672 , -0.05208827, ..., -0.00957995,\n 0.04493361, 0.01485138]), array([ 0.05937521, -0.00729363, -0.02184347, ..., -0.02107683,\n 0.05706626, 0.0263358 ])]\nDim 1024 
(1024,)\n"],"headingContent":"Voyage","anchorList":[{"label":"Voyage","href":"Voyage","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/embeddings/embed-with-voyage.md b/localization/v2.4.x/site/fr/embeddings/embed-with-voyage.md index 7273d0fe6..3ff592eee 100644 --- a/localization/v2.4.x/site/fr/embeddings/embed-with-voyage.md +++ b/localization/v2.4.x/site/fr/embeddings/embed-with-voyage.md @@ -30,13 +30,13 @@ pip install "pymilvus[model]"
      from pymilvus.model.dense import VoyageEmbeddingFunction
       
       voyage_ef = VoyageEmbeddingFunction(
      -    model_name="voyage-lite-02-instruct", # Defaults to `voyage-2`
      +    model_name="voyage-3", # Defaults to `voyage-3`
           api_key=VOYAGE_API_KEY # Provide your Voyage API key
       )
       

Parameters:

-
• model_name (string) The name of the Voyage model to use for encoding. You can specify any available Voyage model name, for example, voyage-law-2, voyage-code-2, etc. If this parameter is not specified, voyage-2 will be used. For a list of available models, refer to the official Voyage documentation.
+
• model_name (string) The name of the Voyage model to use for encoding. You can specify any available Voyage model name, for example, voyage-3-lite, voyage-finance-2, etc. If this parameter is not specified, voyage-3 will be used. For a list of available models, refer to the official Voyage documentation.
• api_key (string) The API key for accessing the Voyage API. For information on how to create an API key, refer to API Key and Python Client.

To create embeddings for documents, use the encode_documents() method:
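For reference, here is a minimal sketch of that call, mirroring the codeList above (VOYAGE_API_KEY is a placeholder you must replace with your own key):

from pymilvus.model.dense import VoyageEmbeddingFunction

VOYAGE_API_KEY = "your-voyage-api-key"  # placeholder, replace with your own key

voyage_ef = VoyageEmbeddingFunction(model_name="voyage-3", api_key=VOYAGE_API_KEY)

docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
]
docs_embeddings = voyage_ef.encode_documents(docs)

# Each document becomes a dense vector; voyage-3 produces 1024 dimensions.
print("Dim:", voyage_ef.dim, docs_embeddings[0].shape)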

      diff --git a/localization/v2.4.x/site/fr/embeddings/embeddings.json b/localization/v2.4.x/site/fr/embeddings/embeddings.json index b1c85b8f8..30b360267 100644 --- a/localization/v2.4.x/site/fr/embeddings/embeddings.json +++ b/localization/v2.4.x/site/fr/embeddings/embeddings.json @@ -1 +1 @@ -{"codeList":["pip install \"pymilvus[model]\"\n","from pymilvus import model\n\n# This will download \"all-MiniLM-L6-v2\", a light weight model.\nef = model.DefaultEmbeddingFunction()\n\n# Data from which embeddings are to be generated \ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nembeddings = ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", ef.dim, embeddings[0].shape)\n","Embeddings: [array([-3.09392996e-02, -1.80662833e-02, 1.34775648e-02, 2.77156215e-02,\n -4.86349640e-03, -3.12581174e-02, -3.55921760e-02, 5.76934684e-03,\n 2.80773244e-03, 1.35783911e-01, 3.59678417e-02, 6.17732145e-02,\n...\n -4.61330153e-02, -4.85207550e-02, 3.13997865e-02, 7.82178566e-02,\n -4.75336798e-02, 5.21207601e-02, 9.04406682e-02, -5.36676683e-02],\n dtype=float32)]\nDim: 384 (384,)\n","from pymilvus.model.hybrid import BGEM3EmbeddingFunction\nfrom pymilvus import (\n utility,\n FieldSchema, CollectionSchema, DataType,\n Collection, AnnSearchRequest, RRFRanker, connections,\n)\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Who started AI research?\"\n\n# BGE-M3 model can embed texts as dense and sparse vectors.\n# It is included in the optional `model` module in pymilvus, to install it,\n# simply run \"pip install pymilvus[model]\".\n\nbge_m3_ef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\n\ndocs_embeddings = bge_m3_ef(docs)\nquery_embeddings = bge_m3_ef([query])\n","from pymilvus.model.sparse import BM25EmbeddingFunction\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Where was Turing born?\"\nbm25_ef = BM25EmbeddingFunction()\n\n# 2. fit the corpus to get BM25 model parameters on your documents.\nbm25_ef.fit(docs)\n\n# 3. store the fitted parameters to disk to expedite future processing.\nbm25_ef.save(\"bm25_params.json\")\n\n# 4. 
load the saved params\nnew_bm25_ef = BM25EmbeddingFunction()\nnew_bm25_ef.load(\"bm25_params.json\")\n\ndocs_embeddings = new_bm25_ef.encode_documents(docs)\nquery_embeddings = new_bm25_ef.encode_queries([query])\nprint(\"Dim:\", new_bm25_ef.dim, list(docs_embeddings)[0].shape)\n","Dim: 21 (1, 21)\n"],"headingContent":"","anchorList":[{"label":"Vue d'ensemble de l'intégration","href":"Embedding-Overview","type":1,"isActive":false},{"label":"Exemple 1 : utiliser la fonction d'intégration par défaut pour générer des vecteurs denses","href":"Example-1-Use-default-embedding-function-to-generate-dense-vectors","type":2,"isActive":false},{"label":"Exemple 2 : Générer des vecteurs denses et épars en un seul appel avec le modèle BGE M3","href":"Example-2-Generate-dense-and-sparse-vectors-in-one-call-with-BGE-M3-model","type":2,"isActive":false},{"label":"Exemple 3 : Générer des vecteurs épars à l'aide du modèle BM25","href":"Example-3-Generate--sparse-vectors-using-BM25-model","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["pip install \"pymilvus[model]\"\n","from pymilvus import model\n\n# This will download \"all-MiniLM-L6-v2\", a light weight model.\nef = model.DefaultEmbeddingFunction()\n\n# Data from which embeddings are to be generated \ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nembeddings = ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", ef.dim, embeddings[0].shape)\n","Embeddings: [array([-3.09392996e-02, -1.80662833e-02, 1.34775648e-02, 2.77156215e-02,\n -4.86349640e-03, -3.12581174e-02, -3.55921760e-02, 5.76934684e-03,\n 2.80773244e-03, 1.35783911e-01, 3.59678417e-02, 6.17732145e-02,\n...\n -4.61330153e-02, -4.85207550e-02, 3.13997865e-02, 7.82178566e-02,\n -4.75336798e-02, 5.21207601e-02, 9.04406682e-02, -5.36676683e-02],\n dtype=float32)]\nDim: 384 (384,)\n","from pymilvus.model.hybrid import BGEM3EmbeddingFunction\nfrom pymilvus import (\n utility,\n FieldSchema, CollectionSchema, DataType,\n Collection, AnnSearchRequest, RRFRanker, connections,\n)\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Who started AI research?\"\n\n# BGE-M3 model can embed texts as dense and sparse vectors.\n# It is included in the optional `model` module in pymilvus, to install it,\n# simply run \"pip install pymilvus[model]\".\n\nbge_m3_ef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\n\ndocs_embeddings = bge_m3_ef(docs)\nquery_embeddings = bge_m3_ef([query])\n","from pymilvus.model.sparse import BM25EmbeddingFunction\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Where was Turing born?\"\nbm25_ef = BM25EmbeddingFunction()\n\n# 2. fit the corpus to get BM25 model parameters on your documents.\nbm25_ef.fit(docs)\n\n# 3. 
store the fitted parameters to disk to expedite future processing.\nbm25_ef.save(\"bm25_params.json\")\n\n# 4. load the saved params\nnew_bm25_ef = BM25EmbeddingFunction()\nnew_bm25_ef.load(\"bm25_params.json\")\n\ndocs_embeddings = new_bm25_ef.encode_documents(docs)\nquery_embeddings = new_bm25_ef.encode_queries([query])\nprint(\"Dim:\", new_bm25_ef.dim, list(docs_embeddings)[0].shape)\n","Dim: 21 (1, 21)\n"],"headingContent":"Embedding Overview","anchorList":[{"label":"Vue d'ensemble de l'intégration","href":"Embedding-Overview","type":1,"isActive":false},{"label":"Exemple 1 : Utiliser la fonction d'intégration par défaut pour générer des vecteurs denses","href":"Example-1-Use-default-embedding-function-to-generate-dense-vectors","type":2,"isActive":false},{"label":"Exemple 2 : Générer des vecteurs denses et épars en un seul appel avec le modèle BGE M3","href":"Example-2-Generate-dense-and-sparse-vectors-in-one-call-with-BGE-M3-model","type":2,"isActive":false},{"label":"Exemple 3 : Générer des vecteurs épars à l'aide du modèle BM25","href":"Example-3-Generate--sparse-vectors-using-BM25-model","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/embeddings/embeddings.md b/localization/v2.4.x/site/fr/embeddings/embeddings.md index 24b6d4ce7..0e12aabc4 100644 --- a/localization/v2.4.x/site/fr/embeddings/embeddings.md +++ b/localization/v2.4.x/site/fr/embeddings/embeddings.md @@ -23,7 +23,7 @@ title: Vue d'ensemble de l'intégration

There are two main categories of embeddings, each producing a different type of vector:

• Dense embedding: Most embedding models represent information as a floating-point vector of hundreds or thousands of dimensions. The output is called a "dense" vector because most dimensions have non-zero values. For instance, the popular open-source embedding model BAAI/bge-base-en-v1.5 outputs vectors of 768 floating-point numbers (a 768-dimension float vector).

-
• Sparse embedding: In contrast, the output vectors of sparse embeddings have most dimensions equal to zero, namely "sparse" vectors. These vectors often have much higher dimensions (tens of thousands or more), which are determined by the size of the token vocabulary. Sparse vectors can be generated by deep neural networks or by statistical analysis of text corpora. Due to their interpretability and better out-of-domain generalization capabilities, sparse embeddings are increasingly adopted by developers as a complement to dense embeddings.
+
• Sparse embedding: In contrast, the output vectors of sparse embeddings have most dimensions equal to zero, namely "sparse" vectors. These vectors often have much higher dimensions (tens of thousands or more) determined by the size of the token vocabulary. Sparse vectors can be generated by deep neural networks or by statistical analysis of text corpora. Due to their interpretability and better out-of-domain generalization capabilities, sparse embeddings are increasingly adopted by developers as a complement to dense embeddings.

Milvus is a vector database designed for vector data management, storage, and retrieval. By integrating mainstream embedding and reranking models, you can easily transform original text into searchable vectors, or rerank results using powerful models to achieve more accurate results for RAG. This integration simplifies text transformation and eliminates the need for additional embedding or reranking components, streamlining RAG development and validation.

To create embeddings in action, see Using PyMilvus's Model to Generate Text Embeddings.

@@ -40,9 +40,13 @@ title: Vue d'ensemble de l'intégration
 voyageai   | Dense  | API
 jina       | Dense  | API
 cohere     | Dense  | API
+Instructor | Dense  | Open-sourced
+Mistral AI | Dense  | API
+Nomic      | Dense  | API
+mGTE       | Hybrid | Open-sourced
-

Example 1: Use the default embedding function to generate dense vectors

To use embedding functions with Milvus, first install the PyMilvus client library with the model subpackage that wraps all the utilities for embedding generation.

      +

To use embedding functions with Milvus, first install the PyMilvus client library with the model subpackage that wraps all the utilities for embedding generation.

      pip install "pymilvus[model]"
       

The model subpackage supports various embedding models, from OpenAI, Sentence Transformers, BGE M3, BM25, to SPLADE pretrained models. For simplicity, this example uses the DefaultEmbeddingFunction, which is an all-MiniLM-L6-v2 sentence transformer model. The model is about 70 MB and will be downloaded on first use:
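A minimal sketch of that first use, mirroring the codeList above (the model is downloaded and cached automatically):

from pymilvus import model

# Downloads "all-MiniLM-L6-v2" (~70 MB) on first use and caches it locally.
ef = model.DefaultEmbeddingFunction()

docs = ["Artificial intelligence was founded as an academic discipline in 1956."]
embeddings = ef.encode_documents(docs)

# all-MiniLM-L6-v2 outputs 384-dimensional dense vectors.
print("Dim:", ef.dim, embeddings[0].shape)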

      @@ -108,7 +112,7 @@ Dim: 384 (384,

In this example, we use the BGE M3 hybrid model to embed text as both dense and sparse vectors and use them to retrieve relevant documents. The overall steps are as follows:

1. Embed the text as dense and sparse vectors using the BGE-M3 model (see the sketch after this list);

-
2. Create a Milvus collection to store the dense and sparse vectors;

+
2. Set up a Milvus collection to store the dense and sparse vectors;

3. Insert the data into Milvus;

4. Search and inspect the result.
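Step 1 as a minimal sketch, grounded in the snippet above (the "dense"/"sparse" keys of the returned dict follow the pymilvus model package; treat them as an assumption if your version differs):

from pymilvus.model.hybrid import BGEM3EmbeddingFunction

docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
]

# BGE-M3 embeds text as dense and sparse vectors in a single call.
bge_m3_ef = BGEM3EmbeddingFunction(use_fp16=False, device="cpu")
docs_embeddings = bge_m3_ef(docs)

# The result holds both vector types, keyed by representation.
print(docs_embeddings["dense"][0].shape)  # dense vector, e.g. (1024,)
print(docs_embeddings["sparse"].shape)    # sparse matrix over the token vocabulary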

@@ -153,7 +157,7 @@ query_embeddings = bge_m3_ef([query])
-

BM25 is a well-known method that uses word occurrence frequencies to determine the relevance between queries and documents. In this example, we will show how to use BM25EmbeddingFunction to generate sparse embedding vectors for queries and documents.

      +

BM25 is a well-known method that uses word occurrence frequencies to determine the relevance between queries and documents. In this example, we will show how to use BM25EmbeddingFunction to generate sparse embeddings for queries and documents.

First, import the BM25EmbeddingFunction class.

      from pymilvus.model.sparse import BM25EmbeddingFunction
       
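Then, as a minimal sketch grounded in the codeList above, fit the model on your corpus and encode documents and queries:

from pymilvus.model.sparse import BM25EmbeddingFunction

docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Born in Maida Vale, London, Turing was raised in southern England.",
]
query = "Where was Turing born?"

bm25_ef = BM25EmbeddingFunction()

# BM25 needs corpus statistics (IDF, average document length) before encoding.
bm25_ef.fit(docs)

docs_embeddings = bm25_ef.encode_documents(docs)
query_embeddings = bm25_ef.encode_queries([query])
print("Dim:", bm25_ef.dim, list(docs_embeddings)[0].shape)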
      diff --git a/localization/v2.4.x/site/fr/faq/operational_faq.json b/localization/v2.4.x/site/fr/faq/operational_faq.json index 9f73040e0..88509154a 100644 --- a/localization/v2.4.x/site/fr/faq/operational_faq.json +++ b/localization/v2.4.x/site/fr/faq/operational_faq.json @@ -1 +1 @@ -{"codeList":["{\n \"registry-mirrors\": [\"https://registry.docker-cn.com\"]\n}\n","$ lscpu | grep -e sse4_2 -e avx -e avx2 -e avx512\n","pip install pymilvus>=2.4.2\n"],"headingContent":"","anchorList":[{"label":"FAQ opérationnelle","href":"Operational-FAQ","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":["{\n \"registry-mirrors\": [\"https://registry.docker-cn.com\"]\n}\n","$ lscpu | grep -e sse4_2 -e avx -e avx2 -e avx512\n","pip install pymilvus>=2.4.2\n","# Python Example: result of len() str cannot be used as \"max-length\" in Milvus \n>>> s = \"你好,世界!\"\n>>> len(s) # Number of characters of s.\n6\n>>> len(bytes(s, \"utf-8\")) # Size in bytes of s, max-length in Milvus.\n18\n"],"headingContent":"Operational FAQ","anchorList":[{"label":"FAQ opérationnelle","href":"Operational-FAQ","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/faq/operational_faq.md b/localization/v2.4.x/site/fr/faq/operational_faq.md index f2b054b9b..d6033f082 100644 --- a/localization/v2.4.x/site/fr/faq/operational_faq.md +++ b/localization/v2.4.x/site/fr/faq/operational_faq.md @@ -48,12 +48,12 @@ title: FAQ opérationnelle
      $ lscpu | grep -e sse4_2 -e avx -e avx2 -e avx512
       

Why does Milvus return illegal instruction during startup?

Milvus requires your CPU to support a SIMD instruction set: SSE4.2, AVX, AVX2, or AVX512. The CPU must support at least one of these for Milvus to function normally. An illegal instruction error returned during startup suggests that your CPU does not support any of the above four instruction sets.

      -

See CPU support for SIMD instruction sets.

      +

See CPU support for SIMD instruction sets.

Can I install Milvus on Windows?

Yes, you can install Milvus on Windows either by compiling from source code or from a binary package.

See Run Milvus on Windows to learn how to install Milvus on Windows.

I got an error when installing pymilvus on Windows. What should I do?

It is not recommended to install PyMilvus on Windows. But if you have to install PyMilvus on Windows and got an error, try installing it in a Conda environment. See Install Milvus SDK for more information about how to install PyMilvus in a Conda environment.

Can I deploy Milvus when disconnected from the Internet?

Yes, you can install Milvus in an offline environment. See Install Milvus Offline for more information.

      -

Where can I find the logs generated by Milvus?

Milvus logs are printed to stdout (standard output) and stderr (standard error) by default, but we highly recommend redirecting your log to a persistent volume in production. To do so, update log.file.rootPath in milvus.yaml. And if you deploy Milvus with the milvus-helm chart, you also need to enable log persistence via --set log.persistence.enabled=true.

+

Where can I find the logs generated by Milvus?

Milvus logs are printed to stdout (standard output) and stderr (standard error) by default, but we highly recommend redirecting your log to a persistent volume in production. To do so, update log.file.rootPath in milvus.yaml. And if you deploy Milvus with the milvus-helm chart, you also need to enable log persistence via --set log.persistence.enabled=true.

If you didn't change the config, using kubectl logs <pod-name> or docker logs CONTAINER can also help you find the logs.

Can I create an index for a segment before inserting data into it?

Yes, you can. But we recommend inserting data in batches, each of which should not exceed 256 MB, before indexing each segment.

Can I share an etcd instance among multiple Milvus instances?

Yes, you can share an etcd instance among multiple Milvus instances. To do so, change etcd.rootPath to a separate value for each Milvus instance in its configuration file before starting them.

      @@ -74,6 +74,31 @@ title: FAQ opérationnelle
• Query (exact match): Milvus selects the latest entity with the matching primary key. ANN search: Milvus selects the entity with the highest similarity score, even if entities share the same PK. This prioritization can result in fewer unique results than the limit if your collection has many duplicate primary keys.

• Insufficient matches: Your search filtering expressions might be too strict, reducing the number of entities that meet the similarity threshold. If the conditions set for the search are too restrictive, not enough entities match, leading to fewer results than expected.

    +

    MilvusClient("milvus_demo.db") gives an error: ModuleNotFoundError: No module named 'milvus_lite'. Quelle est la cause de cette erreur et comment la résoudre ?

This error occurs when you try to use Milvus Lite on a Windows platform. Milvus Lite is primarily designed for Linux environments and may not have native support for Windows.

    +

The solution is to use a Linux environment:

+
• Use a Linux-based operating system or virtual machine to run Milvus Lite.
• This approach will ensure compatibility with the library's dependencies and functionality.
+

What are "length exceeds max length" errors in Milvus, and how to understand and address them?

"Length exceeds max length" errors in Milvus occur when the size of a data element surpasses the maximum allowed size for a collection or field. Here are some examples and explanations:

+
• JSON field error: <MilvusException: (code=1100, message=the length (398324) of json field (metadata) exceeds max length (65536): expected=valid length json string, actual=length exceeds max length: invalid parameter)>
• String length error: <ParamError: (code=1, message=invalid input, length of string exceeds max length. length: 74238, max length: 60535)>
• VarChar field error: <MilvusException: (code=1100, message=the length (60540) of 0th VarChar paragraph exceeds max length (0)%!(EXTRA int64=60535): invalid parameter)>
+

To understand and address these errors:

+
• Understand that len(str) in Python represents the number of characters, not the size in bytes.
• For string-based data types like VARCHAR and JSON, use len(bytes(str, encoding='utf-8')) to determine the actual size in bytes, which is what Milvus uses for "max-length".
+

Example in Python:

    +
    # Python Example: result of len() str cannot be used as "max-length" in Milvus 
    +>>> s = "你好,世界!"
    +>>> len(s) # Number of characters of s.
    +6
    +>>> len(bytes(s, "utf-8")) # Size in bytes of s, max-length in Milvus.
    +18
    +

Still have questions?

You can:

• Check out Milvus on GitHub. Feel free to ask questions, share ideas, and help others.
    • diff --git a/localization/v2.4.x/site/fr/faq/performance_faq.json b/localization/v2.4.x/site/fr/faq/performance_faq.json index fdf9f7a92..e8ad39bac 100644 --- a/localization/v2.4.x/site/fr/faq/performance_faq.json +++ b/localization/v2.4.x/site/fr/faq/performance_faq.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"","anchorList":[{"label":"FAQ sur les performances","href":"Performance-FAQ","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Performance FAQ","anchorList":[{"label":"FAQ sur les performances","href":"Performance-FAQ","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/faq/performance_faq.md b/localization/v2.4.x/site/fr/faq/performance_faq.md index 15f5a990c..93652bf82 100644 --- a/localization/v2.4.x/site/fr/faq/performance_faq.md +++ b/localization/v2.4.x/site/fr/faq/performance_faq.md @@ -3,7 +3,7 @@ id: performance_faq.md summary: >- Réponses aux questions fréquemment posées sur les performances de recherche, l'amélioration des performances et d'autres problèmes liés aux performances. -title: FAQ sur la performance +title: FAQ sur les performances ---

Performance FAQ

    -

However, indexing a VARCHAR field does not accelerate the process:

+

However, indexing a VARCHAR field does not accelerate the process:

• Deleting by ID: when the VARCHAR field is the primary key.
• Unrelated expressions: when the VARCHAR field is not part of the delete expression.
    • diff --git a/localization/v2.4.x/site/fr/faq/product_faq.json b/localization/v2.4.x/site/fr/faq/product_faq.json index 3fae1c1c8..71514576b 100644 --- a/localization/v2.4.x/site/fr/faq/product_faq.json +++ b/localization/v2.4.x/site/fr/faq/product_faq.json @@ -1 +1 @@ -{"codeList":["60 * 2 * 4 + 40 * 1 * 12 = 960\n"],"headingContent":"","anchorList":[{"label":"FAQ sur les produits","href":"Product-FAQ","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":["60 * 2 * 4 + 40 * 1 * 12 = 960\n"],"headingContent":"Product FAQ","anchorList":[{"label":"FAQ sur les produits","href":"Product-FAQ","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/faq/product_faq.md b/localization/v2.4.x/site/fr/faq/product_faq.md index 4b1d52b25..f028851f5 100644 --- a/localization/v2.4.x/site/fr/faq/product_faq.md +++ b/localization/v2.4.x/site/fr/faq/product_faq.md @@ -28,13 +28,13 @@ title: FAQ sur les produits

What is the maximum dataset size Milvus can handle?

Theoretically, the maximum dataset size Milvus can handle is determined by the hardware it runs on, specifically system memory and storage:

• Milvus loads all specified collections and partitions into memory before running queries. Therefore, memory size determines the maximum amount of data Milvus can query.
-
• When new entities and collection-related schema (currently only MinIO is supported for data persistence) are added to Milvus, system storage determines the maximum allowable size of inserted data.
+
• When new entities and collection-related schemas (currently only MinIO is supported for data persistence) are added to Milvus, system storage determines the maximum allowable size of inserted data.

Where does Milvus store data?

Milvus deals with two types of data: inserted data and metadata.

Inserted data, including vector data, scalar data, and collection-specific schema, are stored in persistent storage as incremental logs. Milvus supports multiple object storage backends, including MinIO, AWS S3, Google Cloud Storage (GCS), Azure Blob Storage, Alibaba Cloud OSS, and Tencent Cloud Object Storage (COS).

Metadata is generated within Milvus. Each Milvus module has its own metadata, which is stored in etcd.

Why is there no vector data in etcd?

etcd stores the metadata of Milvus modules; MinIO stores the entities.

      -

Does Milvus support inserting and searching data simultaneously?

Yes. Insert operations and search operations are handled by two separate modules that are mutually independent. From the client's perspective, an insert operation is complete once the inserted data enters the message queue. However, inserted data are not searchable until they have been loaded into the query node. If the segment size does not reach the index-building threshold (512 MB by default), Milvus resorts to brute-force search and query performance may be diminished.

+

Does Milvus support inserting and searching data simultaneously?

Yes. Insert operations and search operations are handled by two separate modules that are mutually independent. From the client's perspective, an insert operation is complete once the inserted data enters the message queue. However, inserted data are not searchable until they are loaded into the query node. If the segment size does not reach the index-building threshold (512 MB by default), Milvus resorts to brute-force search and query performance may be diminished.

Can vectors with duplicate primary keys be inserted into Milvus?

Yes. Milvus does not check whether vector primary keys are duplicates.

When vectors with duplicate primary keys are inserted, does Milvus treat it as an update operation?

No. Milvus does not currently support update operations and does not check whether entity primary keys are duplicates. You are responsible for ensuring entity primary keys are unique, and if they are not, Milvus may contain multiple entities with duplicate primary keys.

If this occurs, which data copy will be returned when queried remains an unknown behavior. This limitation will be fixed in future releases.

      @@ -51,7 +51,7 @@ title: FAQ sur les produits

How are the FLAT and IVF_FLAT indexes different?

The IVF_FLAT index divides the vector space into list clusters. At the default list value of 16,384, Milvus compares the distances between the target vector and the centroids of all 16,384 clusters to return the probe nearest clusters. Milvus then compares the distances between the target vector and the vectors in the selected clusters to get the nearest vectors. Unlike IVF_FLAT, FLAT directly compares the distances between the target vector and every other vector.

When the total number of vectors approximately equals nlist, there is little difference between IVF_FLAT and FLAT in terms of computation requirements and search performance. However, as the number of vectors exceeds nlist by a factor of two or more, IVF_FLAT begins to exhibit performance advantages.

See Vector Index for more information.
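As an illustration, here is a minimal sketch, not from the FAQ itself: it assumes a local Milvus at the default port and a hypothetical existing collection "demo_collection" with a 768-dimensional "embedding" field; the nlist/nprobe values are illustrative only.

from pymilvus import connections, Collection

connections.connect(uri="http://localhost:19530")  # assumed local Milvus

collection = Collection("demo_collection")  # hypothetical existing collection
collection.create_index(
    field_name="embedding",
    index_params={
        "index_type": "IVF_FLAT",
        "metric_type": "L2",
        "params": {"nlist": 16384},  # number of clusters the space is split into
    },
)
collection.load()

query_vector = [0.0] * 768  # placeholder query vector of matching dimension
results = collection.search(
    data=[query_vector],
    anns_field="embedding",
    param={"metric_type": "L2", "params": {"nprobe": 16}},  # clusters probed
    limit=10,
)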

      -

How does Milvus flush data?

Milvus returns success when inserted data are loaded into the message queue. However, the data are not yet flushed to disk. Then Milvus' data node writes the data in the message queue to persistent storage as incremental logs. If flush() is called, the data node is forced to write all data in the message queue to persistent storage immediately.

+

How does Milvus flush data?

Milvus returns success when inserted data are loaded into the message queue. However, the data are not yet flushed to disk. Then Milvus' data node writes the data in the message queue to persistent storage as incremental logs. If flush() is called, the data node is forced to write all data in the message queue to persistent storage immediately.
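For instance, a minimal sketch assuming the same hypothetical collection as above (column-based insert shown):

from pymilvus import connections, Collection

connections.connect(uri="http://localhost:19530")  # assumed local Milvus

collection = Collection("demo_collection")  # hypothetical existing collection

# insert() returns once the data has entered the message queue...
collection.insert([[1], [[0.0] * 768]])  # columns: primary keys, vectors

# ...while flush() forces the data node to persist everything buffered so far.
# Milvus also flushes automatically, so frequent manual calls are unnecessary.
collection.flush()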

What is normalization? Why is normalization needed?

Normalization is the process of converting a vector so that its norm equals 1. If inner product is used to calculate vector similarity, vectors must be normalized. After normalization, inner product equals cosine similarity.

See Wikipedia for more information.
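A quick numeric illustration (a minimal numpy sketch, not tied to any Milvus API):

import numpy as np

v = np.random.rand(768).astype(np.float32)  # an arbitrary example vector
v_norm = v / np.linalg.norm(v)              # scale so that ||v_norm|| == 1

w = np.random.rand(768).astype(np.float32)
w_norm = w / np.linalg.norm(w)

# For vectors of norm 1, inner product and cosine similarity coincide.
cosine = np.dot(v, w) / (np.linalg.norm(v) * np.linalg.norm(w))
inner = np.dot(v_norm, w_norm)
print(np.isclose(cosine, inner))  # True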

Why do Euclidean distance (L2) and inner product (IP) return different results?

For normalized vectors, Euclidean distance (L2) is mathematically equivalent to inner product (IP). If these similarity metrics return different results, check whether your vectors are normalized.

      @@ -64,7 +64,7 @@ title: FAQ sur les produits

To avoid this, try increasing nprobe and reducing nlist and k.

See Vector Index for more information.

What is the maximum vector dimension Milvus supports?

Milvus can manage vectors with up to 32,768 dimensions by default. You can increase the value of Proxy.maxDimension to allow vectors of a larger dimension.

      -

Does Milvus support the Apple M1 CPU?

The current Milvus release does not support the Apple M1 CPU.

+

Does Milvus support the Apple M1 CPU?

The current Milvus release does not directly support the Apple M1 CPU. After Milvus 2.3, Milvus provides Docker images for the ARM64 architecture.

What data types does Milvus support in the primary key field?

In the current release, Milvus supports both INT64 and strings.

Is Milvus scalable?

Yes. You can deploy a Milvus cluster with multiple nodes via Helm Chart on Kubernetes. Refer to the Scale Guide for more instructions.

Is the query performed in memory? What are incremental data and historical data?

Yes. When a query request comes, Milvus searches both incremental data and historical data by loading them into memory. Incremental data are in growing segments, which are buffered in memory before they reach the threshold to be persisted in the storage engine, while historical data are from sealed segments stored in object storage. Incremental data and historical data together constitute the whole dataset to search.

      diff --git a/localization/v2.4.x/site/fr/getstarted/install_SDKs/install-java.json b/localization/v2.4.x/site/fr/getstarted/install_SDKs/install-java.json index c5e96905d..4a561fcfb 100644 --- a/localization/v2.4.x/site/fr/getstarted/install_SDKs/install-java.json +++ b/localization/v2.4.x/site/fr/getstarted/install_SDKs/install-java.json @@ -1 +1 @@ -{"codeList":["\n io.milvus\n milvus-sdk-java\n 2.4.3\n\n","implementation 'io.milvus:milvus-sdk-java:2.4.3'\n"],"headingContent":"","anchorList":[{"label":"Installation de Milvus Java SDK","href":"Install-Milvus-Java-SDK","type":1,"isActive":false},{"label":"Exigences","href":"Requirement","type":2,"isActive":false},{"label":"Installation de Milvus Java SDK","href":"Install-Milvus-Java-SDK","type":2,"isActive":false},{"label":"Prochaines étapes","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["\n io.milvus\n milvus-sdk-java\n 2.4.5\n\n","implementation 'io.milvus:milvus-sdk-java:2.4.5'\n"],"headingContent":"Install Milvus Java SDK","anchorList":[{"label":"Installation de Milvus Java SDK","href":"Install-Milvus-Java-SDK","type":1,"isActive":false},{"label":"Exigences","href":"Requirement","type":2,"isActive":false},{"label":"Installation de Milvus Java SDK","href":"Install-Milvus-Java-SDK","type":2,"isActive":false},{"label":"Prochaines étapes","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/getstarted/install_SDKs/install-java.md b/localization/v2.4.x/site/fr/getstarted/install_SDKs/install-java.md index 63cf2b1b6..0dc166886 100644 --- a/localization/v2.4.x/site/fr/getstarted/install_SDKs/install-java.md +++ b/localization/v2.4.x/site/fr/getstarted/install_SDKs/install-java.md @@ -3,7 +3,7 @@ id: install-java.md label: Install Java SDK related_key: SDK summary: Apprenez à installer le SDK Java de Milvus. -title: Installer le SDK Java Milvus +title: Installation de Milvus Java SDK ---

Install Milvus Java SDK

    • Gradle/Grails
    -
    implementation 'io.milvus:milvus-sdk-java:2.4.3'
    +
    implementation 'io.milvus:milvus-sdk-java:2.4.5'
     

What's next

PyMilvus is available in the Python Package Index.

It is recommended to install a PyMilvus version that matches the version of the Milvus server you installed. For more information, see the Release Notes.
    -
    $ python3 -m pip install pymilvus==2.4.5
    +
    $ python3 -m pip install pymilvus==2.4.8
     

Verify the installation

    Open In Colab

    +

    Open In Colab +GitHub Repository

Vectors, the output data format of neural network models, can effectively encode information and play a pivotal role in AI applications such as knowledge bases, semantic search, Retrieval-Augmented Generation (RAG), and more.

Milvus is an open-source vector database that suits AI applications of every size, from running a demo chatbot in a Jupyter notebook to building web-scale search that serves billions of users. In this guide, we will walk you through how to set up Milvus locally within minutes and use the Python client library to generate, store, and search vectors.

Install Milvus

By default, scalar fields are not indexed. If you need to perform metadata-filtered search over a large dataset, you can consider using a fixed schema and turning on indexing to improve search performance.
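As a sketch of what that could look like (the field name "subject" and the INVERTED index type are illustrative assumptions, not from this guide):

from pymilvus import MilvusClient

client = MilvusClient("milvus_demo.db")  # the same file-based client as above

# Index a hypothetical scalar field "subject" to speed up filtered search.
index_params = client.prepare_index_params()
index_params.add_index(field_name="subject", index_type="INVERTED")
client.create_index(collection_name="demo_collection", index_params=index_params)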

In addition to vector search, you can also perform other types of searches:

    -

Query

A query() is an operation that retrieves all entities matching a criterion, such as a filter expression or some IDs.

+

Query

A query() is an operation that retrieves all entities matching a criterion, such as a filter expression or some IDs.

For example, retrieving all entities whose scalar field has a particular value:

    res = client.query(
         collection_name="demo_collection",
    @@ -361,7 +363,7 @@ res = client.delete(
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

As all data in Milvus Lite is stored in a local file, you can load all data into memory even after the program terminates, by creating a MilvusClient with the existing file. For example, this will recover the collections from the "milvus_demo.db" file and continue writing data into it.

+

As all data in Milvus Lite is stored in a local file, you can load all data into memory even after the program terminates, by creating a MilvusClient with the existing file. For example, this will recover the collections from the "milvus_demo.db" file and continue writing data into it.

    from pymilvus import MilvusClient
     
     client = MilvusClient("milvus_demo.db")
    diff --git a/localization/v2.4.x/site/fr/getstarted/run-milvus-docker/install_standalone-docker-compose.json b/localization/v2.4.x/site/fr/getstarted/run-milvus-docker/install_standalone-docker-compose.json
    index e7fd5d260..e3a9d281c 100644
    --- a/localization/v2.4.x/site/fr/getstarted/run-milvus-docker/install_standalone-docker-compose.json
    +++ b/localization/v2.4.x/site/fr/getstarted/run-milvus-docker/install_standalone-docker-compose.json
    @@ -1 +1 @@
    -{"codeList":["# Download the configuration file\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml\n\n# Start Milvus\n$ sudo docker compose up -d\n\nCreating milvus-etcd  ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker compose ps\n\n      Name                     Command                  State                            Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd         etcd -advertise-client-url ...   Up             2379/tcp, 2380/tcp\nmilvus-minio        /usr/bin/docker-entrypoint ...   Up (healthy)   9000/tcp\nmilvus-standalone   /tini -- milvus run standalone   Up             0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","# Stop Milvus\n$ sudo docker compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n"],"headingContent":"","anchorList":[{"label":"Exécuter Milvus avec Docker Compose","href":"Run-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"Conditions préalables","href":"Prerequisites","type":2,"isActive":false},{"label":"Installer Milvus","href":"Install-Milvus","type":2,"isActive":false},{"label":"Prochaines étapes","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["# Download the configuration file\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml\n\n# Start Milvus\n$ sudo docker-compose up -d\n\nCreating milvus-etcd  ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker-compose ps\n\n      Name                     Command                  State                            Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd         etcd -advertise-client-url ...   Up             2379/tcp, 2380/tcp\nmilvus-minio        /usr/bin/docker-entrypoint ...   Up (healthy)   9000/tcp\nmilvus-standalone   /tini -- milvus run standalone   Up             0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","# Stop Milvus\n$ sudo docker-compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n"],"headingContent":"Run Milvus with Docker Compose","anchorList":[{"label":"Exécuter Milvus avec Docker Compose","href":"Run-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"Conditions préalables","href":"Prerequisites","type":2,"isActive":false},{"label":"Installer Milvus","href":"Install-Milvus","type":2,"isActive":false},{"label":"Prochaines étapes","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/fr/getstarted/run-milvus-docker/install_standalone-docker-compose.md b/localization/v2.4.x/site/fr/getstarted/run-milvus-docker/install_standalone-docker-compose.md
    index 5efa15872..831f61666 100644
    --- a/localization/v2.4.x/site/fr/getstarted/run-milvus-docker/install_standalone-docker-compose.md
    +++ b/localization/v2.4.x/site/fr/getstarted/run-milvus-docker/install_standalone-docker-compose.md
    @@ -57,10 +57,10 @@ title: Exécuter Milvus avec Docker Compose
           
         

Milvus provides a Docker Compose configuration file in the Milvus repository. To install Milvus using Docker Compose, just run the following command

    # Download the configuration file
    -$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml
    +$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml
     
     # Start Milvus
    -$ sudo docker compose up -d
    +$ sudo docker-compose up -d
     
     Creating milvus-etcd  ... done
     Creating milvus-minio ... done
    @@ -69,7 +69,7 @@ Creating milvus-standalone ... done
     
• If you failed to run the above command, please check whether your system has Docker Compose V1 installed. If this is the case, you are advised to migrate to Docker Compose V2 due to the notes on this page.
-
• If you run into any problems pulling the image, contact us at community@zilliz.com with details about the problem, and we will provide you with the necessary support.
+
• If you encounter any issues while pulling the image, contact us at community@zilliz.com with details about the problem, and we will provide you with the necessary support.

After Milvus starts,

    @@ -81,7 +81,7 @@ Creating milvus-standalone ... done

You can check whether the containers are up and running using the following command:

    -
    $ sudo docker compose ps
    +
    $ sudo docker-compose ps
     
           Name                     Command                  State                            Ports
     --------------------------------------------------------------------------------------------------------------------
    @@ -91,7 +91,7 @@ milvus-standalone   /tini -- milvus run standalone   Up             0.0.0.0:1953
     

You can stop and delete this container as follows

    # Stop Milvus
    -$ sudo docker compose down
    +$ sudo docker-compose down
     
     # Delete service data
     $ sudo rm -rf volumes
    diff --git a/localization/v2.4.x/site/fr/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json b/localization/v2.4.x/site/fr/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json
    index 344876fc3..37b6a8214 100644
    --- a/localization/v2.4.x/site/fr/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json
    +++ b/localization/v2.4.x/site/fr/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json
    @@ -1 +1 @@
    -{"codeList":["$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml\n","...\nstandalone:\n  ...\n  deploy:\n    resources:\n      reservations:\n        devices:\n          - driver: nvidia\n            capabilities: [\"gpu\"]\n            device_ids: [\"0\"]\n...\n","...\nstandalone:\n  ...\n  deploy:\n    resources:\n      reservations:\n        devices:\n          - driver: nvidia\n            capabilities: [\"gpu\"]\n            device_ids: ['0', '1']\n...\n","$ sudo docker compose up -d\n\nCreating milvus-etcd  ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker compose ps\n\n      Name                     Command                  State                            Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd         etcd -advertise-client-url ...   Up             2379/tcp, 2380/tcp\nmilvus-minio        /usr/bin/docker-entrypoint ...   Up (healthy)   9000/tcp\nmilvus-standalone   /tini -- milvus run standalone   Up             0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","$ CUDA_VISIBLE_DEVICES=0 ./milvus run standalone\n","$ CUDA_VISIBLE_DEVICES=0,1 ./milvus run standalone\n","# Stop Milvus\n$ sudo docker compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n","docker cp :/milvus/configs/milvus.yaml milvus.yaml\n","vim milvus.yaml\n","...\ngpu:\n  initMemSize: 0\n  maxMemSize: 0\n...\n","docker cp milvus.yaml :/milvus/configs/milvus.yaml\n","docker stop \ndocker start \n"],"headingContent":"","anchorList":[{"label":"Exécution de Milvus avec prise en charge du GPU à l'aide de Docker Compose","href":"Run-Milvus-with-GPU-Support-Using-Docker-Compose","type":1,"isActive":false},{"label":"Conditions préalables","href":"Prerequisites","type":2,"isActive":false},{"label":"Installer Milvus","href":"Install-Milvus","type":2,"isActive":false},{"label":"Configuration du pool de mémoire","href":"Configure-memory-pool","type":2,"isActive":false},{"label":"Prochaines étapes","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml\n","...\nstandalone:\n  ...\n  deploy:\n    resources:\n      reservations:\n        devices:\n          - driver: nvidia\n            capabilities: [\"gpu\"]\n            device_ids: [\"0\"]\n...\n","...\nstandalone:\n  ...\n  deploy:\n    resources:\n      reservations:\n        devices:\n          - driver: nvidia\n            capabilities: [\"gpu\"]\n            device_ids: ['0', '1']\n...\n","$ sudo docker compose up -d\n\nCreating milvus-etcd  ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker compose ps\n\n      Name                     Command                  State                            Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd         etcd -advertise-client-url ...   Up             2379/tcp, 2380/tcp\nmilvus-minio        /usr/bin/docker-entrypoint ...   Up (healthy)   9000/tcp\nmilvus-standalone   /tini -- milvus run standalone   Up             0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","$ CUDA_VISIBLE_DEVICES=0 ./milvus run standalone\n","$ CUDA_VISIBLE_DEVICES=0,1 ./milvus run standalone\n","# Stop Milvus\n$ sudo docker compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n","docker cp :/milvus/configs/milvus.yaml milvus.yaml\n","vim milvus.yaml\n","...\ngpu:\n  initMemSize: 0\n  maxMemSize: 0\n...\n","docker cp milvus.yaml :/milvus/configs/milvus.yaml\n","docker stop \ndocker start \n"],"headingContent":"Run Milvus with GPU Support Using Docker Compose","anchorList":[{"label":"Exécution de Milvus avec prise en charge du GPU à l'aide de Docker Compose","href":"Run-Milvus-with-GPU-Support-Using-Docker-Compose","type":1,"isActive":false},{"label":"Conditions préalables","href":"Prerequisites","type":2,"isActive":false},{"label":"Installer Milvus","href":"Install-Milvus","type":2,"isActive":false},{"label":"Configuration du pool de mémoire","href":"Configure-memory-pool","type":2,"isActive":false},{"label":"Prochaines étapes","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/fr/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md b/localization/v2.4.x/site/fr/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md
    index 5f4a35167..76f53ce39 100644
    --- a/localization/v2.4.x/site/fr/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md
    +++ b/localization/v2.4.x/site/fr/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md
    @@ -3,7 +3,7 @@ id: install_standalone-docker-compose-gpu.md
     label: Standalone (Docker Compose)
     related_key: Kubernetes
     summary: Découvrez comment installer le cluster Milvus sur Kubernetes.
    -title: Exécuter Milvus avec support GPU en utilisant Docker Compose
    +title: Exécution de Milvus avec prise en charge du GPU à l'aide de Docker Compose
     ---
     

 Exécution de Milvus avec prise en charge du GPU à l'aide de Docker Compose
 Pour installer Milvus avec prise en charge du GPU à l'aide de Docker Compose, procédez comme suit.
-1. Téléchargez et configurez le fichier YAML
-Télécharger milvus-standalone-docker-compose-gpu.yml et enregistrez-le sous docker-compose.yml manuellement ou à l'aide de la commande suivante.
-$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml
+1. Téléchargez et configurez le fichier YAML
+Télécharger milvus-standalone-docker-compose-gpu.yml et enregistrez-le sous docker-compose.yml manuellement ou à l'aide de la commande suivante.
+$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml
 
 Vous devez apporter quelques modifications aux variables d'environnement du service autonome dans le fichier YAML, comme suit :
@@ -123,7 +123,7 @@ milvus-standalone /tini -- milvus run standalone Up 0.0.0.0:1953
 Rendez le dispositif GPU 0 visible pour Milvus :
 $ CUDA_VISIBLE_DEVICES=0 ./milvus run standalone
-Rendre les périphériques GPU 0 et 1 visibles par Milvus :
+Rendre les dispositifs GPU 0 et 1 visibles par Milvus :
 $ CUDA_VISIBLE_DEVICES=0,1 ./milvus run standalone
 Vous pouvez arrêter et supprimer ce conteneur comme suit.
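The `device_ids` entries in the compose snippets above are what actually pin GPUs to the standalone service. A quick sanity check, sketched below, is to list the devices visible inside the container; this assumes the `milvus-standalone` container name from the compose file and a host where the NVIDIA container toolkit is set up:

```python
import subprocess

# List the GPUs the standalone container can see; expect one line per
# device declared under device_ids in docker-compose.yml.
result = subprocess.run(
    ["docker", "exec", "milvus-standalone", "nvidia-smi", "-L"],
    capture_output=True,
    text=True,
    check=True,
)
print(result.stdout)
```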

diff --git a/localization/v2.4.x/site/fr/integrations/evaluation_with_deepeval.md b/localization/v2.4.x/site/fr/integrations/evaluation_with_deepeval.md
index 0a7a70471..883a1855e 100644
--- a/localization/v2.4.x/site/fr/integrations/evaluation_with_deepeval.md
+++ b/localization/v2.4.x/site/fr/integrations/evaluation_with_deepeval.md
@@ -20,8 +20,9 @@ title: Évaluation avec DeepEval
           d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
          >
-Open In Colab
+Open In Colab
+GitHub Repository
 Ce guide montre comment utiliser DeepEval pour évaluer un pipeline Retrieval-Augmented Generation (RAG) construit sur Milvus.
 Le système RAG combine un système de recherche avec un modèle génératif pour générer un nouveau texte basé sur une invite donnée. Le système récupère d'abord les documents pertinents d'un corpus à l'aide de Milvus, puis utilise un modèle génératif pour générer un nouveau texte basé sur les documents récupérés.
 DeepEval est un cadre qui vous aide à évaluer vos pipelines RAG. Il existe des outils et des cadres existants qui vous aident à construire ces pipelines, mais il peut être difficile de les évaluer et de quantifier leurs performances. C'est là que DeepEval entre en jeu.
 Conditions préalables
-Vous utilisez la dernière métrique de pertinence des réponses de DeepEval ! (utilisant gpt-4o, strict=False, async_mode=True)...
-✨ Vous exécutez la dernière métrique de fidélité de DeepEval ! (utilisant gpt-4o, strict=False, async_mode=True)...
+Vous utilisez la dernière métrique de pertinence des réponses de DeepEval ! (utilisant gpt-4o, strict=False, async_mode=True)...
+✨ Vous exécutez la dernière métrique de fidélité de DeepEval ! (utilisant gpt-4o, strict=False, async_mode=True)...
 Event loop is already running. Applying nest_asyncio patch to allow async execution...
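The removed and re-added log lines above come from DeepEval's answer-relevancy and faithfulness metrics. As a rough sketch of how those metrics are invoked (the test-case strings here are hypothetical; the `gpt-4o`, `strict_mode=False`, `async_mode=True` settings simply mirror the log output in the hunk):

```python
from deepeval.metrics import AnswerRelevancyMetric, FaithfulnessMetric
from deepeval.test_case import LLMTestCase

# Hypothetical RAG output to score; in the guide these values come from
# the Milvus-backed pipeline under evaluation.
test_case = LLMTestCase(
    input="Where is the painting 'Warrior' currently stored?",
    actual_output="The 'Warrior' is stored in the British Museum.",
    retrieval_context=["The 'Warrior' drawing is kept in the British Museum, London."],
)

for metric in (
    AnswerRelevancyMetric(model="gpt-4o", strict_mode=False, async_mode=True),
    FaithfulnessMetric(model="gpt-4o", strict_mode=False, async_mode=True),
):
    metric.measure(test_case)  # prints the "You're using ..." banner seen above
    print(type(metric).__name__, metric.score, metric.reason)
```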
     
     
    diff --git a/localization/v2.4.x/site/fr/integrations/evaluation_with_phoenix.md b/localization/v2.4.x/site/fr/integrations/evaluation_with_phoenix.md
    index aa0f34f54..39c720157 100644
    --- a/localization/v2.4.x/site/fr/integrations/evaluation_with_phoenix.md
    +++ b/localization/v2.4.x/site/fr/integrations/evaluation_with_phoenix.md
    @@ -20,10 +20,11 @@ title: Évaluation avec Arize Pheonix
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
-Open In Colab
+Open In Colab
+GitHub Repository
 Ce guide montre comment utiliser Arize Pheonix pour évaluer un pipeline de génération assistée par récupération (RAG) construit à partir de Milvus.
 Le système RAG combine un système de recherche avec un modèle génératif pour générer un nouveau texte basé sur une invite donnée. Le système récupère d'abord les documents pertinents d'un corpus à l'aide de Milvus, puis utilise un modèle génératif pour générer un nouveau texte basé sur les documents récupérés.
-Arize Pheonix est un cadre qui vous aide à évaluer vos pipelines RAG. Il existe des outils et des cadres existants qui vous aident à construire ces pipelines, mais il peut être difficile de les évaluer et de quantifier leur performance. C'est là qu'Arize Pheonix intervient.
+Arize Pheonix est un cadre qui vous aide à évaluer vos pipelines RAG. Il existe des outils et des cadres existants qui vous aident à construire ces pipelines, mais il peut être difficile de les évaluer et de quantifier leur performance. C'est là qu'Arize Pheonix entre en jeu.
 Conditions préalables
-Alt Text
+Alt Text Texte Alt

    import nest_asyncio
     
    @@ -419,7 +420,7 @@ results_df.head()
           input
           contextes
           sortie
    -      vérité_fondamentale
    +      vérité_de_sol
           contexte
           référence
           hallucination_eval
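The second hunk renames the ground-truth column of `results_df`, the frame that holds Phoenix's evaluation results. For context, here is a minimal sketch of how a `hallucination_eval` column like the one in that table can be produced with the `phoenix.evals` package; the one-row DataFrame is hypothetical and its `input`/`reference`/`output` columns follow the table in the diff:

```python
import pandas as pd
from phoenix.evals import (
    HALLUCINATION_PROMPT_RAILS_MAP,
    HALLUCINATION_PROMPT_TEMPLATE,
    OpenAIModel,
    llm_classify,
)

# Hypothetical single-row evaluation frame; the hallucination template
# reads the input, reference (retrieved context), and output columns.
df = pd.DataFrame(
    {
        "input": ["What state is Cambridge in?"],
        "reference": ["Cambridge is a city in Massachusetts, USA."],
        "output": ["Cambridge is in Massachusetts."],
    }
)

hallucination_eval = llm_classify(
    dataframe=df,
    template=HALLUCINATION_PROMPT_TEMPLATE,
    model=OpenAIModel(model="gpt-4o"),
    rails=list(HALLUCINATION_PROMPT_RAILS_MAP.values()),
    provide_explanation=True,
)
print(hallucination_eval.head())  # columns include the label and explanation
```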
    diff --git a/localization/v2.4.x/site/fr/integrations/integrate_with_bentoml.json b/localization/v2.4.x/site/fr/integrations/integrate_with_bentoml.json
    index ec81a550e..508cfa5b6 100644
    --- a/localization/v2.4.x/site/fr/integrations/integrate_with_bentoml.json
    +++ b/localization/v2.4.x/site/fr/integrations/integrate_with_bentoml.json
    @@ -1 +1 @@
    -{"codeList":["$ pip install -U pymilvus bentoml\n","import bentoml\n\nBENTO_EMBEDDING_MODEL_END_POINT = \"BENTO_EMBEDDING_MODEL_END_POINT\"\nBENTO_API_TOKEN = \"BENTO_API_TOKEN\"\n\nembedding_client = bentoml.SyncHTTPClient(\n    BENTO_EMBEDDING_MODEL_END_POINT, token=BENTO_API_TOKEN\n)\n","# naively chunk on newlines\ndef chunk_text(filename: str) -> list:\n    with open(filename, \"r\") as f:\n        text = f.read()\n    sentences = text.split(\"\\n\")\n    return sentences\n","import os\nimport requests\nimport urllib.request\n\n# set up the data source\nrepo = \"ytang07/bento_octo_milvus_RAG\"\ndirectory = \"data\"\nsave_dir = \"./city_data\"\napi_url = f\"https://api.github.com/repos/{repo}/contents/{directory}\"\n\n\nresponse = requests.get(api_url)\ndata = response.json()\n\nif not os.path.exists(save_dir):\n    os.makedirs(save_dir)\n\nfor item in data:\n    if item[\"type\"] == \"file\":\n        file_url = item[\"download_url\"]\n        file_path = os.path.join(save_dir, item[\"name\"])\n        urllib.request.urlretrieve(file_url, file_path)\n","# please upload your data directory under this file's folder\ncities = os.listdir(\"city_data\")\n# store chunked text for each of the cities in a list of dicts\ncity_chunks = []\nfor city in cities:\n    chunked = chunk_text(f\"city_data/{city}\")\n    cleaned = []\n    for chunk in chunked:\n        if len(chunk) > 7:\n            cleaned.append(chunk)\n    mapped = {\"city_name\": city.split(\".\")[0], \"chunks\": cleaned}\n    city_chunks.append(mapped)\n","def get_embeddings(texts: list) -> list:\n    if len(texts) > 25:\n        splits = [texts[x : x + 25] for x in range(0, len(texts), 25)]\n        embeddings = []\n        for split in splits:\n            embedding_split = embedding_client.encode(sentences=split)\n            embeddings += embedding_split\n        return embeddings\n    return embedding_client.encode(\n        sentences=texts,\n    )\n","entries = []\nfor city_dict in city_chunks:\n    # No need for the embeddings list if get_embeddings already returns a list of lists\n    embedding_list = get_embeddings(city_dict[\"chunks\"])  # returns a list of lists\n    # Now match texts with embeddings and city name\n    for i, embedding in enumerate(embedding_list):\n        entry = {\n            \"embedding\": embedding,\n            \"sentence\": city_dict[\"chunks\"][\n                i\n            ],  # Assume \"chunks\" has the corresponding texts for the embeddings\n            \"city\": city_dict[\"city_name\"],\n        }\n        entries.append(entry)\n    print(entries)\n","from pymilvus import MilvusClient\n\nCOLLECTION_NAME = \"Bento_Milvus_RAG\"  # random name for your collection\nDIMENSION = 384\n\n# Initialize a Milvus Lite client\nmilvus_client = MilvusClient(\"milvus_demo.db\")\n","from pymilvus import connections\n\nconnections.connect(uri=\"milvus_demo.db\")\n","from pymilvus import MilvusClient, DataType, Collection\n\n# Create schema\nschema = MilvusClient.create_schema(\n    auto_id=True,\n    enable_dynamic_field=True,\n)\n\n# 3.2. 
Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"embedding\", datatype=DataType.FLOAT_VECTOR, dim=DIMENSION)\n","# prepare index parameters\nindex_params = milvus_client.prepare_index_params()\n\n# add index\nindex_params.add_index(\n    field_name=\"embedding\",\n    index_type=\"AUTOINDEX\",  # use autoindex instead of other complex indexing method\n    metric_type=\"COSINE\",  # L2, COSINE, or IP\n)\n\n# create collection\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n    collection_name=COLLECTION_NAME, schema=schema, index_params=index_params\n)\n\n# Outside the loop, now you upsert all the entries at once\nmilvus_client.insert(collection_name=COLLECTION_NAME, data=entries)\n","BENTO_LLM_END_POINT = \"BENTO_LLM_END_POINT\"\n\nllm_client = bentoml.SyncHTTPClient(BENTO_LLM_END_POINT, token=BENTO_API_TOKEN)\n","def dorag(question: str, context: str):\n\n    prompt = (\n        f\"You are a helpful assistant. The user has a question. Answer the user question based only on the context: {context}. \\n\"\n        f\"The user question is {question}\"\n    )\n\n    results = llm_client.generate(\n        max_tokens=1024,\n        prompt=prompt,\n    )\n\n    res = \"\"\n    for result in results:\n        res += result\n\n    return res\n","question = \"What state is Cambridge in?\"\n\n\ndef ask_a_question(question):\n    embeddings = get_embeddings([question])\n    res = milvus_client.search(\n        collection_name=COLLECTION_NAME,\n        data=embeddings,  # search for the one (1) embedding returned as a list of lists\n        anns_field=\"embedding\",  # Search across embeddings\n        limit=5,  # get me the top 5 results\n        output_fields=[\"sentence\"],  # get the sentence/chunk and city\n    )\n\n    sentences = []\n    for hits in res:\n        for hit in hits:\n            print(hit)\n            sentences.append(hit[\"entity\"][\"sentence\"])\n    context = \". \".join(sentences)\n    return context\n\n\ncontext = ask_a_question(question=question)\nprint(context)\n","print(dorag(question=question, context=context))\n"],"headingContent":"","anchorList":[{"label":"Génération améliorée par la recherche (RAG) avec Milvus et BentoML","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-BentoML","type":1,"isActive":false},{"label":"Introduction","href":"Introduction","type":2,"isActive":false},{"label":"Avant de commencer","href":"Before-you-begin","type":2,"isActive":false},{"label":"Servir les Embeddings avec BentoML/BentoCloud","href":"Serving-Embeddings-with-BentoMLBentoCloud","type":2,"isActive":false},{"label":"Insertion des données dans une base de données vectorielle pour l'extraction","href":"Inserting-Data-into-a-Vector-Database-for-Retrieval","type":2,"isActive":false},{"label":"Création de votre collection Milvus Lite","href":"Creating-Your-Milvus-Lite-Collection","type":2,"isActive":false},{"label":"Configurer votre LLM pour RAG","href":"Set-up-Your-LLM-for-RAG","type":2,"isActive":false},{"label":"Instructions LLM","href":"LLM-Instructions","type":2,"isActive":false},{"label":"Un exemple de RAG","href":"A-RAG-Example","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ pip install -U pymilvus bentoml\n","import bentoml\n\nBENTO_EMBEDDING_MODEL_END_POINT = \"BENTO_EMBEDDING_MODEL_END_POINT\"\nBENTO_API_TOKEN = \"BENTO_API_TOKEN\"\n\nembedding_client = bentoml.SyncHTTPClient(\n    BENTO_EMBEDDING_MODEL_END_POINT, token=BENTO_API_TOKEN\n)\n","# naively chunk on newlines\ndef chunk_text(filename: str) -> list:\n    with open(filename, \"r\") as f:\n        text = f.read()\n    sentences = text.split(\"\\n\")\n    return sentences\n","import os\nimport requests\nimport urllib.request\n\n# set up the data source\nrepo = \"ytang07/bento_octo_milvus_RAG\"\ndirectory = \"data\"\nsave_dir = \"./city_data\"\napi_url = f\"https://api.github.com/repos/{repo}/contents/{directory}\"\n\n\nresponse = requests.get(api_url)\ndata = response.json()\n\nif not os.path.exists(save_dir):\n    os.makedirs(save_dir)\n\nfor item in data:\n    if item[\"type\"] == \"file\":\n        file_url = item[\"download_url\"]\n        file_path = os.path.join(save_dir, item[\"name\"])\n        urllib.request.urlretrieve(file_url, file_path)\n","# please upload your data directory under this file's folder\ncities = os.listdir(\"city_data\")\n# store chunked text for each of the cities in a list of dicts\ncity_chunks = []\nfor city in cities:\n    chunked = chunk_text(f\"city_data/{city}\")\n    cleaned = []\n    for chunk in chunked:\n        if len(chunk) > 7:\n            cleaned.append(chunk)\n    mapped = {\"city_name\": city.split(\".\")[0], \"chunks\": cleaned}\n    city_chunks.append(mapped)\n","def get_embeddings(texts: list) -> list:\n    if len(texts) > 25:\n        splits = [texts[x : x + 25] for x in range(0, len(texts), 25)]\n        embeddings = []\n        for split in splits:\n            embedding_split = embedding_client.encode(sentences=split)\n            embeddings += embedding_split\n        return embeddings\n    return embedding_client.encode(\n        sentences=texts,\n    )\n","entries = []\nfor city_dict in city_chunks:\n    # No need for the embeddings list if get_embeddings already returns a list of lists\n    embedding_list = get_embeddings(city_dict[\"chunks\"])  # returns a list of lists\n    # Now match texts with embeddings and city name\n    for i, embedding in enumerate(embedding_list):\n        entry = {\n            \"embedding\": embedding,\n            \"sentence\": city_dict[\"chunks\"][\n                i\n            ],  # Assume \"chunks\" has the corresponding texts for the embeddings\n            \"city\": city_dict[\"city_name\"],\n        }\n        entries.append(entry)\n    print(entries)\n","from pymilvus import MilvusClient\n\nCOLLECTION_NAME = \"Bento_Milvus_RAG\"  # random name for your collection\nDIMENSION = 384\n\n# Initialize a Milvus Lite client\nmilvus_client = MilvusClient(\"milvus_demo.db\")\n","from pymilvus import connections\n\nconnections.connect(uri=\"milvus_demo.db\")\n","from pymilvus import MilvusClient, DataType, Collection\n\n# Create schema\nschema = MilvusClient.create_schema(\n    auto_id=True,\n    enable_dynamic_field=True,\n)\n\n# 3.2. 
Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"embedding\", datatype=DataType.FLOAT_VECTOR, dim=DIMENSION)\n","# prepare index parameters\nindex_params = milvus_client.prepare_index_params()\n\n# add index\nindex_params.add_index(\n    field_name=\"embedding\",\n    index_type=\"AUTOINDEX\",  # use autoindex instead of other complex indexing method\n    metric_type=\"COSINE\",  # L2, COSINE, or IP\n)\n\n# create collection\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n    collection_name=COLLECTION_NAME, schema=schema, index_params=index_params\n)\n\n# Outside the loop, now you upsert all the entries at once\nmilvus_client.insert(collection_name=COLLECTION_NAME, data=entries)\n","BENTO_LLM_END_POINT = \"BENTO_LLM_END_POINT\"\n\nllm_client = bentoml.SyncHTTPClient(BENTO_LLM_END_POINT, token=BENTO_API_TOKEN)\n","def dorag(question: str, context: str):\n\n    prompt = (\n        f\"You are a helpful assistant. The user has a question. Answer the user question based only on the context: {context}. \\n\"\n        f\"The user question is {question}\"\n    )\n\n    results = llm_client.generate(\n        max_tokens=1024,\n        prompt=prompt,\n    )\n\n    res = \"\"\n    for result in results:\n        res += result\n\n    return res\n","question = \"What state is Cambridge in?\"\n\n\ndef ask_a_question(question):\n    embeddings = get_embeddings([question])\n    res = milvus_client.search(\n        collection_name=COLLECTION_NAME,\n        data=embeddings,  # search for the one (1) embedding returned as a list of lists\n        anns_field=\"embedding\",  # Search across embeddings\n        limit=5,  # get me the top 5 results\n        output_fields=[\"sentence\"],  # get the sentence/chunk and city\n    )\n\n    sentences = []\n    for hits in res:\n        for hit in hits:\n            print(hit)\n            sentences.append(hit[\"entity\"][\"sentence\"])\n    context = \". \".join(sentences)\n    return context\n\n\ncontext = ask_a_question(question=question)\nprint(context)\n","print(dorag(question=question, context=context))\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and BentoML","anchorList":[{"label":"Génération améliorée par la recherche (RAG) avec Milvus et BentoML","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-BentoML","type":1,"isActive":false},{"label":"Introduction","href":"Introduction","type":2,"isActive":false},{"label":"Avant de commencer","href":"Before-you-begin","type":2,"isActive":false},{"label":"Servir les Embeddings avec BentoML/BentoCloud","href":"Serving-Embeddings-with-BentoMLBentoCloud","type":2,"isActive":false},{"label":"Insertion des données dans une base de données vectorielle pour l'extraction","href":"Inserting-Data-into-a-Vector-Database-for-Retrieval","type":2,"isActive":false},{"label":"Création de votre collection Milvus Lite","href":"Creating-Your-Milvus-Lite-Collection","type":2,"isActive":false},{"label":"Configurer votre LLM pour RAG","href":"Set-up-Your-LLM-for-RAG","type":2,"isActive":false},{"label":"Instructions LLM","href":"LLM-Instructions","type":2,"isActive":false},{"label":"Un exemple de RAG","href":"A-RAG-Example","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/fr/integrations/integrate_with_bentoml.md b/localization/v2.4.x/site/fr/integrations/integrate_with_bentoml.md
    index c5afc0d1f..75b094fe4 100644
    --- a/localization/v2.4.x/site/fr/integrations/integrate_with_bentoml.md
    +++ b/localization/v2.4.x/site/fr/integrations/integrate_with_bentoml.md
    @@ -2,10 +2,9 @@
     id: integrate_with_bentoml.md
     summary: >-
       Ce guide montre comment utiliser un modèle d'intégration open-source et un
    -  modèle de langue large sur BentoCloud avec la base de données vectorielle
    -  Milvus pour construire une application de Génération Augmentée de Récupération
    -  (RAG).
    -title: Génération assistée par récupération (RAG) avec Milvus et BentoML
    +  modèle de grande langue sur BentoCloud avec la base de données vectorielle
    +  Milvus pour créer une application de génération augmentée de recherche (RAG).
    +title: Génération améliorée par la recherche (RAG) avec Milvus et BentoML
     ---
     

 Génération améliorée par la recherche (RAG) avec Milvus et BentoML
 Open In Colab
+GitHub Repository
 Introduction
-Nous sommes maintenant prêts à poser une question. Cette fonction prend simplement une question et effectue un RAG pour générer le contexte pertinent à partir des informations d'arrière-plan. Ensuite, nous passons le contexte et la question à dorag() et nous obtenons le résultat.
+Nous sommes maintenant prêts à poser une question. Cette fonction prend simplement une question et effectue un RAG pour générer le contexte pertinent à partir des informations de base. Ensuite, nous passons le contexte et la question à dorag() et nous obtenons le résultat.
 question = "What state is Cambridge in?"
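One detail worth pulling out of the `codeList` above: `get_embeddings` sends sentences to the BentoCloud endpoint in batches of at most 25. The sketch below isolates that batching logic with a stand-in client so it can be checked offline; `FakeClient` and its 384-dim zero vectors are hypothetical stand-ins for `bentoml.SyncHTTPClient`:

```python
class FakeClient:
    """Stand-in for the BentoML embedding client; returns zero vectors."""

    def encode(self, sentences):
        return [[0.0] * 384 for _ in sentences]


embedding_client = FakeClient()


def get_embeddings(texts: list) -> list:
    # Same batching rule as the guide: split inputs into chunks of 25
    # so each request stays within the endpoint's batch limit.
    if len(texts) > 25:
        splits = [texts[x : x + 25] for x in range(0, len(texts), 25)]
        embeddings = []
        for split in splits:
            embeddings += embedding_client.encode(sentences=split)
        return embeddings
    return embedding_client.encode(sentences=texts)


assert len(get_embeddings(["a"] * 60)) == 60  # batches of 25, 25 and 10
```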
     
     
    diff --git a/localization/v2.4.x/site/fr/integrations/integrate_with_camel.json b/localization/v2.4.x/site/fr/integrations/integrate_with_camel.json
    index 20c7e9300..9c6d2e79f 100644
    --- a/localization/v2.4.x/site/fr/integrations/integrate_with_camel.json
    +++ b/localization/v2.4.x/site/fr/integrations/integrate_with_camel.json
    @@ -1 +1 @@
    -{"codeList":["$ pip install -U \"camel-ai[all]\" pymilvus\n","import os\nimport requests\n\nos.makedirs(\"local_data\", exist_ok=True)\n\nurl = \"https://arxiv.org/pdf/2303.17760.pdf\"\nresponse = requests.get(url)\nwith open(\"local_data/camel paper.pdf\", \"wb\") as file:\n    file.write(response.content)\n","os.environ[\"OPENAI_API_KEY\"] = \"Your Key\"\n","from camel.embeddings import OpenAIEmbedding\n\nembedding_instance = OpenAIEmbedding()\n","from camel.storages import MilvusStorage\n\nstorage_instance = MilvusStorage(\n    vector_dim=embedding_instance.get_output_dim(),\n    url_and_api_key=(\n        \"./milvus_demo.db\",  # Your Milvus connection URI\n        \"\",  # Your Milvus token\n    ),\n    collection_name=\"camel_paper\",\n)\n","from camel.retrievers import VectorRetriever\n\nvector_retriever = VectorRetriever(\n    embedding_model=embedding_instance, storage=storage_instance\n)\n","vector_retriever.process(content_input_path=\"local_data/camel paper.pdf\")\n","retrieved_info = vector_retriever.query(query=\"What is CAMEL?\", top_k=1)\nprint(retrieved_info)\n","retrieved_info_irrelevant = vector_retriever.query(\n    query=\"Compared with dumpling and rice, which should I take for dinner?\", top_k=1\n)\n\nprint(retrieved_info_irrelevant)\n","from camel.retrievers import AutoRetriever\nfrom camel.types import StorageType\n\nauto_retriever = AutoRetriever(\n    url_and_api_key=(\n        \"./milvus_demo.db\",  # Your Milvus connection URI\n        \"\",  # Your Milvus token\n    ),\n    storage_type=StorageType.MILVUS,\n    embedding_model=embedding_instance,\n)\n\nretrieved_info = auto_retriever.run_vector_retriever(\n    query=\"What is CAMEL-AI\",\n    content_input_paths=[\n        \"local_data/camel paper.pdf\",  # example local path\n        \"https://www.camel-ai.org/\",  # example remote url\n    ],\n    top_k=1,\n    return_detailed_info=True,\n)\n\nprint(retrieved_info)\n","from camel.agents import ChatAgent\nfrom camel.messages import BaseMessage\nfrom camel.types import RoleType\nfrom camel.retrievers import AutoRetriever\nfrom camel.types import StorageType\n\n\ndef single_agent(query: str) -> str:\n    # Set agent role\n    assistant_sys_msg = BaseMessage(\n        role_name=\"Assistant\",\n        role_type=RoleType.ASSISTANT,\n        meta_dict=None,\n        content=\"\"\"You are a helpful assistant to answer question,\n         I will give you the Original Query and Retrieved Context,\n        answer the Original Query based on the Retrieved Context,\n        if you can't answer the question just say I don't know.\"\"\",\n    )\n\n    # Add auto retriever\n    auto_retriever = AutoRetriever(\n        url_and_api_key=(\n            \"./milvus_demo.db\",  # Your Milvus connection URI\n            \"\",  # Your Milvus token\n        ),\n        storage_type=StorageType.MILVUS,\n        embedding_model=embedding_instance,\n    )\n\n    retrieved_info = auto_retriever.run_vector_retriever(\n        query=query,\n        content_input_paths=[\n            \"local_data/camel paper.pdf\",  # example local path\n            \"https://www.camel-ai.org/\",  # example remote url\n        ],\n        # vector_storage_local_path=\"storage_default_run\",\n        top_k=1,\n        return_detailed_info=True,\n    )\n\n    # Pass the retrieved infomation to agent\n    user_msg = BaseMessage.make_user_message(role_name=\"User\", content=retrieved_info)\n    agent = ChatAgent(assistant_sys_msg)\n\n    # Get response\n    assistant_response = agent.step(user_msg)\n    
return assistant_response.msg.content\n\n\nprint(single_agent(\"What is CAMEL-AI\"))\n","from typing import List\nfrom colorama import Fore\n\nfrom camel.agents.chat_agent import FunctionCallingRecord\nfrom camel.configs import ChatGPTConfig\nfrom camel.functions import (\n    MATH_FUNCS,\n    RETRIEVAL_FUNCS,\n)\nfrom camel.societies import RolePlaying\nfrom camel.types import ModelType\nfrom camel.utils import print_text_animated\n\n\ndef role_playing_with_rag(\n    task_prompt, model_type=ModelType.GPT_4O, chat_turn_limit=10\n) -> None:\n    task_prompt = task_prompt\n\n    user_model_config = ChatGPTConfig(temperature=0.0)\n\n    function_list = [\n        *MATH_FUNCS,\n        *RETRIEVAL_FUNCS,\n    ]\n    assistant_model_config = ChatGPTConfig(\n        tools=function_list,\n        temperature=0.0,\n    )\n\n    role_play_session = RolePlaying(\n        assistant_role_name=\"Searcher\",\n        user_role_name=\"Professor\",\n        assistant_agent_kwargs=dict(\n            model_type=model_type,\n            model_config=assistant_model_config,\n            tools=function_list,\n        ),\n        user_agent_kwargs=dict(\n            model_type=model_type,\n            model_config=user_model_config,\n        ),\n        task_prompt=task_prompt,\n        with_task_specify=False,\n    )\n\n    print(\n        Fore.GREEN\n        + f\"AI Assistant sys message:\\n{role_play_session.assistant_sys_msg}\\n\"\n    )\n    print(Fore.BLUE + f\"AI User sys message:\\n{role_play_session.user_sys_msg}\\n\")\n\n    print(Fore.YELLOW + f\"Original task prompt:\\n{task_prompt}\\n\")\n    print(\n        Fore.CYAN\n        + f\"Specified task prompt:\\n{role_play_session.specified_task_prompt}\\n\"\n    )\n    print(Fore.RED + f\"Final task prompt:\\n{role_play_session.task_prompt}\\n\")\n\n    n = 0\n    input_msg = role_play_session.init_chat()\n    while n < chat_turn_limit:\n        n += 1\n        assistant_response, user_response = role_play_session.step(input_msg)\n\n        if assistant_response.terminated:\n            print(\n                Fore.GREEN\n                + (\n                    \"AI Assistant terminated. Reason: \"\n                    f\"{assistant_response.info['termination_reasons']}.\"\n                )\n            )\n            break\n        if user_response.terminated:\n            print(\n                Fore.GREEN\n                + (\n                    \"AI User terminated. \"\n                    f\"Reason: {user_response.info['termination_reasons']}.\"\n                )\n            )\n            break\n\n        # Print output from the user\n        print_text_animated(Fore.BLUE + f\"AI User:\\n\\n{user_response.msg.content}\\n\")\n\n        # Print output from the assistant, including any function\n        # execution information\n        print_text_animated(Fore.GREEN + \"AI Assistant:\")\n        tool_calls: List[FunctionCallingRecord] = assistant_response.info[\"tool_calls\"]\n        for func_record in tool_calls:\n            print_text_animated(f\"{func_record}\")\n        print_text_animated(f\"{assistant_response.msg.content}\\n\")\n\n        if \"CAMEL_TASK_DONE\" in user_response.msg.content:\n            break\n\n        input_msg = assistant_response.msg\n","role_playing_with_rag(\n    task_prompt=\"\"\"What is the main termination reasons for AI Society\n                   dataset, how many number of messages did camel decided to\n                   limit, what's the value plus 100? 
You should refer to the\n                   content in path camel/local_data/camel paper.pdf\"\"\"\n)\n"],"headingContent":"","anchorList":[{"label":"Génération améliorée par récupération (RAG) avec Milvus et Camel","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Camel","type":1,"isActive":false},{"label":"Chargement des données","href":"Load-Data","type":2,"isActive":false},{"label":"1. RAG personnalisé","href":"1-Customized-RAG","type":2,"isActive":false},{"label":"2. Auto RAG","href":"2-Auto-RAG","type":2,"isActive":false},{"label":"3. Agent unique avec Auto RAG","href":"3-Single-Agent-with-Auto-RAG","type":2,"isActive":false},{"label":"4. Jeu de rôle avec Auto RAG","href":"4-Role-playing-with-Auto-RAG","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ pip install -U \"camel-ai[all]\" pymilvus\n","import os\nimport requests\n\nos.makedirs(\"local_data\", exist_ok=True)\n\nurl = \"https://arxiv.org/pdf/2303.17760.pdf\"\nresponse = requests.get(url)\nwith open(\"local_data/camel paper.pdf\", \"wb\") as file:\n    file.write(response.content)\n","os.environ[\"OPENAI_API_KEY\"] = \"Your Key\"\n","from camel.embeddings import OpenAIEmbedding\n\nembedding_instance = OpenAIEmbedding()\n","from camel.storages import MilvusStorage\n\nstorage_instance = MilvusStorage(\n    vector_dim=embedding_instance.get_output_dim(),\n    url_and_api_key=(\n        \"./milvus_demo.db\",  # Your Milvus connection URI\n        \"\",  # Your Milvus token\n    ),\n    collection_name=\"camel_paper\",\n)\n","from camel.retrievers import VectorRetriever\n\nvector_retriever = VectorRetriever(\n    embedding_model=embedding_instance, storage=storage_instance\n)\n","vector_retriever.process(content_input_path=\"local_data/camel paper.pdf\")\n","retrieved_info = vector_retriever.query(query=\"What is CAMEL?\", top_k=1)\nprint(retrieved_info)\n","retrieved_info_irrelevant = vector_retriever.query(\n    query=\"Compared with dumpling and rice, which should I take for dinner?\", top_k=1\n)\n\nprint(retrieved_info_irrelevant)\n","from camel.retrievers import AutoRetriever\nfrom camel.types import StorageType\n\nauto_retriever = AutoRetriever(\n    url_and_api_key=(\n        \"./milvus_demo.db\",  # Your Milvus connection URI\n        \"\",  # Your Milvus token\n    ),\n    storage_type=StorageType.MILVUS,\n    embedding_model=embedding_instance,\n)\n\nretrieved_info = auto_retriever.run_vector_retriever(\n    query=\"What is CAMEL-AI\",\n    content_input_paths=[\n        \"local_data/camel paper.pdf\",  # example local path\n        \"https://www.camel-ai.org/\",  # example remote url\n    ],\n    top_k=1,\n    return_detailed_info=True,\n)\n\nprint(retrieved_info)\n","from camel.agents import ChatAgent\nfrom camel.messages import BaseMessage\nfrom camel.types import RoleType\nfrom camel.retrievers import AutoRetriever\nfrom camel.types import StorageType\n\n\ndef single_agent(query: str) -> str:\n    # Set agent role\n    assistant_sys_msg = BaseMessage(\n        role_name=\"Assistant\",\n        role_type=RoleType.ASSISTANT,\n        meta_dict=None,\n        content=\"\"\"You are a helpful assistant to answer question,\n         I will give you the Original Query and Retrieved Context,\n        answer the Original Query based on the Retrieved Context,\n        if you can't answer the question just say I don't know.\"\"\",\n    )\n\n    # Add auto retriever\n    auto_retriever = AutoRetriever(\n        url_and_api_key=(\n            \"./milvus_demo.db\",  # Your Milvus connection URI\n            \"\",  # Your Milvus token\n        ),\n        storage_type=StorageType.MILVUS,\n        embedding_model=embedding_instance,\n    )\n\n    retrieved_info = auto_retriever.run_vector_retriever(\n        query=query,\n        content_input_paths=[\n            \"local_data/camel paper.pdf\",  # example local path\n            \"https://www.camel-ai.org/\",  # example remote url\n        ],\n        # vector_storage_local_path=\"storage_default_run\",\n        top_k=1,\n        return_detailed_info=True,\n    )\n\n    # Pass the retrieved infomation to agent\n    user_msg = BaseMessage.make_user_message(role_name=\"User\", content=retrieved_info)\n    agent = ChatAgent(assistant_sys_msg)\n\n    # Get response\n    assistant_response = agent.step(user_msg)\n    
return assistant_response.msg.content\n\n\nprint(single_agent(\"What is CAMEL-AI\"))\n","from typing import List\nfrom colorama import Fore\n\nfrom camel.agents.chat_agent import FunctionCallingRecord\nfrom camel.configs import ChatGPTConfig\nfrom camel.functions import (\n    MATH_FUNCS,\n    RETRIEVAL_FUNCS,\n)\nfrom camel.societies import RolePlaying\nfrom camel.types import ModelType\nfrom camel.utils import print_text_animated\n\n\ndef role_playing_with_rag(\n    task_prompt, model_type=ModelType.GPT_4O, chat_turn_limit=10\n) -> None:\n    task_prompt = task_prompt\n\n    user_model_config = ChatGPTConfig(temperature=0.0)\n\n    function_list = [\n        *MATH_FUNCS,\n        *RETRIEVAL_FUNCS,\n    ]\n    assistant_model_config = ChatGPTConfig(\n        tools=function_list,\n        temperature=0.0,\n    )\n\n    role_play_session = RolePlaying(\n        assistant_role_name=\"Searcher\",\n        user_role_name=\"Professor\",\n        assistant_agent_kwargs=dict(\n            model_type=model_type,\n            model_config=assistant_model_config,\n            tools=function_list,\n        ),\n        user_agent_kwargs=dict(\n            model_type=model_type,\n            model_config=user_model_config,\n        ),\n        task_prompt=task_prompt,\n        with_task_specify=False,\n    )\n\n    print(\n        Fore.GREEN\n        + f\"AI Assistant sys message:\\n{role_play_session.assistant_sys_msg}\\n\"\n    )\n    print(Fore.BLUE + f\"AI User sys message:\\n{role_play_session.user_sys_msg}\\n\")\n\n    print(Fore.YELLOW + f\"Original task prompt:\\n{task_prompt}\\n\")\n    print(\n        Fore.CYAN\n        + f\"Specified task prompt:\\n{role_play_session.specified_task_prompt}\\n\"\n    )\n    print(Fore.RED + f\"Final task prompt:\\n{role_play_session.task_prompt}\\n\")\n\n    n = 0\n    input_msg = role_play_session.init_chat()\n    while n < chat_turn_limit:\n        n += 1\n        assistant_response, user_response = role_play_session.step(input_msg)\n\n        if assistant_response.terminated:\n            print(\n                Fore.GREEN\n                + (\n                    \"AI Assistant terminated. Reason: \"\n                    f\"{assistant_response.info['termination_reasons']}.\"\n                )\n            )\n            break\n        if user_response.terminated:\n            print(\n                Fore.GREEN\n                + (\n                    \"AI User terminated. \"\n                    f\"Reason: {user_response.info['termination_reasons']}.\"\n                )\n            )\n            break\n\n        # Print output from the user\n        print_text_animated(Fore.BLUE + f\"AI User:\\n\\n{user_response.msg.content}\\n\")\n\n        # Print output from the assistant, including any function\n        # execution information\n        print_text_animated(Fore.GREEN + \"AI Assistant:\")\n        tool_calls: List[FunctionCallingRecord] = assistant_response.info[\"tool_calls\"]\n        for func_record in tool_calls:\n            print_text_animated(f\"{func_record}\")\n        print_text_animated(f\"{assistant_response.msg.content}\\n\")\n\n        if \"CAMEL_TASK_DONE\" in user_response.msg.content:\n            break\n\n        input_msg = assistant_response.msg\n","role_playing_with_rag(\n    task_prompt=\"\"\"What is the main termination reasons for AI Society\n                   dataset, how many number of messages did camel decided to\n                   limit, what's the value plus 100? 
You should refer to the\n                   content in path camel/local_data/camel paper.pdf\"\"\"\n)\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and Camel","anchorList":[{"label":"Génération améliorée par récupération (RAG) avec Milvus et Camel","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Camel","type":1,"isActive":false},{"label":"Chargement des données","href":"Load-Data","type":2,"isActive":false},{"label":"1. RAG personnalisé","href":"1-Customized-RAG","type":2,"isActive":false},{"label":"2. Auto RAG","href":"2-Auto-RAG","type":2,"isActive":false},{"label":"3. Agent unique avec Auto RAG","href":"3-Single-Agent-with-Auto-RAG","type":2,"isActive":false},{"label":"4. Jeu de rôle avec Auto RAG","href":"4-Role-playing-with-Auto-RAG","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/fr/integrations/integrate_with_camel.md b/localization/v2.4.x/site/fr/integrations/integrate_with_camel.md
    index dac65de57..8c7e5d9d7 100644
    --- a/localization/v2.4.x/site/fr/integrations/integrate_with_camel.md
    +++ b/localization/v2.4.x/site/fr/integrations/integrate_with_camel.md
    @@ -2,9 +2,8 @@
     id: integrate_with_camel.md
     summary: >-
       Ce guide montre comment utiliser un modèle d'intégration open-source et un
    -  modèle de langue large sur BentoCloud avec la base de données vectorielle
    -  Milvus pour construire une application de Génération Augmentée de Récupération
    -  (RAG).
    +  modèle de grande langue sur BentoCloud avec la base de données vectorielle
    +  Milvus pour créer une application de génération augmentée de recherche (RAG).
     title: Génération assistée par récupération (RAG) avec Milvus et BentoML
     ---
     

 Génération améliorée par récupération (RAG) avec Milvus et Camel
 Open In Colab
+GitHub Repository
 Ce guide montre comment construire un système de génération améliorée par récupération (RAG) en utilisant CAMEL et Milvus.
 Le système RAG combine un système de recherche avec un modèle génératif pour générer un nouveau texte basé sur une invite donnée. Le système récupère d'abord les documents pertinents d'un corpus à l'aide de Milvus, puis utilise un modèle génératif pour générer un nouveau texte basé sur les documents récupérés.
 CAMEL est un cadre multi-agents. Milvus est la base de données vectorielles open-source la plus avancée au monde, construite pour alimenter la recherche de similarité d'intégration et les applications d'intelligence artificielle.
@@ -84,7 +84,7 @@ response = requests.get(url)
 Pour configurer l'intégration d'OpenAI, nous devons configurer OPENAI_API_KEY comme indiqué ci-dessous.
 os.environ["OPENAI_API_KEY"] = "Your Key"
-Importez et définissez l'instance d'intégration :
+Importer et définir l'instance d'intégration :
 from camel.embeddings import OpenAIEmbedding
 
 embedding_instance = OpenAIEmbedding()
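A side note on the hunk above: the embedding instance it defines is what fixes the vector dimension for the Milvus collection, since the `codeList` passes `vector_dim=embedding_instance.get_output_dim()` to `MilvusStorage`. A minimal sketch of that wiring, assuming `OPENAI_API_KEY` is already set as shown:

```python
from camel.embeddings import OpenAIEmbedding
from camel.storages import MilvusStorage

embedding_instance = OpenAIEmbedding()

# The storage layer derives its dimension from the embedding model, so
# the two stay consistent if the model is swapped later.
storage_instance = MilvusStorage(
    vector_dim=embedding_instance.get_output_dim(),
    url_and_api_key=("./milvus_demo.db", ""),  # local Milvus Lite file, empty token
    collection_name="camel_paper",
)
print(embedding_instance.get_output_dim())  # typically 1536 for OpenAI embeddings
```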
    diff --git a/localization/v2.4.x/site/fr/integrations/integrate_with_dspy.json b/localization/v2.4.x/site/fr/integrations/integrate_with_dspy.json
    index 058a59041..60f9fdc4f 100644
    --- a/localization/v2.4.x/site/fr/integrations/integrate_with_dspy.json
    +++ b/localization/v2.4.x/site/fr/integrations/integrate_with_dspy.json
    @@ -1 +1 @@
    -{"codeList":["$ pip install \"dspy-ai[milvus]\"\n$ pip install -U pymilvus\n","from dspy.datasets import HotPotQA\n\n# Load the dataset.\ndataset = HotPotQA(\n    train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0\n)\n\n# Tell DSPy that the 'question' field is the input. Any other fields are labels and/or metadata.\ntrainset = [x.with_inputs(\"question\") for x in dataset.train]\ndevset = [x.with_inputs(\"question\") for x in dataset.dev]\n","import requests\nimport os\n\nos.environ[\"OPENAI_API_KEY\"] = \"\"\nMILVUS_URI = \"example.db\"\nMILVUS_TOKEN = \"\"\n\nfrom pymilvus import MilvusClient, DataType, Collection\nfrom dspy.retrieve.milvus_rm import openai_embedding_function\n\nclient = MilvusClient(uri=MILVUS_URI, token=MILVUS_TOKEN)\n\nif \"dspy_example\" not in client.list_collections():\n    client.create_collection(\n        collection_name=\"dspy_example\",\n        overwrite=True,\n        dimension=1536,\n        primary_field_name=\"id\",\n        vector_field_name=\"embedding\",\n        id_type=\"int\",\n        metric_type=\"IP\",\n        max_length=65535,\n        enable_dynamic=True,\n    )\ntext = requests.get(\n    \"https://raw.githubusercontent.com/wxywb/dspy_dataset_sample/master/sample_data.txt\"\n).text\n\nfor idx, passage in enumerate(text.split(\"\\n\")):\n    if len(passage) == 0:\n        continue\n    client.insert(\n        collection_name=\"dspy_example\",\n        data=[\n            {\n                \"id\": idx,\n                \"embedding\": openai_embedding_function(passage)[0],\n                \"text\": passage,\n            }\n        ],\n    )\n","from dspy.retrieve.milvus_rm import MilvusRM\nimport dspy\n\nretriever_model = MilvusRM(\n    collection_name=\"dspy_example\",\n    uri=MILVUS_URI,\n    token=MILVUS_TOKEN,  # ignore this if no token is required for Milvus connection\n    embedding_function=openai_embedding_function,\n)\nturbo = dspy.OpenAI(model=\"gpt-3.5-turbo\")\ndspy.settings.configure(lm=turbo)\n","class GenerateAnswer(dspy.Signature):\n    \"\"\"Answer questions with short factoid answers.\"\"\"\n\n    context = dspy.InputField(desc=\"may contain relevant facts\")\n    question = dspy.InputField()\n    answer = dspy.OutputField(desc=\"often between 1 and 5 words\")\n","class RAG(dspy.Module):\n    def __init__(self, rm):\n        super().__init__()\n        self.retrieve = rm\n\n        # This signature indicates the task imposed on the COT module.\n        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)\n\n    def forward(self, question):\n        # Use milvus_rm to retrieve context for the question.\n        context = self.retrieve(question).passages\n        # COT module takes \"context, query\" and output \"answer\".\n        prediction = self.generate_answer(context=context, question=question)\n        return dspy.Prediction(\n            context=[item.long_text for item in context], answer=prediction.answer\n        )\n","rag = RAG(retriever_model)\nprint(rag(\"who write At My Window\").answer)\n","from dspy.evaluate.evaluate import Evaluate\nfrom dspy.datasets import HotPotQA\n\nevaluate_on_hotpotqa = Evaluate(\n    devset=devset, num_threads=1, display_progress=False, display_table=5\n)\n\nmetric = dspy.evaluate.answer_exact_match\nscore = evaluate_on_hotpotqa(rag, metric=metric)\nprint(\"rag:\", score)\n","from dspy.teleprompt import BootstrapFewShot\n\n# Validation logic: check that the predicted answer is correct.# Also check that the retrieved context does contain that 
answer.\n\n\ndef validate_context_and_answer(example, pred, trace=None):\n    answer_EM = dspy.evaluate.answer_exact_match(example, pred)\n    answer_PM = dspy.evaluate.answer_passage_match(example, pred)\n    return answer_EM and answer_PM\n\n\n# Set up a basic teleprompter, which will compile our RAG program.\nteleprompter = BootstrapFewShot(metric=validate_context_and_answer)\n\n# Compile!\ncompiled_rag = teleprompter.compile(rag, trainset=trainset)\n\n# Now compiled_rag is optimized and ready to answer your new question!\n# Now, let’s evaluate the compiled RAG program.\nscore = evaluate_on_hotpotqa(compiled_rag, metric=metric)\nprint(score)\nprint(\"compile_rag:\", score)\n"],"headingContent":"","anchorList":[{"label":"Intégrer Milvus à DSPy","href":"Integrate-Milvus-with-DSPy","type":1,"isActive":false},{"label":"Qu'est-ce que DSPy ?","href":"What-is-DSPy","type":2,"isActive":false},{"label":"Avantages de l'utilisation de DSPy","href":"Benefits-of-using-DSPy","type":2,"isActive":false},{"label":"Les modules","href":"Modules","type":2,"isActive":false},{"label":"Pourquoi Milvus dans DSPy ?","href":"Why-Milvus-in-DSPy","type":2,"isActive":false},{"label":"Exemples d'applications","href":"Examples","type":2,"isActive":false},{"label":"Résumé","href":"Summary","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ pip install \"dspy-ai[milvus]\"\n$ pip install -U pymilvus\n","from dspy.datasets import HotPotQA\n\n# Load the dataset.\ndataset = HotPotQA(\n    train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0\n)\n\n# Tell DSPy that the 'question' field is the input. Any other fields are labels and/or metadata.\ntrainset = [x.with_inputs(\"question\") for x in dataset.train]\ndevset = [x.with_inputs(\"question\") for x in dataset.dev]\n","import requests\nimport os\n\nos.environ[\"OPENAI_API_KEY\"] = \"\"\nMILVUS_URI = \"example.db\"\nMILVUS_TOKEN = \"\"\n\nfrom pymilvus import MilvusClient, DataType, Collection\nfrom dspy.retrieve.milvus_rm import openai_embedding_function\n\nclient = MilvusClient(uri=MILVUS_URI, token=MILVUS_TOKEN)\n\nif \"dspy_example\" not in client.list_collections():\n    client.create_collection(\n        collection_name=\"dspy_example\",\n        overwrite=True,\n        dimension=1536,\n        primary_field_name=\"id\",\n        vector_field_name=\"embedding\",\n        id_type=\"int\",\n        metric_type=\"IP\",\n        max_length=65535,\n        enable_dynamic=True,\n    )\ntext = requests.get(\n    \"https://raw.githubusercontent.com/wxywb/dspy_dataset_sample/master/sample_data.txt\"\n).text\n\nfor idx, passage in enumerate(text.split(\"\\n\")):\n    if len(passage) == 0:\n        continue\n    client.insert(\n        collection_name=\"dspy_example\",\n        data=[\n            {\n                \"id\": idx,\n                \"embedding\": openai_embedding_function(passage)[0],\n                \"text\": passage,\n            }\n        ],\n    )\n","from dspy.retrieve.milvus_rm import MilvusRM\nimport dspy\n\nretriever_model = MilvusRM(\n    collection_name=\"dspy_example\",\n    uri=MILVUS_URI,\n    token=MILVUS_TOKEN,  # ignore this if no token is required for Milvus connection\n    embedding_function=openai_embedding_function,\n)\nturbo = dspy.OpenAI(model=\"gpt-3.5-turbo\")\ndspy.settings.configure(lm=turbo)\n","class GenerateAnswer(dspy.Signature):\n    \"\"\"Answer questions with short factoid answers.\"\"\"\n\n    context = dspy.InputField(desc=\"may contain relevant facts\")\n    question = dspy.InputField()\n    answer = dspy.OutputField(desc=\"often between 1 and 5 words\")\n","class RAG(dspy.Module):\n    def __init__(self, rm):\n        super().__init__()\n        self.retrieve = rm\n\n        # This signature indicates the task imposed on the COT module.\n        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)\n\n    def forward(self, question):\n        # Use milvus_rm to retrieve context for the question.\n        context = self.retrieve(question).passages\n        # COT module takes \"context, query\" and output \"answer\".\n        prediction = self.generate_answer(context=context, question=question)\n        return dspy.Prediction(\n            context=[item.long_text for item in context], answer=prediction.answer\n        )\n","rag = RAG(retriever_model)\nprint(rag(\"who write At My Window\").answer)\n","from dspy.evaluate.evaluate import Evaluate\nfrom dspy.datasets import HotPotQA\n\nevaluate_on_hotpotqa = Evaluate(\n    devset=devset, num_threads=1, display_progress=False, display_table=5\n)\n\nmetric = dspy.evaluate.answer_exact_match\nscore = evaluate_on_hotpotqa(rag, metric=metric)\nprint(\"rag:\", score)\n","from dspy.teleprompt import BootstrapFewShot\n\n# Validation logic: check that the predicted answer is correct.# Also check that the retrieved context does contain that 
answer.\n\n\ndef validate_context_and_answer(example, pred, trace=None):\n    answer_EM = dspy.evaluate.answer_exact_match(example, pred)\n    answer_PM = dspy.evaluate.answer_passage_match(example, pred)\n    return answer_EM and answer_PM\n\n\n# Set up a basic teleprompter, which will compile our RAG program.\nteleprompter = BootstrapFewShot(metric=validate_context_and_answer)\n\n# Compile!\ncompiled_rag = teleprompter.compile(rag, trainset=trainset)\n\n# Now compiled_rag is optimized and ready to answer your new question!\n# Now, let’s evaluate the compiled RAG program.\nscore = evaluate_on_hotpotqa(compiled_rag, metric=metric)\nprint(score)\nprint(\"compile_rag:\", score)\n"],"headingContent":"Integrate Milvus with DSPy","anchorList":[{"label":"Intégrer Milvus à DSPy","href":"Integrate-Milvus-with-DSPy","type":1,"isActive":false},{"label":"Qu'est-ce que DSPy ?","href":"What-is-DSPy","type":2,"isActive":false},{"label":"Avantages de l'utilisation de DSPy","href":"Benefits-of-using-DSPy","type":2,"isActive":false},{"label":"Les modules","href":"Modules","type":2,"isActive":false},{"label":"Pourquoi Milvus dans DSPy ?","href":"Why-Milvus-in-DSPy","type":2,"isActive":false},{"label":"Exemples d'applications","href":"Examples","type":2,"isActive":false},{"label":"Résumé","href":"Summary","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/fr/integrations/integrate_with_dspy.md b/localization/v2.4.x/site/fr/integrations/integrate_with_dspy.md
    index 576870466..a2aac152e 100644
    --- a/localization/v2.4.x/site/fr/integrations/integrate_with_dspy.md
    +++ b/localization/v2.4.x/site/fr/integrations/integrate_with_dspy.md
    @@ -20,7 +20,8 @@ title: Intégrer Milvus à DSPy
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
-Open In Colab
+Open In Colab
+GitHub Repository
 Qu'est-ce que DSPy ?
-Approche de la programmation : DSPy fournit une approche de programmation systématique pour le développement de pipelines LM en abstrayant les pipelines en tant que graphes de transformation de texte au lieu de se contenter d'inviter les LLM. Ses modules déclaratifs permettent une conception et une optimisation structurées, remplaçant la méthode d'essai et d'erreur des modèles d'invite traditionnels.
+Approche de la programmation : DSPy fournit une approche de programmation systématique pour le développement de pipelines LM en abstrayant les pipelines sous forme de graphes de transformation de texte au lieu de se contenter d'inviter les LLM. Ses modules déclaratifs permettent une conception et une optimisation structurées, remplaçant la méthode d'essai et d'erreur des modèles d'invite traditionnels.
 Amélioration des performances : DSPy démontre des gains de performance significatifs par rapport aux méthodes existantes. Grâce à des études de cas, il surpasse les messages-guides standard et les démonstrations créées par des experts, démontrant ainsi sa polyvalence et son efficacité, même lorsqu'il est compilé dans des modèles LM plus petits.
 Abstraction modulaire : DSPy abstrait efficacement les aspects complexes du développement du pipeline LM, tels que la décomposition, le réglage fin et la sélection du modèle. Avec DSPy, un programme concis peut se traduire de manière transparente en instructions pour différents modèles, tels que GPT-4, Llama2-13b ou T5-base, ce qui simplifie le développement et améliore les performances.
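The first bullet in the hunk describes DSPy's declarative, text-transformation-graph style. A minimal illustration of what that looks like in code, using DSPy's string signatures; it assumes an LM has already been configured as in the `codeList` above (`dspy.settings.configure(lm=turbo)`):

```python
import dspy

# A one-line declarative signature: inputs on the left, outputs on the
# right. ChainOfThought compiles the prompting strategy behind it.
qa = dspy.ChainOfThought("question -> answer")

# Requires dspy.settings.configure(lm=...) to have run beforehand.
prediction = qa(question="What does DSPy abstract away?")
print(prediction.answer)
```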
    diff --git a/localization/v2.4.x/site/fr/integrations/integrate_with_haystack.json b/localization/v2.4.x/site/fr/integrations/integrate_with_haystack.json index 9d6f3c6ff..4bf483970 100644 --- a/localization/v2.4.x/site/fr/integrations/integrate_with_haystack.json +++ b/localization/v2.4.x/site/fr/integrations/integrate_with_haystack.json @@ -1 +1 @@ -{"codeList":["! pip install --upgrade --quiet pymilvus milvus-haystack markdown-it-py mdit_plain\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","import os\nimport urllib.request\n\nurl = \"https://www.gutenberg.org/cache/epub/7785/pg7785.txt\"\nfile_path = \"./davinci.txt\"\n\nif not os.path.exists(file_path):\n urllib.request.urlretrieve(url, file_path)\n","from haystack import Pipeline\nfrom haystack.components.converters import MarkdownToDocument\nfrom haystack.components.embedders import OpenAIDocumentEmbedder, OpenAITextEmbedder\nfrom haystack.components.preprocessors import DocumentSplitter\nfrom haystack.components.writers import DocumentWriter\nfrom haystack.utils import Secret\n\nfrom milvus_haystack import MilvusDocumentStore\nfrom milvus_haystack.milvus_embedding_retriever import MilvusEmbeddingRetriever\n\n\ndocument_store = MilvusDocumentStore(\n connection_args={\"uri\": \"./milvus.db\"},\n # connection_args={\"uri\": \"http://localhost:19530\"},\n # connection_args={\"uri\": YOUR_ZILLIZ_CLOUD_URI, \"token\": Secret.from_env_var(\"ZILLIZ_CLOUD_API_KEY\")},\n drop_old=True,\n)\n","indexing_pipeline = Pipeline()\nindexing_pipeline.add_component(\"converter\", MarkdownToDocument())\nindexing_pipeline.add_component(\n \"splitter\", DocumentSplitter(split_by=\"sentence\", split_length=2)\n)\nindexing_pipeline.add_component(\"embedder\", OpenAIDocumentEmbedder())\nindexing_pipeline.add_component(\"writer\", DocumentWriter(document_store))\nindexing_pipeline.connect(\"converter\", \"splitter\")\nindexing_pipeline.connect(\"splitter\", \"embedder\")\nindexing_pipeline.connect(\"embedder\", \"writer\")\nindexing_pipeline.run({\"converter\": {\"sources\": [file_path]}})\n\nprint(\"Number of documents:\", document_store.count_documents())\n","question = 'Where is the painting \"Warrior\" currently stored?'\n\nretrieval_pipeline = Pipeline()\nretrieval_pipeline.add_component(\"embedder\", OpenAITextEmbedder())\nretrieval_pipeline.add_component(\n \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nretrieval_pipeline.connect(\"embedder\", \"retriever\")\n\nretrieval_results = retrieval_pipeline.run({\"embedder\": {\"text\": question}})\n\nfor doc in retrieval_results[\"retriever\"][\"documents\"]:\n print(doc.content)\n print(\"-\" * 10)\n","from haystack.utils import Secret\nfrom haystack.components.builders import PromptBuilder\nfrom haystack.components.generators import OpenAIGenerator\n\nprompt_template = \"\"\"Answer the following query based on the provided context. 
If the context does\n not include an answer, reply with 'I don't know'.\\n\n Query: {{query}}\n Documents:\n {% for doc in documents %}\n {{ doc.content }}\n {% endfor %}\n Answer:\n \"\"\"\n\nrag_pipeline = Pipeline()\nrag_pipeline.add_component(\"text_embedder\", OpenAITextEmbedder())\nrag_pipeline.add_component(\n \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nrag_pipeline.add_component(\"prompt_builder\", PromptBuilder(template=prompt_template))\nrag_pipeline.add_component(\n \"generator\",\n OpenAIGenerator(\n api_key=Secret.from_token(os.getenv(\"OPENAI_API_KEY\")),\n generation_kwargs={\"temperature\": 0},\n ),\n)\nrag_pipeline.connect(\"text_embedder.embedding\", \"retriever.query_embedding\")\nrag_pipeline.connect(\"retriever.documents\", \"prompt_builder.documents\")\nrag_pipeline.connect(\"prompt_builder\", \"generator\")\n\nresults = rag_pipeline.run(\n {\n \"text_embedder\": {\"text\": question},\n \"prompt_builder\": {\"query\": question},\n }\n)\nprint(\"RAG answer:\", results[\"generator\"][\"replies\"][0])\n"],"headingContent":"","anchorList":[{"label":"Génération améliorée par récupération (RAG) avec Milvus et Haystack","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Haystack","type":1,"isActive":false},{"label":"Conditions préalables","href":"Prerequisites","type":2,"isActive":false},{"label":"Préparer les données","href":"Prepare-the-data","type":2,"isActive":false},{"label":"Créer le pipeline d'indexation","href":"Create-the-indexing-Pipeline","type":2,"isActive":false},{"label":"Créer le pipeline de récupération","href":"Create-the-retrieval-pipeline","type":2,"isActive":false},{"label":"Créer le pipeline RAG","href":"Create-the-RAG-pipeline","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["! 
pip install --upgrade --quiet pymilvus milvus-haystack markdown-it-py mdit_plain\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","import os\nimport urllib.request\n\nurl = \"https://www.gutenberg.org/cache/epub/7785/pg7785.txt\"\nfile_path = \"./davinci.txt\"\n\nif not os.path.exists(file_path):\n urllib.request.urlretrieve(url, file_path)\n","from haystack import Pipeline\nfrom haystack.components.converters import MarkdownToDocument\nfrom haystack.components.embedders import OpenAIDocumentEmbedder, OpenAITextEmbedder\nfrom haystack.components.preprocessors import DocumentSplitter\nfrom haystack.components.writers import DocumentWriter\nfrom haystack.utils import Secret\n\nfrom milvus_haystack import MilvusDocumentStore\nfrom milvus_haystack.milvus_embedding_retriever import MilvusEmbeddingRetriever\n\n\ndocument_store = MilvusDocumentStore(\n connection_args={\"uri\": \"./milvus.db\"},\n # connection_args={\"uri\": \"http://localhost:19530\"},\n # connection_args={\"uri\": YOUR_ZILLIZ_CLOUD_URI, \"token\": Secret.from_env_var(\"ZILLIZ_CLOUD_API_KEY\")},\n drop_old=True,\n)\n","indexing_pipeline = Pipeline()\nindexing_pipeline.add_component(\"converter\", MarkdownToDocument())\nindexing_pipeline.add_component(\n \"splitter\", DocumentSplitter(split_by=\"sentence\", split_length=2)\n)\nindexing_pipeline.add_component(\"embedder\", OpenAIDocumentEmbedder())\nindexing_pipeline.add_component(\"writer\", DocumentWriter(document_store))\nindexing_pipeline.connect(\"converter\", \"splitter\")\nindexing_pipeline.connect(\"splitter\", \"embedder\")\nindexing_pipeline.connect(\"embedder\", \"writer\")\nindexing_pipeline.run({\"converter\": {\"sources\": [file_path]}})\n\nprint(\"Number of documents:\", document_store.count_documents())\n","question = 'Where is the painting \"Warrior\" currently stored?'\n\nretrieval_pipeline = Pipeline()\nretrieval_pipeline.add_component(\"embedder\", OpenAITextEmbedder())\nretrieval_pipeline.add_component(\n \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nretrieval_pipeline.connect(\"embedder\", \"retriever\")\n\nretrieval_results = retrieval_pipeline.run({\"embedder\": {\"text\": question}})\n\nfor doc in retrieval_results[\"retriever\"][\"documents\"]:\n print(doc.content)\n print(\"-\" * 10)\n","from haystack.utils import Secret\nfrom haystack.components.builders import PromptBuilder\nfrom haystack.components.generators import OpenAIGenerator\n\nprompt_template = \"\"\"Answer the following query based on the provided context. 
If the context does\n not include an answer, reply with 'I don't know'.\\n\n Query: {{query}}\n Documents:\n {% for doc in documents %}\n {{ doc.content }}\n {% endfor %}\n Answer:\n \"\"\"\n\nrag_pipeline = Pipeline()\nrag_pipeline.add_component(\"text_embedder\", OpenAITextEmbedder())\nrag_pipeline.add_component(\n \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nrag_pipeline.add_component(\"prompt_builder\", PromptBuilder(template=prompt_template))\nrag_pipeline.add_component(\n \"generator\",\n OpenAIGenerator(\n api_key=Secret.from_token(os.getenv(\"OPENAI_API_KEY\")),\n generation_kwargs={\"temperature\": 0},\n ),\n)\nrag_pipeline.connect(\"text_embedder.embedding\", \"retriever.query_embedding\")\nrag_pipeline.connect(\"retriever.documents\", \"prompt_builder.documents\")\nrag_pipeline.connect(\"prompt_builder\", \"generator\")\n\nresults = rag_pipeline.run(\n {\n \"text_embedder\": {\"text\": question},\n \"prompt_builder\": {\"query\": question},\n }\n)\nprint(\"RAG answer:\", results[\"generator\"][\"replies\"][0])\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and Haystack","anchorList":[{"label":"Génération améliorée par récupération (RAG) avec Milvus et Haystack","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Haystack","type":1,"isActive":false},{"label":"Conditions préalables","href":"Prerequisites","type":2,"isActive":false},{"label":"Préparer les données","href":"Prepare-the-data","type":2,"isActive":false},{"label":"Créer le pipeline d'indexation","href":"Create-the-indexing-Pipeline","type":2,"isActive":false},{"label":"Créer le pipeline de récupération","href":"Create-the-retrieval-pipeline","type":2,"isActive":false},{"label":"Créer le pipeline RAG","href":"Create-the-RAG-pipeline","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/integrations/integrate_with_haystack.md b/localization/v2.4.x/site/fr/integrations/integrate_with_haystack.md index 2a2d068fe..7a53544a5 100644 --- a/localization/v2.4.x/site/fr/integrations/integrate_with_haystack.md +++ b/localization/v2.4.x/site/fr/integrations/integrate_with_haystack.md @@ -20,7 +20,8 @@ title: Génération améliorée par récupération (RAG) avec Milvus et Haystack d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

- Open In Colab
+ Open In Colab | GitHub Repository

This guide demonstrates how to build a Retrieval-Augmented Generation (RAG) system using Haystack and Milvus.

The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

Haystack is the open-source Python framework by deepset for building custom apps with large language models (LLMs). Milvus is the world's most advanced open-source vector database, built to power embedding similarity search and AI applications.
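The query half of that flow condenses to a few lines. Below is a minimal sketch built from the same `milvus_haystack` components this guide uses; the local `./milvus.db` URI (Milvus Lite) and the sample question are illustrative assumptions, and `OPENAI_API_KEY` must be set in the environment:

```python
from haystack import Pipeline
from haystack.components.embedders import OpenAITextEmbedder
from milvus_haystack import MilvusDocumentStore
from milvus_haystack.milvus_embedding_retriever import MilvusEmbeddingRetriever

# Connect to a local Milvus Lite file; swap the URI for a Milvus server if needed.
document_store = MilvusDocumentStore(connection_args={"uri": "./milvus.db"})

# Embed the query, then fetch the top-3 most similar documents from Milvus.
pipeline = Pipeline()
pipeline.add_component("embedder", OpenAITextEmbedder())
pipeline.add_component(
    "retriever", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)
)
pipeline.connect("embedder", "retriever")

results = pipeline.run({"embedder": {"text": 'Where is the painting "Warrior" stored?'}})
for doc in results["retriever"]["documents"]:
    print(doc.content)
```

The generation step simply appends a `PromptBuilder` and a generator component to this same pipeline, as the full code in this guide shows.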

    diff --git a/localization/v2.4.x/site/fr/integrations/integrate_with_hugging-face.json b/localization/v2.4.x/site/fr/integrations/integrate_with_hugging-face.json index 5ca00f2a4..345ec14ed 100644 --- a/localization/v2.4.x/site/fr/integrations/integrate_with_hugging-face.json +++ b/localization/v2.4.x/site/fr/integrations/integrate_with_hugging-face.json @@ -1 +1 @@ -{"codeList":["$ pip install --upgrade pymilvus transformers datasets torch\n","from datasets import load_dataset\n\n\nDATASET = \"squad\" # Name of dataset from HuggingFace Datasets\nINSERT_RATIO = 0.001 # Ratio of example dataset to be inserted\n\ndata = load_dataset(DATASET, split=\"validation\")\n# Generates a fixed subset. To generate a random subset, remove the seed.\ndata = data.train_test_split(test_size=INSERT_RATIO, seed=42)[\"test\"]\n# Clean up the data structure in the dataset.\ndata = data.map(\n lambda val: {\"answer\": val[\"answers\"][\"text\"][0]},\n remove_columns=[\"id\", \"answers\", \"context\"],\n)\n\n# View summary of example data\nprint(data)\n","from transformers import AutoTokenizer, AutoModel\nimport torch\n\nMODEL = (\n \"sentence-transformers/all-MiniLM-L6-v2\" # Name of model from HuggingFace Models\n)\nINFERENCE_BATCH_SIZE = 64 # Batch size of model inference\n\n# Load tokenizer & model from HuggingFace Hub\ntokenizer = AutoTokenizer.from_pretrained(MODEL)\nmodel = AutoModel.from_pretrained(MODEL)\n\n\ndef encode_text(batch):\n # Tokenize sentences\n encoded_input = tokenizer(\n batch[\"question\"], padding=True, truncation=True, return_tensors=\"pt\"\n )\n\n # Compute token embeddings\n with torch.no_grad():\n model_output = model(**encoded_input)\n\n # Perform pooling\n token_embeddings = model_output[0]\n attention_mask = encoded_input[\"attention_mask\"]\n input_mask_expanded = (\n attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n )\n sentence_embeddings = torch.sum(\n token_embeddings * input_mask_expanded, 1\n ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n\n # Normalize embeddings\n batch[\"question_embedding\"] = torch.nn.functional.normalize(\n sentence_embeddings, p=2, dim=1\n )\n return batch\n\n\ndata = data.map(encode_text, batched=True, batch_size=INFERENCE_BATCH_SIZE)\ndata_list = data.to_list()\n","from pymilvus import MilvusClient\n\n\nMILVUS_URI = \"./huggingface_milvus_test.db\" # Connection URI\nCOLLECTION_NAME = \"huggingface_test\" # Collection name\nDIMENSION = 384 # Embedding dimension depending on model\n\nmilvus_client = MilvusClient(MILVUS_URI)\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n collection_name=COLLECTION_NAME,\n dimension=DIMENSION,\n auto_id=True, # Enable auto id\n enable_dynamic_field=True, # Enable dynamic fields\n vector_field_name=\"question_embedding\", # Map vector field name and embedding column in dataset\n consistency_level=\"Strong\", # To enable search with latest data\n)\n","milvus_client.insert(collection_name=COLLECTION_NAME, data=data_list)\n","questions = {\n \"question\": [\n \"What is LGM?\",\n \"When did Massachusetts first mandate that children be educated in schools?\",\n ]\n}\n\n# Generate question embeddings\nquestion_embeddings = [v.tolist() for v in encode_text(questions)[\"question_embedding\"]]\n\n# Search across Milvus\nsearch_results = milvus_client.search(\n collection_name=COLLECTION_NAME,\n data=question_embeddings,\n limit=3, # How many search results to output\n 
output_fields=[\"answer\", \"question\"], # Include these fields in search results\n)\n\n# Print out results\nfor q, res in zip(questions[\"question\"], search_results):\n print(\"Question:\", q)\n for r in res:\n print(\n {\n \"answer\": r[\"entity\"][\"answer\"],\n \"score\": r[\"distance\"],\n \"original question\": r[\"entity\"][\"question\"],\n }\n )\n print(\"\\n\")\n"],"headingContent":"","anchorList":[{"label":"Réponse aux questions à l'aide de Milvus et de Hugging Face","href":"Question-Answering-Using-Milvus-and-Hugging-Face","type":1,"isActive":false},{"label":"Avant de commencer","href":"Before-you-begin","type":2,"isActive":false},{"label":"Préparer les données","href":"Prepare-data","type":2,"isActive":false},{"label":"Insérer des données","href":"Insert-data","type":2,"isActive":false},{"label":"Poser des questions","href":"Ask-questions","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install --upgrade pymilvus transformers datasets torch\n","from datasets import load_dataset\n\n\nDATASET = \"squad\" # Name of dataset from HuggingFace Datasets\nINSERT_RATIO = 0.001 # Ratio of example dataset to be inserted\n\ndata = load_dataset(DATASET, split=\"validation\")\n# Generates a fixed subset. To generate a random subset, remove the seed.\ndata = data.train_test_split(test_size=INSERT_RATIO, seed=42)[\"test\"]\n# Clean up the data structure in the dataset.\ndata = data.map(\n lambda val: {\"answer\": val[\"answers\"][\"text\"][0]},\n remove_columns=[\"id\", \"answers\", \"context\"],\n)\n\n# View summary of example data\nprint(data)\n","from transformers import AutoTokenizer, AutoModel\nimport torch\n\nMODEL = (\n \"sentence-transformers/all-MiniLM-L6-v2\" # Name of model from HuggingFace Models\n)\nINFERENCE_BATCH_SIZE = 64 # Batch size of model inference\n\n# Load tokenizer & model from HuggingFace Hub\ntokenizer = AutoTokenizer.from_pretrained(MODEL)\nmodel = AutoModel.from_pretrained(MODEL)\n\n\ndef encode_text(batch):\n # Tokenize sentences\n encoded_input = tokenizer(\n batch[\"question\"], padding=True, truncation=True, return_tensors=\"pt\"\n )\n\n # Compute token embeddings\n with torch.no_grad():\n model_output = model(**encoded_input)\n\n # Perform pooling\n token_embeddings = model_output[0]\n attention_mask = encoded_input[\"attention_mask\"]\n input_mask_expanded = (\n attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n )\n sentence_embeddings = torch.sum(\n token_embeddings * input_mask_expanded, 1\n ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n\n # Normalize embeddings\n batch[\"question_embedding\"] = torch.nn.functional.normalize(\n sentence_embeddings, p=2, dim=1\n )\n return batch\n\n\ndata = data.map(encode_text, batched=True, batch_size=INFERENCE_BATCH_SIZE)\ndata_list = data.to_list()\n","from pymilvus import MilvusClient\n\n\nMILVUS_URI = \"./huggingface_milvus_test.db\" # Connection URI\nCOLLECTION_NAME = \"huggingface_test\" # Collection name\nDIMENSION = 384 # Embedding dimension depending on model\n\nmilvus_client = MilvusClient(MILVUS_URI)\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n collection_name=COLLECTION_NAME,\n dimension=DIMENSION,\n auto_id=True, # Enable auto id\n enable_dynamic_field=True, # Enable dynamic fields\n vector_field_name=\"question_embedding\", # Map vector field name and embedding column in dataset\n consistency_level=\"Strong\", # To enable search 
with latest data\n)\n","milvus_client.insert(collection_name=COLLECTION_NAME, data=data_list)\n","questions = {\n \"question\": [\n \"What is LGM?\",\n \"When did Massachusetts first mandate that children be educated in schools?\",\n ]\n}\n\n# Generate question embeddings\nquestion_embeddings = [v.tolist() for v in encode_text(questions)[\"question_embedding\"]]\n\n# Search across Milvus\nsearch_results = milvus_client.search(\n collection_name=COLLECTION_NAME,\n data=question_embeddings,\n limit=3, # How many search results to output\n output_fields=[\"answer\", \"question\"], # Include these fields in search results\n)\n\n# Print out results\nfor q, res in zip(questions[\"question\"], search_results):\n print(\"Question:\", q)\n for r in res:\n print(\n {\n \"answer\": r[\"entity\"][\"answer\"],\n \"score\": r[\"distance\"],\n \"original question\": r[\"entity\"][\"question\"],\n }\n )\n print(\"\\n\")\n"],"headingContent":"Question Answering Using Milvus and Hugging Face","anchorList":[{"label":"Réponse aux questions à l'aide de Milvus et de Hugging Face","href":"Question-Answering-Using-Milvus-and-Hugging-Face","type":1,"isActive":false},{"label":"Avant de commencer","href":"Before-you-begin","type":2,"isActive":false},{"label":"Préparer les données","href":"Prepare-data","type":2,"isActive":false},{"label":"Insérer des données","href":"Insert-data","type":2,"isActive":false},{"label":"Poser des questions","href":"Ask-questions","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/integrations/integrate_with_hugging-face.md b/localization/v2.4.x/site/fr/integrations/integrate_with_hugging-face.md index 716f3aa85..477e5ad02 100644 --- a/localization/v2.4.x/site/fr/integrations/integrate_with_hugging-face.md +++ b/localization/v2.4.x/site/fr/integrations/integrate_with_hugging-face.md @@ -22,7 +22,8 @@ title: Réponse aux questions à l'aide de Milvus et de Hugging Face d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

- Open In Colab
+ Open In Colab | GitHub Repository

A question-answering system based on semantic search works by finding the most similar question from a dataset of question-answer pairs for a given query question. Once the most similar question is identified, the corresponding answer from the dataset is considered as the answer to the query. This approach relies on semantic similarity measures to determine the similarity between questions and retrieve relevant answers.

This tutorial shows how to build a question-answering system using Hugging Face as the data loader and embedding generator for data processing, and Milvus as the vector database for semantic search.
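At query time, that amounts to one embedding plus one vector search. A minimal sketch, assuming the collection built later in this tutorial already exists; it uses the `sentence-transformers` convenience wrapper (an extra dependency this tutorial does not install) for the same MiniLM model:

```python
from pymilvus import MilvusClient
from sentence_transformers import SentenceTransformer

# Same embedding model as the tutorial, loaded via sentence-transformers for brevity.
model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
client = MilvusClient("./huggingface_milvus_test.db")

question = "What is LGM?"
embedding = model.encode([question], normalize_embeddings=True).tolist()

# The answer attached to the most similar stored question is returned as the answer.
hits = client.search(
    collection_name="huggingface_test",
    data=embedding,
    limit=1,
    output_fields=["answer", "question"],
)
print(hits[0][0]["entity"]["answer"])
```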

Before you begin

- Open In Colab
+ Open In Colab | GitHub Repository

This guide shows how to use Jina AI embeddings and Milvus to conduct similarity search and retrieval tasks.

Who is Jina AI?

Jina AI's core embedding model excels in understanding detailed text, making it ideal for semantic search, content classification, advanced sentiment analysis, text summarization, and personalized recommendation systems.

    -
    from pymilvus.model.dense import JinaEmbeddingFunction
    +
    from pymilvus.model.dense import JinaEmbeddingFunction
     
     jina_api_key = "<YOUR_JINA_API_KEY>"
    -ef = JinaEmbeddingFunction("jina-embeddings-v2-base-en", jina_api_key)
    +ef = JinaEmbeddingFunction(
    +    "jina-embeddings-v3", 
    +    jina_api_key,
    +    task="retrieval.passage",
    +    dimensions=1024
    +)
     
     query = "what is information retrieval?"
     doc = "Information retrieval is the process of finding relevant information from a large collection of data or documents."
     
    -qvecs = ef.encode_queries([query])
    -dvecs = ef.encode_documents([doc])
    +qvecs = ef.encode_queries([query])  # This method uses `retrieval.query` as the task
    +dvecs = ef.encode_documents([doc])  # This method uses `retrieval.passage` as the task
     

Bilingual embeddings

- Open In Colab
+ Open In Colab | GitHub Repository

This guide demonstrates how to build a Retrieval-Augmented Generation (RAG) system using LlamaIndex and Milvus.

The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

LlamaIndex is a simple, flexible data framework for connecting custom data sources to large language models (LLMs). Milvus is the world's most advanced open-source vector database, built to power embedding similarity search and AI applications.
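The index-and-query flow looks roughly like this. A minimal sketch assuming the `llama-index-vector-stores-milvus` integration package; the local URI, the `davinci.txt` input file, and `dim=1536` (the OpenAI default embedding size) are illustrative assumptions:

```python
from llama_index.core import SimpleDirectoryReader, StorageContext, VectorStoreIndex
from llama_index.vector_stores.milvus import MilvusVectorStore

# Load raw documents, store their embeddings in Milvus, then query them.
documents = SimpleDirectoryReader(input_files=["./davinci.txt"]).load_data()
vector_store = MilvusVectorStore(uri="./milvus_llamaindex.db", dim=1536, overwrite=True)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)

# The query engine retrieves from Milvus and feeds the hits to the LLM.
query_engine = index.as_query_engine()
print(query_engine.query("Who is Leonardo da Vinci?"))
```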

    diff --git a/localization/v2.4.x/site/fr/integrations/integrate_with_openai.json b/localization/v2.4.x/site/fr/integrations/integrate_with_openai.json index 663df58f3..06bdb80cc 100644 --- a/localization/v2.4.x/site/fr/integrations/integrate_with_openai.json +++ b/localization/v2.4.x/site/fr/integrations/integrate_with_openai.json @@ -1 +1 @@ -{"codeList":["pip install --upgrade openai pymilvus\n","from openai import OpenAI\nfrom pymilvus import MilvusClient\n\nMODEL_NAME = \"text-embedding-3-small\" # Which model to use, please check https://platform.openai.com/docs/guides/embeddings for available models\nDIMENSION = 1536 # Dimension of vector embedding\n\n# Connect to OpenAI with API Key.\nopenai_client = OpenAI(api_key=\"\")\n\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = [\n vec.embedding\n for vec in openai_client.embeddings.create(input=docs, model=MODEL_NAME).data\n]\n\n# Prepare data to be stored in Milvus vector database.\n# We can store the id, vector representation, raw text and labels such as \"subject\" in this case in Milvus.\ndata = [\n {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n for i in range(len(docs))\n]\n\n\n# Connect to Milvus, all data is stored in a local file named \"milvus_openai_demo.db\"\n# in current directory. You can also connect to a remote Milvus server following this\n# instruction: https://milvus.io/docs/install_standalone-docker.md.\nmilvus_client = MilvusClient(uri=\"milvus_openai_demo.db\")\nCOLLECTION_NAME = \"demo_collection\" # Milvus collection name\n# Create a collection to store the vectors and text.\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\n# Insert all data into Milvus vector database.\nres = milvus_client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res[\"insert_count\"])\n","queries = [\"When was artificial intelligence founded?\"]\n\nquery_vectors = [\n vec.embedding\n for vec in openai_client.embeddings.create(input=queries, model=MODEL_NAME).data\n]\n\nres = milvus_client.search(\n collection_name=COLLECTION_NAME, # target collection\n data=query_vectors, # query vectors\n limit=2, # number of returned entities\n output_fields=[\"text\", \"subject\"], # specifies fields to be returned\n)\n\nfor q in queries:\n print(\"Query:\", q)\n for result in res:\n print(result)\n print(\"\\n\")\n","[\n {\n \"id\": 0,\n \"distance\": -0.772376537322998,\n \"entity\": {\n \"text\": \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"subject\": \"history\",\n },\n },\n {\n \"id\": 1,\n \"distance\": -0.58596271276474,\n \"entity\": {\n \"text\": \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"subject\": \"history\",\n },\n },\n]\n"],"headingContent":"","anchorList":[{"label":"Recherche sémantique avec Milvus et OpenAI","href":"Semantic-Search-with-Milvus-and-OpenAI","type":1,"isActive":false},{"label":"Pour commencer","href":"Getting-started","type":2,"isActive":false},{"label":"Recherche de titres de livres avec OpenAI et Milvus","href":"Searching-book-titles-with-OpenAI--Milvus","type":2,"isActive":false}]} \ No newline at end of file 
+{"codeList":["pip install --upgrade openai pymilvus\n","from openai import OpenAI\nfrom pymilvus import MilvusClient\n\nMODEL_NAME = \"text-embedding-3-small\" # Which model to use, please check https://platform.openai.com/docs/guides/embeddings for available models\nDIMENSION = 1536 # Dimension of vector embedding\n\n# Connect to OpenAI with API Key.\nopenai_client = OpenAI(api_key=\"\")\n\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = [\n vec.embedding\n for vec in openai_client.embeddings.create(input=docs, model=MODEL_NAME).data\n]\n\n# Prepare data to be stored in Milvus vector database.\n# We can store the id, vector representation, raw text and labels such as \"subject\" in this case in Milvus.\ndata = [\n {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n for i in range(len(docs))\n]\n\n\n# Connect to Milvus, all data is stored in a local file named \"milvus_openai_demo.db\"\n# in current directory. You can also connect to a remote Milvus server following this\n# instruction: https://milvus.io/docs/install_standalone-docker.md.\nmilvus_client = MilvusClient(uri=\"milvus_openai_demo.db\")\nCOLLECTION_NAME = \"demo_collection\" # Milvus collection name\n# Create a collection to store the vectors and text.\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\n# Insert all data into Milvus vector database.\nres = milvus_client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res[\"insert_count\"])\n","queries = [\"When was artificial intelligence founded?\"]\n\nquery_vectors = [\n vec.embedding\n for vec in openai_client.embeddings.create(input=queries, model=MODEL_NAME).data\n]\n\nres = milvus_client.search(\n collection_name=COLLECTION_NAME, # target collection\n data=query_vectors, # query vectors\n limit=2, # number of returned entities\n output_fields=[\"text\", \"subject\"], # specifies fields to be returned\n)\n\nfor q in queries:\n print(\"Query:\", q)\n for result in res:\n print(result)\n print(\"\\n\")\n","[\n {\n \"id\": 0,\n \"distance\": -0.772376537322998,\n \"entity\": {\n \"text\": \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"subject\": \"history\",\n },\n },\n {\n \"id\": 1,\n \"distance\": -0.58596271276474,\n \"entity\": {\n \"text\": \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"subject\": \"history\",\n },\n },\n]\n"],"headingContent":"Semantic Search with Milvus and OpenAI","anchorList":[{"label":"Recherche sémantique avec Milvus et OpenAI","href":"Semantic-Search-with-Milvus-and-OpenAI","type":1,"isActive":false},{"label":"Pour commencer","href":"Getting-started","type":2,"isActive":false},{"label":"Recherche de titres de livres avec OpenAI et Milvus","href":"Searching-book-titles-with-OpenAI--Milvus","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/integrations/integrate_with_openai.md b/localization/v2.4.x/site/fr/integrations/integrate_with_openai.md index 30be6555a..19f25b5bc 100644 --- a/localization/v2.4.x/site/fr/integrations/integrate_with_openai.md +++ b/localization/v2.4.x/site/fr/integrations/integrate_with_openai.md 
@@ -20,7 +20,8 @@ summary: >- d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

- Open In Colab
+ Open In Colab | GitHub Repository

This guide shows how OpenAI's Embedding API can be used with the Milvus vector database to conduct semantic search on text.

Getting started

- Open In Colab
+ Open In Colab | GitHub Repository

This guide shows how to use Ragas to evaluate a Retrieval-Augmented Generation (RAG) pipeline built on Milvus.

The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

Ragas is a framework that helps you evaluate your RAG pipelines. There are existing tools and frameworks that help you build these pipelines, but evaluating them and quantifying their performance can be hard. This is where Ragas (RAG Assessment) comes in.
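In outline, Ragas takes your pipeline's questions, the contexts Milvus retrieved, the generated answers, and reference answers, and scores them against a set of metrics. A minimal sketch assuming the `ragas` package of this era; the sample row is illustrative, and column names (e.g. `ground_truth`) may differ slightly between ragas versions:

```python
from datasets import Dataset
from ragas import evaluate
from ragas.metrics import answer_relevancy, context_precision, faithfulness

# Each row pairs a question with the contexts the RAG pipeline retrieved
# and the answer it generated.
eval_data = Dataset.from_dict(
    {
        "question": ["What is Milvus?"],
        "contexts": [["Milvus is an open-source vector database."]],
        "answer": ["Milvus is an open-source vector database."],
        "ground_truth": ["Milvus is an open-source vector database."],
    }
)

# Each metric is scored by an LLM judge; OPENAI_API_KEY must be set.
result = evaluate(eval_data, metrics=[answer_relevancy, faithfulness, context_precision])
print(result)
```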

Prerequisites

+ Open In Colab | GitHub Repository

Vanna is an open-source Python RAG (Retrieval-Augmented Generation) framework for SQL generation and related functionality. Milvus is the world's most advanced open-source vector database, built to power embedding similarity search and AI applications.

Vanna works in two easy steps - train a RAG "model" on your data, and then ask questions that will return SQL queries which can be set up to run on your database. This guide demonstrates how to use Vanna to generate and execute SQL queries based on your data stored in a database.
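Those two steps look roughly like this. A minimal sketch assuming the `vanna` package's Milvus vector store and OpenAI chat backends; the config keys, table DDL, and model name are illustrative and may vary by vanna version:

```python
from pymilvus import MilvusClient
from vanna.milvus import Milvus_VectorStore
from vanna.openai import OpenAI_Chat


# Combine a Milvus-backed vector store with an OpenAI chat model.
class VannaMilvus(Milvus_VectorStore, OpenAI_Chat):
    def __init__(self, config=None):
        Milvus_VectorStore.__init__(self, config=config)
        OpenAI_Chat.__init__(self, config=config)


vn = VannaMilvus(
    config={
        "api_key": "sk-***",  # your OpenAI API key
        "model": "gpt-3.5-turbo",
        "milvus_client": MilvusClient(uri="./milvus_vanna.db"),
    }
)

# Step 1: train the RAG "model" on schema (DDL) so it knows your tables.
vn.train(ddl="CREATE TABLE customers (id INTEGER, name TEXT, city TEXT)")

# Step 2: ask a question; Vanna generates (and can execute) the SQL.
print(vn.generate_sql("How many customers are in each city?"))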

Prerequisites

- Open In Colab
+ Open In Colab | GitHub Repository

This guide shows how VoyageAI's Embedding API can be used with the Milvus vector database to conduct semantic search on text.

Getting started

- Open In Colab
+ Open In Colab | GitHub Repository

This guide demonstrates how to build a Retrieval-Augmented Generation (RAG) system using LangChain and Milvus.

The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

LangChain is a framework for developing applications powered by large language models (LLMs). Milvus is the world's most advanced open-source vector database, built to power embedding similarity search and AI applications.
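The retrieval side of that pipeline condenses to a few lines. A minimal sketch assuming the `langchain-milvus` and `langchain-openai` integration packages; the local URI, sample document, and `k=1` are illustrative assumptions:

```python
from langchain_core.documents import Document
from langchain_milvus import Milvus
from langchain_openai import OpenAIEmbeddings

docs = [
    Document(page_content="Milvus is a vector database built for scalable similarity search.")
]

# Embed the documents and store them in a local Milvus Lite file.
vectorstore = Milvus.from_documents(
    documents=docs,
    embedding=OpenAIEmbeddings(),
    connection_args={"uri": "./milvus_langchain.db"},
    drop_old=True,
)

# Retrieved documents are then passed to the LLM as context.
retriever = vectorstore.as_retriever(search_kwargs={"k": 1})
print(retriever.invoke("What is Milvus?"))
```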

    diff --git a/localization/v2.4.x/site/fr/reference/architecture/architecture_overview.json b/localization/v2.4.x/site/fr/reference/architecture/architecture_overview.json index c03960585..d037d41d1 100644 --- a/localization/v2.4.x/site/fr/reference/architecture/architecture_overview.json +++ b/localization/v2.4.x/site/fr/reference/architecture/architecture_overview.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"","anchorList":[{"label":"Présentation de l'architecture de Milvus","href":"Milvus-Architecture-Overview","type":1,"isActive":false},{"label":"Prochaines étapes","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Milvus Architecture Overview","anchorList":[{"label":"Présentation de l'architecture de Milvus","href":"Milvus-Architecture-Overview","type":1,"isActive":false},{"label":"Suite de l'article","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/reference/architecture/architecture_overview.md b/localization/v2.4.x/site/fr/reference/architecture/architecture_overview.md index 3465b97e0..2aa11ca6f 100644 --- a/localization/v2.4.x/site/fr/reference/architecture/architecture_overview.md +++ b/localization/v2.4.x/site/fr/reference/architecture/architecture_overview.md @@ -4,7 +4,7 @@ summary: >- Milvus fournit une base de données vectorielles rapide, fiable et stable, spécialement conçue pour la recherche de similarités et l'intelligence artificielle. -title: Aperçu de l'architecture Milvus +title: Présentation de l'architecture de Milvus ---

Milvus Architecture Overview

Built on top of popular vector search libraries including Faiss, HNSW, DiskANN, and SCANN, Milvus was designed for similarity search on dense vector datasets containing millions, billions, or even trillions of vectors. Before proceeding, familiarize yourself with the basic principles of embedding retrieval.

Milvus also supports data sharding, streaming data ingestion, dynamic schemas, search combining vector and scalar data, multi-vector and hybrid search, sparse vectors, and many other advanced functions. The platform offers on-demand performance and can be optimized to fit any embedding retrieval scenario. We recommend deploying Milvus using Kubernetes for optimal availability and elasticity.

Milvus adopts a shared-storage architecture featuring storage and compute disaggregation and horizontal scalability for its compute nodes. Following the principle of data-plane and control-plane disaggregation, Milvus comprises four layers: access layer, coordinator service, worker node, and storage. These layers are mutually independent when it comes to scaling or disaster recovery.

Architecture_diagram Architecture diagram

What's next

    diff --git a/localization/v2.4.x/site/fr/reference/disk_index.json b/localization/v2.4.x/site/fr/reference/disk_index.json index 9d66274c2..c87745e03 100644 --- a/localization/v2.4.x/site/fr/reference/disk_index.json +++ b/localization/v2.4.x/site/fr/reference/disk_index.json @@ -1 +1 @@ -{"codeList":["...\nDiskIndex:\n MaxDegree: 56\n SearchListSize: 100\n PQCodeBugetGBRatio: 0.125\n SearchCacheBudgetGBRatio: 0.125\n BeamWidthRatio: 4.0\n...\n"],"headingContent":"","anchorList":[{"label":"Indexation sur disque","href":"On-disk-Index","type":1,"isActive":false},{"label":"Conditions préalables","href":"Prerequisites","type":2,"isActive":false},{"label":"Limites","href":"Limits","type":2,"isActive":false},{"label":"Paramètres d'index et de recherche","href":"Index-and-search-settings","type":2,"isActive":false},{"label":"Configurations Milvus liées à DiskANN","href":"DiskANN-related-Milvus-configurations","type":2,"isActive":false},{"label":"Résolution des problèmes","href":"Troubleshooting","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["...\nDiskIndex:\n MaxDegree: 56\n SearchListSize: 100\n PQCodeBugetGBRatio: 0.125\n SearchCacheBudgetGBRatio: 0.125\n BeamWidthRatio: 4.0\n...\n"],"headingContent":"On-disk Index","anchorList":[{"label":"Indexation sur disque","href":"On-disk-Index","type":1,"isActive":false},{"label":"Conditions préalables","href":"Prerequisites","type":2,"isActive":false},{"label":"Limites","href":"Limits","type":2,"isActive":false},{"label":"Paramètres d'index et de recherche","href":"Index-and-search-settings","type":2,"isActive":false},{"label":"Configurations Milvus liées à DiskANN","href":"DiskANN-related-Milvus-configurations","type":2,"isActive":false},{"label":"Résolution des problèmes","href":"Troubleshooting","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/reference/disk_index.md b/localization/v2.4.x/site/fr/reference/disk_index.md index f62a669f1..0e14eda91 100644 --- a/localization/v2.4.x/site/fr/reference/disk_index.md +++ b/localization/v2.4.x/site/fr/reference/disk_index.md @@ -2,7 +2,7 @@ id: disk_index.md related_key: disk_index summary: Mécanisme d'indexation des disques dans Milvus. -title: Index sur disque +title: Indexation sur disque ---

On-disk Index

To use DiskANN, make sure that you:

• Use only float vectors with at least one dimension in your data.
- • Use only Euclidean distance (L2) or inner product (IP) to measure the distance between vectors.
+ • Use only Euclidean distance (L2), inner product (IP), or COSINE to measure the distance between vectors.

Index and search settings

• Index building parameters

  When building a DiskANN index, use DISKANN as the index type. No index parameters are needed.

• Search parameters

      @@ -132,7 +132,7 @@ DiskIndex: - +
| Parameter | Description | Value range | Default |
| --- | --- | --- | --- |
| MaxDegree | Maximum degree of the Vamana graph. A larger value offers a higher recall rate but increases the size of the index and the time needed to build it. | [1, 512] | 56 |
| SearchListSize | Size of the candidate list. A larger value increases the time spent on building the index but offers a higher recall rate. Set it to a value smaller than MaxDegree unless you need to reduce the index-building time. | [1, int32_max] | 100 |
| PQCodeBugetGBRatio | Size limit on the PQ code. A larger value offers a higher recall rate but increases memory usage. | (0.0, 0.25] | 0.125 |
| SearchCacheBudgetGBRatio | Ratio of cached node numbers to raw data. A larger value improves index-building performance but increases memory usage. | [0.0, 0.3) | 0.10 |
| BeamWidthRatio | Ratio between the maximum number of IO requests per search iteration and CPU number. | [1, max(128 / CPU number, 16)] | 4.0 |
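To make the settings above concrete, here is a minimal sketch of building and querying a DISKANN index with pymilvus. The server URI, collection, and field names are illustrative assumptions (the collection is assumed to already hold 128-dimensional float vectors in a field named `vector`); `search_list` is the per-query candidate list size that trades latency for recall:

```python
from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")

# DISKANN needs no build-time parameters; only the metric type is required.
index_params = client.prepare_index_params()
index_params.add_index(field_name="vector", index_type="DISKANN", metric_type="L2")
client.create_index(collection_name="my_collection", index_params=index_params)

client.load_collection("my_collection")

# A larger search_list raises recall at the cost of query latency.
res = client.search(
    collection_name="my_collection",
    data=[[0.1] * 128],  # one illustrative query vector
    limit=10,
    search_params={"params": {"search_list": 100}},
)
print(res)
```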
      diff --git a/localization/v2.4.x/site/fr/reference/replica.json b/localization/v2.4.x/site/fr/reference/replica.json index b4fd79b68..035987e67 100644 --- a/localization/v2.4.x/site/fr/reference/replica.json +++ b/localization/v2.4.x/site/fr/reference/replica.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"","anchorList":[{"label":"Réplique en mémoire","href":"In-Memory-Replica","type":1,"isActive":false},{"label":"Vue d'ensemble","href":"Overview","type":2,"isActive":false},{"label":"Concepts clés","href":"Key-Concepts","type":2,"isActive":false},{"label":"Détails de la conception","href":"Design-Details","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"In-Memory Replica","anchorList":[{"label":"Réplique en mémoire","href":"In-Memory-Replica","type":1,"isActive":false},{"label":"Vue d'ensemble","href":"Overview","type":2,"isActive":false},{"label":"Concepts clés","href":"Key-Concepts","type":2,"isActive":false},{"label":"Détails de la conception","href":"Design-Details","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/reference/replica.md b/localization/v2.4.x/site/fr/reference/replica.md index 6faa1fe71..a01feb251 100644 --- a/localization/v2.4.x/site/fr/reference/replica.md +++ b/localization/v2.4.x/site/fr/reference/replica.md @@ -40,8 +40,8 @@ title: Réplique en mémoire Replica_Availiability Disponibilité des répliques

With in-memory replicas, Milvus can load the same segment on multiple query nodes. If one query node has failed or is busy with a current search request when another arrives, the system can send new requests to an idle query node that has a replica of the same segment.

Performance

In-memory replicas allow you to make use of extra CPU and memory resources. It is very useful if you have a relatively small dataset but want to scale read throughput with extra hardware resources. Overall QPS (queries per second) and throughput can be significantly improved.

Availability

In-memory replicas enable Milvus to recover faster if a query node crashes. When a query node fails, the segment does not have to be reloaded on another query node. Instead, the search request can be resent to a new query node immediately without having to reload the data. With multiple segment replicas maintained simultaneously, the system is more resilient in the case of a failover.

Key Concepts

In-memory replicas are organized as replica groups. Each replica group contains shard replicas. Each shard replica has a streaming replica and a historical replica that correspond to the growing and sealed segments in the shard (i.e., the DML channel).

An illustration of how in-memory replicas work

Replica group

A replica group consists of multiple query nodes that are responsible for handling historical data and replicas.

Shard replica

A shard replica consists of a streaming replica and a historical replica, both belonging to the same shard. The number of shard replicas in a replica group is determined by the number of shards in a specified collection.
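In practice you opt in to in-memory replicas when loading a collection. A minimal sketch with the pymilvus ORM client; the host, port, and collection name are illustrative, and the cluster is assumed to have enough query nodes to host the requested replicas:

```python
from pymilvus import Collection, connections

connections.connect(host="localhost", port="19530")

# Load two in-memory replicas of the collection across the query nodes.
collection = Collection("my_collection")
collection.load(replica_number=2)

# Inspect how the replica groups were placed.
print(collection.get_replicas())
```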

      diff --git a/localization/v2.4.x/site/fr/release_notes.json b/localization/v2.4.x/site/fr/release_notes.json index 9788c60a1..2d5f4bb20 100644 --- a/localization/v2.4.x/site/fr/release_notes.json +++ b/localization/v2.4.x/site/fr/release_notes.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"Release Notes","anchorList":[{"label":"Notes de mise à jour","href":"Release-Notes","type":1,"isActive":false},{"label":"v2.4.11","href":"v2411","type":2,"isActive":false},{"label":"v2.4.10","href":"v2410","type":2,"isActive":false},{"label":"v2.4.9","href":"v249","type":2,"isActive":false},{"label":"v2.4.8","href":"v248","type":2,"isActive":false},{"label":"v2.4.6","href":"v246","type":2,"isActive":false},{"label":"v2.4.5","href":"v245","type":2,"isActive":false},{"label":"v2.4.4","href":"v244","type":2,"isActive":false},{"label":"v2.4.3","href":"v243","type":2,"isActive":false},{"label":"v2.4.1","href":"v241","type":2,"isActive":false},{"label":"v2.4.0","href":"v240","type":2,"isActive":false},{"label":"v2.4.0-rc.1","href":"v240-rc1","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Release Notes","anchorList":[{"label":"Notes de mise à jour","href":"Release-Notes","type":1,"isActive":false},{"label":"v2.4.13-correction de bogues","href":"v2413-hotfix","type":2,"isActive":false},{"label":"[Obsolète] v2.4.13","href":"Deprecated-v2413","type":2,"isActive":false},{"label":"v2.4.12","href":"v2412","type":2,"isActive":false},{"label":"v2.4.11","href":"v2411","type":2,"isActive":false},{"label":"v2.4.10","href":"v2410","type":2,"isActive":false},{"label":"v2.4.9","href":"v249","type":2,"isActive":false},{"label":"v2.4.8","href":"v248","type":2,"isActive":false},{"label":"v2.4.6","href":"v246","type":2,"isActive":false},{"label":"v2.4.5","href":"v245","type":2,"isActive":false},{"label":"v2.4.4","href":"v244","type":2,"isActive":false},{"label":"v2.4.3","href":"v243","type":2,"isActive":false},{"label":"v2.4.1","href":"v241","type":2,"isActive":false},{"label":"v2.4.0","href":"v240","type":2,"isActive":false},{"label":"v2.4.0-rc.1","href":"v240-rc1","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/release_notes.md b/localization/v2.4.x/site/fr/release_notes.md index 7400abd43..32cc67495 100644 --- a/localization/v2.4.x/site/fr/release_notes.md +++ b/localization/v2.4.x/site/fr/release_notes.md @@ -19,6 +19,163 @@ title: Notes de mise à jour >

Find out what's new in Milvus! This page summarizes the new features, improvements, known issues, and bug fixes in each release. You can find the release notes for each version released after v2.4.0 in this section. We suggest that you visit this page regularly to learn about updates.

v2.4.13-hotfix

Release date: October 17, 2024

| Milvus version | Python SDK version | Java SDK version | Node.js SDK version |
| --- | --- | --- | --- |
| 2.4.13-hotfix | 2.4.8 | 2.4.5 | 2.4.9 |

Milvus v2.4.13-hotfix addresses a critical issue specific to v2.4.13, where Milvus may fail to retrieve collection information after a restart if all MetaKV snapshots have been garbage-collected (#36933). Users currently running v2.4.13 are advised to upgrade to v2.4.13-hotfix as soon as possible to avoid potential disruptions.

Critical fixes

• Load the original key if the timestamp is MaxTimestamp (#36935)

[Deprecated] v2.4.13

Release date: October 12, 2024

| Milvus version | Python SDK version | Java SDK version | Node.js SDK version |
| --- | --- | --- | --- |
| 2.4.13 | 2.4.8 | 2.4.5 | 2.4.9 |

Milvus 2.4.13 introduces dynamic replica load, allowing users to adjust the number of collection replicas without needing to release and reload the collection. This version also fixes several critical bugs related to bulk import, expression parsing, load balancing, and failure recovery. Additionally, significant improvements have been made to MMAP resource usage and import performance, enhancing overall system efficiency. We highly recommend upgrading to this release for better performance and stability.

Features

• Dynamic replica adjustment for loaded collections (#36417)
• Sparse vector MMAP in growing segment types (#36565)

Bug fixes

• Fixed a flush performance issue (#36741)
• Fixed a bug with JSON expressions in "[]" (#36722)
• Removed neighbors if the compaction target is not indexed (#36694)
• Improved Rocksmq performance when the channel is full (#36618)
• Fixed an issue where errors during unpinning were not deferred (#36665)
• Resolved a memory leak for imported segments in the segment manager (#36631)
• Removed unnecessary health checks for query nodes in the proxy (#36553)
• Fixed an overflow issue with term expressions (#36534)
• Recorded the node ID before assigning tasks to prevent task misallocation (#36493)
• Resolved data race issues in clustering compaction (#36499)
• Added a check for string array maximum length after type matching (#36497)
• Fixed race conditions in mixed or standalone mode (#36459)
• Fixed segment imbalance after repeated load and release operations (#36543)
• Fixed a case where segments could not be moved from a stopping node (#36475)
• Updated segment information correctly even if some segments were missing (#36729)
• Prevented etcd transactions from exceeding the maximum limit in the KV snapshot (#36773)

Improvements

• Enhanced MMAP resource estimation:
  • Improved MMAP-related code in column.h (#36521)
  • Refined resource estimation when loading collections (#36728)
• Performance enhancements:
  • Improved expression parsing efficiency by converting Unicode to ASCII (#36676)
  • Enabled parallel production of messages for multiple topics (#36462)
  • Reduced CPU overhead when computing the index file size (#36580)
  • Retrieved the message type from the header to minimize unmarshalling (#36454)
  • Optimized the workload-based replica selection policy (#36384)
• Split delete task messages to stay within the maximum message size limits (#36574)
• Added a new RESTful URL to describe import jobs (#36754)
• Optimized import scheduling and added a time cost metric (#36684)
• Added a balance report log for the query coordinator balancer (#36749)
• Used the common GC configuration (#36670)
• Added a streaming forward policy switch for the delegator (#36712)
• Enabled manual compaction for collections without indexes (#36581)
• Enabled load balancing on query nodes with varying memory capacities (#36625)
• Unified the case for inbound labels using metrics.label (#36616)
• Made channel/segment transfer operations idempotent (#36552)
• Added metrics to monitor import throughput and imported row counts (#36588)
• Prevented multiple timer objects from being created in targets (#36573)
• Updated the expression version and formatted the HTTP response for expressions (#36467)
• Improved garbage collection in the KV snapshot (#36793)
• Added support for executing methods with context parameters (#36798)

v2.4.12

Release date: September 26, 2024

| Milvus version | Python SDK version | Java SDK version | Node.js SDK version |
| --- | --- | --- | --- |
| 2.4.12 | 2.4.7 | 2.4.4 | 2.4.9 |

Milvus 2.4.12 brings significant enhancements and critical bug fixes. This release addresses data duplication issues and improves failure recovery speed, particularly when handling large numbers of deletions. However, a known issue persists where failure recovery can be slow when deleting massive amounts of data. We are actively working on resolving this issue.

Improvements

• Implemented graceful stop for the flowgraph manager (#36358)
• Disabled index checks for unloaded vector fields (#36280)
• Filtered out unhit delete records during delta load (#36272)
• Improved std::stoi exception handling (#36296)
• Disallowed keywords as field names or dynamic field names (#36108)
• Added metrics for delete entries in L0 segments (#36227)
• Implemented the L0 forward policy to support remote load (#36208)
• Added an ANN field load check in the proxy (#36194)
• Enabled support for empty rows (#36061)
• Fixed a security vulnerability (#36156)
• Implemented a stats handler for request/response size metrics (#36118)
• Fixed size estimation for encoded array data (#36379)

Bug fixes

• Resolved metric type errors for collections with two vector fields (#36473)
• Fixed long buffering issues causing message queue receive failures (#36425)
• Implemented support for returning compacted-to segments after a split (#36429)
• Resolved data race issues with the node ID check goroutine (#36377)
• Removed the element type check (#36324)
• Fixed concurrent access issues for growing and sealed segments (#36288)
• Implemented a stated future lock (#36333)
• Fixed offset usage in HybridSearch (#36287, #36253)
• Resolved dirty segment/channel leaks on QueryNode (#36259)
• Fixed primary key duplication handling (#36274)
• Set the metric type in search requests (#36279)
• Fixed the stored_index_files_size metric clearing issue (#36161)
• Fixed read-write privilege group behavior for global API access (#36145)

v2.4.11

Release date: August 30, 2024

@@ -112,7 +269,7 @@ title: Release Notes

| Milvus version | Python SDK version | Java SDK version | Node.js SDK version |
| --- | --- | --- | --- |
| 2.4.10 | 2.4.6 | 2.4.3 | 2.4.6 |

Milvus 2.4.10 brings significant improvements in functionality and stability. Key features include support for upsert operations on AutoID-enabled collections, partial collection-loading capabilities, and various memory-mapped (MMAP) configurations to optimize memory usage. This release also fixes several bugs causing panics, core dumps, and resource leaks. We recommend upgrading to take full advantage of these improvements.

Features

• Upsert with Auto ID: support for upsert operations with automatic ID generation (#34633)
• Partial collection field loading [Beta preview]: allows loading specified fields of a collection (#35696)
@@ -301,7 +458,7 @@ title: Release Notes

• Resolved a memory leak in SegmentManager within DataCoord by removing flushed segments generated by imports (#34651)
• Fixed a panic issue when compaction was disabled and a collection was dropped (#34206)
• Fixed an out-of-memory issue in DataNode by improving the memory usage estimation algorithm (#34203)
• Prevented bursty memory usage when multiple vector retrieval requests hit a cache miss by implementing singleflight for the chunk cache (#34283)
• Captured ErrKeyNotFound during CAS (Compare and Swap) operations in configuration (#34489)
• Fixed an issue where configuration updates failed due to mistakenly using the formatted value in a CAS operation (#34373)
      @@ -312,7 +469,7 @@ title: Notes de mise à jour
• Introduced a quota configuration for the number of L0 segment entries per collection, enabling better control over deletion rates by applying backpressure (#34837)
• Extended the rate-limiting mechanism for insert operations to also cover upsert operations, ensuring consistent performance under high load (#34616)
• Implemented a dynamic CGO pool for proxy CGO calls, optimizing resource usage and performance (#34842)
• Enabled the DiskAnn build option for the Ubuntu, Rocky, and Amazon operating systems, improving compatibility and performance on these platforms (#34244)
• Upgraded Conan to version 1.64.1, ensuring compatibility with the latest features and improvements (#35216)
• Upgraded Knowhere to version 2.3.7, bringing performance improvements and new features (#34709)
• Pinned the revision of certain third-party packages to ensure consistent builds and reduce the risk of unexpected changes (#35316)
@@ -355,7 +512,7 @@ title: Notes de mise à jour

• Fixed a memory leak of primary key oracle objects when a worker goes offline (#34020)
• Fixed ChannelManagerImplV2 to notify the correct Node, addressing parameter-capture issues in loop closures (#34004)
• Fixed a read-write data race in ImportTask segmentsInfo by implementing a deep copy (#34126)
• Fixed version information for the "legacyVersionWithoutRPCWatch" configuration option to prevent errors during rolling upgrades (#34185)
• Fixed the metric for the number of loaded partitions (#34195)
• Passed the otlpSecure configuration when setting up segcore tracing (#34210)
• Fixed an issue where DataCoord properties were mistakenly overwritten (#34240)
@@ -364,7 +521,7 @@ title: Notes de mise à jour

• Fixed an issue where imports could generate orphan files (#34071)
• Fixed incomplete query results caused by duplicated primary keys within a segment (#34302)
• Resolved an issue of sealed segments missing from L0 compaction (#34566)
• Fixed the dirty-data issue in the channel-cp meta generated after garbage collection (#34609)
• Fixed metrics where database_num was 0 after RootCoord restarted (#34010)
• Fixed a memory leak in SegmentManager in DataCoord by removing flushed segments generated by import (#34652)
• Ensured that compressBinlog fills in the logID of binlogs after DataCoord restarts, guaranteeing correct reloading from KV (#34064)
@@ -393,7 +550,7 @@ title: Notes de mise à jour

Milvus version    Python SDK version    Java SDK version    Node.js SDK version
2.4.5             2.4.4                 2.4.1               2.4.3

Milvus 2.4.5 brings several improvements and bug fixes to enhance performance, stability, and functionality. Milvus 2.4.5 simplifies sparse, float16, and bfloat16 vector search with auto-indexing, speeds up searches, deletions, and compactions through Bloom filter optimizations, and tackles data management with faster loading times and support for importing L0 segments. It also introduces the sparse HNSW index for efficient search over high-dimensional sparse data, enhances the RESTful API with sparse float vector support, and fixes critical bugs for better stability.

New features

• Added RBAC support to the describe/alter database API (#33804)
• Supported building the HNSW index for sparse vectors (#33653, #33662)

@@ -411,7 +568,7 @@ title: Notes de mise à jour

• Sped up the loading of small collections (#33746)
• Supported importing deletion data into the L0 segment (#33712)
• Marked compaction tasks as timed out to avoid running the same task over and over (#33833)
• Handled float16 and bfloat16 vectors as BinaryVector in numpy bulk insert (#33788)
• Added the includeCurrentMsg flag for the seek method (#33743)
• Added mergeInterval, targetBufSize, and maxTolerantLag of msgdispatcher to the configurations (#33680)
• Improved GetVectorByID for sparse vectors (#33652)
@@ -464,7 +621,7 @@ title: Notes de mise à jour

• Improved import reliability by checking whether an import job exists before starting (#33673)
• Improved handling of the sparse HNSW index (internal feature) (#33714)
• Cleaned up vector memory to avoid memory leaks (#33708)
• Ensured smoother asynchronous warmup by fixing a state-lock issue (#33687)
• Fixed a bug that could cause missing results in query iterators (#33506)
• Fixed a bug that could lead to unevenly sized import segments (#33634)
• Fixed incorrect data-size handling for bf16, fp16, and binary vector types (#33488)
@@ -540,7 +697,7 @@ title: Notes de mise à jour

Milvus 2.4.3 introduced a host of features, improvements, and bug fixes to boost performance and reliability. Notable enhancements include support for bulk-inserting sparse float vectors and optimized Bloom filter acceleration. The improvements cover various areas, from dynamic configuration updates to memory-usage optimization. Bug fixes addressed critical issues such as panic scenarios and ensured smoother system operation. This release underscores Milvus's ongoing commitment to enhancing functionality, optimizing performance, and delivering a robust user experience.

Features

• Supported bulk insert of sparse float vectors for binlog/json/parquet (#32649)

Improvements

            @@ -612,7 +769,7 @@ title: Notes de mise à jour
• Fixed the inability to generate a traceID when using the noop exporter (#33208)
• Improved query result retrieval (#33179)
• Marked channel-checkpoint dropping to prevent checkpoint lag metric leaks (#33201)
• Fixed query node blocking during stopping progress (#33154)
• Fixed missing segments in the flush response (#33061)
• Made the submit operation idempotent (#33053)
• Allocated a new slice for each batch in the stream reader (#33360)
@@ -707,7 +864,7 @@ title: Notes de mise à jour

• Fixed the error raised by BulkInsert when encountering dynamic fields in numpy files (#32596)
• Fixed bugs related to the RESTFulV2 interface, including an important fix that allows numeric parameters in requests to accept numeric input instead of strings (#32485, #32355)
• Fixed a memory leak in the proxy by removing the config-watching event in the rate limiter (#32313)
• Fixed the issue where the rate limiter incorrectly reports that the partition cannot be found when partitionName is not specified (#32647)
• Added detection to the error type for distinguishing between a collection being in recovery state and it not being loaded (#32447)
• Fixed the negative queryable entity count metric (#32361)
          @@ -741,7 +898,7 @@ title: Notes de mise à jour
• Supported AutoIndex for scalar fields (#31593)
• Refactored hybrid search for execution paths consistent with regular search (#31742, #32178)
• Accelerated filtering through refactoring of bitset and bitset_view (#31592, #31754, #32139)
• Import tasks now support waiting for data index completion (#31733)
• Improved import compatibility (#32121), task scheduling (#31475), and limits on the size and number of imported files (#31542)
• Code-simplification efforts, including standardizing the interface for type checking (#31945, #31857), removing deprecated code and metrics (#32079, #32134, #31535, #32211, #31935), and normalizing constant names (#31515)
• New metric for QueryCoord current target channel checkpoint lag latency (#31420)
@@ -799,7 +956,7 @@ title: Notes de mise à jour

• New GPU index - CAGRA: thanks to NVIDIA's contribution, this new GPU index offers a 10x performance boost, especially for batch searches. For details, refer to GPU Index.

• Multi-vector and hybrid search: this feature enables storing vector embeddings from multiple models and conducting hybrid searches. For more information, refer to Hybrid Search.

• Sparse vectors: ideal for keyword interpretation and analysis, sparse vectors are now supported for processing in your collection. For more information, refer to Sparse Vectors.

• Grouping search: categorical aggregation enhances document-level recall for retrieval-augmented generation (RAG) applications. For more information, refer to Grouping Search.

• Inverted index and fuzzy match: these features improve keyword search for scalar fields. For more information, refer to Indexed Scalar Fields and Filtered Search.
          @@ -815,16 +972,16 @@ title: Notes de mise à jour
• Customize search strategies by assigning weights to different embedding models.
• Experiment with various embedding models to find the optimal model combination.

Multi-vector support allows you to store, index, and apply reranking strategies to multiple vector fields of different types, such as FLOAT_VECTOR and SPARSE_FLOAT_VECTOR, in one collection. Currently, two reranking strategies are available: Reciprocal Rank Fusion (RRF) and Average Weighted Scoring. Both strategies combine the search results from different vector fields into a unified result set. The first strategy prioritizes entities that consistently appear in the search results of different vector fields, while the other assigns weights to the search results from each vector field to determine their importance in the final result set.

A code example can be found in hybrid_search.py.
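For orientation, here is a minimal sketch of both strategies with pymilvus; it assumes an already loaded Collection object col with dense_vector and sparse_vector fields (as in the hybrid search tutorial later in this document), and the query embeddings are placeholders rather than real model output.

from pymilvus import AnnSearchRequest, RRFRanker, WeightedRanker

# Placeholder query embeddings -- in practice these come from an
# embedding model such as BGE-M3.
dense_query = [0.1] * 1024                 # dense embedding
sparse_query = {17: 0.4, 4096: 0.9}        # sparse embedding as {dim: value}

# One ANN request per vector field.
dense_req = AnnSearchRequest(
    data=[dense_query], anns_field="dense_vector",
    param={"metric_type": "IP"}, limit=10,
)
sparse_req = AnnSearchRequest(
    data=[sparse_query], anns_field="sparse_vector",
    param={"metric_type": "IP"}, limit=10,
)

# Strategy 1 -- Reciprocal Rank Fusion: rewards entities ranked high
# in both result lists; k dampens the influence of lower ranks.
rrf_results = col.hybrid_search(
    [dense_req, sparse_req], rerank=RRFRanker(k=60), limit=10
)

# Strategy 2 -- Average Weighted Scoring: per-field weights decide how
# much each vector field contributes to the final score.
weighted_results = col.hybrid_search(
    [dense_req, sparse_req], rerank=WeightedRanker(0.7, 1.0), limit=10
)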

Inverted index and fuzzy match

In previous Milvus releases, memory-based binary search indexes and Marisa Trie indexes were used for scalar field indexing. However, these methods were memory-intensive. The latest Milvus release now employs the Tantivy-based inverted index, which can be applied to all numeric and string data types. This new index dramatically improves scalar query performance, cutting keyword queries on strings tenfold. In addition, the inverted index consumes less memory, thanks to further optimizations in data compression and the memory-mapped storage (MMap) mechanism of the internal indexing structure.

This release also supports fuzzy matches in scalar filtering using prefixes, infixes, and suffixes.

Code examples can be found in inverted_index_example.py and fuzzy_match.py.
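As a quick illustration of the filter syntax, the sketch below uses hypothetical collection and field names (my_collection, title); the like operator with the % wildcard expresses prefix, infix, and suffix matches.

from pymilvus import MilvusClient

client = MilvusClient(uri="./milvus_demo.db")  # assumed local Milvus Lite file

# Prefix match: titles starting with "Milvus".
prefix_hits = client.query(
    collection_name="my_collection",
    filter='title like "Milvus%"',
    output_fields=["title"],
)

# Infix match: titles containing "vector".
infix_hits = client.query(
    collection_name="my_collection",
    filter='title like "%vector%"',
    output_fields=["title"],
)

# Suffix match: titles ending with "search".
suffix_hits = client.query(
    collection_name="my_collection",
    filter='title like "%search"',
    output_fields=["title"],
)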

You can now group search results by the values in a specific scalar field. This helps RAG applications implement document-level recall. Consider a collection of documents, where each document is split into multiple passages. Each passage is represented by one vector embedding and belongs to one document. To find the most relevant documents instead of scattered passages, you can include the group_by_field argument in the search() operation to group results by document ID.

A code example can be found in example_group_by.py.
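A minimal sketch of the idea, assuming a hypothetical document_chunks collection whose entities carry a doc_id scalar field, and a placeholder query embedding:

from pymilvus import MilvusClient

client = MilvusClient(uri="./milvus_demo.db")   # assumed local Milvus Lite file
query_embedding = [0.1] * 768                   # placeholder query vector

# Group passage-level hits by their parent document so the top results
# are 5 distinct documents rather than 5 chunks of the same document.
results = client.search(
    collection_name="document_chunks",
    data=[query_embedding],
    limit=5,
    group_by_field="doc_id",
    output_fields=["doc_id", "text"],
)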

Float16 and BFloat16 vector data types

Machine learning and neural networks often use half-precision data types, such as Float16 and BFloat16. Although these data types can improve query efficiency and reduce memory usage, they come at the cost of reduced precision. With this release, Milvus now supports these data types for vector fields.

Code examples can be found in float16_example.py and bfloat16_example.py.
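As a hedged sketch of what declaring and filling such a field can look like (the collection name and dimension are made up; pymilvus also accepts raw bytes for these vector fields):

import numpy as np
from pymilvus import MilvusClient, DataType

client = MilvusClient(uri="./milvus_demo.db")   # assumed local Milvus Lite file

# Declare a half-precision vector field in the schema.
schema = client.create_schema(auto_id=True)
schema.add_field("pk", DataType.INT64, is_primary=True)
schema.add_field("fp16_vector", DataType.FLOAT16_VECTOR, dim=128)
client.create_collection(collection_name="half_precision_demo", schema=schema)

# Insert vectors as numpy float16 arrays; BFLOAT16_VECTOR works the
# same way with a bfloat16-capable dtype or raw bytes.
rows = [{"fp16_vector": np.random.rand(128).astype(np.float16)} for _ in range(10)]
client.insert(collection_name="half_precision_demo", data=rows)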

Upgraded architecture

L0 segment

This release includes a new segment type called the L0 segment, designed to record deleted data. This segment periodically compacts the stored deleted records and splits them into sealed segments, reducing the number of data flushes required for small deletions and leaving a small storage footprint. With this mechanism, Milvus completely separates data compactions from data flushes, enhancing the performance of delete and upsert operations.

Refactored BulkInsert

This release also introduces improved bulk-insert logic, which allows you to import multiple files in a single bulk-insert request. With the refactored version, both the performance and the stability of bulk insert have improved significantly. The user experience has also been enhanced, with fine-grained rate limiting and more user-friendly error messages. In addition, you can easily access the bulk-insert endpoints through Milvus's RESTful API.
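One way to drive this from Python is the pymilvus bulk-insert utility; the sketch below assumes a running Milvus server, an existing my_collection, and a data file already uploaded to the object storage bucket Milvus uses (the file path is hypothetical):

from pymilvus import connections, utility

connections.connect(uri="http://localhost:19530")    # assumed Milvus server

# Start a bulk-insert job for a prepared data file in the bucket.
task_id = utility.do_bulk_insert(
    collection_name="my_collection",
    files=["imports/batch_0001.json"],
)

# Poll the job until it completes or fails.
state = utility.get_bulk_insert_state(task_id=task_id)
print(state.state_name, state.row_count)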

Memory-mapped storage

Milvus uses memory-mapped storage (MMap) to optimize its memory usage. Instead of loading file content directly into memory, this mechanism maps the file content into memory. This approach comes with a performance trade-off. By enabling MMap for an HNSW-indexed collection on a host with 2 CPUs and 8 GB of RAM, you can load 4x more data with less than 10% performance degradation.

In addition, this release also enables dynamic, fine-grained control over MMap without the need to restart Milvus.
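For example, with the pymilvus ORM API, MMap can be toggled per collection at runtime; a sketch under the assumptions that a server is reachable and my_collection exists (the collection must be released before the property change takes effect):

from pymilvus import Collection, connections

connections.connect(uri="http://localhost:19530")    # assumed Milvus server

col = Collection("my_collection")

# Toggle MMap for the whole collection without restarting Milvus.
col.release()
col.set_properties({"mmap.enabled": True})
col.load()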

        diff --git a/localization/v2.4.x/site/fr/tutorials/build-rag-with-milvus.json b/localization/v2.4.x/site/fr/tutorials/build-rag-with-milvus.json index 24550500b..e9a217837 100644 --- a/localization/v2.4.x/site/fr/tutorials/build-rag-with-milvus.json +++ b/localization/v2.4.x/site/fr/tutorials/build-rag-with-milvus.json @@ -1 +1 @@ -{"codeList":["$ pip install --upgrade pymilvus openai requests tqdm\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","$ wget https://github.com/milvus-io/milvus-docs/releases/download/v2.4.6-preview/milvus_docs_2.4.x_en.zip\n$ unzip -q milvus_docs_2.4.x_en.zip -d milvus_docs\n","from glob import glob\n\ntext_lines = []\n\nfor file_path in glob(\"milvus_docs/en/faq/*.md\", recursive=True):\n with open(file_path, \"r\") as file:\n file_text = file.read()\n\n text_lines += file_text.split(\"# \")\n","from openai import OpenAI\n\nopenai_client = OpenAI()\n","def emb_text(text):\n return (\n openai_client.embeddings.create(input=text, model=\"text-embedding-3-small\")\n .data[0]\n .embedding\n )\n","test_embedding = emb_text(\"This is a test\")\nembedding_dim = len(test_embedding)\nprint(embedding_dim)\nprint(test_embedding[:10])\n","from pymilvus import MilvusClient\n\nmilvus_client = MilvusClient(uri=\"./milvus_demo.db\")\n\ncollection_name = \"my_rag_collection\"\n","if milvus_client.has_collection(collection_name):\n milvus_client.drop_collection(collection_name)\n","milvus_client.create_collection(\n collection_name=collection_name,\n dimension=embedding_dim,\n metric_type=\"IP\", # Inner product distance\n consistency_level=\"Strong\", # Strong consistency level\n)\n","from tqdm import tqdm\n\ndata = []\n\nfor i, line in enumerate(tqdm(text_lines, desc=\"Creating embeddings\")):\n data.append({\"id\": i, \"vector\": emb_text(line), \"text\": line})\n\nmilvus_client.insert(collection_name=collection_name, data=data)\n","question = \"How is data stored in milvus?\"\n","search_res = milvus_client.search(\n collection_name=collection_name,\n data=[\n emb_text(question)\n ], # Use the `emb_text` function to convert the question to an embedding vector\n limit=3, # Return top 3 results\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Inner product distance\n output_fields=[\"text\"], # Return the text field\n)\n","import json\n\nretrieved_lines_with_distances = [\n (res[\"entity\"][\"text\"], res[\"distance\"]) for res in search_res[0]\n]\nprint(json.dumps(retrieved_lines_with_distances, indent=4))\n","context = \"\\n\".join(\n [line_with_distance[0] for line_with_distance in retrieved_lines_with_distances]\n)\n","SYSTEM_PROMPT = \"\"\"\nHuman: You are an AI assistant. 
You are able to find answers to the questions from the contextual passage snippets provided.\n\"\"\"\nUSER_PROMPT = f\"\"\"\nUse the following pieces of information enclosed in tags to provide an answer to the question enclosed in tags.\n\n{context}\n\n\n{question}\n\n\"\"\"\n","response = openai_client.chat.completions.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n {\"role\": \"user\", \"content\": USER_PROMPT},\n ],\n)\nprint(response.choices[0].message.content)\n"],"headingContent":"","anchorList":[{"label":"Construire RAG avec Milvus","href":"Build-RAG-with-Milvus","type":1,"isActive":false},{"label":"Préparation","href":"Preparation","type":2,"isActive":false},{"label":"Charger les données dans Milvus","href":"Load-data-into-Milvus","type":2,"isActive":false},{"label":"Construire un RAG","href":"Build-RAG","type":2,"isActive":false},{"label":"Déploiement rapide","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install --upgrade pymilvus openai requests tqdm\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","$ wget https://github.com/milvus-io/milvus-docs/releases/download/v2.4.6-preview/milvus_docs_2.4.x_en.zip\n$ unzip -q milvus_docs_2.4.x_en.zip -d milvus_docs\n","from glob import glob\n\ntext_lines = []\n\nfor file_path in glob(\"milvus_docs/en/faq/*.md\", recursive=True):\n with open(file_path, \"r\") as file:\n file_text = file.read()\n\n text_lines += file_text.split(\"# \")\n","from openai import OpenAI\n\nopenai_client = OpenAI()\n","def emb_text(text):\n return (\n openai_client.embeddings.create(input=text, model=\"text-embedding-3-small\")\n .data[0]\n .embedding\n )\n","test_embedding = emb_text(\"This is a test\")\nembedding_dim = len(test_embedding)\nprint(embedding_dim)\nprint(test_embedding[:10])\n","from pymilvus import MilvusClient\n\nmilvus_client = MilvusClient(uri=\"./milvus_demo.db\")\n\ncollection_name = \"my_rag_collection\"\n","if milvus_client.has_collection(collection_name):\n milvus_client.drop_collection(collection_name)\n","milvus_client.create_collection(\n collection_name=collection_name,\n dimension=embedding_dim,\n metric_type=\"IP\", # Inner product distance\n consistency_level=\"Strong\", # Strong consistency level\n)\n","from tqdm import tqdm\n\ndata = []\n\nfor i, line in enumerate(tqdm(text_lines, desc=\"Creating embeddings\")):\n data.append({\"id\": i, \"vector\": emb_text(line), \"text\": line})\n\nmilvus_client.insert(collection_name=collection_name, data=data)\n","question = \"How is data stored in milvus?\"\n","search_res = milvus_client.search(\n collection_name=collection_name,\n data=[\n emb_text(question)\n ], # Use the `emb_text` function to convert the question to an embedding vector\n limit=3, # Return top 3 results\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Inner product distance\n output_fields=[\"text\"], # Return the text field\n)\n","import json\n\nretrieved_lines_with_distances = [\n (res[\"entity\"][\"text\"], res[\"distance\"]) for res in search_res[0]\n]\nprint(json.dumps(retrieved_lines_with_distances, indent=4))\n","context = \"\\n\".join(\n [line_with_distance[0] for line_with_distance in retrieved_lines_with_distances]\n)\n","SYSTEM_PROMPT = \"\"\"\nHuman: You are an AI assistant. 
You are able to find answers to the questions from the contextual passage snippets provided.\n\"\"\"\nUSER_PROMPT = f\"\"\"\nUse the following pieces of information enclosed in tags to provide an answer to the question enclosed in tags.\n\n{context}\n\n\n{question}\n\n\"\"\"\n","response = openai_client.chat.completions.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n {\"role\": \"user\", \"content\": USER_PROMPT},\n ],\n)\nprint(response.choices[0].message.content)\n"],"headingContent":"Build RAG with Milvus","anchorList":[{"label":"Construire RAG avec Milvus","href":"Build-RAG-with-Milvus","type":1,"isActive":false},{"label":"Préparation","href":"Preparation","type":2,"isActive":false},{"label":"Charger les données dans Milvus","href":"Load-data-into-Milvus","type":2,"isActive":false},{"label":"Construire un RAG","href":"Build-RAG","type":2,"isActive":false},{"label":"Déploiement rapide","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/tutorials/build-rag-with-milvus.md b/localization/v2.4.x/site/fr/tutorials/build-rag-with-milvus.md index 8a4a7f5d9..bd6aa101d 100644 --- a/localization/v2.4.x/site/fr/tutorials/build-rag-with-milvus.md +++ b/localization/v2.4.x/site/fr/tutorials/build-rag-with-milvus.md @@ -1,7 +1,7 @@ --- id: build-rag-with-milvus.md summary: construire un chiffon avec milvus -title: Construire un RAG avec Milvus +title: Construire RAG avec Milvus ---

Build RAG with Milvus

Open In Colab | GitHub Repository

In this tutorial, we will show you how to build a RAG (Retrieval-Augmented Generation) pipeline with Milvus.

The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.
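Condensed, the two-stage flow the tutorial implements looks like this; the sketch reuses the milvus_client, openai_client, collection_name, and emb_text objects the tutorial code defines, and is a restatement of that code rather than additional API.

question = "How is data stored in milvus?"

# 1) Retrieval: embed the question and fetch the closest passages.
hits = milvus_client.search(
    collection_name=collection_name,
    data=[emb_text(question)],
    limit=3,
    output_fields=["text"],
)
context = "\n".join(hit["entity"]["text"] for hit in hits[0])

# 2) Generation: answer the question grounded in the retrieved context.
response = openai_client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "user",
         "content": f"Context:\n{context}\n\nQuestion: {question}"},
    ],
)
print(response.choices[0].message.content)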

diff --git a/localization/v2.4.x/site/fr/tutorials/graph_rag_with_milvus.md b/localization/v2.4.x/site/fr/tutorials/graph_rag_with_milvus.md index a66bb967f..2e5bbb49f 100644 --- a/localization/v2.4.x/site/fr/tutorials/graph_rag_with_milvus.md +++ b/localization/v2.4.x/site/fr/tutorials/graph_rag_with_milvus.md @@ -18,10 +18,11 @@ title: Graphique RAG avec Milvus

Open In Colab | GitHub Repository

The widespread application of large language models highlights the importance of improving the accuracy and relevance of their responses. Retrieval-Augmented Generation (RAG) enhances models with external knowledge bases, providing more contextual information and mitigating issues such as hallucination and insufficient knowledge. However, relying solely on simple RAG paradigms has its limitations, especially when dealing with complex entity relationships and multi-hop questions, where the model often struggles to provide accurate answers.

Introducing knowledge graphs (KGs) into the RAG system offers a new solution. KGs present entities and their relationships in a structured way, providing more precise retrieval information and helping RAG better handle complex question-answering tasks. KG-RAG is still in its early stages, and there is no consensus on how to effectively retrieve entities and relationships from KGs, or on how to integrate vector similarity search with graph structures.

In this notebook, we introduce a simple yet powerful approach to greatly improve the performance of this scenario. It is a straightforward RAG paradigm with multi-way retrieval followed by reranking, but it implements Graph RAG logically and achieves state-of-the-art performance in handling multi-hop questions. Let's see how it is implemented.

        @@ -49,7 +50,7 @@ title: Graphique RAG avec Milvus

If you are using Google Colab, to enable the dependencies just installed, you may need to restart the runtime (click the "Runtime" menu at the top of the screen and select "Restart session" from the dropdown menu).

We will use the models from OpenAI. You should prepare the API key OPENAI_API_KEY as an environment variable.

        import os
         
         os.environ["OPENAI_API_KEY"] = "sk-***********"
        diff --git a/localization/v2.4.x/site/fr/tutorials/hybrid_search_with_milvus.json b/localization/v2.4.x/site/fr/tutorials/hybrid_search_with_milvus.json
        index 9f9c41de2..62d4175e1 100644
        --- a/localization/v2.4.x/site/fr/tutorials/hybrid_search_with_milvus.json
        +++ b/localization/v2.4.x/site/fr/tutorials/hybrid_search_with_milvus.json
        @@ -1 +1 @@
        -{"codeList":["$ pip install --upgrade pymilvus \"pymilvus[model]\"\n","# Run this cell to download the dataset\n$ wget http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv\n","import pandas as pd\n\nfile_path = \"quora_duplicate_questions.tsv\"\ndf = pd.read_csv(file_path, sep=\"\\t\")\nquestions = set()\nfor _, row in df.iterrows():\n    obj = row.to_dict()\n    questions.add(obj[\"question1\"][:512])\n    questions.add(obj[\"question2\"][:512])\n    if len(questions) > 500:  # Skip this if you want to use the full dataset\n        break\n\ndocs = list(questions)\n\n# example question\nprint(docs[0])\n","from milvus_model.hybrid import BGEM3EmbeddingFunction\n\nef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\ndense_dim = ef.dim[\"dense\"]\n\n# Generate embeddings using BGE-M3 model\ndocs_embeddings = ef(docs)\n","from pymilvus import (\n    connections,\n    utility,\n    FieldSchema,\n    CollectionSchema,\n    DataType,\n    Collection,\n)\n\n# Connect to Milvus given URI\nconnections.connect(uri=\"./milvus.db\")\n\n# Specify the data schema for the new Collection\nfields = [\n    # Use auto generated id as primary key\n    FieldSchema(\n        name=\"pk\", dtype=DataType.VARCHAR, is_primary=True, auto_id=True, max_length=100\n    ),\n    # Store the original text to retrieve based on semantically distance\n    FieldSchema(name=\"text\", dtype=DataType.VARCHAR, max_length=512),\n    # Milvus now supports both sparse and dense vectors,\n    # we can store each in a separate field to conduct hybrid search on both vectors\n    FieldSchema(name=\"sparse_vector\", dtype=DataType.SPARSE_FLOAT_VECTOR),\n    FieldSchema(name=\"dense_vector\", dtype=DataType.FLOAT_VECTOR, dim=dense_dim),\n]\nschema = CollectionSchema(fields)\n\n# Create collection (drop the old one if exists)\ncol_name = \"hybrid_demo\"\nif utility.has_collection(col_name):\n    Collection(col_name).drop()\ncol = Collection(col_name, schema, consistency_level=\"Strong\")\n\n# To make vector search efficient, we need to create indices for the vector fields\nsparse_index = {\"index_type\": \"SPARSE_INVERTED_INDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"sparse_vector\", sparse_index)\ndense_index = {\"index_type\": \"AUTOINDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"dense_vector\", dense_index)\ncol.load()\n","# For efficiency, we insert 50 records in each small batch\nfor i in range(0, len(docs), 50):\n    batched_entities = [\n        docs[i : i + 50],\n        docs_embeddings[\"sparse\"][i : i + 50],\n        docs_embeddings[\"dense\"][i : i + 50],\n    ]\n    col.insert(batched_entities)\nprint(\"Number of entities inserted:\", col.num_entities)\n","# Enter your search query\nquery = input(\"Enter your search query: \")\nprint(query)\n\n# Generate embeddings for the query\nquery_embeddings = ef([query])\n# print(query_embeddings)\n","from pymilvus import (\n    AnnSearchRequest,\n    WeightedRanker,\n)\n\n\ndef dense_search(col, query_dense_embedding, limit=10):\n    search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    res = col.search(\n        [query_dense_embedding],\n        anns_field=\"dense_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef sparse_search(col, query_sparse_embedding, limit=10):\n    search_params = {\n        \"metric_type\": \"IP\",\n        \"params\": {},\n    }\n    res = col.search(\n        [query_sparse_embedding],\n        
anns_field=\"sparse_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef hybrid_search(\n    col,\n    query_dense_embedding,\n    query_sparse_embedding,\n    sparse_weight=1.0,\n    dense_weight=1.0,\n    limit=10,\n):\n    dense_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    dense_req = AnnSearchRequest(\n        [query_dense_embedding], \"dense_vector\", dense_search_params, limit=limit\n    )\n    sparse_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    sparse_req = AnnSearchRequest(\n        [query_sparse_embedding], \"sparse_vector\", sparse_search_params, limit=limit\n    )\n    rerank = WeightedRanker(sparse_weight, dense_weight)\n    res = col.hybrid_search(\n        [sparse_req, dense_req], rerank=rerank, limit=limit, output_fields=[\"text\"]\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n","dense_results = dense_search(col, query_embeddings[\"dense\"][0])\nsparse_results = sparse_search(col, query_embeddings[\"sparse\"][0])\nhybrid_results = hybrid_search(\n    col,\n    query_embeddings[\"dense\"][0],\n    query_embeddings[\"sparse\"][0],\n    sparse_weight=0.7,\n    dense_weight=1.0,\n)\n","def doc_text_formatting(ef, query, docs):\n    tokenizer = ef.model.tokenizer\n    query_tokens_ids = tokenizer.encode(query, return_offsets_mapping=True)\n    query_tokens = tokenizer.convert_ids_to_tokens(query_tokens_ids)\n    formatted_texts = []\n\n    for doc in docs:\n        ldx = 0\n        landmarks = []\n        encoding = tokenizer.encode_plus(doc, return_offsets_mapping=True)\n        tokens = tokenizer.convert_ids_to_tokens(encoding[\"input_ids\"])[1:-1]\n        offsets = encoding[\"offset_mapping\"][1:-1]\n        for token, (start, end) in zip(tokens, offsets):\n            if token in query_tokens:\n                if len(landmarks) != 0 and start == landmarks[-1]:\n                    landmarks[-1] = end\n                else:\n                    landmarks.append(start)\n                    landmarks.append(end)\n        close = False\n        formatted_text = \"\"\n        for i, c in enumerate(doc):\n            if ldx == len(landmarks):\n                pass\n            elif i == landmarks[ldx]:\n                if close:\n                    formatted_text += \"\"\n                else:\n                    formatted_text += \"\"\n                close = not close\n                ldx = ldx + 1\n            formatted_text += c\n        if close is True:\n            formatted_text += \"\"\n        formatted_texts.append(formatted_text)\n    return formatted_texts\n","from IPython.display import Markdown, display\n\n# Dense search results\ndisplay(Markdown(\"**Dense Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, dense_results)\nfor result in dense_results:\n    display(Markdown(result))\n\n# Sparse search results\ndisplay(Markdown(\"\\n**Sparse Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, sparse_results)\nfor result in formatted_results:\n    display(Markdown(result))\n\n# Hybrid search results\ndisplay(Markdown(\"\\n**Hybrid Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, hybrid_results)\nfor result in formatted_results:\n    display(Markdown(result))\n"],"headingContent":"","anchorList":[{"label":"Recherche hybride avec Milvus","href":"Hybrid-Search-with-Milvus","type":1,"isActive":false}]}
        \ No newline at end of file
        +{"codeList":["$ pip install --upgrade pymilvus \"pymilvus[model]\"\n","# Run this cell to download the dataset\n$ wget http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv\n","import pandas as pd\n\nfile_path = \"quora_duplicate_questions.tsv\"\ndf = pd.read_csv(file_path, sep=\"\\t\")\nquestions = set()\nfor _, row in df.iterrows():\n    obj = row.to_dict()\n    questions.add(obj[\"question1\"][:512])\n    questions.add(obj[\"question2\"][:512])\n    if len(questions) > 500:  # Skip this if you want to use the full dataset\n        break\n\ndocs = list(questions)\n\n# example question\nprint(docs[0])\n","from milvus_model.hybrid import BGEM3EmbeddingFunction\n\nef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\ndense_dim = ef.dim[\"dense\"]\n\n# Generate embeddings using BGE-M3 model\ndocs_embeddings = ef(docs)\n","from pymilvus import (\n    connections,\n    utility,\n    FieldSchema,\n    CollectionSchema,\n    DataType,\n    Collection,\n)\n\n# Connect to Milvus given URI\nconnections.connect(uri=\"./milvus.db\")\n\n# Specify the data schema for the new Collection\nfields = [\n    # Use auto generated id as primary key\n    FieldSchema(\n        name=\"pk\", dtype=DataType.VARCHAR, is_primary=True, auto_id=True, max_length=100\n    ),\n    # Store the original text to retrieve based on semantically distance\n    FieldSchema(name=\"text\", dtype=DataType.VARCHAR, max_length=512),\n    # Milvus now supports both sparse and dense vectors,\n    # we can store each in a separate field to conduct hybrid search on both vectors\n    FieldSchema(name=\"sparse_vector\", dtype=DataType.SPARSE_FLOAT_VECTOR),\n    FieldSchema(name=\"dense_vector\", dtype=DataType.FLOAT_VECTOR, dim=dense_dim),\n]\nschema = CollectionSchema(fields)\n\n# Create collection (drop the old one if exists)\ncol_name = \"hybrid_demo\"\nif utility.has_collection(col_name):\n    Collection(col_name).drop()\ncol = Collection(col_name, schema, consistency_level=\"Strong\")\n\n# To make vector search efficient, we need to create indices for the vector fields\nsparse_index = {\"index_type\": \"SPARSE_INVERTED_INDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"sparse_vector\", sparse_index)\ndense_index = {\"index_type\": \"AUTOINDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"dense_vector\", dense_index)\ncol.load()\n","# For efficiency, we insert 50 records in each small batch\nfor i in range(0, len(docs), 50):\n    batched_entities = [\n        docs[i : i + 50],\n        docs_embeddings[\"sparse\"][i : i + 50],\n        docs_embeddings[\"dense\"][i : i + 50],\n    ]\n    col.insert(batched_entities)\nprint(\"Number of entities inserted:\", col.num_entities)\n","# Enter your search query\nquery = input(\"Enter your search query: \")\nprint(query)\n\n# Generate embeddings for the query\nquery_embeddings = ef([query])\n# print(query_embeddings)\n","from pymilvus import (\n    AnnSearchRequest,\n    WeightedRanker,\n)\n\n\ndef dense_search(col, query_dense_embedding, limit=10):\n    search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    res = col.search(\n        [query_dense_embedding],\n        anns_field=\"dense_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef sparse_search(col, query_sparse_embedding, limit=10):\n    search_params = {\n        \"metric_type\": \"IP\",\n        \"params\": {},\n    }\n    res = col.search(\n        [query_sparse_embedding],\n        
anns_field=\"sparse_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef hybrid_search(\n    col,\n    query_dense_embedding,\n    query_sparse_embedding,\n    sparse_weight=1.0,\n    dense_weight=1.0,\n    limit=10,\n):\n    dense_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    dense_req = AnnSearchRequest(\n        [query_dense_embedding], \"dense_vector\", dense_search_params, limit=limit\n    )\n    sparse_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    sparse_req = AnnSearchRequest(\n        [query_sparse_embedding], \"sparse_vector\", sparse_search_params, limit=limit\n    )\n    rerank = WeightedRanker(sparse_weight, dense_weight)\n    res = col.hybrid_search(\n        [sparse_req, dense_req], rerank=rerank, limit=limit, output_fields=[\"text\"]\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n","dense_results = dense_search(col, query_embeddings[\"dense\"][0])\nsparse_results = sparse_search(col, query_embeddings[\"sparse\"]._getrow(0))\nhybrid_results = hybrid_search(\n    col,\n    query_embeddings[\"dense\"][0],\n    query_embeddings[\"sparse\"]._getrow(0),\n    sparse_weight=0.7,\n    dense_weight=1.0,\n)\n","def doc_text_formatting(ef, query, docs):\n    tokenizer = ef.model.tokenizer\n    query_tokens_ids = tokenizer.encode(query, return_offsets_mapping=True)\n    query_tokens = tokenizer.convert_ids_to_tokens(query_tokens_ids)\n    formatted_texts = []\n\n    for doc in docs:\n        ldx = 0\n        landmarks = []\n        encoding = tokenizer.encode_plus(doc, return_offsets_mapping=True)\n        tokens = tokenizer.convert_ids_to_tokens(encoding[\"input_ids\"])[1:-1]\n        offsets = encoding[\"offset_mapping\"][1:-1]\n        for token, (start, end) in zip(tokens, offsets):\n            if token in query_tokens:\n                if len(landmarks) != 0 and start == landmarks[-1]:\n                    landmarks[-1] = end\n                else:\n                    landmarks.append(start)\n                    landmarks.append(end)\n        close = False\n        formatted_text = \"\"\n        for i, c in enumerate(doc):\n            if ldx == len(landmarks):\n                pass\n            elif i == landmarks[ldx]:\n                if close:\n                    formatted_text += \"\"\n                else:\n                    formatted_text += \"\"\n                close = not close\n                ldx = ldx + 1\n            formatted_text += c\n        if close is True:\n            formatted_text += \"\"\n        formatted_texts.append(formatted_text)\n    return formatted_texts\n","from IPython.display import Markdown, display\n\n# Dense search results\ndisplay(Markdown(\"**Dense Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, dense_results)\nfor result in dense_results:\n    display(Markdown(result))\n\n# Sparse search results\ndisplay(Markdown(\"\\n**Sparse Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, sparse_results)\nfor result in formatted_results:\n    display(Markdown(result))\n\n# Hybrid search results\ndisplay(Markdown(\"\\n**Hybrid Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, hybrid_results)\nfor result in formatted_results:\n    display(Markdown(result))\n"],"headingContent":"Hybrid Search with Milvus","anchorList":[{"label":"Recherche hybride avec Milvus","href":"Hybrid-Search-with-Milvus","type":1,"isActive":false}]}
        \ No newline at end of file
        diff --git a/localization/v2.4.x/site/fr/tutorials/hybrid_search_with_milvus.md b/localization/v2.4.x/site/fr/tutorials/hybrid_search_with_milvus.md
        index c239cf540..3cc6d3408 100644
        --- a/localization/v2.4.x/site/fr/tutorials/hybrid_search_with_milvus.md
        +++ b/localization/v2.4.x/site/fr/tutorials/hybrid_search_with_milvus.md
        @@ -18,7 +18,8 @@ title: Recherche hybride avec Milvus
                   d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
                 >
               
        -    

        Open In Colab

        +

        Open In Colab +GitHub Repository

In this tutorial, we will demonstrate how to conduct hybrid search with Milvus and the BGE-M3 model. The BGE-M3 model can convert text into dense and sparse vectors. Milvus supports storing both types of vectors in one collection, enabling hybrid search that improves the relevance of the results.

Milvus supports dense, sparse, and hybrid retrieval methods:

        @@ -71,7 +72,7 @@ Inference Embeddings: 100%|██████████| 32/32 [01:59<00:00
• Setting the uri as a local file, e.g. "./milvus.db", is the most convenient method, as it automatically utilizes Milvus Lite to store all data in this file.
• If you have large-scale data, say more than a million vectors, you can set up a more performant Milvus server on Docker or Kubernetes. In this setup, please use the server uri, e.g. http://localhost:19530, as your uri.
• If you want to use Zilliz Cloud, the fully managed cloud service for Milvus, adjust the uri and token, which correspond to the Public Endpoint and API key in Zilliz Cloud. (All three options are sketched below.)
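The three options map directly onto how the client is constructed; a minimal sketch, where the endpoint and key are placeholders:

from pymilvus import MilvusClient

# Milvus Lite: everything is stored in a local file.
client = MilvusClient(uri="./milvus.db")

# Self-hosted Milvus on Docker or Kubernetes: point at the server.
# client = MilvusClient(uri="http://localhost:19530")

# Zilliz Cloud: use the Public Endpoint and an API key.
# client = MilvusClient(uri="<public_endpoint>", token="<api_key>")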
        @@ -201,11 +202,11 @@ def dense_search(col,

        Exécutons trois recherches différentes avec les fonctions définies :

        dense_results = dense_search(col, query_embeddings["dense"][0])
        -sparse_results = sparse_search(col, query_embeddings["sparse"][0])
        +sparse_results = sparse_search(col, query_embeddings["sparse"]._getrow(0))
         hybrid_results = hybrid_search(
             col,
             query_embeddings["dense"][0],
        -    query_embeddings["sparse"][0],
        +    query_embeddings["sparse"]._getrow(0),
             sparse_weight=0.7,
             dense_weight=1.0,
         )
        @@ -287,7 +288,7 @@ formatted_results = doc_text_formatting(ef, query, hybrid_results)
         

How do I create a new terminal and a new shell in Linux using C programming?

How do I create a new shell in a new terminal using C programming (Linux terminal)?

What is the best business to start in Hyderabad?

What is the best way to start a business in Hyderabad?

What is the best way to start learning robotics? Which is the best development board to start working on?

What is the best way to start working in robotics? Which books on algorithms are suitable for a complete beginner?

How do I make life fit me and stop life from abusing me mentally and emotionally?

        @@ -306,7 +307,7 @@ formatted_results = doc_text_formatting(ef, query, hybrid_results)

How do I create a new terminal and a new shell in Linux using C programming?

How do I create a new shell in a new terminal using C programming (Linux terminal)?

What is the best business to start in Hyderabad?

What is the best way to start a business in Hyderabad?

What math does a beginner need to understand algorithms for computer programming? Which books on algorithms are suitable for a complete beginner?

How do I make life fit me and stop life from abusing me mentally and emotionally?

Quick Deploy

To learn how to start an online demo with this tutorial, please refer to the example application.

        diff --git a/localization/v2.4.x/site/fr/tutorials/image_similarity_search.json b/localization/v2.4.x/site/fr/tutorials/image_similarity_search.json index 413391aa1..22108a2ec 100644 --- a/localization/v2.4.x/site/fr/tutorials/image_similarity_search.json +++ b/localization/v2.4.x/site/fr/tutorials/image_similarity_search.json @@ -1 +1 @@ -{"codeList":["!wget https://github.com/milvus-io/pymilvus-assets/releases/download/imagedata/reverse_image_search.zip\n!unzip -q -o reverse_image_search.zip\n","$ pip install pymilvus --upgrade\n$ pip install timm\n","import torch\nfrom PIL import Image\nimport timm\nfrom sklearn.preprocessing import normalize\nfrom timm.data import resolve_data_config\nfrom timm.data.transforms_factory import create_transform\n\n\nclass FeatureExtractor:\n def __init__(self, modelname):\n # Load the pre-trained model\n self.model = timm.create_model(\n modelname, pretrained=True, num_classes=0, global_pool=\"avg\"\n )\n self.model.eval()\n\n # Get the input size required by the model\n self.input_size = self.model.default_cfg[\"input_size\"]\n\n config = resolve_data_config({}, model=modelname)\n # Get the preprocessing function provided by TIMM for the model\n self.preprocess = create_transform(**config)\n\n def __call__(self, imagepath):\n # Preprocess the input image\n input_image = Image.open(imagepath).convert(\"RGB\") # Convert to RGB if needed\n input_image = self.preprocess(input_image)\n\n # Convert the image to a PyTorch tensor and add a batch dimension\n input_tensor = input_image.unsqueeze(0)\n\n # Perform inference\n with torch.no_grad():\n output = self.model(input_tensor)\n\n # Extract the feature vector\n feature_vector = output.squeeze().numpy()\n\n return normalize(feature_vector.reshape(1, -1), norm=\"l2\").flatten()\n","from pymilvus import MilvusClient\n\n# Set up a Milvus client\nclient = MilvusClient(uri=\"example.db\")\n# Create a collection in quick setup mode\nif client.has_collection(collection_name=\"image_embeddings\"):\n client.drop_collection(collection_name=\"image_embeddings\")\nclient.create_collection(\n collection_name=\"image_embeddings\",\n vector_field_name=\"vector\",\n dimension=512,\n auto_id=True,\n enable_dynamic_field=True,\n metric_type=\"COSINE\",\n)\n","import os\n\nextractor = FeatureExtractor(\"resnet34\")\n\nroot = \"./train\"\ninsert = True\nif insert is True:\n for dirpath, foldername, filenames in os.walk(root):\n for filename in filenames:\n if filename.endswith(\".JPEG\"):\n filepath = dirpath + \"/\" + filename\n image_embedding = extractor(filepath)\n client.insert(\n \"image_embeddings\",\n {\"vector\": image_embedding, \"filename\": filepath},\n )\n","from IPython.display import display\n\nquery_image = \"./test/Afghan_hound/n02088094_4261.JPEG\"\n\nresults = client.search(\n \"image_embeddings\",\n data=[extractor(query_image)],\n output_fields=[\"filename\"],\n search_params={\"metric_type\": \"COSINE\"},\n)\nimages = []\nfor result in results:\n for hit in result[:10]:\n filename = hit[\"entity\"][\"filename\"]\n img = Image.open(filename)\n img = img.resize((150, 150))\n images.append(img)\n\nwidth = 150 * 5\nheight = 150 * 2\nconcatenated_image = Image.new(\"RGB\", (width, height))\n\nfor idx, img in enumerate(images):\n x = idx % 5\n y = idx // 5\n concatenated_image.paste(img, (x * 150, y * 150))\ndisplay(\"query\")\ndisplay(Image.open(query_image).resize((150, 150)))\ndisplay(\"results\")\ndisplay(concatenated_image)\n"],"headingContent":"","anchorList":[{"label":"Recherche d'images avec 
Milvus","href":"Image-Search-with-Milvus","type":1,"isActive":false},{"label":"Préparation du jeu de données","href":"Dataset-Preparation","type":2,"isActive":false},{"label":"Conditions préalables","href":"Prequisites","type":2,"isActive":false},{"label":"Définir l'extracteur de caractéristiques","href":"Define-the-Feature-Extractor","type":2,"isActive":false},{"label":"Créer une collection Milvus","href":"Create-a-Milvus-Collection","type":2,"isActive":false},{"label":"Insérer les embeddings dans Milvus","href":"Insert-the-Embeddings-to-Milvus","type":2,"isActive":false},{"label":"Déploiement rapide","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["!wget https://github.com/milvus-io/pymilvus-assets/releases/download/imagedata/reverse_image_search.zip\n!unzip -q -o reverse_image_search.zip\n","$ pip install pymilvus --upgrade\n$ pip install timm\n","import torch\nfrom PIL import Image\nimport timm\nfrom sklearn.preprocessing import normalize\nfrom timm.data import resolve_data_config\nfrom timm.data.transforms_factory import create_transform\n\n\nclass FeatureExtractor:\n def __init__(self, modelname):\n # Load the pre-trained model\n self.model = timm.create_model(\n modelname, pretrained=True, num_classes=0, global_pool=\"avg\"\n )\n self.model.eval()\n\n # Get the input size required by the model\n self.input_size = self.model.default_cfg[\"input_size\"]\n\n config = resolve_data_config({}, model=modelname)\n # Get the preprocessing function provided by TIMM for the model\n self.preprocess = create_transform(**config)\n\n def __call__(self, imagepath):\n # Preprocess the input image\n input_image = Image.open(imagepath).convert(\"RGB\") # Convert to RGB if needed\n input_image = self.preprocess(input_image)\n\n # Convert the image to a PyTorch tensor and add a batch dimension\n input_tensor = input_image.unsqueeze(0)\n\n # Perform inference\n with torch.no_grad():\n output = self.model(input_tensor)\n\n # Extract the feature vector\n feature_vector = output.squeeze().numpy()\n\n return normalize(feature_vector.reshape(1, -1), norm=\"l2\").flatten()\n","from pymilvus import MilvusClient\n\n# Set up a Milvus client\nclient = MilvusClient(uri=\"example.db\")\n# Create a collection in quick setup mode\nif client.has_collection(collection_name=\"image_embeddings\"):\n client.drop_collection(collection_name=\"image_embeddings\")\nclient.create_collection(\n collection_name=\"image_embeddings\",\n vector_field_name=\"vector\",\n dimension=512,\n auto_id=True,\n enable_dynamic_field=True,\n metric_type=\"COSINE\",\n)\n","import os\n\nextractor = FeatureExtractor(\"resnet34\")\n\nroot = \"./train\"\ninsert = True\nif insert is True:\n for dirpath, foldername, filenames in os.walk(root):\n for filename in filenames:\n if filename.endswith(\".JPEG\"):\n filepath = dirpath + \"/\" + filename\n image_embedding = extractor(filepath)\n client.insert(\n \"image_embeddings\",\n {\"vector\": image_embedding, \"filename\": filepath},\n )\n","from IPython.display import display\n\nquery_image = \"./test/Afghan_hound/n02088094_4261.JPEG\"\n\nresults = client.search(\n \"image_embeddings\",\n data=[extractor(query_image)],\n output_fields=[\"filename\"],\n search_params={\"metric_type\": \"COSINE\"},\n)\nimages = []\nfor result in results:\n for hit in result[:10]:\n filename = hit[\"entity\"][\"filename\"]\n img = Image.open(filename)\n img = img.resize((150, 150))\n images.append(img)\n\nwidth = 150 * 5\nheight = 150 * 2\nconcatenated_image = Image.new(\"RGB\", 
(width, height))\n\nfor idx, img in enumerate(images):\n x = idx % 5\n y = idx // 5\n concatenated_image.paste(img, (x * 150, y * 150))\ndisplay(\"query\")\ndisplay(Image.open(query_image).resize((150, 150)))\ndisplay(\"results\")\ndisplay(concatenated_image)\n"],"headingContent":"Image Search with Milvus","anchorList":[{"label":"Recherche d'images avec Milvus","href":"Image-Search-with-Milvus","type":1,"isActive":false},{"label":"Préparation du jeu de données","href":"Dataset-Preparation","type":2,"isActive":false},{"label":"Conditions préalables","href":"Prequisites","type":2,"isActive":false},{"label":"Définir l'extracteur de caractéristiques","href":"Define-the-Feature-Extractor","type":2,"isActive":false},{"label":"Créer une collection Milvus","href":"Create-a-Milvus-Collection","type":2,"isActive":false},{"label":"Insérer les embeddings dans Milvus","href":"Insert-the-Embeddings-to-Milvus","type":2,"isActive":false},{"label":"Déploiement rapide","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/tutorials/image_similarity_search.md b/localization/v2.4.x/site/fr/tutorials/image_similarity_search.md index 5e39c04ed..36104a09e 100644 --- a/localization/v2.4.x/site/fr/tutorials/image_similarity_search.md +++ b/localization/v2.4.x/site/fr/tutorials/image_similarity_search.md @@ -18,9 +18,10 @@ title: Recherche d'images avec Milvus d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

        Open In Colab

        +

        Open In Colab +GitHub Repository

        -

        Dans ce carnet, nous allons vous montrer comment utiliser Milvus pour rechercher des images similaires dans un ensemble de données. Nous utiliserons un sous-ensemble de l'ensemble de données ImageNet, puis nous rechercherons une image d'un chien de chasse afghan pour en faire la démonstration.

        +

        Dans ce carnet, nous allons vous montrer comment utiliser Milvus pour rechercher des images similaires dans un ensemble de données. Nous utiliserons un sous-ensemble de l'ensemble de données ImageNet, puis nous rechercherons une image d'un chien de chasse afghan.

        Préparation du jeu de données

        Open In Colab

        +

        Open In Colab +GitHub Repository

        -

        Ce tutoriel présente le RAG multimodal alimenté par Milvus, le modèle BGE visualisé et GPT-4o. Avec ce système, les utilisateurs peuvent télécharger une image et éditer des instructions textuelles, qui sont traitées par le modèle de recherche composé de BGE pour rechercher des images candidates. GPT-4o agit ensuite comme un re-ranker, en sélectionnant l'image la plus appropriée et en fournissant la justification du choix. Cette puissante combinaison permet une expérience de recherche d'images transparente et intuitive, en tirant parti de Milvus pour une recherche efficace, du modèle BGE pour un traitement et une mise en correspondance précis des images, et de GPT-4o pour un reranking avancé.

        +

        Ce tutoriel présente le RAG multimodal alimenté par Milvus, le modèle BGE visualisé et GPT-4o. Avec ce système, les utilisateurs peuvent télécharger une image et éditer des instructions textuelles, qui sont traitées par le modèle de recherche composé de BGE pour rechercher des images candidates. GPT-4o joue ensuite le rôle d'un re-ranker, en sélectionnant l'image la plus appropriée et en expliquant les raisons de son choix. Cette puissante combinaison permet une expérience de recherche d'images transparente et intuitive, en s'appuyant sur Milvus pour une recherche efficace, sur le modèle BGE pour un traitement et une mise en correspondance précis des images, et sur GPT-4o pour un reranking avancé.

        Préparation

        Cette section chargera des images d'exemple dans la base de données avec les encastrements correspondants.

        +

        Cette section chargera des images d'exemple dans la base de données avec les embeddings correspondants.

        Générer des embeddings

        Charger toutes les images jpeg du répertoire de données et appliquer l'encodeur pour convertir les images en embeddings.

        import os
         from tqdm import tqdm
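from glob import glob

# Indicative sketch only, not the tutorial's exact code: "encoder" and "data_dir"
# are assumed to come from earlier steps of this tutorial (hypothetical names),
# and the "*.jpg" pattern is an assumption about the dataset layout.
image_dict = {}
for image_path in tqdm(glob(os.path.join(data_dir, "**/*.jpg"), recursive=True)):
    try:
        image_dict[image_path] = encoder.encode_image(image_path)  # assumed helper
    except Exception as e:
        print(f"Failed to generate embedding for {image_path}: {e}")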
        diff --git a/localization/v2.4.x/site/fr/tutorials/tutorials-overview.json b/localization/v2.4.x/site/fr/tutorials/tutorials-overview.json
        index de88cbfcd..13aa13eab 100644
        --- a/localization/v2.4.x/site/fr/tutorials/tutorials-overview.json
        +++ b/localization/v2.4.x/site/fr/tutorials/tutorials-overview.json
        @@ -1 +1 @@
        -{"codeList":[],"headingContent":"","anchorList":[{"label":"Vue d'ensemble des didacticiels","href":"Tutorials-Overview","type":1,"isActive":false}]}
        \ No newline at end of file
        +{"codeList":[],"headingContent":"Tutorials Overview","anchorList":[{"label":"Vue d'ensemble des didacticiels","href":"Tutorials-Overview","type":1,"isActive":false}]}
        \ No newline at end of file
        diff --git a/localization/v2.4.x/site/fr/tutorials/tutorials-overview.md b/localization/v2.4.x/site/fr/tutorials/tutorials-overview.md
        index 8a0a97461..2bc6df40e 100644
        --- a/localization/v2.4.x/site/fr/tutorials/tutorials-overview.md
        +++ b/localization/v2.4.x/site/fr/tutorials/tutorials-overview.md
        @@ -3,7 +3,7 @@ id: tutorials-overview.md
         summary: >-
           Cette page fournit une liste de tutoriels vous permettant d'interagir avec
           Milvus.
        -title: Vue d'ensemble des tutoriels
        +title: Vue d'ensemble des didacticiels
         ---
         

        Vue d'ensemble des didacticiels

        -

        Débloquer une collection

        +

        Charger partiellement une collection (aperçu public)

        +

Cette fonctionnalité est actuellement en aperçu public. L'API et la fonctionnalité peuvent changer à l'avenir.


        +
        +

        Lors de la réception de votre demande de chargement, Milvus charge tous les index des champs vectoriels et toutes les données des champs scalaires en mémoire. Si certains champs ne doivent pas être impliqués dans les recherches et les requêtes, vous pouvez les exclure du chargement pour réduire l'utilisation de la mémoire et améliorer les performances de recherche.

        +
        +
        # 7. Load the collection
        +client.load_collection(
        +    collection_name="customized_setup_2",
+    load_fields=["my_id", "my_vector"], # Load only the specified fields
        +    skip_load_dynamic_field=True # Skip loading the dynamic field
        +)
        +
        +res = client.get_load_state(
        +    collection_name="customized_setup_2"
        +)
        +
        +print(res)
        +
        +# Output
        +#
        +# {
        +#     "state": "<LoadState: Loaded>"
        +# }
        +
        +

Notez que seuls les champs répertoriés dans load_fields peuvent être utilisés comme conditions de filtrage et champs de sortie dans les recherches et les requêtes. Vous devez toujours inclure la clé primaire dans la liste. Les noms de champs exclus du chargement ne seront pas disponibles pour le filtrage ou la sortie.

        +

        Vous pouvez utiliser skip_load_dynamic_field=True pour ne pas charger le champ dynamique. Milvus traite le champ dynamique comme un champ unique, de sorte que toutes les clés du champ dynamique seront incluses ou exclues ensemble.

        +
        +

        Libération d'une collection

        Pour libérer une collection, utilisez la méthode release_collection() en spécifiant le nom de la collection.
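À titre indicatif, une esquisse minimale en Python (client MilvusClient déjà initialisé ; le nom de collection est repris des exemples ci-dessus) :

# Release the collection, then verify its load state
client.release_collection(collection_name="customized_setup_2")

res = client.get_load_state(collection_name="customized_setup_2")
print(res)

# Output (sketch)
#
# {
#     "state": "<LoadState: NotLoad>"
# }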

        @@ -2049,7 +2076,7 @@ res = await client.alterAlias() en spécifiant le nom de la collection et l'alias.

        -

        Pour réaffecter des alias à d'autres collections, utilisez la méthode alterAlias() en spécifiant le nom de la collection et l'alias.

        +

        Pour réaffecter des alias à d'autres collections, utilisez la méthode alterAlias() en précisant le nom de la collection et l'alias.

Pour réaffecter des alias à d'autres collections, vous pouvez utiliser le point de terminaison de l'API POST /v2/vectordb/aliases/alter.
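À titre purement illustratif, une esquisse hypothétique d'appel à ce point de terminaison en Python ; l'adresse du serveur et les clés du corps de la requête (aliasName, collectionName) sont des hypothèses, à vérifier dans la référence de l'API REST :

import requests

# Hypothetical sketch: reassign an alias to another collection via the v2 REST API
resp = requests.post(
    "http://localhost:19530/v2/vectordb/aliases/alter",  # assumed server address
    json={
        "aliasName": "bob",                      # assumed key name
        "collectionName": "customized_setup_2"   # assumed key name
    },
)
print(resp.json())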

        @@ -2323,7 +2350,7 @@ collection.set_properties( } )
        -

        Définir MMAP

        Configurer la propriété de mappage de la mémoire (MMAP) pour la collection, qui détermine si les données sont mappées en mémoire pour améliorer les performances des requêtes. Pour plus d'informations, reportez-vous à la section Configurer le mappage de la mémoire .

        +

        Définir MMAP

        Configurer la propriété de mappage de la mémoire (MMAP) pour la collection, qui détermine si les données sont mappées en mémoire pour améliorer les performances des requêtes. Pour plus d'informations, reportez-vous à la section Configurer le mappage de la mémoire.

        Avant de définir la propriété MMAP, libérez d'abord la collection. Sinon, une erreur se produira.
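Esquisse indicative (API ORM de pymilvus, en supposant un objet Collection nommé collection) :

# Release the collection first, then enable memory mapping
collection.release()
collection.set_properties(properties={"mmap.enabled": True})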

        diff --git a/localization/v2.4.x/site/fr/userGuide/manage-indexes/index-vector-fields.json b/localization/v2.4.x/site/fr/userGuide/manage-indexes/index-vector-fields.json index 399fe2c82..d7ad51340 100644 --- a/localization/v2.4.x/site/fr/userGuide/manage-indexes/index-vector-fields.json +++ b/localization/v2.4.x/site/fr/userGuide/manage-indexes/index-vector-fields.json @@ -1 +1 @@ -{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create schema\n# 2.1. Create schema\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n)\n\n# 2.2. Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\n\n# 3. Create collection\nclient.create_collection(\n collection_name=\"customized_setup\", \n schema=schema, \n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder().fieldName(\"id\").dataType(DataType.Int64).isPrimaryKey(true).autoID(false).build());\nschema.addField(AddFieldReq.builder().fieldName(\"vector\").dataType(DataType.FloatVector).dimension(5).build());\n\n// 3 Create a collection without schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n.collectionName(\"customized_setup\")\n.collectionSchema(schema)\n.build();\n\nclient.createCollection(customizedSetupReq);\n","// 1. Set up a Milvus Client\nclient = new MilvusClient({address, token});\n\n// 2. Define fields for the collection\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n autoID: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n]\n\n// 3. Create a collection\nres = await client.createCollection({\n collection_name: \"customized_setup\",\n fields: fields,\n})\n\nconsole.log(res.error_code) \n\n// Output\n// \n// Success\n// \n","# 4.1. Set up the index parameters\nindex_params = MilvusClient.prepare_index_params()\n\n# 4.2. Add an index on the vector field.\nindex_params.add_index(\n field_name=\"vector\",\n metric_type=\"COSINE\",\n index_type=\"IVF_FLAT\",\n index_name=\"vector_index\",\n params={ \"nlist\": 128 }\n)\n\n# 4.3. 
Create an index file\nclient.create_index(\n collection_name=\"customized_setup\",\n index_params=index_params\n)\n","import io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.index.request.CreateIndexReq;\n\n// 4 Prepare index parameters\n\n// 4.2 Add an index for the vector field \"vector\"\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexName(\"vector_index\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.COSINE)\n .extraParams(Map.of(\"nlist\", 128))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n\n// 4.3 Crate an index file\nCreateIndexReq createIndexReq = CreateIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexParams(indexParams)\n .build();\n\nclient.createIndex(createIndexReq);\n","// 4. Set up index for the collection\n// 4.1. Set up the index parameters\nres = await client.createIndex({\n collection_name: \"customized_setup\",\n field_name: \"vector\",\n index_type: \"AUTOINDEX\",\n metric_type: \"COSINE\", \n index_name: \"vector_index\",\n params: { \"nlist\": 128 }\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","# 5. Describe index\nres = client.list_indexes(\n collection_name=\"customized_setup\"\n)\n\nprint(res)\n\n# Output\n#\n# [\n# \"vector_index\",\n# ]\n\nres = client.describe_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"index_type\": ,\n# \"metric_type\": \"COSINE\",\n# \"field_name\": \"vector\",\n# \"index_name\": \"vector_index\"\n# }\n","import io.milvus.v2.service.index.request.DescribeIndexReq;\nimport io.milvus.v2.service.index.response.DescribeIndexResp;\n\n// 5. Describe index\n// 5.1 List the index names\nListIndexesReq listIndexesReq = ListIndexesReq.builder()\n .collectionName(\"customized_setup\")\n .build();\n\nList indexNames = client.listIndexes(listIndexesReq);\n\nSystem.out.println(indexNames);\n\n// Output:\n// [\n// \"vector_index\"\n// ]\n\n// 5.2 Describe an index\nDescribeIndexReq describeIndexReq = DescribeIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nDescribeIndexResp describeIndexResp = client.describeIndex(describeIndexReq);\n\nSystem.out.println(JSONObject.toJSON(describeIndexResp));\n\n// Output:\n// {\n// \"metricType\": \"COSINE\",\n// \"indexType\": \"AUTOINDEX\",\n// \"fieldName\": \"vector\",\n// \"indexName\": \"vector_index\"\n// }\n","// 5. Describe the index\nres = await client.describeIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(JSON.stringify(res.index_descriptions, null, 2))\n\n// Output\n// \n// [\n// {\n// \"params\": [\n// {\n// \"key\": \"index_type\",\n// \"value\": \"AUTOINDEX\"\n// },\n// {\n// \"key\": \"metric_type\",\n// \"value\": \"COSINE\"\n// }\n// ],\n// \"index_name\": \"vector_index\",\n// \"indexID\": \"449007919953063141\",\n// \"field_name\": \"vector\",\n// \"indexed_rows\": \"0\",\n// \"total_rows\": \"0\",\n// \"state\": \"Finished\",\n// \"index_state_fail_reason\": \"\",\n// \"pending_index_rows\": \"0\"\n// }\n// ]\n// \n","# 6. Drop index\nclient.drop_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n","// 6. Drop index\n\nDropIndexReq dropIndexReq = DropIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nclient.dropIndex(dropIndexReq);\n","// 6. 
Drop the index\nres = await client.dropIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n"],"headingContent":"","anchorList":[{"label":"Indexer des champs vectoriels","href":"Index-Vector-Fields","type":1,"isActive":false},{"label":"Vue d'ensemble","href":"Overview","type":2,"isActive":false},{"label":"Préparations","href":"Preparations","type":2,"isActive":false},{"label":"Indexation d'une collection","href":"Index-a-Collection","type":2,"isActive":false},{"label":"Vérifier les détails de l'index","href":"Check-Index-Details","type":2,"isActive":false},{"label":"Supprimer un index","href":"Drop-an-Index","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create schema\n# 2.1. Create schema\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n)\n\n# 2.2. Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\n\n# 3. Create collection\nclient.create_collection(\n collection_name=\"customized_setup\", \n schema=schema, \n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder().fieldName(\"id\").dataType(DataType.Int64).isPrimaryKey(true).autoID(false).build());\nschema.addField(AddFieldReq.builder().fieldName(\"vector\").dataType(DataType.FloatVector).dimension(5).build());\n\n// 3 Create a collection without schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n.collectionName(\"customized_setup\")\n.collectionSchema(schema)\n.build();\n\nclient.createCollection(customizedSetupReq);\n","// 1. Set up a Milvus Client\nclient = new MilvusClient({address, token});\n\n// 2. Define fields for the collection\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n autoID: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n]\n\n// 3. Create a collection\nres = await client.createCollection({\n collection_name: \"customized_setup\",\n fields: fields,\n})\n\nconsole.log(res.error_code) \n\n// Output\n// \n// Success\n// \n","# 4.1. Set up the index parameters\nindex_params = MilvusClient.prepare_index_params()\n\n# 4.2. Add an index on the vector field.\nindex_params.add_index(\n field_name=\"vector\",\n metric_type=\"COSINE\",\n index_type=\"IVF_FLAT\",\n index_name=\"vector_index\",\n params={ \"nlist\": 128 }\n)\n\n# 4.3. Create an index file\nclient.create_index(\n collection_name=\"customized_setup\",\n index_params=index_params,\n sync=False # Whether to wait for index creation to complete before returning. 
Defaults to True.\n)\n","import io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.index.request.CreateIndexReq;\n\n// 4 Prepare index parameters\n\n// 4.2 Add an index for the vector field \"vector\"\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexName(\"vector_index\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.COSINE)\n .extraParams(Map.of(\"nlist\", 128))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n\n// 4.3 Crate an index file\nCreateIndexReq createIndexReq = CreateIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexParams(indexParams)\n .build();\n\nclient.createIndex(createIndexReq);\n","// 4. Set up index for the collection\n// 4.1. Set up the index parameters\nres = await client.createIndex({\n collection_name: \"customized_setup\",\n field_name: \"vector\",\n index_type: \"AUTOINDEX\",\n metric_type: \"COSINE\", \n index_name: \"vector_index\",\n params: { \"nlist\": 128 }\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","# 5. Describe index\nres = client.list_indexes(\n collection_name=\"customized_setup\"\n)\n\nprint(res)\n\n# Output\n#\n# [\n# \"vector_index\",\n# ]\n\nres = client.describe_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"index_type\": ,\n# \"metric_type\": \"COSINE\",\n# \"field_name\": \"vector\",\n# \"index_name\": \"vector_index\"\n# }\n","import io.milvus.v2.service.index.request.DescribeIndexReq;\nimport io.milvus.v2.service.index.response.DescribeIndexResp;\n\n// 5. Describe index\n// 5.1 List the index names\nListIndexesReq listIndexesReq = ListIndexesReq.builder()\n .collectionName(\"customized_setup\")\n .build();\n\nList indexNames = client.listIndexes(listIndexesReq);\n\nSystem.out.println(indexNames);\n\n// Output:\n// [\n// \"vector_index\"\n// ]\n\n// 5.2 Describe an index\nDescribeIndexReq describeIndexReq = DescribeIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nDescribeIndexResp describeIndexResp = client.describeIndex(describeIndexReq);\n\nSystem.out.println(JSONObject.toJSON(describeIndexResp));\n\n// Output:\n// {\n// \"metricType\": \"COSINE\",\n// \"indexType\": \"AUTOINDEX\",\n// \"fieldName\": \"vector\",\n// \"indexName\": \"vector_index\"\n// }\n","// 5. Describe the index\nres = await client.describeIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(JSON.stringify(res.index_descriptions, null, 2))\n\n// Output\n// \n// [\n// {\n// \"params\": [\n// {\n// \"key\": \"index_type\",\n// \"value\": \"AUTOINDEX\"\n// },\n// {\n// \"key\": \"metric_type\",\n// \"value\": \"COSINE\"\n// }\n// ],\n// \"index_name\": \"vector_index\",\n// \"indexID\": \"449007919953063141\",\n// \"field_name\": \"vector\",\n// \"indexed_rows\": \"0\",\n// \"total_rows\": \"0\",\n// \"state\": \"Finished\",\n// \"index_state_fail_reason\": \"\",\n// \"pending_index_rows\": \"0\"\n// }\n// ]\n// \n","# 6. Drop index\nclient.drop_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n","// 6. Drop index\n\nDropIndexReq dropIndexReq = DropIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nclient.dropIndex(dropIndexReq);\n","// 6. 
Drop the index\nres = await client.dropIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n"],"headingContent":"Index Vector Fields","anchorList":[{"label":"Indexer des champs vectoriels","href":"Index-Vector-Fields","type":1,"isActive":false},{"label":"Vue d'ensemble","href":"Overview","type":2,"isActive":false},{"label":"Préparations","href":"Preparations","type":2,"isActive":false},{"label":"Indexation d'une collection","href":"Index-a-Collection","type":2,"isActive":false},{"label":"Vérifier les détails de l'index","href":"Check-Index-Details","type":2,"isActive":false},{"label":"Supprimer un index","href":"Drop-an-Index","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/userGuide/manage-indexes/index-vector-fields.md b/localization/v2.4.x/site/fr/userGuide/manage-indexes/index-vector-fields.md index 7a4e305b2..c250e1d02 100644 --- a/localization/v2.4.x/site/fr/userGuide/manage-indexes/index-vector-fields.md +++ b/localization/v2.4.x/site/fr/userGuide/manage-indexes/index-vector-fields.md @@ -4,7 +4,7 @@ order: 1 summary: >- Ce guide vous présente les opérations de base pour créer et gérer des index sur les champs vectoriels d'une collection. -title: Champs vectoriels d'index +title: Indexer des champs vectoriels ---

        Indexer des champs vectoriels

    import io.milvus.v2.common.IndexParam;
    @@ -331,13 +332,17 @@ res = await client.index_params
           Un objet IndexParams contenant une liste d'objets IndexParam.
         
    +    
    +      sync
+      Contrôle si la création de l'index est synchrone ou asynchrone par rapport à la requête du client. Valeurs valides :
• True (par défaut) : Le client attend que l'index soit entièrement construit avant que l'appel ne retourne ; vous n'obtenez donc pas de réponse tant que le processus n'est pas terminé.
• False : Le client retourne immédiatement après la réception de la demande et l'index est construit en arrière-plan. Pour savoir si la création de l'index est terminée, utilisez la méthode describe_index(), comme dans l'esquisse ci-dessous.
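Esquisse minimale (Python, noms repris des exemples ci-dessus) : création d'index non bloquante, puis vérification de son état :

# Create the index without blocking the client
client.create_index(
    collection_name="customized_setup",
    index_params=index_params,
    sync=False
)

# Later, check whether the index build has finished
res = client.describe_index(
    collection_name="customized_setup",
    index_name="vector_index"
)
print(res)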
    + - + @@ -351,7 +356,7 @@ res = await client.indexType - + diff --git a/localization/v2.4.x/site/fr/userGuide/manage-partitions.json b/localization/v2.4.x/site/fr/userGuide/manage-partitions.json index ef64a084f..a7e85fbfb 100644 --- a/localization/v2.4.x/site/fr/userGuide/manage-partitions.json +++ b/localization/v2.4.x/site/fr/userGuide/manage-partitions.json @@ -1 +1 @@ -{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n)\n\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .build();\n\nclient.createCollection(quickSetupReq);\n","const address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n}); \n","# 3. List partitions\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\"]\n","import io.milvus.v2.service.partition.request.ListPartitionsReq;\n\n// 3. List all partitions in the collection\nListPartitionsReq listPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nList partitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\"_default\"]\n","// 3. List partitions\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default' ]\n// \n","# 4. Create more partitions\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\", \"partitionB\"]\n","import io.milvus.v2.service.partition.request.CreatePartitionReq;\n\n// 4. Create more partitions\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\ncreatePartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\nlistPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\npartitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\n// \"_default\",\n// \"partitionA\",\n// \"partitionB\"\n// ]\n","// 4. 
Create more partitions\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default', 'partitionA', 'partitionB' ]\n// \n","# 5. Check whether a partition exists\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\nprint(res)\n\n# Output\n#\n# True\n\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionC\"\n)\nprint(res)\n\n# Output\n#\n# False\n","import io.milvus.v2.service.partition.request.HasPartitionReq;\n\n// 5. Check whether a partition exists\nHasPartitionReq hasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nboolean exists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// true\n\nhasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionC\")\n .build();\n\nexists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// false\n","// 5. Check whether a partition exists\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// true\n// \n\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionC\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// false\n// \n","# Release the collection\nclient.release_collection(collection_name=\"quick_setup\")\n\n# Check the load status\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionB\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.collection.request.ReleaseCollectionReq;\nimport io.milvus.v2.service.partition.request.LoadPartitionsReq;\nimport io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 6. 
Load a partition independantly\n// 6.1 Release the collection\nReleaseCollectionReq releaseCollectionReq = ReleaseCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nclient.releaseCollection(releaseCollectionReq);\n\n// 6.2 Load partitionA\nLoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\nThread.sleep(3000);\n\n// 6.3 Check the load status of the collection and its partitions\nGetLoadStateReq getLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 6. Load a partition indenpendantly\nawait client.releaseCollection({\n collection_name: \"quick_setup\"\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n\nawait client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nawait sleep(3000)\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n//\n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n 
.collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\", \"partitionB\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n","# 7. Release a partition\nclient.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 7. Release a partition\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 7. Release a partition\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n","client.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"_default\", \"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","# 8. 
Drop a partition\nclient.drop_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\"]\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"_default\", \"partitionA\", \"partitionB\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"_default\", \"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// {\n// status: {\n// error_code: 'Success',\n// reason: '',\n// code: 0,\n// retriable: false,\n// detail: ''\n// },\n// state: 'LoadStateNotLoad'\n// }\n// \n"],"headingContent":"","anchorList":[{"label":"Gérer les partitions","href":"Manage-Partitions","type":1,"isActive":false},{"label":"Vue d'ensemble","href":"Overview","type":2,"isActive":false},{"label":"Préparations","href":"Preparations","type":2,"isActive":false},{"label":"Lister les partitions","href":"List-Partitions","type":2,"isActive":false},{"label":"Créer des partitions","href":"Create-Partitions","type":2,"isActive":false},{"label":"Vérification de l'existence d'une partition spécifique","href":"Check-for-a-Specific-Partition","type":2,"isActive":false},{"label":"Charger et libérer des partitions","href":"Load--Release-Partitions","type":2,"isActive":false},{"label":"Abandonner des partitions","href":"Drop-Partitions","type":2,"isActive":false},{"label":"FAQ","href":"FAQ","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n)\n\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .build();\n\nclient.createCollection(quickSetupReq);\n","const address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n}); \n","# 3. List partitions\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\"]\n","import io.milvus.v2.service.partition.request.ListPartitionsReq;\n\n// 3. 
List all partitions in the collection\nListPartitionsReq listPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nList partitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\"_default\"]\n","// 3. List partitions\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default' ]\n// \n","# 4. Create more partitions\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\", \"partitionB\"]\n","import io.milvus.v2.service.partition.request.CreatePartitionReq;\n\n// 4. Create more partitions\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\ncreatePartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\nlistPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\npartitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\n// \"_default\",\n// \"partitionA\",\n// \"partitionB\"\n// ]\n","// 4. Create more partitions\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default', 'partitionA', 'partitionB' ]\n// \n","# 5. Check whether a partition exists\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\nprint(res)\n\n# Output\n#\n# True\n\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionC\"\n)\nprint(res)\n\n# Output\n#\n# False\n","import io.milvus.v2.service.partition.request.HasPartitionReq;\n\n// 5. Check whether a partition exists\nHasPartitionReq hasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nboolean exists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// true\n\nhasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionC\")\n .build();\n\nexists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// false\n","// 5. 
Check whether a partition exists\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// true\n// \n\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionC\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// false\n// \n","# Release the collection\nclient.release_collection(collection_name=\"quick_setup\")\n\n# Check the load status\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionB\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.collection.request.ReleaseCollectionReq;\nimport io.milvus.v2.service.partition.request.LoadPartitionsReq;\nimport io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 6. Load a partition independantly\n// 6.1 Release the collection\nReleaseCollectionReq releaseCollectionReq = ReleaseCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nclient.releaseCollection(releaseCollectionReq);\n\n// 6.2 Load partitionA\nLoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\nThread.sleep(3000);\n\n// 6.3 Check the load status of the collection and its partitions\nGetLoadStateReq getLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 6. 
Load a partition indenpendantly\nawait client.releaseCollection({\n collection_name: \"quick_setup\"\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n\nawait client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nawait sleep(3000)\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n//\n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\", \"partitionB\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"],\n load_fields=[\"id\", \"vector\"],\n 
skip_load_dynamic_field=True\n)\n","# 7. Release a partition\nclient.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 7. Release a partition\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 7. Release a partition\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n","client.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"_default\", \"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","# 8. Drop a partition\nclient.drop_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\"]\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"_default\", \"partitionA\", \"partitionB\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"_default\", \"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// {\n// status: {\n// error_code: 'Success',\n// reason: '',\n// code: 0,\n// retriable: false,\n// detail: ''\n// },\n// state: 'LoadStateNotLoad'\n// }\n// \n"],"headingContent":"Manage Partitions","anchorList":[{"label":"Gérer les partitions","href":"Manage-Partitions","type":1,"isActive":false},{"label":"Vue d'ensemble","href":"Overview","type":2,"isActive":false},{"label":"Préparations","href":"Preparations","type":2,"isActive":false},{"label":"Lister les partitions","href":"List-Partitions","type":2,"isActive":false},{"label":"Créer des partitions","href":"Create-Partitions","type":2,"isActive":false},{"label":"Vérification de l'existence d'une partition spécifique","href":"Check-for-a-Specific-Partition","type":2,"isActive":false},{"label":"Charger et libérer des partitions","href":"Load--Release-Partitions","type":2,"isActive":false},{"label":"Abandonner des partitions","href":"Drop-Partitions","type":2,"isActive":false},{"label":"FAQ","href":"FAQ","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/userGuide/manage-partitions.md 
b/localization/v2.4.x/site/fr/userGuide/manage-partitions.md index 6476c454f..3c1fbe2c9 100644 --- a/localization/v2.4.x/site/fr/userGuide/manage-partitions.md +++ b/localization/v2.4.x/site/fr/userGuide/manage-partitions.md @@ -1,7 +1,6 @@ --- id: manage-partitions.md title: Gérer les partitions -summary: '' ---

    Gérer les partitions

    Vous pouvez charger et libérer des partitions spécifiques afin de les rendre disponibles ou non pour les recherches et les requêtes.

    +

    Vous pouvez charger et libérer des partitions spécifiques pour les rendre disponibles ou non pour les recherches et les requêtes.

    Obtenir l'état de chargement

    Pour vérifier l'état de chargement d'une collection et de ses partitions, utilisez la commande get_load_state().
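Esquisse minimale (Python, noms repris des exemples de cette page) :

# Check the load state of the collection, then of a single partition
res = client.get_load_state(collection_name="quick_setup")
print(res)

res = client.get_load_state(
    collection_name="quick_setup",
    partition_name="partitionA"
)
print(res)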

    @@ -647,7 +646,7 @@ res = await client.

    Pour charger plusieurs partitions à la fois, procédez comme suit :

+ Python Java Node.js
    client.load_partitions(
         collection_name="quick_setup",
         partition_names=["partitionA", "partitionB"]
    @@ -735,6 +734,16 @@ res = await client.// LoadStateLoaded
     // 
     
    +

    Pour charger des champs spécifiques dans une ou plusieurs partitions, procédez comme suit :

    +
    client.load_partitions(
    +    collection_name="quick_setup",
    +    partition_names=["partitionA"],
    +    load_fields=["id", "vector"],
    +    skip_load_dynamic_field=True
    +)
    +
    +

    Notez que seuls les champs répertoriés dans load_fields peuvent être utilisés comme conditions de filtrage et champs de sortie dans les recherches et les requêtes. Vous devez toujours inclure la clé primaire dans la liste. Les noms de champs exclus du chargement ne seront pas disponibles pour le filtrage ou la sortie.

    +

    Vous pouvez utiliser skip_load_dynamic_field=True pour ne pas charger le champ dynamique. Milvus traite le champ dynamique comme un champ unique, de sorte que toutes les clés du champ dynamique seront incluses ou exclues ensemble.

    Libération des partitions

    Pour libérer toutes les partitions d'une collection, il suffit d'appeler release_collection(). Pour libérer des partitions spécifiques d'une collection, utilisez release_partitions().
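Esquisse minimale (Python) :

# Release one specific partition...
client.release_partitions(
    collection_name="quick_setup",
    partition_names=["partitionA"]
)

# ...or release the whole collection, which releases all of its partitions
client.release_collection(collection_name="quick_setup")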

    @@ -930,9 +939,9 @@ res = await client.rootCoord.maxPartitionNum. Pour plus de détails, voir Configurations du système.

    +

    Par défaut, Milvus permet de créer un maximum de 1 024 partitions. Vous pouvez ajuster le nombre maximum de partitions en configurant rootCoord.maxPartitionNum. Pour plus de détails, voir Configurations du système.

  • Comment puis-je faire la différence entre les partitions et les clés de partition ?

    -

    Les partitions sont des unités de stockage physiques, tandis que les clés de partition sont des concepts logiques qui affectent automatiquement les données à des partitions spécifiques sur la base d'une colonne désignée.

    +

    Les partitions sont des unités de stockage physiques, tandis que les clés de partition sont des concepts logiques qui affectent automatiquement les données à des partitions spécifiques en fonction d'une colonne désignée.

    Par exemple, dans Milvus, si vous avez une collection dont la clé de partition est définie comme étant le champ color, le système affecte automatiquement les données aux partitions en fonction des valeurs hachées du champ color pour chaque entité. Ce processus automatisé libère l'utilisateur de la responsabilité de spécifier manuellement la partition lors de l'insertion ou de la recherche de données.

En revanche, lorsque vous créez manuellement des partitions, vous devez affecter les données à chaque partition en fonction de vos propres critères. Si vous avez une collection avec un champ color, vous devez affecter manuellement les entités dont la valeur color est red à la partition A, et les entités dont la valeur color est blue à la partition B. Cette gestion manuelle demande plus d'efforts.

    En résumé, les partitions et les clés de partition sont utilisées pour optimiser le calcul des données et améliorer l'efficacité des requêtes. Il est essentiel de reconnaître que l'activation d'une clé de partition signifie l'abandon du contrôle de la gestion manuelle de l'insertion et du chargement des données de partition, car ces processus sont entièrement automatisés et gérés par Milvus.
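Pour illustrer, une esquisse hypothétique de création d'une collection avec une clé de partition sur le champ color (le nom de collection partition_key_demo est fictif) :

from pymilvus import MilvusClient, DataType

# Hypothetical sketch: declare "color" as the partition key
schema = MilvusClient.create_schema(auto_id=False, enable_dynamic_field=True)
schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
schema.add_field(field_name="vector", datatype=DataType.FLOAT_VECTOR, dim=5)
schema.add_field(
    field_name="color",
    datatype=DataType.VARCHAR,
    max_length=64,
    is_partition_key=True  # Milvus hashes this field to route entities to partitions
)

client = MilvusClient(uri="http://localhost:19530")
client.create_collection(collection_name="partition_key_demo", schema=schema)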

  • diff --git a/localization/v2.4.x/site/fr/userGuide/search-query-get/single-vector-search.json b/localization/v2.4.x/site/fr/userGuide/search-query-get/single-vector-search.json index 2cfd03b18..f5df4719c 100644 --- a/localization/v2.4.x/site/fr/userGuide/search-query-get/single-vector-search.json +++ b/localization/v2.4.x/site/fr/userGuide/search-query-get/single-vector-search.json @@ -1 +1 @@ -{"codeList":["# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=CLUSTER_ENDPOINT,\n token=TOKEN \n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n metric_type=\"IP\"\n)\n\n# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"color_tag\": f\"{current_color}_{str(random.randint(1000, 9999))}\"\n })\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n\n# 6.1 Create partitions \nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"red\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"blue\"\n)\n\n# 6.1 Insert data into partitions\nred_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"red\", \"color_tag\": f\"red_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\nblue_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"blue\", \"color_tag\": f\"blue_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=red_data,\n partition_name=\"red\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=blue_data,\n partition_name=\"blue\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Random;\n\nimport com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp; \n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig); \n\n// 2. 
Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .metricType(\"IP\")\n .build();\n\nclient.createCollection(quickSetupReq);\n\nGetLoadStateReq loadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(loadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\n// 3. Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n\n// 6.1. Create a partition\nCreatePartitionReq partitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"red\")\n .build();\n\nclient.createPartition(partitionReq);\n\npartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"blue\")\n .build();\n\nclient.createPartition(partitionReq);\n\n// 6.2 Insert data into the partition\ndata = new ArrayList<>();\n\nfor (int i=1000; i<1500; i++) {\n Random rand = new Random();\n String current_color = \"red\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n} \n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"red\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n\ndata = new ArrayList<>();\n\nfor (int i=1500; i<2000; i++) {\n Random rand = new Random();\n String current_color = \"blue\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"blue\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. 
Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n metric_type: \"IP\"\n}); \n\n// 3. Insert randomly generated vectors\nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor (let i = 0; i < 1000; i++) {\n current_color = colors[Math.floor(Math.random() * colors.length)]\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n color_tag: `${current_color}_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nvar res = await client.insert({\n collection_name: \"quick_setup\",\n data: data\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"red\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"blue\"\n})\n\n// 6.1 Insert data into partitions\nvar red_data = []\nvar blue_data = []\n\nfor (let i = 1000; i < 1500; i++) {\n red_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"red\",\n color_tag: `red_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nfor (let i = 1500; i < 2000; i++) {\n blue_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"blue\",\n color_tag: `blue_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: red_data,\n partition_name: \"red\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: blue_data,\n partition_name: \"blue\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n","# Single vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n # Replace with your query vector\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\n# Convert the output to a formatted JSON string\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 4. Single vector search\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(3) // The number of results to return\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 4. 
Single vector search\nvar query_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 3, // The number of results to return\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {}\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {}\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {}\n },\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {}\n },\n {\n \"id\": 2,\n \"distance\": 0.5928734540939331,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [[\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\n \"vector\": [\n 0.87928146,\n 0.05324632,\n 0.6312755,\n 0.28005534,\n 0.9542448\n ],\n \"id\": 455\n }\n }\n]]}\n","[\n { score: 1.7463608980178833, id: '854' },\n { score: 1.744946002960205, id: '425' },\n { score: 1.7258622646331787, id: '718' }\n]\n","# Bulk-vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104],\n [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345]\n ], # Replace with your query vectors\n limit=2, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 5. Batch vector search\nquery_vectors = Arrays.asList(\n Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f),\n Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f)\n);\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(2)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 5. 
Batch vector search\nvar query_vectors = [\n [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104]\n]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: query_vectors,\n limit: 2,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 1,\n \"distance\": 1.3017789125442505,\n \"entity\": {}\n },\n {\n \"id\": 7,\n \"distance\": 1.2419954538345337,\n \"entity\": {}\n }\n ], # Result set 1\n [\n {\n \"id\": 3,\n \"distance\": 2.3358664512634277,\n \"entity\": {}\n },\n {\n \"id\": 8,\n \"distance\": 0.5642921924591064,\n \"entity\": {}\n }\n ] # Result set 2\n]\n","// Two sets of vectors are returned as expected\n\n{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n }\n ],\n [\n {\n \"score\": 1.8654699,\n \"fields\": {\n \"vector\": [\n 0.4671427,\n 0.8378432,\n 0.98844475,\n 0.82763994,\n 0.9729997\n ],\n \"id\": 638\n }\n },\n {\n \"score\": 1.8581753,\n \"fields\": {\n \"vector\": [\n 0.735541,\n 0.60140246,\n 0.86730254,\n 0.93152493,\n 0.98603314\n ],\n \"id\": 855\n }\n }\n ]\n]}\n","[\n [\n { score: 2.3590476512908936, id: '854' },\n { score: 2.2896690368652344, id: '59' }\n [\n { score: 2.664059638977051, id: '59' },\n { score: 2.59483003616333, id: '854' }\n ]\n]\n","# 6.2 Search within a partition\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"red\"]\n)\n\nprint(res)\n","// 6.3 Search within partitions\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"red\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 6.2 Search within partitions\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"red\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 16,\n \"distance\": 0.9200337529182434,\n \"entity\": {}\n },\n {\n \"id\": 14,\n \"distance\": 0.4505271911621094,\n \"entity\": {}\n },\n {\n \"id\": 15,\n \"distance\": 0.19924677908420563,\n \"entity\": {}\n },\n {\n \"id\": 17,\n \"distance\": 0.0075093843042850494,\n \"entity\": {}\n },\n {\n \"id\": 13,\n \"distance\": -0.14609718322753906,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1677284,\n \"fields\": {\n \"vector\": [\n 0.9986977,\n 0.17964739,\n 0.49086612,\n 0.23155272,\n 0.98438674\n ],\n \"id\": 1435\n }\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\n \"vector\": [\n 0.6952647,\n 0.13417172,\n 0.91045254,\n 0.119336545,\n 0.9338931\n ],\n \"id\": 1291\n }\n },\n {\n \"score\": 
1.0969629,\n \"fields\": {\n \"vector\": [\n 0.3363194,\n 0.028906643,\n 0.6675426,\n 0.030419827,\n 0.9735209\n ],\n \"id\": 1168\n }\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\n \"vector\": [\n 0.9980543,\n 0.36063594,\n 0.66427994,\n 0.17359233,\n 0.94954175\n ],\n \"id\": 1164\n }\n },\n {\n \"score\": 1.0584627,\n \"fields\": {\n \"vector\": [\n 0.7187005,\n 0.12674773,\n 0.987718,\n 0.3110777,\n 0.86093885\n ],\n \"id\": 1085\n }\n }\n ],\n [\n {\n \"score\": 1.8030131,\n \"fields\": {\n \"vector\": [\n 0.59726167,\n 0.7054632,\n 0.9573117,\n 0.94529945,\n 0.8664103\n ],\n \"id\": 1203\n }\n },\n {\n \"score\": 1.7728865,\n \"fields\": {\n \"vector\": [\n 0.6672442,\n 0.60448086,\n 0.9325822,\n 0.80272985,\n 0.8861626\n ],\n \"id\": 1448\n }\n },\n {\n \"score\": 1.7536311,\n \"fields\": {\n \"vector\": [\n 0.59663296,\n 0.77831805,\n 0.8578314,\n 0.88818026,\n 0.9030075\n ],\n \"id\": 1010\n }\n },\n {\n \"score\": 1.7520742,\n \"fields\": {\n \"vector\": [\n 0.854198,\n 0.72294194,\n 0.9245805,\n 0.86126596,\n 0.7969224\n ],\n \"id\": 1219\n }\n },\n {\n \"score\": 1.7452049,\n \"fields\": {\n \"vector\": [\n 0.96419,\n 0.943535,\n 0.87611496,\n 0.8268136,\n 0.79786557\n ],\n \"id\": 1149\n }\n }\n ]\n]}\n","[\n { score: 3.0258803367614746, id: '1201' },\n { score: 3.004319190979004, id: '1458' },\n { score: 2.880324363708496, id: '1187' },\n { score: 2.8246407508850098, id: '1347' },\n { score: 2.797295093536377, id: '1406' }\n]\n","res = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"blue\"]\n)\n\nprint(res)\n","searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"blue\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","res = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"blue\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 20,\n \"distance\": 2.363696813583374,\n \"entity\": {}\n },\n {\n \"id\": 26,\n \"distance\": 1.0665391683578491,\n \"entity\": {}\n },\n {\n \"id\": 23,\n \"distance\": 1.066049575805664,\n \"entity\": {}\n },\n {\n \"id\": 29,\n \"distance\": 0.8353596925735474,\n \"entity\": {}\n },\n {\n \"id\": 28,\n \"distance\": 0.7484277486801147,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1628494,\n \"fields\": {\n \"vector\": [\n 0.7442872,\n 0.046407282,\n 0.71031404,\n 0.3544345,\n 0.9819991\n ],\n \"id\": 1992\n }\n },\n {\n \"score\": 1.1470042,\n \"fields\": {\n \"vector\": [\n 0.5505825,\n 0.04367262,\n 0.9985836,\n 0.18922359,\n 0.93255126\n ],\n \"id\": 1977\n }\n },\n {\n \"score\": 1.1450152,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.1439825,\n \"fields\": {\n \"vector\": [\n 0.9253267,\n 0.15890503,\n 0.7999555,\n 0.19126713,\n 0.898583\n ],\n \"id\": 1552\n }\n },\n {\n \"score\": 1.1029172,\n \"fields\": {\n \"vector\": [\n 0.95661926,\n 0.18777144,\n 0.38115507,\n 0.14323527,\n 0.93137646\n ],\n \"id\": 1823\n }\n }\n ],\n [\n {\n \"score\": 1.8005109,\n \"fields\": {\n \"vector\": [\n 0.5953582,\n 0.7794224,\n 0.9388869,\n 0.79825854,\n 0.9197286\n ],\n \"id\": 1888\n }\n },\n {\n \"score\": 1.7714822,\n \"fields\": {\n \"vector\": [\n 0.56805456,\n 
0.89422905,\n 0.88187534,\n 0.914824,\n 0.8944365\n ],\n \"id\": 1648\n }\n },\n {\n \"score\": 1.7561421,\n \"fields\": {\n \"vector\": [\n 0.83421993,\n 0.39865613,\n 0.92319834,\n 0.42695504,\n 0.96633124\n ],\n \"id\": 1688\n }\n },\n {\n \"score\": 1.7553532,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.7543385,\n \"fields\": {\n \"vector\": [\n 0.16542226,\n 0.38248396,\n 0.9888778,\n 0.80913955,\n 0.9501492\n ],\n \"id\": 1544\n }\n }\n ]\n]}\n","[\n { score: 2.8421106338500977, id: '1745' },\n { score: 2.838560104370117, id: '1782' },\n { score: 2.8134000301361084, id: '1511' },\n { score: 2.718268871307373, id: '1679' },\n { score: 2.7014894485473633, id: '1597' }\n]\n","# Search with output fields\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"] # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 7. Search with output fields\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 7. Search with output fields\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n output_fields: [\"color\"],\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {\n \"color\": \"pink_8682\"\n }\n },\n {\n \"id\": 16,\n \"distance\": 1.0159327983856201,\n \"entity\": {\n \"color\": \"yellow_1496\"\n }\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {}\n }\n ]\n]}\n","\n[\n { score: 3.036271572113037, id: '59', color: 'orange' },\n { score: 3.0267879962921143, id: '1745', color: 'blue' },\n { score: 3.0069446563720703, id: '854', color: 'black' },\n { score: 2.984386682510376, id: '718', color: 'black' },\n { score: 2.916019916534424, id: '425', color: 'purple' }\n]\n","# Search with filter\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. 
number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"red%\"'\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color_tag like \\\"red%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"red%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n },\n {\n \"id\": 6,\n \"distance\": -0.4113418459892273,\n \"entity\": {\n \"color\": \"red_9392\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1677284,\n \"fields\": {\"color_tag\": \"red_9030\"}\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\"color_tag\": \"red_3744\"}\n },\n {\n \"score\": 1.0969629,\n \"fields\": {\"color_tag\": \"red_4168\"}\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\"color_tag\": \"red_9678\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'red_8904' },\n { score: 2.491129159927368, id: '425', color_tag: 'purple_8212' },\n { score: 2.4889798164367676, id: '1458', color_tag: 'red_6891' },\n { score: 2.42964243888855, id: '724', color_tag: 'black_9885' },\n { score: 2.4004223346710205, id: '854', color_tag: 'black_5990' }\n]\n","# Infix match on color field\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"%ll%\"' # Filter on color field, infix match on \"ll\"\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color like \\\"%ll%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. 
Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"%ll%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {\n \"color\": \"yellow_4222\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"yellow_4222\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'yellow_4222' }\n]\n","# Conduct a range search\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\n \"radius\": 0.8, # Radius of the search circle\n \"range_filter\": 1.0 # Range filter to filter out vectors that are not within the search circle\n }\n}\n\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=3, # Max. number of search results to return\n search_params=search_params, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 9. Range search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .searchParams(Map.of(\"radius\", 0.1, \"range\", 1.0))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 9. 
Range search\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n params: {\n radius: 0.1,\n range: 1.0\n },\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\"color_tag\": \"green_2052\"}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\"color_tag\": \"purple_3709\"}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {\"color_tag\": \"black_1646\"}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {\"color_tag\": \"green_4853\"}\n }\n ]\n]}\n","[\n { score: 2.3387961387634277, id: '718', color_tag: 'black_7154' },\n { score: 2.3352415561676025, id: '1745', color_tag: 'blue_8741' },\n { score: 2.290485382080078, id: '1408', color_tag: 'red_2324' },\n { score: 2.285870313644409, id: '854', color_tag: 'black_5990' },\n { score: 2.2593345642089844, id: '1309', color_tag: 'red_8458' }\n]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Group search results\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=[[0.14529211512077012, 0.9147257273453546, 0.7965055218724449, 0.7009258593102812, 0.5605206522382088]], # Query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=10, # Max. number of search results to return\n group_by_field=\"doc_id\", # Group results by document ID\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\n\nprint(doc_ids)\n","[5, 10, 1, 7, 9, 6, 3, 4, 8, 2]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Search without `group_by_field`\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=query_passage_vector, # Replace with your query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=10, # Max. 
number of search results to return\n # group_by_field=\"doc_id\", # Group results by document ID\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\n\nprint(doc_ids)\n","[1, 10, 3, 10, 1, 9, 4, 4, 8, 6]\n","# In normal cases, you do not need to set search parameters manually\n# Except for range searches.\nsearch_parameters = {\n 'metric_type': 'L2',\n 'params': {\n 'nprobe': 10,\n 'level': 1,\n 'radius': 1.0\n 'range_filter': 0.8\n }\n}\n"],"headingContent":"","anchorList":[{"label":"Recherche à vecteur unique","href":"Single-Vector-Search","type":1,"isActive":false},{"label":"Vue d'ensemble","href":"Overview","type":2,"isActive":false},{"label":"Préparations","href":"Preparations","type":2,"isActive":false},{"label":"Recherche de base","href":"Basic-search","type":2,"isActive":false},{"label":"Recherche filtrée","href":"Filtered-search","type":2,"isActive":false},{"label":"Recherche par plage","href":"Range-search","type":2,"isActive":false},{"label":"Recherche par regroupement","href":"Grouping-search","type":2,"isActive":false},{"label":"Paramètres de recherche","href":"Search-parameters","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=CLUSTER_ENDPOINT,\n token=TOKEN \n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n metric_type=\"IP\"\n)\n\n# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"color_tag\": f\"{current_color}_{str(random.randint(1000, 9999))}\"\n })\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n\n# 6.1 Create partitions \nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"red\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"blue\"\n)\n\n# 6.1 Insert data into partitions\nred_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"red\", \"color_tag\": f\"red_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\nblue_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"blue\", \"color_tag\": f\"blue_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=red_data,\n partition_name=\"red\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=blue_data,\n partition_name=\"blue\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Random;\n\nimport 
com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp; \n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig); \n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .metricType(\"IP\")\n .build();\n\nclient.createCollection(quickSetupReq);\n\nGetLoadStateReq loadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(loadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\n// 3. Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n\n// 6.1. 
Create a partition\nCreatePartitionReq partitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"red\")\n .build();\n\nclient.createPartition(partitionReq);\n\npartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"blue\")\n .build();\n\nclient.createPartition(partitionReq);\n\n// 6.2 Insert data into the partition\ndata = new ArrayList<>();\n\nfor (int i=1000; i<1500; i++) {\n Random rand = new Random();\n String current_color = \"red\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n} \n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"red\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n\ndata = new ArrayList<>();\n\nfor (int i=1500; i<2000; i++) {\n Random rand = new Random();\n String current_color = \"blue\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"blue\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n metric_type: \"IP\"\n}); \n\n// 3. 
Insert randomly generated vectors\nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor (let i = 0; i < 1000; i++) {\n current_color = colors[Math.floor(Math.random() * colors.length)]\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n color_tag: `${current_color}_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nvar res = await client.insert({\n collection_name: \"quick_setup\",\n data: data\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"red\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"blue\"\n})\n\n// 6.1 Insert data into partitions\nvar red_data = []\nvar blue_data = []\n\nfor (let i = 1000; i < 1500; i++) {\n red_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"red\",\n color_tag: `red_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nfor (let i = 1500; i < 2000; i++) {\n blue_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"blue\",\n color_tag: `blue_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: red_data,\n partition_name: \"red\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: blue_data,\n partition_name: \"blue\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n","# Single vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n # Replace with your query vector\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\n# Convert the output to a formatted JSON string\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 4. Single vector search\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(3) // The number of results to return\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 4. 
Single vector search\nvar query_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 3, // The number of results to return\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {}\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {}\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {}\n },\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {}\n },\n {\n \"id\": 2,\n \"distance\": 0.5928734540939331,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [[\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\n \"vector\": [\n 0.87928146,\n 0.05324632,\n 0.6312755,\n 0.28005534,\n 0.9542448\n ],\n \"id\": 455\n }\n }\n]]}\n","[\n { score: 1.7463608980178833, id: '854' },\n { score: 1.744946002960205, id: '425' },\n { score: 1.7258622646331787, id: '718' }\n]\n","# Bulk-vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104],\n [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345]\n ], # Replace with your query vectors\n limit=2, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 5. Batch vector search\nquery_vectors = Arrays.asList(\n Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f),\n Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f)\n);\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(2)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 5. 
Batch vector search\nvar query_vectors = [\n [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104]\n]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: query_vectors,\n limit: 2,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 1,\n \"distance\": 1.3017789125442505,\n \"entity\": {}\n },\n {\n \"id\": 7,\n \"distance\": 1.2419954538345337,\n \"entity\": {}\n }\n ], # Result set 1\n [\n {\n \"id\": 3,\n \"distance\": 2.3358664512634277,\n \"entity\": {}\n },\n {\n \"id\": 8,\n \"distance\": 0.5642921924591064,\n \"entity\": {}\n }\n ] # Result set 2\n]\n","// Two sets of vectors are returned as expected\n\n{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n }\n ],\n [\n {\n \"score\": 1.8654699,\n \"fields\": {\n \"vector\": [\n 0.4671427,\n 0.8378432,\n 0.98844475,\n 0.82763994,\n 0.9729997\n ],\n \"id\": 638\n }\n },\n {\n \"score\": 1.8581753,\n \"fields\": {\n \"vector\": [\n 0.735541,\n 0.60140246,\n 0.86730254,\n 0.93152493,\n 0.98603314\n ],\n \"id\": 855\n }\n }\n ]\n]}\n","[\n [\n { score: 2.3590476512908936, id: '854' },\n { score: 2.2896690368652344, id: '59' }\n [\n { score: 2.664059638977051, id: '59' },\n { score: 2.59483003616333, id: '854' }\n ]\n]\n","# 6.2 Search within a partition\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"red\"]\n)\n\nprint(res)\n","// 6.3 Search within partitions\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"red\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 6.2 Search within partitions\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"red\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 16,\n \"distance\": 0.9200337529182434,\n \"entity\": {}\n },\n {\n \"id\": 14,\n \"distance\": 0.4505271911621094,\n \"entity\": {}\n },\n {\n \"id\": 15,\n \"distance\": 0.19924677908420563,\n \"entity\": {}\n },\n {\n \"id\": 17,\n \"distance\": 0.0075093843042850494,\n \"entity\": {}\n },\n {\n \"id\": 13,\n \"distance\": -0.14609718322753906,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1677284,\n \"fields\": {\n \"vector\": [\n 0.9986977,\n 0.17964739,\n 0.49086612,\n 0.23155272,\n 0.98438674\n ],\n \"id\": 1435\n }\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\n \"vector\": [\n 0.6952647,\n 0.13417172,\n 0.91045254,\n 0.119336545,\n 0.9338931\n ],\n \"id\": 1291\n }\n },\n {\n \"score\": 
1.0969629,\n \"fields\": {\n \"vector\": [\n 0.3363194,\n 0.028906643,\n 0.6675426,\n 0.030419827,\n 0.9735209\n ],\n \"id\": 1168\n }\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\n \"vector\": [\n 0.9980543,\n 0.36063594,\n 0.66427994,\n 0.17359233,\n 0.94954175\n ],\n \"id\": 1164\n }\n },\n {\n \"score\": 1.0584627,\n \"fields\": {\n \"vector\": [\n 0.7187005,\n 0.12674773,\n 0.987718,\n 0.3110777,\n 0.86093885\n ],\n \"id\": 1085\n }\n }\n ],\n [\n {\n \"score\": 1.8030131,\n \"fields\": {\n \"vector\": [\n 0.59726167,\n 0.7054632,\n 0.9573117,\n 0.94529945,\n 0.8664103\n ],\n \"id\": 1203\n }\n },\n {\n \"score\": 1.7728865,\n \"fields\": {\n \"vector\": [\n 0.6672442,\n 0.60448086,\n 0.9325822,\n 0.80272985,\n 0.8861626\n ],\n \"id\": 1448\n }\n },\n {\n \"score\": 1.7536311,\n \"fields\": {\n \"vector\": [\n 0.59663296,\n 0.77831805,\n 0.8578314,\n 0.88818026,\n 0.9030075\n ],\n \"id\": 1010\n }\n },\n {\n \"score\": 1.7520742,\n \"fields\": {\n \"vector\": [\n 0.854198,\n 0.72294194,\n 0.9245805,\n 0.86126596,\n 0.7969224\n ],\n \"id\": 1219\n }\n },\n {\n \"score\": 1.7452049,\n \"fields\": {\n \"vector\": [\n 0.96419,\n 0.943535,\n 0.87611496,\n 0.8268136,\n 0.79786557\n ],\n \"id\": 1149\n }\n }\n ]\n]}\n","[\n { score: 3.0258803367614746, id: '1201' },\n { score: 3.004319190979004, id: '1458' },\n { score: 2.880324363708496, id: '1187' },\n { score: 2.8246407508850098, id: '1347' },\n { score: 2.797295093536377, id: '1406' }\n]\n","res = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"blue\"]\n)\n\nprint(res)\n","searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"blue\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","res = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"blue\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 20,\n \"distance\": 2.363696813583374,\n \"entity\": {}\n },\n {\n \"id\": 26,\n \"distance\": 1.0665391683578491,\n \"entity\": {}\n },\n {\n \"id\": 23,\n \"distance\": 1.066049575805664,\n \"entity\": {}\n },\n {\n \"id\": 29,\n \"distance\": 0.8353596925735474,\n \"entity\": {}\n },\n {\n \"id\": 28,\n \"distance\": 0.7484277486801147,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1628494,\n \"fields\": {\n \"vector\": [\n 0.7442872,\n 0.046407282,\n 0.71031404,\n 0.3544345,\n 0.9819991\n ],\n \"id\": 1992\n }\n },\n {\n \"score\": 1.1470042,\n \"fields\": {\n \"vector\": [\n 0.5505825,\n 0.04367262,\n 0.9985836,\n 0.18922359,\n 0.93255126\n ],\n \"id\": 1977\n }\n },\n {\n \"score\": 1.1450152,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.1439825,\n \"fields\": {\n \"vector\": [\n 0.9253267,\n 0.15890503,\n 0.7999555,\n 0.19126713,\n 0.898583\n ],\n \"id\": 1552\n }\n },\n {\n \"score\": 1.1029172,\n \"fields\": {\n \"vector\": [\n 0.95661926,\n 0.18777144,\n 0.38115507,\n 0.14323527,\n 0.93137646\n ],\n \"id\": 1823\n }\n }\n ],\n [\n {\n \"score\": 1.8005109,\n \"fields\": {\n \"vector\": [\n 0.5953582,\n 0.7794224,\n 0.9388869,\n 0.79825854,\n 0.9197286\n ],\n \"id\": 1888\n }\n },\n {\n \"score\": 1.7714822,\n \"fields\": {\n \"vector\": [\n 0.56805456,\n 
0.89422905,\n 0.88187534,\n 0.914824,\n 0.8944365\n ],\n \"id\": 1648\n }\n },\n {\n \"score\": 1.7561421,\n \"fields\": {\n \"vector\": [\n 0.83421993,\n 0.39865613,\n 0.92319834,\n 0.42695504,\n 0.96633124\n ],\n \"id\": 1688\n }\n },\n {\n \"score\": 1.7553532,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.7543385,\n \"fields\": {\n \"vector\": [\n 0.16542226,\n 0.38248396,\n 0.9888778,\n 0.80913955,\n 0.9501492\n ],\n \"id\": 1544\n }\n }\n ]\n]}\n","[\n { score: 2.8421106338500977, id: '1745' },\n { score: 2.838560104370117, id: '1782' },\n { score: 2.8134000301361084, id: '1511' },\n { score: 2.718268871307373, id: '1679' },\n { score: 2.7014894485473633, id: '1597' }\n]\n","# Search with output fields\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"] # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 7. Search with output fields\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 7. Search with output fields\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n output_fields: [\"color\"],\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {\n \"color\": \"pink_8682\"\n }\n },\n {\n \"id\": 16,\n \"distance\": 1.0159327983856201,\n \"entity\": {\n \"color\": \"yellow_1496\"\n }\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {}\n }\n ]\n]}\n","\n[\n { score: 3.036271572113037, id: '59', color: 'orange' },\n { score: 3.0267879962921143, id: '1745', color: 'blue' },\n { score: 3.0069446563720703, id: '854', color: 'black' },\n { score: 2.984386682510376, id: '718', color: 'black' },\n { score: 2.916019916534424, id: '425', color: 'purple' }\n]\n","# Search with filter\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. 
number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"red%\"'\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color_tag like \\\"red%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"red%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n },\n {\n \"id\": 6,\n \"distance\": -0.4113418459892273,\n \"entity\": {\n \"color\": \"red_9392\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1677284,\n \"fields\": {\"color_tag\": \"red_9030\"}\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\"color_tag\": \"red_3744\"}\n },\n {\n \"score\": 1.0969629,\n \"fields\": {\"color_tag\": \"red_4168\"}\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\"color_tag\": \"red_9678\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'red_8904' },\n { score: 2.491129159927368, id: '425', color_tag: 'purple_8212' },\n { score: 2.4889798164367676, id: '1458', color_tag: 'red_6891' },\n { score: 2.42964243888855, id: '724', color_tag: 'black_9885' },\n { score: 2.4004223346710205, id: '854', color_tag: 'black_5990' }\n]\n","# Infix match on color field\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"%ll%\"' # Filter on color field, infix match on \"ll\"\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color like \\\"%ll%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. 
Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"%ll%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {\n \"color\": \"yellow_4222\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"yellow_4222\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'yellow_4222' }\n]\n","# Conduct a range search\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\n \"radius\": 0.8, # Radius of the search circle\n \"range_filter\": 1.0 # Range filter to filter out vectors that are not within the search circle\n }\n}\n\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=3, # Max. number of search results to return\n search_params=search_params, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 9. Range search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .searchParams(Map.of(\"radius\", 0.1, \"range\", 1.0))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 9. 
Range search\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n params: {\n radius: 0.1,\n range: 1.0\n },\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\"color_tag\": \"green_2052\"}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\"color_tag\": \"purple_3709\"}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {\"color_tag\": \"black_1646\"}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {\"color_tag\": \"green_4853\"}\n }\n ]\n]}\n","[\n { score: 2.3387961387634277, id: '718', color_tag: 'black_7154' },\n { score: 2.3352415561676025, id: '1745', color_tag: 'blue_8741' },\n { score: 2.290485382080078, id: '1408', color_tag: 'red_2324' },\n { score: 2.285870313644409, id: '854', color_tag: 'black_5990' },\n { score: 2.2593345642089844, id: '1309', color_tag: 'red_8458' }\n]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Group search results\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=[[0.14529211512077012, 0.9147257273453546, 0.7965055218724449, 0.7009258593102812, 0.5605206522382088]], # Query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=5, # Max. number of groups to return\n group_by_field=\"doc_id\", # Group results by document ID\n group_size=2, # returned at most 2 passages per document, the default value is 1\n group_strict_size=True, # ensure every group contains exactly 3 passages\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\npassage_ids = [result['entity']['passage_id'] for result in res[0]]\n\nprint(doc_ids)\nprint(passage_ids)\n","[\"doc_11\", \"doc_11\", \"doc_7\", \"doc_7\", \"doc_3\", \"doc_3\", \"doc_2\", \"doc_2\", \"doc_8\", \"doc_8\"]\n[5, 10, 11, 10, 9, 6, 5, 4, 9, 2]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Search without `group_by_field`\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=query_passage_vector, # Replace with your query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=5, # Max. 
number of search results to return\n # group_by_field=\"doc_id\", # Group results by document ID\n # group_size=2, \n # group_strict_size=True,\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\npassage_ids = [result['entity']['passage_id'] for result in res[0]]\n\nprint(doc_ids)\nprint(passage_ids)\n","[\"doc_11\", \"doc_11\", \"doc_11\", \"doc_11\", \"doc_11\"]\n[1, 10, 3, 12, 9]\n","# In normal cases, you do not need to set search parameters manually\n# Except for range searches.\nsearch_parameters = {\n 'metric_type': 'L2',\n 'params': {\n 'nprobe': 10,\n 'level': 1,\n 'radius': 1.0\n 'range_filter': 0.8\n }\n}\n"],"headingContent":"Single-Vector Search","anchorList":[{"label":"Recherche à vecteur unique","href":"Single-Vector-Search","type":1,"isActive":false},{"label":"Vue d'ensemble","href":"Overview","type":2,"isActive":false},{"label":"Préparations","href":"Preparations","type":2,"isActive":false},{"label":"Recherche de base","href":"Basic-search","type":2,"isActive":false},{"label":"Recherche filtrée","href":"Filtered-search","type":2,"isActive":false},{"label":"Recherche par plage","href":"Range-search","type":2,"isActive":false},{"label":"Recherche par regroupement","href":"Grouping-search","type":2,"isActive":false},{"label":"Paramètres de recherche","href":"Search-parameters","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/userGuide/search-query-get/single-vector-search.md b/localization/v2.4.x/site/fr/userGuide/search-query-get/single-vector-search.md index 7fa3682eb..e650ca410 100644 --- a/localization/v2.4.x/site/fr/userGuide/search-query-get/single-vector-search.md +++ b/localization/v2.4.x/site/fr/userGuide/search-query-get/single-vector-search.md @@ -4,7 +4,7 @@ order: 1 summary: >- Cet article décrit comment rechercher des vecteurs dans une collection Milvus à l'aide d'un seul vecteur d'interrogation. -title: Recherche d'un seul vecteur +title: Recherche à vecteur unique ---

    Single-Vector Search

    There are several types of search to meet different needs:

      -
    • Basic search: Includes single-vector search, multi-vector search, partition search, and search with specified output fields.

    • +
    • Basic search: Includes search on a single vector, search on a large number of vectors, search within a partition, and search with specified output fields.

    • Filtered search: Applies filtering criteria based on scalar fields to refine the search results.

    • Range search: Finds vectors that lie within a specific distance range from the query vector.

    • Grouping search: Groups search results by a specific field to ensure diversity in the results.

    @@ -441,7 +441,7 @@ res = await client.search, you can provide one or more vector values representing your query embeddings and a limit value indicating the number of results to return.

      -

      Depending on your data and your query vector, you may get fewer than limit results. This happens when limit is larger than the number of vectors matching your query.

      +

      Depending on your data and your query vector, you may get fewer than limit results. This happens when limit is larger than the number of possible matching vectors for your query.

      Single-vector search is the simplest form of search operations in Milvus, designed to find the vectors most similar to a given query vector.

      To perform a single-vector search, specify the target collection name, the query vector, and the desired number of results (limit). This operation returns a result set containing the most similar vectors, their IDs, and their distances from the query vector.

      Here is an example of searching for the 5 entities most similar to the query vector:
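      A minimal sketch of such a call with the Python client, assuming a collection named test_collection with 5-dimensional vectors and the IP metric (the collection name and query values are illustrative, not taken from this diff):

    import json
    from pymilvus import MilvusClient

    # Connect to a local Milvus instance; adjust the URI for your deployment
    client = MilvusClient(uri="http://localhost:19530")

    # Single-vector search: one query vector, top-5 most similar entities
    res = client.search(
        collection_name="test_collection",        # assumed collection name
        data=[[0.35, -0.60, 0.18, -0.26, 0.90]],  # one 5-dimensional query vector
        limit=5,                                  # max. number of results to return
        search_params={"metric_type": "IP", "params": {}},
    )

    # Each hit carries the entity id and its distance to the query vector
    print(json.dumps(res, indent=4))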

      @@ -1701,7 +1701,7 @@ res = await client.HAMMING
    Parameter | Description
    The name of the algorithm used to organize the data in the specific field. For applicable algorithms, see In-memory Index and On-disk Index.
    metricType | Lower Hamming distances indicate greater similarity. To exclude the closest vectors from the results, ensure that:
    range_filter <= distance < radius
    -

    To learn more about distance metric types, refer to Similarity Metrics.

    +

    To learn more about distance metric types, refer to Similarity Metrics.
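    As a sketch of how radius and range_filter combine for a metric where smaller distances mean greater similarity (L2 here; the collection name and vector are illustrative):

    from pymilvus import MilvusClient

    client = MilvusClient(uri="http://localhost:19530")

    # For L2, keep only hits with range_filter <= distance < radius,
    # i.e. an annulus around the query vector.
    search_params = {
        "metric_type": "L2",
        "params": {
            "radius": 1.0,        # outer bound: drop anything farther than this
            "range_filter": 0.8,  # inner bound: drop the closest vectors
        },
    }

    res = client.search(
        collection_name="test_collection",  # assumed collection name
        data=[[0.35, -0.60, 0.18, -0.26, 0.90]],
        limit=3,
        search_params=search_params,
    )
    print(res)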

    In Milvus, grouping a search by a specific field avoids redundancy of the same field value in the results. You can get a varied set of results for that specific field.

    -

    Consider a collection of documents, where each document is split into several passages. Each passage is represented by an embedding vector and belongs to one document. To find relevant documents rather than similar passages, you can include the group_by_field argument in the search() operation to group results by document ID. This returns the most relevant and unique documents, rather than separate passages from the same document.

    -

    Here is example code for grouping search results by field:

    +

    In Milvus, grouping search is designed to improve the comprehensiveness and accuracy of search results.

    +

    Consider a RAG scenario, where piles of documents are split into various passages, and each passage is represented by an embedding vector. Users want to find the most relevant passages in order to prompt the LLMs accurately. Milvus's ordinary search function can meet this requirement, but it may yield highly skewed results: most of the passages come from only a few documents, and the comprehensiveness of the search results is very poor. This can seriously harm the accuracy, or even the correctness, of the results the LLM provides, and negatively affect the experience of the LLM's users.

    +

    Grouping search can solve this problem effectively. By passing a group_by_field and a group_size, Milvus users can split the search results into several groups and ensure that the number of entities in each group does not exceed the given group_size. This feature can significantly improve the comprehensiveness and fairness of the search results, noticeably improving the quality of the LLM's output.

    +

    Here is example code for grouping search results by field:

    # Connect to Milvus
     client = MilvusClient(uri='http://localhost:19530') # Milvus server address
     
    @@ -1734,21 +1735,26 @@ res = client.search(
         "metric_type": "L2",
         "params": {"nprobe": 10},
         }, # Search parameters
    -    limit=10, # Max. number of search results to return
    +    limit=5, # Max. number of groups to return
         group_by_field="doc_id", # Group results by document ID
    +    group_size=2, # return at most 2 passages per document; the default value is 1
    +    group_strict_size=True, # ensure every group contains exactly 2 passages
         output_fields=["doc_id", "passage_id"]
     )
     
     # Retrieve the values in the `doc_id` column
     doc_ids = [result['entity']['doc_id'] for result in res[0]]
    +passage_ids = [result['entity']['passage_id'] for result in res[0]]
     
     print(doc_ids)
    +print(passage_ids)
     

    The output is similar to the following:

    -
    [5, 10, 1, 7, 9, 6, 3, 4, 8, 2]
    +
    ["doc_11", "doc_11", "doc_7", "doc_7", "doc_3", "doc_3", "doc_2", "doc_2", "doc_8", "doc_8"]
    +[5, 10, 11, 10, 9, 6, 5, 4, 9, 2]
     
    -

    In the given output, it can be observed that the returned entities contain no duplicate doc_id values.

    -

    For comparison, let's comment out group_by_field and conduct a regular search:

    +

    In the given output, it can be observed that for each document exactly two passages are retrieved, and a total of 5 documents collectively make up the results.

    +

    For comparison, let's comment out the group-related parameters and conduct a regular search:

    # Connect to Milvus
     client = MilvusClient(uri='http://localhost:19530') # Milvus server address
     
    @@ -1763,27 +1769,33 @@ res = client.search(
         "metric_type": "L2",
         "params": {"nprobe": 10},
         }, # Search parameters
    -    limit=10, # Max. number of search results to return
    +    limit=5, # Max. number of search results to return
         # group_by_field="doc_id", # Group results by document ID
    +    # group_size=2, 
    +    # group_strict_size=True,
         output_fields=["doc_id", "passage_id"]
     )
     
     # Retrieve the values in the `doc_id` column
     doc_ids = [result['entity']['doc_id'] for result in res[0]]
    +passage_ids = [result['entity']['passage_id'] for result in res[0]]
     
     print(doc_ids)
    +print(passage_ids)
     

    The output is similar to the following:

    -
    [1, 10, 3, 10, 1, 9, 4, 4, 8, 6]
    +
    ["doc_11", "doc_11", "doc_11", "doc_11", "doc_11"]
    +[1, 10, 3, 12, 9]
     
    -

    In the given output, it can be observed that the returned entities contain duplicate doc_id values.

    +

    In the given output, it can be observed that "doc_11" completely dominated the search results, overshadowing the high-quality passages from other documents, which can be a poor prompt to the LLM.

    +

    One final point to note: by default, grouping_search returns results instantly when it has enough groups, which may mean that the number of results in each group falls short of the group_size. If you care about the number of results per group, set group_strict_size=True as shown in the code above. Milvus will then strive to get enough results for each group, at a slight cost to performance.

    Limitations

      -
    • Indexing: This grouping feature only works for collections indexed with the HNSW, IVF_FLAT, or FLAT type. For more information, see In-memory Index.

    • -
    • Vector: Currently, grouping search does not support vector fields of the BINARY_VECTOR type. For more information on data types, see Supported data types.

    • +
    • Indexing: This grouping feature only works for collections indexed with the HNSW, IVF_FLAT, or FLAT type. For more information, refer to In-memory Index.

    • +
    • Vector: Currently, grouping search does not support vector fields of the BINARY_VECTOR type. For more information on data types, see Supported data types.

    • Field: Currently, grouping search allows only a single column. You cannot specify multiple field names in the group_by_field configuration. In addition, grouping search is incompatible with the JSON, FLOAT, DOUBLE, and ARRAY data types, and with vector fields.

    • Performance impact: Be aware that performance degrades as the number of query vectors increases. Taking a cluster with 2 CPU cores and 8 GB of memory as an example, the execution time of grouping search grows proportionally with the number of input query vectors.

    • -
    • Functionality: Currently, grouping search is not supported by range search, search iterators, or hybrid search.

    • +
    • Functionality: Currently, grouping search is not supported by range search, search iterators, etc.

    Search parameters

    In the searches above, except for range search, the default search parameters apply. In most cases, you do not need to set search parameters manually.

    +

    In the searches above, except for range search, the default search parameters apply. In normal cases, you do not need to set search parameters manually.

    # In normal cases, you do not need to set search parameters manually
     # Except for range searches.
     search_parameters = {
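     Since the hunk cuts this block short, here is a complete, runnable version of the dictionary, using the values that appear in this page's code list (the comments on each key are interpretive, not from the source):

     # In normal cases, you do not need to set search parameters manually,
     # except for range searches.
     search_parameters = {
         "metric_type": "L2",
         "params": {
             "nprobe": 10,         # number of IVF cells to probe
             "level": 1,           # search accuracy level
             "radius": 1.0,        # outer bound of the range search
             "range_filter": 0.8,  # inner bound of the range search
         },
     }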
    diff --git a/localization/v2.4.x/site/fr/userGuide/search-query-get/with-iterators.json b/localization/v2.4.x/site/fr/userGuide/search-query-get/with-iterators.json
    index 848ee3fbd..29c34cede 100644
    --- a/localization/v2.4.x/site/fr/userGuide/search-query-get/with-iterators.json
    +++ b/localization/v2.4.x/site/fr/userGuide/search-query-get/with-iterators.json
    @@ -1 +1 @@
    -{"codeList":["from pymilvus import MilvusClient\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n    uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n    collection_name=\"quick_setup\",\n    dimension=5,\n)\n","import io.milvus.client.MilvusServiceClient;\nimport io.milvus.param.ConnectParam;\nimport io.milvus.param.highlevel.collection.CreateSimpleCollectionParam;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectParam connectParam = ConnectParam.newBuilder()\n        .withUri(CLUSTER_ENDPOINT)\n        .build();\n\nMilvusServiceClient client  = new MilvusServiceClient(connectParam);\n\n// 2. Create a collection\nCreateSimpleCollectionParam createCollectionParam = CreateSimpleCollectionParam.newBuilder()\n        .withCollectionName(\"quick_setup\")\n        .withDimension(5)\n        .build();\n\nclient.createCollection(createCollectionParam);\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(10000):\n    current_color = random.choice(colors)\n    current_tag = random.randint(1000, 9999)\n    data.append({\n        \"id\": i,\n        \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n        \"color\": current_color,\n        \"tag\": current_tag,\n        \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n    })\n\nprint(data[0])\n\n# Output\n#\n# {\n#     \"id\": 0,\n#     \"vector\": [\n#         -0.5705990742218152,\n#         0.39844925120642083,\n#         -0.8791287928610869,\n#         0.024163154953680932,\n#         0.6837669917169638\n#     ],\n#     \"color\": \"purple\",\n#     \"tag\": 7774,\n#     \"color_tag\": \"purple_7774\"\n# }\n\nres = client.insert(\n    collection_name=\"quick_setup\",\n    data=data,\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"insert_count\": 10000,\n#     \"ids\": [\n#         0,\n#         1,\n#         2,\n#         3,\n#         4,\n#         5,\n#         6,\n#         7,\n#         8,\n#         9,\n#         \"(9990 more items hidden)\"\n#     ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Random;\n\nimport com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.param.R;\nimport io.milvus.param.dml.InsertParam;\nimport io.milvus.response.MutationResultWrapper;\nimport io.milvus.grpc.MutationResult;\n\n\n// 3. 
Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<10000; i++) {\n    Random rand = new Random();\n    String current_color = colors.get(rand.nextInt(colors.size()-1));\n    JSONObject row = new JSONObject();\n    row.put(\"id\", Long.valueOf(i));\n    row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n    row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n    data.add(row);\n}\n\nInsertParam insertParam = InsertParam.newBuilder()\n    .withCollectionName(\"quick_setup\")\n    .withRows(data)\n    .build();\n\nR insertRes = client.insert(insertParam);\n\nif (insertRes.getStatus() != R.Status.Success.getCode()) {\n    System.err.println(insertRes.getMessage());\n}\n\nMutationResultWrapper wrapper = new MutationResultWrapper(insertRes.getData());\nSystem.out.println(wrapper.getInsertCount());\n","from pymilvus import Collection\n\n# 4. Search with iterator\nconnections.connect(host=\"127.0.0.1\", port=19530)\ncollection = Collection(\"quick_setup\")\n\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\nsearch_params = {\n    \"metric_type\": \"IP\",\n    \"params\": {\"nprobe\": 10}\n}\n\niterator = collection.search_iterator(\n    data=query_vectors,\n    anns_field=\"vector\",\n    batch_size=10,\n    param=search_params,\n    output_fields=[\"color_tag\"],\n    limit=3\n)\n\nresults = []\n\nwhile True:\n    result = iterator.next()\n    if not result:\n        iterator.close()\n        break\n        \n    results.extend(result)\n    \n    for hit in result:\n        results.append(hit.to_dict())\n\nprint(results)\n\n# Output\n#\n# [\n#     {\n#         \"id\": 1756,\n#         \"distance\": 2.0642056465148926,\n#         \"entity\": {\n#             \"color_tag\": \"black_9109\"\n#         }\n#     },\n#     {\n#         \"id\": 6488,\n#         \"distance\": 1.9437453746795654,\n#         \"entity\": {\n#             \"color_tag\": \"purple_8164\"\n#         }\n#     },\n#     {\n#         \"id\": 3338,\n#         \"distance\": 1.9107104539871216,\n#         \"entity\": {\n#             \"color_tag\": \"brown_8121\"\n#         }\n#     }\n# ]\n","import io.milvus.param.dml.QueryIteratorParam;\nimport io.milvus.param.dml.SearchIteratorParam;\nimport io.milvus.response.QueryResultsWrapper;\nimport io.milvus.orm.iterator.SearchIterator;\n\n// 4. 
Search with iterators\nSearchIteratorParam iteratorParam = SearchIteratorParam.newBuilder()\n    .withCollectionName(\"quick_setup\")\n    .withVectorFieldName(\"vector\")\n    // Use withFloatVectors() in clusters compatible with Milvus 2.4.x\n    .withVectors(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f))\n    .withBatchSize(10L)\n    .withParams(\"{\\\"metric_type\\\": \\\"COSINE\\\", \\\"params\\\": {\\\"level\\\": 1}}\")\n    .build();\n        \n\nR searchIteratorRes = client.searchIterator(iteratorParam);\n\nif (searchIteratorRes.getStatus() != R.Status.Success.getCode()) {\n    System.err.println(searchIteratorRes.getMessage());\n}\n\nSearchIterator searchIterator = searchIteratorRes.getData();\nList results = new ArrayList<>();\n\nwhile (true) {\n    List batchResults = searchIterator.next();\n    if (batchResults.isEmpty()) {\n        searchIterator.close();\n        break;\n    }\n    for (QueryResultsWrapper.RowRecord rowRecord : batchResults) {\n        results.add(rowRecord);\n    }\n}\n\nSystem.out.println(results.size());\n","# 6. Query with iterator\niterator = collection.query_iterator(\n    batch_size=10, # Controls the size of the return each time you call next()\n    expr=\"color_tag like \\\"brown_8\\\"\",\n    output_fields=[\"color_tag\"]\n)\n\nresults = []\n\nwhile True:\n    result = iterator.next()\n    if not result:\n        iterator.close()\n        break\n        \n    results.extend(result)\n    \n# 8. Check the search results\nprint(len(results))\n\nprint(results[:3])\n\n# Output\n#\n# [\n#     {\n#         \"color_tag\": \"brown_8785\",\n#         \"id\": 94\n#     },\n#     {\n#         \"color_tag\": \"brown_8568\",\n#         \"id\": 176\n#     },\n#     {\n#         \"color_tag\": \"brown_8721\",\n#         \"id\": 289\n#     }\n# ]\n","import io.milvus.param.dml.QueryIteratorParam;\nimport io.milvus.orm.iterator.QueryIterator;\n\n// 5. 
Query with iterators\n\ntry {\n    Files.write(Path.of(\"results.json\"), JSON.toJSONString(new ArrayList<>()).getBytes(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);\n} catch (Exception e) {\n    // TODO: handle exception\n    e.printStackTrace();\n}\n\nQueryIteratorParam queryIteratorParam = QueryIteratorParam.newBuilder()\n    .withCollectionName(\"quick_setup\")\n    .withExpr(\"color_tag like \\\"brown_8%\\\"\")\n    .withBatchSize(50L)\n    .addOutField(\"vector\")\n    .addOutField(\"color_tag\")\n    .build();\n\nR queryIteratRes = client.queryIterator(queryIteratorParam);\n\nif (queryIteratRes.getStatus() != R.Status.Success.getCode()) {\n    System.err.println(queryIteratRes.getMessage());\n}\n\nQueryIterator queryIterator = queryIteratRes.getData();\n\nwhile (true) {\n    List batchResults = queryIterator.next();\n    if (batchResults.isEmpty()) {\n        queryIterator.close();\n        break;\n    }\n\n    String jsonString = \"\";\n    List jsonObject = new ArrayList<>();\n    try {\n        jsonString = Files.readString(Path.of(\"results.json\"));\n        jsonObject = JSON.parseArray(jsonString).toJavaList(null);\n    } catch (IOException e) {\n        // TODO Auto-generated catch block\n        e.printStackTrace();\n    }\n\n    for (QueryResultsWrapper.RowRecord queryResult : batchResults) {\n        JSONObject row = new JSONObject();\n        row.put(\"id\", queryResult.get(\"id\"));\n        row.put(\"vector\", queryResult.get(\"vector\"));\n        row.put(\"color_tag\", queryResult.get(\"color_tag\"));\n        jsonObject.add(row);\n    }\n\n    try {\n        Files.write(Path.of(\"results.json\"), JSON.toJSONString(jsonObject).getBytes(), StandardOpenOption.WRITE);\n    } catch (IOException e) {\n        // TODO Auto-generated catch block\n        e.printStackTrace();\n    }\n}\n"],"headingContent":"","anchorList":[{"label":"Avec les itérateurs","href":"With-Iterators","type":1,"isActive":false},{"label":"Vue d'ensemble","href":"Overview","type":2,"isActive":false},{"label":"Préparations","href":"Preparations","type":2,"isActive":false},{"label":"Recherche avec itérateur","href":"Search-with-iterator","type":2,"isActive":false},{"label":"Interroger avec un itérateur","href":"Query-with-an-iterator","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["from pymilvus import MilvusClient\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n    uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n    collection_name=\"quick_setup\",\n    dimension=5,\n)\n","import io.milvus.client.MilvusServiceClient;\nimport io.milvus.param.ConnectParam;\nimport io.milvus.param.highlevel.collection.CreateSimpleCollectionParam;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectParam connectParam = ConnectParam.newBuilder()\n        .withUri(CLUSTER_ENDPOINT)\n        .build();\n\nMilvusServiceClient client  = new MilvusServiceClient(connectParam);\n\n// 2. Create a collection\nCreateSimpleCollectionParam createCollectionParam = CreateSimpleCollectionParam.newBuilder()\n        .withCollectionName(\"quick_setup\")\n        .withDimension(5)\n        .build();\n\nclient.createCollection(createCollectionParam);\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(10000):\n    current_color = random.choice(colors)\n    current_tag = random.randint(1000, 9999)\n    data.append({\n        \"id\": i,\n        \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n        \"color\": current_color,\n        \"tag\": current_tag,\n        \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n    })\n\nprint(data[0])\n\n# Output\n#\n# {\n#     \"id\": 0,\n#     \"vector\": [\n#         -0.5705990742218152,\n#         0.39844925120642083,\n#         -0.8791287928610869,\n#         0.024163154953680932,\n#         0.6837669917169638\n#     ],\n#     \"color\": \"purple\",\n#     \"tag\": 7774,\n#     \"color_tag\": \"purple_7774\"\n# }\n\nres = client.insert(\n    collection_name=\"quick_setup\",\n    data=data,\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"insert_count\": 10000,\n#     \"ids\": [\n#         0,\n#         1,\n#         2,\n#         3,\n#         4,\n#         5,\n#         6,\n#         7,\n#         8,\n#         9,\n#         \"(9990 more items hidden)\"\n#     ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Random;\n\nimport com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.param.R;\nimport io.milvus.param.dml.InsertParam;\nimport io.milvus.response.MutationResultWrapper;\nimport io.milvus.grpc.MutationResult;\n\n\n// 3. 
Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<10000; i++) {\n    Random rand = new Random();\n    String current_color = colors.get(rand.nextInt(colors.size()-1));\n    JSONObject row = new JSONObject();\n    row.put(\"id\", Long.valueOf(i));\n    row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n    row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n    data.add(row);\n}\n\nInsertParam insertParam = InsertParam.newBuilder()\n    .withCollectionName(\"quick_setup\")\n    .withRows(data)\n    .build();\n\nR insertRes = client.insert(insertParam);\n\nif (insertRes.getStatus() != R.Status.Success.getCode()) {\n    System.err.println(insertRes.getMessage());\n}\n\nMutationResultWrapper wrapper = new MutationResultWrapper(insertRes.getData());\nSystem.out.println(wrapper.getInsertCount());\n","from pymilvus import Collection\n\n# 4. Search with iterator\nconnections.connect(host=\"127.0.0.1\", port=19530)\ncollection = Collection(\"quick_setup\")\n\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\nsearch_params = {\n    \"metric_type\": \"IP\",\n    \"params\": {\"nprobe\": 10}\n}\n\niterator = collection.search_iterator(\n    data=query_vectors,\n    anns_field=\"vector\",\n    batch_size=10,\n    param=search_params,\n    output_fields=[\"color_tag\"],\n    limit=300\n)\n# search 300 entities totally with 10 entities per page\n\nresults = []\n\nwhile True:\n    result = iterator.next()\n    if not result:\n        iterator.close()\n        break\n        \n    results.extend(result)\n    \n    for hit in result:\n        results.append(hit.to_dict())\n\nprint(results)\n\n# Output\n#\n# [\n#     {\n#         \"id\": 1756,\n#         \"distance\": 2.0642056465148926,\n#         \"entity\": {\n#             \"color_tag\": \"black_9109\"\n#         }\n#     },\n#     {\n#         \"id\": 6488,\n#         \"distance\": 1.9437453746795654,\n#         \"entity\": {\n#             \"color_tag\": \"purple_8164\"\n#         }\n#     },\n#     {\n#         \"id\": 3338,\n#         \"distance\": 1.9107104539871216,\n#         \"entity\": {\n#             \"color_tag\": \"brown_8121\"\n#         }\n#     }\n# ]\n","import io.milvus.param.dml.QueryIteratorParam;\nimport io.milvus.param.dml.SearchIteratorParam;\nimport io.milvus.response.QueryResultsWrapper;\nimport io.milvus.orm.iterator.SearchIterator;\n\n// 4. 
Search with iterators\nSearchIteratorParam iteratorParam = SearchIteratorParam.newBuilder()\n    .withCollectionName(\"quick_setup\")\n    .withVectorFieldName(\"vector\")\n    // Use withFloatVectors() in clusters compatible with Milvus 2.4.x\n    .withVectors(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f))\n    .withBatchSize(10L)\n    .withParams(\"{\\\"metric_type\\\": \\\"COSINE\\\", \\\"params\\\": {\\\"level\\\": 1}}\")\n    .build();\n        \n\nR searchIteratorRes = client.searchIterator(iteratorParam);\n\nif (searchIteratorRes.getStatus() != R.Status.Success.getCode()) {\n    System.err.println(searchIteratorRes.getMessage());\n}\n\nSearchIterator searchIterator = searchIteratorRes.getData();\nList results = new ArrayList<>();\n\nwhile (true) {\n    List batchResults = searchIterator.next();\n    if (batchResults.isEmpty()) {\n        searchIterator.close();\n        break;\n    }\n    for (QueryResultsWrapper.RowRecord rowRecord : batchResults) {\n        results.add(rowRecord);\n    }\n}\n\nSystem.out.println(results.size());\n","# 6. Query with iterator\niterator = collection.query_iterator(\n    batch_size=10, # Controls the size of the return each time you call next()\n    expr=\"color_tag like \\\"brown_8\\\"\",\n    output_fields=[\"color_tag\"]\n)\n\nresults = []\n\nwhile True:\n    result = iterator.next()\n    if not result:\n        iterator.close()\n        break\n        \n    results.extend(result)\n    \n# 8. Check the search results\nprint(len(results))\n\nprint(results[:3])\n\n# Output\n#\n# [\n#     {\n#         \"color_tag\": \"brown_8785\",\n#         \"id\": 94\n#     },\n#     {\n#         \"color_tag\": \"brown_8568\",\n#         \"id\": 176\n#     },\n#     {\n#         \"color_tag\": \"brown_8721\",\n#         \"id\": 289\n#     }\n# ]\n","import io.milvus.param.dml.QueryIteratorParam;\nimport io.milvus.orm.iterator.QueryIterator;\n\n// 5. 
Query with iterators\n\ntry {\n    Files.write(Path.of(\"results.json\"), JSON.toJSONString(new ArrayList<>()).getBytes(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);\n} catch (Exception e) {\n    // TODO: handle exception\n    e.printStackTrace();\n}\n\nQueryIteratorParam queryIteratorParam = QueryIteratorParam.newBuilder()\n    .withCollectionName(\"quick_setup\")\n    .withExpr(\"color_tag like \\\"brown_8%\\\"\")\n    .withBatchSize(50L)\n    .addOutField(\"vector\")\n    .addOutField(\"color_tag\")\n    .build();\n\nR queryIteratRes = client.queryIterator(queryIteratorParam);\n\nif (queryIteratRes.getStatus() != R.Status.Success.getCode()) {\n    System.err.println(queryIteratRes.getMessage());\n}\n\nQueryIterator queryIterator = queryIteratRes.getData();\n\nwhile (true) {\n    List batchResults = queryIterator.next();\n    if (batchResults.isEmpty()) {\n        queryIterator.close();\n        break;\n    }\n\n    String jsonString = \"\";\n    List jsonObject = new ArrayList<>();\n    try {\n        jsonString = Files.readString(Path.of(\"results.json\"));\n        jsonObject = JSON.parseArray(jsonString).toJavaList(null);\n    } catch (IOException e) {\n        // TODO Auto-generated catch block\n        e.printStackTrace();\n    }\n\n    for (QueryResultsWrapper.RowRecord queryResult : batchResults) {\n        JSONObject row = new JSONObject();\n        row.put(\"id\", queryResult.get(\"id\"));\n        row.put(\"vector\", queryResult.get(\"vector\"));\n        row.put(\"color_tag\", queryResult.get(\"color_tag\"));\n        jsonObject.add(row);\n    }\n\n    try {\n        Files.write(Path.of(\"results.json\"), JSON.toJSONString(jsonObject).getBytes(), StandardOpenOption.WRITE);\n    } catch (IOException e) {\n        // TODO Auto-generated catch block\n        e.printStackTrace();\n    }\n}\n"],"headingContent":"With Iterators","anchorList":[{"label":"Avec les itérateurs","href":"With-Iterators","type":1,"isActive":false},{"label":"Vue d'ensemble","href":"Overview","type":2,"isActive":false},{"label":"Préparations","href":"Preparations","type":2,"isActive":false},{"label":"Recherche avec itérateur","href":"Search-with-iterator","type":2,"isActive":false},{"label":"Interroger avec un itérateur","href":"Query-with-an-iterator","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/fr/userGuide/search-query-get/with-iterators.md b/localization/v2.4.x/site/fr/userGuide/search-query-get/with-iterators.md
    index fdad69210..69effcb8a 100644
    --- a/localization/v2.4.x/site/fr/userGuide/search-query-get/with-iterators.md
    +++ b/localization/v2.4.x/site/fr/userGuide/search-query-get/with-iterators.md
    @@ -21,7 +21,7 @@ title: Avec les itérateurs
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -

    Milvus provides search and query iterators for iterating over results with a large volume of entities. Since Milvus caps TopK at 16384, users can use iterators to return large numbers of entities, or even all of the entities in a collection, in batch mode.

    +

    Milvus provides search and query iterators for iterating through a large volume of entities. Since Milvus caps TopK at 16384, users can use iterators to return large numbers of entities, or even all of the entities in a collection, in batch mode.

    Overview

    Iterators are powerful tools that help you iterate through a large volume of data, or through all of the data in a collection, using primary key values and boolean expressions. This can significantly improve the way you retrieve data. Unlike the traditional use of offset and limit parameters, which may become less efficient over time, iterators offer a more scalable solution.

    -

    Benefits of using iterators

      -
    • Simplicity: Eliminates the complex offset and limit parameters.

    • +

      Iterators are efficient tools for scanning an entire collection or iterating through a large volume of entities by specifying primary key values or a filter expression; see the sketch after the list below. Compared with a search or query call that uses offset and limit parameters, using iterators is more efficient and scalable.

      +

      Benefits of using iterators

        +
      • Simplicity: Eliminates the complex offset and limit parameters.

      • Efficiency: Enables scalable data retrieval by fetching only the data you need.

      • -
      • Consistency: Ensures consistent dataset sizes through boolean filters.

      • +
      • Consistency: Ensures a consistent dataset size through boolean filters.
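
      A minimal sketch of the iterator pattern these points describe, assuming the quick_setup collection prepared below (the batch size, limit, and query vector are illustrative):

    from pymilvus import connections, Collection

    connections.connect(host="127.0.0.1", port=19530)
    collection = Collection("quick_setup")

    # Page through up to 300 hits, 10 per call to next()
    iterator = collection.search_iterator(
        data=[[0.35, -0.60, 0.18, -0.26, 0.90]],
        anns_field="vector",
        param={"metric_type": "IP", "params": {"nprobe": 10}},
        batch_size=10,
        limit=300,
    )

    results = []
    while True:
        batch = iterator.next()
        if not batch:
            iterator.close()
            break
        results.extend(hit.to_dict() for hit in batch)

    print(len(results))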

      notes

      @@ -64,12 +64,12 @@ title: Avec les itérateurs
      -

      The following steps walk through the code for connecting to Milvus, quickly setting up a collection, and inserting more than 10,000 randomly generated entities into the collection.

      +

      The following preparation step connects to Milvus and inserts randomly generated entities into a collection.

      Step 1: Create a collection

      -

      Use the MilvusClient code to connect to the Milvus server and create_collection() to create a collection.

      +

      Use MilvusClient to connect to the Milvus server and create_collection() to create a collection.

      -

      To connect to the Milvus server and create a collection, simply use MilvusClientV2 to connect to the Milvus server and createCollection() to create a collection.

      +

      To connect to the Milvus server and create a collection, use MilvusClientV2 to connect to the Milvus server and createCollection() to create a collection.
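
      In Python, this amounts to two calls; the snippet below mirrors the setup code in this page's code list (a local server URI is assumed):

    from pymilvus import MilvusClient

    # 1. Set up a Milvus client
    client = MilvusClient(
        uri="http://localhost:19530"
    )

    # 2. Create a collection with 5-dimensional vectors
    client.create_collection(
        collection_name="quick_setup",
        dimension=5,
    )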

      @@ -266,8 +266,9 @@ iterator = collection.search_iterator( batch_size=10, param=search_params, output_fields=["color_tag"], - limit=3 + limit=300 ) +# search 300 entities totally with 10 entities per page results = [] @@ -575,7 +576,7 @@ R<QueryIterator> queryIteratRes = c withExpr - Définit l'expression à utiliser pour interroger les entités. Pour créer une condition de filtrage scalaire, reportez-vous à la section Règles des expressions booléennes. + Définit l'expression pour interroger les entités. Pour créer une condition de filtrage scalaire, reportez-vous à la section Règles des expressions booléennes. withBatchSize diff --git a/localization/v2.4.x/site/fr/userGuide/tools/cli_commands.json b/localization/v2.4.x/site/fr/userGuide/tools/cli_commands.json index 4c70870cd..d8142a301 100644 --- a/localization/v2.4.x/site/fr/userGuide/tools/cli_commands.json +++ b/localization/v2.4.x/site/fr/userGuide/tools/cli_commands.json @@ -1 +1 @@ -{"codeList":["clear\n","connect [-uri (text)] [-t (text)]\n","milvus_cli > connect -uri http://127.0.0.1:19530 \n","create database -db (text) \n","milvus_cli > create database -db testdb\n","use database -db (text) \n","milvus_cli > use database -db testdb\n","list databases \n","milvus_cli > list databases\n","delete database -db (text) \n","milvus_cli > delete database -db testdb\n","create user -u (text) -p (text)\n","milvus_cli > create user -u zilliz -p zilliz\n","create alias -c (text) -a (text) [-A] \n","milvus_cli > create alias -c car -a carAlias1\n","milvus_cli > create alias -c car2 -A -a carAlias1\n","create collection -c (text) -f (text) -p (text) [-a] [-d (text)]\n","## For array field: --schema-field support :::(:if Varchar)\n\nmilvus_cli > create collection -c car -f id:INT64:primary_field -f vector:FLOAT_VECTOR:128 -f color:INT64:color -f brand:ARRAY:64:VARCHAR:128 -p id -A -d 'car_collection'\n","create partition -c (text) -p (text) [-d (text)]\n","milvus_cli > create partition -c car -p new_partition -d test_add_partition\n","create index\n","milvus_cli > create index\n\nCollection name (car, car2): car2\n\nThe name of the field to create an index for (vector): vector\n\nIndex name: vectorIndex\n\n# Default is ''\nIndex type FLAT, IVF_FLAT, IVF_SQ8, IVF_PQ, RNSG, HNSW, ANNOY, AUTOINDEX, DISKANN, GPU_IVF_FLAT, GPU_IVF_PQ, SPARSE_INVERTED_INDEX, SPARSE_WAND, SCANN, STL_SORT, Trie, INVERTED, ) []: IVF_FLAT \n\n# Default is ''\nIndex metric type (L2, IP, HAMMING, TANIMOTO, COSINE, ) []: \n\nTimeout []:\n","delete user -u (text)\n","milvus_cli > delete user -u zilliz\n","delete alias -a (text) \n","delete collection -c (text) \n","milvus_cli > delete collection -c car\n","delete entities -c (text) -p (text) \n","milvus_cli > delete entities -c car\n\nThe expression to specify entities to be deleted, such as \"film_id in [ 0, 1 ]\": film_id in [ 0, 1 ]\n\nYou are trying to delete the entities of collection. This action cannot be undone!\n\nDo you want to continue? 
[y/N]: y\n","delete partition -c (text) -p (text)\n","milvus_cli > delete partition -c car -p new_partition\n","delete index -c (text) -in (text)\n","milvus_cli > delete index -c car -in indexName\n","show collection -c (text)\n","milvus_cli > show collection -c test_collection_insert\n","show partition -c (text) -p (text)\n","milvus_cli > show partition -c test_collection_insert -p _default\n","show index -c (text) -in (text)\n","milvus_cli > show index -c test_collection -in index_name\n","exit\n","help \n","import -c (text)[-p (text)] \n","milvus_cli > import -c car 'examples/import_csv/vectors.csv'\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","milvus_cli > import -c car 'https://raw.githubusercontent.com/milvus-\nio/milvus_cli/main/examples/import_csv/vectors.csv'\n\nReading file from remote URL.\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","list users\n","list collections\n","list indexes -c (text)\n","list partitions -c (text)\n","load -c (text) [-p (text)]\n","query\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id in [ 428960801420883491, 428960801420883492,\n428960801420883493 ]\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: color, brand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id > 428960801420883491\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: id, color,\nbrand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","release -c (text) [-p (text)]\n","search\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. 
You can also import a csv file\nout headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers):\n [[0.71, 0.76, 0.17, 0.13, 0.42, 0.07, 0.15, 0.67, 0.58, 0.02, 0.39, 0.47, 0.58, 0.88, 0.73, 0.31, 0.23, 0.57, 0.33, 0.2, 0.03, 0.43, 0.78, 0.49, 0.17, 0.56, 0.76, 0.54, 0.45, 0.46, 0.05, 0.1, 0.43, 0.63, 0.29, 0.44, 0.65, 0.01, 0.35, 0.46, 0.66, 0.7, 0.88, 0.07, 0.49, 0.92, 0.57, 0.5, 0.16, 0.77, 0.98, 0.1, 0.44, 0.88, 0.82, 0.16, 0.67, 0.63, 0.57, 0.55, 0.95, 0.13, 0.64, 0.43, 0.71, 0.81, 0.43, 0.65, 0.76, 0.7, 0.05, 0.24, 0.03, 0.9, 0.46, 0.28, 0.92, 0.25, 0.97, 0.79, 0.73, 0.97, 0.49, 0.28, 0.64, 0.19, 0.23, 0.51, 0.09, 0.1, 0.53, 0.03, 0.23, 0.94, 0.87, 0.14, 0.42, 0.82, 0.91, 0.11, 0.91, 0.37, 0.26, 0.6, 0.89, 0.6, 0.32, 0.11, 0.98, 0.67, 0.12, 0.66, 0.47, 0.02, 0.15, 0.6, 0.64, 0.57, 0.14, 0.81, 0.75, 0.11, 0.49, 0.78, 0.16, 0.63, 0.57, 0.18]]\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, car2): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []:\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []:\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. 
If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","list connections \n","show index_progress -c (text) [-i (text)]\n","show loading_progress -c (text) [-p (text)]\n","version\n","$ milvus_cli --version\nMilvus_CLI v0.4.0\n"],"headingContent":"","anchorList":[{"label":"Référence de la commande Milvus_CLI","href":"MilvusCLI-Command-Reference","type":1,"isActive":false},{"label":"clear (effacer)","href":"clear","type":2,"isActive":false},{"label":"connect","href":"connect","type":2,"isActive":false},{"label":"créer une base de données","href":"create-Database","type":2,"isActive":false},{"label":"utiliser la base de données","href":"use-Database","type":2,"isActive":false},{"label":"Lister les bases de données","href":"List-Databases","type":2,"isActive":false},{"label":"Supprimer une base de données","href":"Delete-Database","type":2,"isActive":false},{"label":"créer un utilisateur","href":"create-user","type":2,"isActive":false},{"label":"create alias","href":"create-alias","type":2,"isActive":false},{"label":"create collection","href":"create-collection","type":2,"isActive":false},{"label":"create partition","href":"create-partition","type":2,"isActive":false},{"label":"create index","href":"create-index","type":2,"isActive":false},{"label":"delete user","href":"delete-user","type":2,"isActive":false},{"label":"delete alias","href":"delete-alias","type":2,"isActive":false},{"label":"delete collection","href":"delete-collection","type":2,"isActive":false},{"label":"delete entités","href":"delete-entities","type":2,"isActive":false},{"label":"delete partition","href":"delete-partition","type":2,"isActive":false},{"label":"delete index","href":"delete-index","type":2,"isActive":false},{"label":"show collection","href":"show-collection","type":2,"isActive":false},{"label":"show partition","href":"show-partition","type":2,"isActive":false},{"label":"show index","href":"show-index","type":2,"isActive":false},{"label":"exit","href":"exit","type":2,"isActive":false},{"label":"help","href":"help","type":2,"isActive":false},{"label":"import","href":"import","type":2,"isActive":false},{"label":"list users","href":"list-users","type":2,"isActive":false},{"label":"list collections","href":"list-collections","type":2,"isActive":false},{"label":"list indexes","href":"list-indexes","type":2,"isActive":false},{"label":"list partitions","href":"list-partitions","type":2,"isActive":false},{"label":"load","href":"load","type":2,"isActive":false},{"label":"query","href":"query","type":2,"isActive":false},{"label":"libérer","href":"release","type":2,"isActive":false},{"label":"search","href":"search","type":2,"isActive":false},{"label":"Lister les connexions","href":"List-connection","type":2,"isActive":false},{"label":"show index_progress","href":"show-indexprogress","type":2,"isActive":false},{"label":"show loading_progress","href":"show-loadingprogress","type":2,"isActive":false},{"label":"version","href":"version","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["clear\n","connect [-uri (text)] [-t (text)]\n","milvus_cli > connect -uri http://127.0.0.1:19530 \n","create database -db (text) \n","milvus_cli > create database -db testdb\n","use database -db (text) \n","milvus_cli > use database -db testdb\n","list databases \n","milvus_cli > list databases\n","delete database -db (text) \n","milvus_cli > delete database -db testdb\n","create user -u (text) -p (text)\n","milvus_cli > create user -u zilliz -p zilliz\n","create alias -c 
(text) -a (text) [-A] \n","milvus_cli > create alias -c car -a carAlias1\n","milvus_cli > create alias -c car2 -A -a carAlias1\n","create collection -c (text) -f (text) -p (text) [-a] [-d (text)]\n","## For array field: --schema-field support :::(:if Varchar)\n\nmilvus_cli > create collection -c car -f id:INT64:primary_field -f vector:FLOAT_VECTOR:128 -f color:INT64:color -f brand:ARRAY:64:VARCHAR:128 -p id -A -d 'car_collection'\n","create partition -c (text) -p (text) [-d (text)]\n","milvus_cli > create partition -c car -p new_partition -d test_add_partition\n","create index\n","milvus_cli > create index\n\nCollection name (car, car2): car2\n\nThe name of the field to create an index for (vector): vector\n\nIndex name: vectorIndex\n\n# Default is ''\nIndex type FLAT, IVF_FLAT, IVF_SQ8, IVF_PQ, RNSG, HNSW, ANNOY, AUTOINDEX, DISKANN, GPU_IVF_FLAT, GPU_IVF_PQ, SPARSE_INVERTED_INDEX, SPARSE_WAND, SCANN, STL_SORT, Trie, INVERTED, ) []: IVF_FLAT \n\n# Default is ''\nIndex metric type (L2, IP, HAMMING, TANIMOTO, COSINE, ) []: \n\nTimeout []:\n","delete user -u (text)\n","milvus_cli > delete user -u zilliz\n","delete alias -a (text) \n","delete collection -c (text) \n","milvus_cli > delete collection -c car\n","delete entities -c (text) -p (text) \n","milvus_cli > delete entities -c car\n\nThe expression to specify entities to be deleted, such as \"film_id in [ 0, 1 ]\": film_id in [ 0, 1 ]\n\nYou are trying to delete the entities of collection. This action cannot be undone!\n\nDo you want to continue? [y/N]: y\n","delete partition -c (text) -p (text)\n","milvus_cli > delete partition -c car -p new_partition\n","delete index -c (text) -in (text)\n","milvus_cli > delete index -c car -in indexName\n","show collection -c (text)\n","milvus_cli > show collection -c test_collection_insert\n","show partition -c (text) -p (text)\n","milvus_cli > show partition -c test_collection_insert -p _default\n","show index -c (text) -in (text)\n","milvus_cli > show index -c test_collection -in index_name\n","exit\n","help \n","import -c (text)[-p (text)] \n","milvus_cli > import -c car 'examples/import_csv/vectors.csv'\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","milvus_cli > import -c car 'https://raw.githubusercontent.com/milvus-\nio/milvus_cli/main/examples/import_csv/vectors.csv'\n\nReading file from remote URL.\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","list users\n","list collections\n","list indexes -c (text)\n","list partitions -c (text)\n","load -c (text) [-p (text)]\n","query\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id in [ 428960801420883491, 428960801420883492,\n428960801420883493 ]\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: color, brand\n\ntimeout []:\n\nGuarantee timestamp. 
This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id > 428960801420883491\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: id, color,\nbrand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","release -c (text) [-p (text)]\n","search\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file\nout headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers):\n [[0.71, 0.76, 0.17, 0.13, 0.42, 0.07, 0.15, 0.67, 0.58, 0.02, 0.39, 0.47, 0.58, 0.88, 0.73, 0.31, 0.23, 0.57, 0.33, 0.2, 0.03, 0.43, 0.78, 0.49, 0.17, 0.56, 0.76, 0.54, 0.45, 0.46, 0.05, 0.1, 0.43, 0.63, 0.29, 0.44, 0.65, 0.01, 0.35, 0.46, 0.66, 0.7, 0.88, 0.07, 0.49, 0.92, 0.57, 0.5, 0.16, 0.77, 0.98, 0.1, 0.44, 0.88, 0.82, 0.16, 0.67, 0.63, 0.57, 0.55, 0.95, 0.13, 0.64, 0.43, 0.71, 0.81, 0.43, 0.65, 0.76, 0.7, 0.05, 0.24, 0.03, 0.9, 0.46, 0.28, 0.92, 0.25, 0.97, 0.79, 0.73, 0.97, 0.49, 0.28, 0.64, 0.19, 0.23, 0.51, 0.09, 0.1, 0.53, 0.03, 0.23, 0.94, 0.87, 0.14, 0.42, 0.82, 0.91, 0.11, 0.91, 0.37, 0.26, 0.6, 0.89, 0.6, 0.32, 0.11, 0.98, 0.67, 0.12, 0.66, 0.47, 0.02, 0.15, 0.6, 0.64, 0.57, 0.14, 0.81, 0.75, 0.11, 0.49, 0.78, 0.16, 0.63, 0.57, 0.18]]\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. 
If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, car2): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []:\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []:\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","list connections \n","show index_progress -c (text) [-i (text)]\n","show loading_progress -c (text) [-p (text)]\n","version\n","$ milvus_cli --version\nMilvus_CLI v0.4.0\n"],"headingContent":"Milvus_CLI Command Reference","anchorList":[{"label":"Référence de la commande Milvus_CLI","href":"MilvusCLI-Command-Reference","type":1,"isActive":false},{"label":"clear (effacer)","href":"clear","type":2,"isActive":false},{"label":"connect","href":"connect","type":2,"isActive":false},{"label":"créer une base de données","href":"create-Database","type":2,"isActive":false},{"label":"utiliser la base de données","href":"use-Database","type":2,"isActive":false},{"label":"lister les bases de données","href":"list-Databases","type":2,"isActive":false},{"label":"supprimer la base de données","href":"delete-Database","type":2,"isActive":false},{"label":"créer un utilisateur","href":"create-user","type":2,"isActive":false},{"label":"create alias","href":"create-alias","type":2,"isActive":false},{"label":"create collection","href":"create-collection","type":2,"isActive":false},{"label":"create partition","href":"create-partition","type":2,"isActive":false},{"label":"create index","href":"create-index","type":2,"isActive":false},{"label":"delete user","href":"delete-user","type":2,"isActive":false},{"label":"delete alias","href":"delete-alias","type":2,"isActive":false},{"label":"delete collection","href":"delete-collection","type":2,"isActive":false},{"label":"delete entités","href":"delete-entities","type":2,"isActive":false},{"label":"delete partition","href":"delete-partition","type":2,"isActive":false},{"label":"delete index","href":"delete-index","type":2,"isActive":false},{"label":"show collection","href":"show-collection","type":2,"isActive":false},{"label":"show partition","href":"show-partition","type":2,"isActive":false},{"label":"show index","href":"show-index","type":2,"isActive":false},{"label":"exit","href":"exit","type":2,"isActive":false},{"label":"help","href":"help","type":2,"isActive":false},{"label":"import","href":"import","type":2,"isActive":false},{"label":"list users","href":"list-users","type":2,"isActive":false},{"label":"list collections","href":"list-collections","type":2,"isActive":false},{"label":"list indexes","href":"list-indexes","type":2,"isActive":false},{"label":"list 
partitions","href":"list-partitions","type":2,"isActive":false},{"label":"load","href":"load","type":2,"isActive":false},{"label":"query","href":"query","type":2,"isActive":false},{"label":"libérer","href":"release","type":2,"isActive":false},{"label":"search","href":"search","type":2,"isActive":false},{"label":"liste des connexions","href":"list-connection","type":2,"isActive":false},{"label":"show index_progress","href":"show-indexprogress","type":2,"isActive":false},{"label":"show loading_progress","href":"show-loadingprogress","type":2,"isActive":false},{"label":"version","href":"version","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/userGuide/tools/cli_commands.md b/localization/v2.4.x/site/fr/userGuide/tools/cli_commands.md index 1d8cf6ab2..334a58baa 100644 --- a/localization/v2.4.x/site/fr/userGuide/tools/cli_commands.md +++ b/localization/v2.4.x/site/fr/userGuide/tools/cli_commands.md @@ -1,7 +1,7 @@ --- id: cli_commands.md summary: Interagir avec Milvus à l'aide de commandes. -title: Référence des commandes Milvus_CLI +title: Référence de la commande Milvus_CLI ---

      Référence de la commande Milvus_CLI

    -

    Lister les bases de données

    Lister les bases de données dans Milvus

    +

Répertorie les bases de données dans Milvus

    Syntaxe

    list databases 
     
    -

    Exemples de base de données

    Exemple 1

    L'exemple suivant répertorie les bases de données de Milvus.

    +

    Exemples de base de données

    Exemple 1

    L'exemple suivant répertorie les bases de données dans Milvus.

    milvus_cli > list databases
     
    -

    Supprimer une base de données
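For reference, the syntax and example recorded for this section in the codeList above:

```
delete database -db (text)

milvus_cli > delete database -db testdb
```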

    -

    Lister les connexions

    Répertorier les connexions.

    +

    Répertorie les connexions.

    Syntaxe

    list connections 
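Example (an illustrative transcript, following the pattern of the other commands in this reference):

```
milvus_cli > list connections
```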
     
    @@ -1238,7 +1238,7 @@ Guarantee Timestamp(It instructs Milvus to see all -

    Affiche la progression du chargement de l'entité.

    +

    Affiche la progression du chargement d'une collection.

    Syntaxe

    show loading_progress -c (text) [-p (text)]
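Example (the collection name is a placeholder):

```
milvus_cli > show loading_progress -c test_collection
```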
     
    diff --git a/localization/v2.4.x/site/fr/userGuide/tools/milvus_backup_overview.json b/localization/v2.4.x/site/fr/userGuide/tools/milvus_backup_overview.json index 223bf3daa..e0b3c4632 100644 --- a/localization/v2.4.x/site/fr/userGuide/tools/milvus_backup_overview.json +++ b/localization/v2.4.x/site/fr/userGuide/tools/milvus_backup_overview.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"","anchorList":[{"label":"Sauvegarde Milvus","href":"Milvus-Backup","type":1,"isActive":false},{"label":"Conditions préalables","href":"Prerequisites","type":2,"isActive":false},{"label":"Architecture de Milvus Backup","href":"Architecture","type":2,"isActive":false},{"label":"Dernière version","href":"Latest-release","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Milvus Backup","anchorList":[{"label":"Sauvegarde Milvus","href":"Milvus-Backup","type":1,"isActive":false},{"label":"Conditions préalables","href":"Prerequisites","type":2,"isActive":false},{"label":"Architecture de Milvus Backup","href":"Architecture","type":2,"isActive":false},{"label":"Dernière version","href":"Latest-release","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/userGuide/tools/milvus_backup_overview.md b/localization/v2.4.x/site/fr/userGuide/tools/milvus_backup_overview.md index 8cb62ba23..670fbc4b5 100644 --- a/localization/v2.4.x/site/fr/userGuide/tools/milvus_backup_overview.md +++ b/localization/v2.4.x/site/fr/userGuide/tools/milvus_backup_overview.md @@ -79,5 +79,5 @@ title: Sauvegarde Milvus > diff --git a/localization/v2.4.x/site/fr/userGuide/use-partition-key.json b/localization/v2.4.x/site/fr/userGuide/use-partition-key.json index e7630a3f7..dc51dd883 100644 --- a/localization/v2.4.x/site/fr/userGuide/use-partition-key.json +++ b/localization/v2.4.x/site/fr/userGuide/use-partition-key.json @@ -1 +1 @@ -{"codeList":["import random, time\nfrom pymilvus import connections, MilvusClient, DataType\n\nSERVER_ADDR = \"http://localhost:19530\"\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=SERVER_ADDR\n)\n\n# 2. Create a collection\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n partition_key_field=\"color\",\n num_partitions=16 # Number of partitions. Defaults to 16.\n)\n\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\nschema.add_field(field_name=\"color\", datatype=DataType.VARCHAR, max_length=512)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.collection.request.AddFieldReq;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. 
Create a collection in customized setup mode\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder()\n .fieldName(\"id\")\n .dataType(DataType.Int64)\n .isPrimaryKey(true)\n .autoID(false)\n .build());\n\nschema.addField(AddFieldReq.builder()\n .fieldName(\"vector\")\n .dataType(DataType.FloatVector)\n .dimension(5)\n .build());\n \nschema.addField(AddFieldReq.builder()\n .fieldName(\"color\")\n .dataType(DataType.VarChar)\n .maxLength(512)\n .isPartitionKey(true)\n .build());\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\nasync function main() {\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address}); \n\n// 2. Create a collection\n// 2.1 Define fields\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n auto_id: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n {\n name: \"color\",\n data_type: DataType.VarChar,\n max_length: 512,\n is_partition_key: true\n }\n]\n","index_params = MilvusClient.prepare_index_params()\n\nindex_params.add_index(\n field_name=\"id\",\n index_type=\"STL_SORT\"\n)\n\nindex_params.add_index(\n field_name=\"color\",\n index_type=\"Trie\"\n)\n\nindex_params.add_index(\n field_name=\"vector\",\n index_type=\"IVF_FLAT\",\n metric_type=\"L2\",\n params={\"nlist\": 1024}\n)\n","// 2.3 Prepare index parameters\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.IP)\n .extraParams(Map.of(\"nlist\", 1024))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n","// 2.2 Prepare index parameters\nconst index_params = [{\n field_name: \"color\",\n index_type: \"Trie\"\n},{\n field_name: \"id\",\n index_type: \"STL_SORT\"\n},{\n field_name: \"vector\",\n index_type: \"IVF_FLAT\",\n metric_type: \"IP\",\n params: { nlist: 1024}\n}]\n","client.create_collection(\n collection_name=\"test_collection\",\n schema=schema,\n index_params=index_params\n)\n","// 2.4 Create a collection with schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n .collectionName(\"test_collection\")\n .collectionSchema(schema)\n .indexParams(indexParams) \n .build();\n\nclient.createCollection(customizedSetupReq);\n","// 2.3 Create a collection with fields and index parameters\nres = await client.createCollection({\n collection_name: \"test_collection\",\n fields: fields, \n index_params: index_params,\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n//\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n","// 3. 
Insert randomly generated vectors\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n int current_tag = rand.nextInt(8999) + 1000;\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"tag\", current_tag);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nSystem.out.println(JSONObject.toJSON(data.get(0))); \n","// 3. Insert randomly generated vectors \nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\nvar data = []\n\nfor (let i = 0; i < 1000; i++) {\n const current_color = colors[Math.floor(Math.random() * colors.length)]\n const current_tag = Math.floor(Math.random() * 8999 + 1000)\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n tag: current_tag,\n color_tag: `${current_color}_${current_tag}`\n })\n}\n\nconsole.log(data[0])\n","{\n id: 0,\n vector: [\n 0.1275656405044483,\n 0.47417858592773277,\n 0.13858264437643286,\n 0.2390904907020377,\n 0.8447862593689635\n ],\n color: 'blue',\n tag: 2064,\n color_tag: 'blue_2064'\n}\n","res = client.insert(\n collection_name=\"test_collection\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n","// 3.1 Insert data into the collection\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"test_collection\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n","res = await client.insert({\n collection_name: \"test_collection\",\n data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n","# 4. Search with partition key\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\n\nres = client.search(\n collection_name=\"test_collection\",\n data=query_vectors,\n filter=\"color == 'green'\",\n search_params={\"metric_type\": \"L2\", \"params\": {\"nprobe\": 10}},\n output_fields=[\"id\", \"color_tag\"],\n limit=3\n)\n\nprint(res)\n\n# Output\n#\n# [\n# [\n# {\n# \"id\": 970,\n# \"distance\": 0.5770174264907837,\n# \"entity\": {\n# \"id\": 970,\n# \"color_tag\": \"green_9828\"\n# }\n# },\n# {\n# \"id\": 115,\n# \"distance\": 0.6898155808448792,\n# \"entity\": {\n# \"id\": 115,\n# \"color_tag\": \"green_4073\"\n# }\n# },\n# {\n# \"id\": 899,\n# \"distance\": 0.7028976678848267,\n# \"entity\": {\n# \"id\": 899,\n# \"color_tag\": \"green_9897\"\n# }\n# }\n# ]\n# ]\n","// 4. 
Search with partition key\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"test_collection\")\n .data(query_vectors)\n .filter(\"color == \\\"green\\\"\")\n .topK(3)\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp)); \n\n// Output:\n// {\"searchResults\": [[\n// {\n// \"distance\": 1.0586997,\n// \"id\": 414,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.981384,\n// \"id\": 293,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.9548756,\n// \"id\": 325,\n// \"entity\": {}\n// }\n// ]]}\n","// 4. Search with partition key\nconst query_vectors = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"test_collection\",\n data: query_vectors,\n filter: \"color == 'green'\",\n output_fields: [\"color_tag\"],\n limit: 3\n})\n\nconsole.log(res.results)\n\n// Output\n// \n// [\n// { score: 2.402090549468994, id: '135', color_tag: 'green_2694' },\n// { score: 2.3938629627227783, id: '326', color_tag: 'green_7104' },\n// { score: 2.3235254287719727, id: '801', color_tag: 'green_3162' }\n// ]\n// \n"],"headingContent":"","anchorList":[{"label":"Use Partition Key","href":"Use-Partition-Key","type":1,"isActive":false},{"label":"Overview","href":"Overview","type":2,"isActive":false},{"label":"Enable partition key","href":"Enable-partition-key","type":2,"isActive":false},{"label":"List partitions","href":"List-partitions","type":2,"isActive":false},{"label":"Insert data","href":"Insert-data","type":2,"isActive":false},{"label":"Use partition key","href":"Use-partition-key","type":2,"isActive":false},{"label":"Typical use cases","href":"Typical-use-cases","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["import random, time\nfrom pymilvus import connections, MilvusClient, DataType\n\nSERVER_ADDR = \"http://localhost:19530\"\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=SERVER_ADDR\n)\n\n# 2. Create a collection\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n partition_key_field=\"color\",\n num_partitions=64 # Number of partitions. Defaults to 64.\n)\n\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\nschema.add_field(field_name=\"color\", datatype=DataType.VARCHAR, max_length=512)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.collection.request.AddFieldReq;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. 
Create a collection in customized setup mode\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder()\n .fieldName(\"id\")\n .dataType(DataType.Int64)\n .isPrimaryKey(true)\n .autoID(false)\n .build());\n\nschema.addField(AddFieldReq.builder()\n .fieldName(\"vector\")\n .dataType(DataType.FloatVector)\n .dimension(5)\n .build());\n \nschema.addField(AddFieldReq.builder()\n .fieldName(\"color\")\n .dataType(DataType.VarChar)\n .maxLength(512)\n .isPartitionKey(true)\n .build());\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\nasync function main() {\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address}); \n\n// 2. Create a collection\n// 2.1 Define fields\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n auto_id: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n {\n name: \"color\",\n data_type: DataType.VarChar,\n max_length: 512,\n is_partition_key: true\n }\n]\n","index_params = MilvusClient.prepare_index_params()\n\nindex_params.add_index(\n field_name=\"id\",\n index_type=\"STL_SORT\"\n)\n\nindex_params.add_index(\n field_name=\"color\",\n index_type=\"Trie\"\n)\n\nindex_params.add_index(\n field_name=\"vector\",\n index_type=\"IVF_FLAT\",\n metric_type=\"L2\",\n params={\"nlist\": 1024}\n)\n","// 2.3 Prepare index parameters\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.IP)\n .extraParams(Map.of(\"nlist\", 1024))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n","// 2.2 Prepare index parameters\nconst index_params = [{\n field_name: \"color\",\n index_type: \"Trie\"\n},{\n field_name: \"id\",\n index_type: \"STL_SORT\"\n},{\n field_name: \"vector\",\n index_type: \"IVF_FLAT\",\n metric_type: \"IP\",\n params: { nlist: 1024}\n}]\n","client.create_collection(\n collection_name=\"test_collection\",\n schema=schema,\n index_params=index_params\n)\n","// 2.4 Create a collection with schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n .collectionName(\"test_collection\")\n .collectionSchema(schema)\n .indexParams(indexParams) \n .build();\n\nclient.createCollection(customizedSetupReq);\n","// 2.3 Create a collection with fields and index parameters\nres = await client.createCollection({\n collection_name: \"test_collection\",\n fields: fields, \n index_params: index_params,\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n//\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n","// 3. 
Insert randomly generated vectors\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n int current_tag = rand.nextInt(8999) + 1000;\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"tag\", current_tag);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nSystem.out.println(JSONObject.toJSON(data.get(0))); \n","// 3. Insert randomly generated vectors \nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\nvar data = []\n\nfor (let i = 0; i < 1000; i++) {\n const current_color = colors[Math.floor(Math.random() * colors.length)]\n const current_tag = Math.floor(Math.random() * 8999 + 1000)\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n tag: current_tag,\n color_tag: `${current_color}_${current_tag}`\n })\n}\n\nconsole.log(data[0])\n","{\n id: 0,\n vector: [\n 0.1275656405044483,\n 0.47417858592773277,\n 0.13858264437643286,\n 0.2390904907020377,\n 0.8447862593689635\n ],\n color: 'blue',\n tag: 2064,\n color_tag: 'blue_2064'\n}\n","res = client.insert(\n collection_name=\"test_collection\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n","// 3.1 Insert data into the collection\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"test_collection\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n","res = await client.insert({\n collection_name: \"test_collection\",\n data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n","# 4. Search with partition key\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\n\nres = client.search(\n collection_name=\"test_collection\",\n data=query_vectors,\n filter=\"color == 'green'\",\n search_params={\"metric_type\": \"L2\", \"params\": {\"nprobe\": 10}},\n output_fields=[\"id\", \"color_tag\"],\n limit=3\n)\n\nprint(res)\n\n# Output\n#\n# [\n# [\n# {\n# \"id\": 970,\n# \"distance\": 0.5770174264907837,\n# \"entity\": {\n# \"id\": 970,\n# \"color_tag\": \"green_9828\"\n# }\n# },\n# {\n# \"id\": 115,\n# \"distance\": 0.6898155808448792,\n# \"entity\": {\n# \"id\": 115,\n# \"color_tag\": \"green_4073\"\n# }\n# },\n# {\n# \"id\": 899,\n# \"distance\": 0.7028976678848267,\n# \"entity\": {\n# \"id\": 899,\n# \"color_tag\": \"green_9897\"\n# }\n# }\n# ]\n# ]\n","// 4. 
Search with partition key\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"test_collection\")\n .data(query_vectors)\n .filter(\"color == \\\"green\\\"\")\n .topK(3)\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp)); \n\n// Output:\n// {\"searchResults\": [[\n// {\n// \"distance\": 1.0586997,\n// \"id\": 414,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.981384,\n// \"id\": 293,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.9548756,\n// \"id\": 325,\n// \"entity\": {}\n// }\n// ]]}\n","// 4. Search with partition key\nconst query_vectors = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"test_collection\",\n data: query_vectors,\n filter: \"color == 'green'\",\n output_fields: [\"color_tag\"],\n limit: 3\n})\n\nconsole.log(res.results)\n\n// Output\n// \n// [\n// { score: 2.402090549468994, id: '135', color_tag: 'green_2694' },\n// { score: 2.3938629627227783, id: '326', color_tag: 'green_7104' },\n// { score: 2.3235254287719727, id: '801', color_tag: 'green_3162' }\n// ]\n// \n"],"headingContent":"Use Partition Key","anchorList":[{"label":"Utiliser la clé de partition","href":"Use-Partition-Key","type":1,"isActive":false},{"label":"Vue d'ensemble","href":"Overview","type":2,"isActive":false},{"label":"Activer la clé de partition","href":"Enable-partition-key","type":2,"isActive":false},{"label":"Lister les partitions","href":"List-partitions","type":2,"isActive":false},{"label":"Insérer des données","href":"Insert-data","type":2,"isActive":false},{"label":"Utiliser la clé de partition","href":"Use-partition-key","type":2,"isActive":false},{"label":"Cas d'utilisation typiques","href":"Typical-use-cases","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/fr/userGuide/use-partition-key.md b/localization/v2.4.x/site/fr/userGuide/use-partition-key.md index 990c59b03..75168c66b 100644 --- a/localization/v2.4.x/site/fr/userGuide/use-partition-key.md +++ b/localization/v2.4.x/site/fr/userGuide/use-partition-key.md @@ -1,9 +1,8 @@ --- id: use-partition-key.md -title: Use Partition Key -summary: '' +title: Utiliser la clé de partition --- -

    Use Partition Key

    This guide walks you through using the partition key to accelerate data retrieval from your collection.

    -

    Overview

    Ce guide vous explique comment utiliser la clé de partition pour accélérer la récupération des données de votre collection.

    +

    Vue d'ensemble

    You can set a particular field in a collection as the partition key so that Milvus distributes incoming entities into different partitions according to their respective partition values in this field. This allows entities with the same key value to be grouped in a partition, accelerating search performance by avoiding the need to scan irrelevant partitions when filtering by the key field. When compared to traditional filtering methods, the partition key can greatly enhance query performance.

    -

    You can use the partition key to implement multi-tenancy. For details on multi-tenancy, read Multi-tenancy for more.

    -

    Enable partition key

    Vous pouvez définir un champ particulier d'une collection comme clé de partition afin que Milvus distribue les entités entrantes dans différentes partitions en fonction de leurs valeurs de partition respectives dans ce champ. Cela permet de regrouper les entités ayant la même valeur clé dans une partition, ce qui accélère les performances de recherche en évitant de devoir parcourir des partitions non pertinentes lors du filtrage par le champ clé. Par rapport aux méthodes de filtrage traditionnelles, la clé de partition peut améliorer considérablement les performances des requêtes.

    +

Vous pouvez utiliser la clé de partition pour mettre en œuvre la multi-location. Pour plus d'informations sur la multi-location, lisez Multi-tenancy.

    +

    Activer la clé de partition

    To set a field as the partition key, specify partition_key_field when creating a collection schema.

    -

    In the example code below, num_partitions determines the number of partitions that will be created. By default, it is set to 16. We recommend you retain the default value.

    +

    Pour définir un champ comme clé de partition, spécifiez partition_key_field lors de la création d'un schéma de collection.

    +

    Dans l'exemple de code ci-dessous, num_partitions détermine le nombre de partitions qui seront créées. Par défaut, il est défini sur 64. Nous vous recommandons de conserver la valeur par défaut.

    -

    For more information on parameters, refer to MilvusClient, create_schema(), and add_field() in the SDK reference.

    +

    Pour plus d'informations sur les paramètres, voir MilvusClient, create_schema(), et add_field() dans la référence du SDK.

    -

    For more information on parameters, refer to MilvusClientV2, createSchema(), and addField() in the SDK reference.

    +

    Pour plus d'informations sur les paramètres, voir MilvusClientV2, createSchema(), et addField() dans la référence du SDK.

    -

    For more information on parameters, refer to MilvusClient and createCollection() in the SDK reference.

    +

    Pour plus d'informations sur les paramètres, voir MilvusClient et createCollection() dans la référence du SDK.

    + Python Java Node.js
    import random, time
     from pymilvus import connections, MilvusClient, DataType
     
    @@ -82,7 +78,7 @@ schema = MilvusClient.create_schema(
         auto_id=False,
         enable_dynamic_field=True,
         partition_key_field="color",
    -    num_partitions=16 # Number of partitions. Defaults to 16.
    +    num_partitions=64 # Number of partitions. Defaults to 64.
     )
     
     schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
    @@ -161,12 +157,9 @@ client = new M
         }
     ]
     
    -

    After you have defined the fields, set up the index parameters.

    +

    Après avoir défini les champs, définissez les paramètres de l'index.

    + Python Java Node.js
    index_params = MilvusClient.prepare_index_params()
     
     index_params.add_index(
    @@ -211,12 +204,9 @@ indexParams.add(indexParamForVectorFie
         params: { nlist: 1024}
     }]
     
    -

    Finally, you can create a collection.

    +

    Enfin, vous pouvez créer une collection.

    + Python Java Node.js
    client.create_collection(
         collection_name="test_collection",
         schema=schema,
    @@ -246,7 +236,7 @@ res = await client.// Success
     //
     
    -

    List partitions

    Once a field of a collection is used as the partition key, Milvus creates the specified number of partitions and manages them on your behalf. Therefore, you cannot manipulate the partitions in this collection anymore.

    -

    The following snippet demonstrates that 64 partitions in a collection once one of its fields is used as the partition key.

    -

    Insert data

    Une fois qu'un champ d'une collection est utilisé comme clé de partition, Milvus crée le nombre spécifié de partitions et les gère en votre nom. Par conséquent, vous ne pouvez plus manipuler les partitions de cette collection.

    +

    L'extrait suivant montre que 64 partitions d'une collection sont créées lorsque l'un de ses champs est utilisé comme clé de partition.
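A minimal sketch of such a check, assuming the pymilvus `client` and the `test_collection` created in the snippets above; the partition names printed are illustrative:

```python
# List the partitions Milvus created and now manages automatically.
res = client.list_partitions(collection_name="test_collection")

print(len(res))   # 64, matching num_partitions
print(res[:3])    # e.g. ['_default_0', '_default_1', '_default_2'] (illustrative)
```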

    +

    Insérer des données

    Once the collection is ready, start inserting data as follows:

    -

    Prepare data

    +

    Une fois que la collection est prête, commencez à insérer des données comme suit :

    +

    Préparer les données

    # 3. Insert randomly generated vectors 
     colors = ["green", "blue", "yellow", "red", "black", "white", "purple", "pink", "orange", "brown", "grey"]
     data = []
    @@ -338,7 +325,7 @@ data = []
     
     console.log(data[0])
     
    -

    You can view the structure of the generated data by checking its first entry.

    +

    Vous pouvez visualiser la structure des données générées en vérifiant leur première entrée.

    {
         id: 0,
         vector: [
    @@ -353,20 +340,17 @@ data = []
         color_tag: 'blue_2064'
     }
     
    -

    Insert data

    -

    Use the insert() method to insert the data into the collection.

    +

    Insérer des données

    +

    Utilisez la méthode insert() pour insérer les données dans la collection.

    -

    Use the insert() method to insert the data into the collection.

    +

    Utilisez la méthode insert() pour insérer les données dans la collection.

    -

    Use the insert() method to insert the data into the collection.

    +

Utilisez la méthode insert() pour insérer les données dans la collection.

    + Python Java Node.js
    res = client.insert(
         collection_name="test_collection",
         data=data
    @@ -418,7 +402,7 @@ data = []
     // 1000
     // 
     
    -

    Use partition key

    Once you have indexed and loaded the collection as well as inserted data, you can conduct a similarity search using the partition key.

    +

    Une fois que vous avez indexé et chargé la collection et inséré des données, vous pouvez effectuer une recherche de similarité à l'aide de la clé de partition.

    -

    For more information on parameters, refer to search() in the SDK reference.

    +

    Pour plus d'informations sur les paramètres, reportez-vous à search() dans la référence du SDK.

    -

    For more information on parameters, refer to search() in the SDK reference.

    +

    Pour plus d'informations sur les paramètres, voir search() dans la référence du SDK.

    -

    For more information on parameters, refer to search() in the SDK reference.

    +

    Pour plus d'informations sur les paramètres, voir search() dans la référence du SDK.


    -

    To conduct a similarity search using the partition key, you should include either of the following in the boolean expression of the search request:

    +

    Pour effectuer une recherche de similarité à l'aide de la clé de partition, vous devez inclure l'un des éléments suivants dans l'expression booléenne de la requête de recherche :

    • expr='<partition_key>=="xxxx"'

    • expr='<partition_key> in ["xxx", "xxx"]'

    -

    Do replace <partition_key> with the name of the field that is designated as the partition key.

    +

    Remplacez <partition_key> par le nom du champ désigné comme clé de partition.

    + Python Java Node.js
    # 4. Search with partition key
     query_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]
     
    @@ -557,7 +538,7 @@ res = await client.// ]
     // 
     
    -

    Typical use cases

    You can utilize the partition key feature to achieve better search performance and enable multi-tenancy. This can be done by assigning a tenant-specific value as the partition key field for each entity. When searching or querying the collection, you can filter entities by the tenant-specific value by including the partition key field in the boolean expression. This approach ensures data isolation by tenants and avoids scanning unnecessary partitions.

    +

    Vous pouvez utiliser la fonction de clé de partition pour améliorer les performances de recherche et permettre la multi-location. Pour ce faire, vous pouvez attribuer une valeur spécifique au locataire comme champ de clé de partition pour chaque entité. Lors de la recherche ou de l'interrogation de la collection, vous pouvez filtrer les entités en fonction de la valeur spécifique au locataire en incluant le champ de la clé de partition dans l'expression booléenne. Cette approche garantit l'isolation des données par locataire et évite d'analyser des partitions inutiles.
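A minimal sketch of this tenant-isolation pattern, assuming the pymilvus client from the snippets above; the `tenant_id` field, the collection name, and the filter value are all illustrative:

```python
from pymilvus import MilvusClient, DataType

client = MilvusClient(uri="http://localhost:19530")

# Use the tenant identifier as the partition key so that each tenant's
# entities land in the same group of partitions.
schema = MilvusClient.create_schema(auto_id=False, partition_key_field="tenant_id")
schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
schema.add_field(field_name="vector", datatype=DataType.FLOAT_VECTOR, dim=5)
schema.add_field(field_name="tenant_id", datatype=DataType.VARCHAR, max_length=64)

# (Collection creation, indexing, and loading omitted for brevity.)

# Filtering on the partition-key field confines the search to the
# partitions holding that tenant's data, isolating tenants from each other.
res = client.search(
    collection_name="multi_tenant_collection",
    data=[[0.1, 0.2, 0.3, 0.4, 0.5]],
    filter="tenant_id == 'tenant_42'",
    limit=3,
)
```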

    diff --git a/localization/v2.4.x/site/it/adminGuide/clouds/aws/s3.json b/localization/v2.4.x/site/it/adminGuide/clouds/aws/s3.json index 67863e9d5..047ca8f05 100644 --- a/localization/v2.4.x/site/it/adminGuide/clouds/aws/s3.json +++ b/localization/v2.4.x/site/it/adminGuide/clouds/aws/s3.json @@ -1 +1 @@ -{"codeList":["milvus_bucket_name=\"milvus-bucket-$(openssl rand -hex 12)\"\n\naws s3api create-bucket --bucket \"$milvus_bucket_name\" --region 'us-east-2' --acl private --object-ownership ObjectWriter --create-bucket-configuration LocationConstraint='us-east-2'\n\n\n# Output\n#\n# \"Location\": \"http://milvus-bucket-039dd013c0712f085d60e21f.s3.amazonaws.com/\"\n","echo '{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n}' > milvus-s3-policy.json\n\naws iam create-policy --policy-name MilvusS3ReadWrite --policy-document file://milvus-s3-policy.json\n\n\n# Get the ARN from the command output as follows:\n# {\n# \"Policy\": {\n# \"PolicyName\": \"MilvusS3ReadWrite\",\n# \"PolicyId\": \"AN5QQVVPM1BVTFlBNkdZT\",\n# \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n# \"Path\": \"/\",\n# \"DefaultVersionId\": \"v1\",\n# \"AttachmentCount\": 0,\n# \"PermissionsBoundaryUsageCount\": 0,\n# \"IsAttachable\": true,\n# \"CreateDate\": \"2023-11-16T06:00:01+00:00\",\n# \"UpdateDate\": \"2023-11-16T06:00:01+00:00\"\n# }\n# } \n","eksctl create iamserviceaccount --name milvus-s3-access-sa --namespace milvus --cluster milvus-eks-cluster --role-name milvus-s3-access-sa \\\n --attach-policy-arn arn:aws:iam:::policy/MilvusS3ReadWrite --approve\n","aws iam get-role --role-name milvus-s3-access-sa --query Role.AssumeRolePolicyDocument\n# An example output is as follows\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Federated\": \"arn:aws:iam::111122223333:oidc-provider/oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE\"\n },\n \"Action\": \"sts:AssumeRoleWithWebIdentity\",\n \"Condition\": {\n \"StringEquals\": {\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:sub\": \"system:serviceaccount:default:my-service-account\",\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:aud\": \"sts.amazonaws.com\"\n }\n }\n }\n ]\n}\n","aws iam list-attached-role-policies --role-name milvus-s3-access-sa --query 'AttachedPolicies[].PolicyArn' --output text\n# An example output is as follows\narn:aws:iam::12345678901:policy/MilvusS3ReadWrite\n","export policy_arn='arn:aws:iam::12345678901:policy/MilvusS3ReadWrite'\naws iam get-policy --policy-arn $policy_arn\n# An example output is as follows\n{\n \"Policy\": {\n \"PolicyName\": \"MilvusS3ReadWrite\",\n \"PolicyId\": \"EXAMPLEBIOWGLDEXAMPLE\",\n \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n \"Path\": \"/\",\n \"DefaultVersionId\": \"v2\",\n [...]\n }\n}\n","aws iam get-policy-version --policy-arn $policy_arn --version-id v2\n# An example output is as follows\n{\n \"PolicyVersion\": {\n \"Document\": {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n },\n [...]\n }\n}\n","kubectl describe 
serviceaccount milvus-s3-access-sa -n milvus\n# An example output is as follows\nName: milvus-s3-access-sa\nNamespace: milvus\nLabels: app.kubernetes.io/managed-by=eksctl\nAnnotations: eks.amazonaws.com/role-arn: arn:aws:iam::12345678901:role/milvus-s3-access-sa\n[...]\n","helm repo add milvus https://zilliztech.github.io/milvus-helm/\nhelm repo update\n","cluster:\n enabled: true\n\nservice:\n type: LoadBalancer\n port: 19530\n annotations: \n service.beta.kubernetes.io/aws-load-balancer-type: external\n service.beta.kubernetes.io/aws-load-balancer-name: milvus-service\n service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing\n service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip\n\nserviceAccount:\n create: false\n name: milvus-s3-access-sa\n\nminio:\n enabled: false\n\nexternalS3:\n enabled: true\n host: \"s3.us-east-2.amazonaws.com\"\n port: \"443\"\n useSSL: true\n bucketName: \"\"\n useIAM: true\n cloudProvider: \"aws\"\n iamEndpoint: \"\"\n\nrootCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: 1\n memory: 2Gi\n\nindexCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nqueryCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\ndataCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nproxy:\n replicas: 2\n resources: \n limits:\n cpu: 1\n memory: 2Gi \n","helm upgrade --install milvus-demo milvus/milvus -n milvus -f milvus.yaml\n"],"headingContent":"","anchorList":[{"label":"Configurazione dell'accesso S3 per ruolo IAM","href":"Configure-S3-Access-by-IAM-Role","type":1,"isActive":false},{"label":"Prima di iniziare","href":"Before-you-start","type":2,"isActive":false},{"label":"Associare un ruolo IAM a un account di servizio Kubernetes","href":"Associate-an-IAM-role-with-a-Kubernetes-service-account","type":2,"isActive":false},{"label":"Verificare la configurazione del ruolo e dell'account di servizio","href":"Verify-the-role-and-service-account-setup","type":2,"isActive":false},{"label":"Distribuire Milvus","href":"Deploy-Milvus","type":2,"isActive":false},{"label":"Verificare l'installazione","href":"Verify-the-installation","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["milvus_bucket_name=\"milvus-bucket-$(openssl rand -hex 12)\"\n\naws s3api create-bucket --bucket \"$milvus_bucket_name\" --region 'us-east-2' --acl private --object-ownership ObjectWriter --create-bucket-configuration LocationConstraint='us-east-2'\n\n\n# Output\n#\n# \"Location\": \"http://milvus-bucket-039dd013c0712f085d60e21f.s3.amazonaws.com/\"\n","echo '{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:ListBucket\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\"\n ]\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:DeleteObject\",\n \"s3:GetObject\",\n \"s3:PutObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n}' > milvus-s3-policy.json\n\naws iam create-policy --policy-name MilvusS3ReadWrite --policy-document file://milvus-s3-policy.json\n\n\n# Get the ARN from the command output as follows:\n# {\n# \"Policy\": {\n# \"PolicyName\": \"MilvusS3ReadWrite\",\n# \"PolicyId\": \"AN5QQVVPM1BVTFlBNkdZT\",\n# \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n# \"Path\": \"/\",\n# \"DefaultVersionId\": \"v1\",\n# \"AttachmentCount\": 0,\n# 
\"PermissionsBoundaryUsageCount\": 0,\n# \"IsAttachable\": true,\n# \"CreateDate\": \"2023-11-16T06:00:01+00:00\",\n# \"UpdateDate\": \"2023-11-16T06:00:01+00:00\"\n# }\n# } \n","eksctl create iamserviceaccount --name milvus-s3-access-sa --namespace milvus --cluster milvus-eks-cluster --role-name milvus-s3-access-sa \\\n --attach-policy-arn arn:aws:iam:::policy/MilvusS3ReadWrite --approve\n","aws iam get-role --role-name milvus-s3-access-sa --query Role.AssumeRolePolicyDocument\n# An example output is as follows\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Federated\": \"arn:aws:iam::111122223333:oidc-provider/oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE\"\n },\n \"Action\": \"sts:AssumeRoleWithWebIdentity\",\n \"Condition\": {\n \"StringEquals\": {\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:sub\": \"system:serviceaccount:default:my-service-account\",\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:aud\": \"sts.amazonaws.com\"\n }\n }\n }\n ]\n}\n","aws iam list-attached-role-policies --role-name milvus-s3-access-sa --query 'AttachedPolicies[].PolicyArn' --output text\n# An example output is as follows\narn:aws:iam::12345678901:policy/MilvusS3ReadWrite\n","export policy_arn='arn:aws:iam::12345678901:policy/MilvusS3ReadWrite'\naws iam get-policy --policy-arn $policy_arn\n# An example output is as follows\n{\n \"Policy\": {\n \"PolicyName\": \"MilvusS3ReadWrite\",\n \"PolicyId\": \"EXAMPLEBIOWGLDEXAMPLE\",\n \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n \"Path\": \"/\",\n \"DefaultVersionId\": \"v2\",\n [...]\n }\n}\n","aws iam get-policy-version --policy-arn $policy_arn --version-id v2\n# An example output is as follows\n{\n \"PolicyVersion\": {\n \"Document\": {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n },\n [...]\n }\n}\n","kubectl describe serviceaccount milvus-s3-access-sa -n milvus\n# An example output is as follows\nName: milvus-s3-access-sa\nNamespace: milvus\nLabels: app.kubernetes.io/managed-by=eksctl\nAnnotations: eks.amazonaws.com/role-arn: arn:aws:iam::12345678901:role/milvus-s3-access-sa\n[...]\n","helm repo add milvus https://zilliztech.github.io/milvus-helm/\nhelm repo update\n","cluster:\n enabled: true\n\nservice:\n type: LoadBalancer\n port: 19530\n annotations: \n service.beta.kubernetes.io/aws-load-balancer-type: external\n service.beta.kubernetes.io/aws-load-balancer-name: milvus-service\n service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing\n service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip\n\nserviceAccount:\n create: false\n name: milvus-s3-access-sa\n\nminio:\n enabled: false\n\nexternalS3:\n enabled: true\n host: \"s3.us-east-2.amazonaws.com\"\n port: \"443\"\n useSSL: true\n bucketName: \"\"\n useIAM: true\n cloudProvider: \"aws\"\n iamEndpoint: \"\"\n\nrootCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: 1\n memory: 2Gi\n\nindexCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nqueryCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\ndataCoordinator:\n replicas: 2\n activeStandby:\n 
enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nproxy:\n replicas: 2\n resources: \n limits:\n cpu: 1\n memory: 2Gi \n","helm upgrade --install milvus-demo milvus/milvus -n milvus -f milvus.yaml\n"],"headingContent":"Configure S3 Access by IAM Role","anchorList":[{"label":"Configurazione dell'accesso S3 per ruolo IAM","href":"Configure-S3-Access-by-IAM-Role","type":1,"isActive":false},{"label":"Prima di iniziare","href":"Before-you-start","type":2,"isActive":false},{"label":"Associare un ruolo IAM a un account di servizio Kubernetes","href":"Associate-an-IAM-role-with-a-Kubernetes-service-account","type":2,"isActive":false},{"label":"Verificare la configurazione del ruolo e dell'account di servizio","href":"Verify-the-role-and-service-account-setup","type":2,"isActive":false},{"label":"Distribuire Milvus","href":"Deploy-Milvus","type":2,"isActive":false},{"label":"Verificare l'installazione","href":"Verify-the-installation","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/adminGuide/clouds/aws/s3.md b/localization/v2.4.x/site/it/adminGuide/clouds/aws/s3.md index a8252bf3f..fe1a1f07f 100644 --- a/localization/v2.4.x/site/it/adminGuide/clouds/aws/s3.md +++ b/localization/v2.4.x/site/it/adminGuide/clouds/aws/s3.md @@ -73,13 +73,20 @@ aws s3api create-bucket --bucket "
  • Applicare il file:

    kubectl apply -f milvus-operator-certificate.yaml
    diff --git a/localization/v2.4.x/site/it/adminGuide/configure-docker.json b/localization/v2.4.x/site/it/adminGuide/configure-docker.json
    index 0f19c3515..45ace523e 100644
    --- a/localization/v2.4.x/site/it/adminGuide/configure-docker.json
    +++ b/localization/v2.4.x/site/it/adminGuide/configure-docker.json
    @@ -1 +1 @@
    -{"codeList":["$ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.9/configs/milvus.yaml\n","# For Milvus standalone\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml\n","...\n  standalone:\n    container_name: milvus-standalone\n    image: milvusdb/milvus:v2.2.13\n    command: [\"milvus\", \"run\", \"standalone\"]\n    environment:\n      ETCD_ENDPOINTS: etcd:2379\n      MINIO_ADDRESS: minio:9000\n    volumes:\n      - /local/path/to/your/milvus.yaml:/milvus/configs/milvus.yaml   # Map the local path to the container path\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n    ports:\n      - \"19530:19530\"\n      - \"9091:9091\"\n    depends_on:\n      - \"etcd\"\n      - \"minio\"\n...\n","$ sudo docker compose up -d\n"],"headingContent":"","anchorList":[{"label":"Configurare Milvus con Docker Compose","href":"Configure-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"Scaricare un file di configurazione","href":"Download-a-configuration-file","type":2,"isActive":false},{"label":"Modificare il file di configurazione","href":"Modify-the-configuration-file","type":2,"isActive":false},{"label":"Scaricare un file di installazione","href":"Download-an-installation-file","type":2,"isActive":false},{"label":"Modificare il file di installazione","href":"Modify-the-installation-file","type":2,"isActive":false},{"label":"Avviare Milvus","href":"Start-Milvus","type":2,"isActive":false},{"label":"Cosa succede dopo","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.13-hotfix/configs/milvus.yaml\n","# For Milvus standalone\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml\n","...\n  standalone:\n    container_name: milvus-standalone\n    image: milvusdb/milvus:v2.2.13\n    command: [\"milvus\", \"run\", \"standalone\"]\n    environment:\n      ETCD_ENDPOINTS: etcd:2379\n      MINIO_ADDRESS: minio:9000\n    volumes:\n      - /local/path/to/your/milvus.yaml:/milvus/configs/milvus.yaml   # Map the local path to the container path\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n    ports:\n      - \"19530:19530\"\n      - \"9091:9091\"\n    depends_on:\n      - \"etcd\"\n      - \"minio\"\n...\n","$ sudo docker compose up -d\n"],"headingContent":"Configure Milvus with Docker Compose","anchorList":[{"label":"Configurare Milvus con Docker Compose","href":"Configure-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"Scaricare un file di configurazione","href":"Download-a-configuration-file","type":2,"isActive":false},{"label":"Modificare il file di configurazione","href":"Modify-the-configuration-file","type":2,"isActive":false},{"label":"Scaricare un file di installazione","href":"Download-an-installation-file","type":2,"isActive":false},{"label":"Modificare il file di installazione","href":"Modify-the-installation-file","type":2,"isActive":false},{"label":"Avviare Milvus","href":"Start-Milvus","type":2,"isActive":false},{"label":"Cosa succede dopo","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/it/adminGuide/configure-docker.md b/localization/v2.4.x/site/it/adminGuide/configure-docker.md
    index 1902e0ea8..def701f58 100644
    --- a/localization/v2.4.x/site/it/adminGuide/configure-docker.md
    +++ b/localization/v2.4.x/site/it/adminGuide/configure-docker.md
    @@ -38,8 +38,8 @@ Nella versione attuale, tutti i parametri hanno effetto solo dopo il riavvio di
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

Download milvus.yaml directly or with the following command.

    -
    $ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.9/configs/milvus.yaml
    +    

Download milvus.yaml directly or with the following command.

    +
    $ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.13-hotfix/configs/milvus.yaml
     

Modify the configuration file

Download the installation file for Milvus standalone and save it as docker-compose.yml.

    +

Download the installation file for Milvus standalone and save it as docker-compose.yml.

You can also simply run the following command.

    # For Milvus standalone
    -$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml
    +$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml
     

Modify the installation file

    -

This ensures that the QueryNode pod uses the NVMe disk as the data volume. For details on how to deploy Milvus Distributed using Milvus Operator, see Run Milvus in Kubernetes with Milvus Operator.

    +

This ensures that the QueryNode pod uses the NVMe disk as the data volume. For details on how to deploy Milvus Distributed using Milvus Operator, see Run Milvus in Kubernetes with Milvus Operator.

    diff --git a/localization/v2.4.x/site/it/adminGuide/limit_collection_counts.json b/localization/v2.4.x/site/it/adminGuide/limit_collection_counts.json index 008351c61..06f2287dc 100644 --- a/localization/v2.4.x/site/it/adminGuide/limit_collection_counts.json +++ b/localization/v2.4.x/site/it/adminGuide/limit_collection_counts.json @@ -1 +1 @@ -{"codeList":["rootCoord:\n maxGeneralCapacity: 1024\n","60 (collections) x 2 (shards) x 4 (partitions) + 40 (collections) x 1 (shard) x 12 (partitions) = 960\n","failed checking constraint: sum_collections(parition*shard) exceeding the max general capacity:\n"],"headingContent":"","anchorList":[{"label":"Limitare il numero di raccolte","href":"Limit-Collection-Counts","type":1,"isActive":false},{"label":"Opzioni di configurazione","href":"Configuration-options","type":2,"isActive":false},{"label":"Calcolo del numero di collezioni","href":"Calculating-the-number-of-collections","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["rootCoord:\n maxGeneralCapacity: 65536\n","60 (collections) x 2 (shards) x 4 (partitions) + 40 (collections) x 1 (shard) x 12 (partitions) = 960\n","failed checking constraint: sum_collections(parition*shard) exceeding the max general capacity:\n"],"headingContent":"Limit Collection Counts","anchorList":[{"label":"Limitare il numero di raccolte","href":"Limit-Collection-Counts","type":1,"isActive":false},{"label":"Opzioni di configurazione","href":"Configuration-options","type":2,"isActive":false},{"label":"Calcolo del numero di collezioni","href":"Calculating-the-number-of-collections","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/adminGuide/limit_collection_counts.md b/localization/v2.4.x/site/it/adminGuide/limit_collection_counts.md index 56a0dfa61..8177d8223 100644 --- a/localization/v2.4.x/site/it/adminGuide/limit_collection_counts.md +++ b/localization/v2.4.x/site/it/adminGuide/limit_collection_counts.md @@ -1,7 +1,6 @@ --- id: limit_collection_counts.md title: Impostare limiti al numero di raccolte -summary: '' ---

Limit Collection Counts

    rootCoord:
    -    maxGeneralCapacity: 1024
    +    maxGeneralCapacity: 65536
     
    -

The maxGeneralCapacity parameter sets the maximum number of collections that the current Milvus instance can hold. The default value is 1024.

    +

The maxGeneralCapacity parameter sets the maximum number of collections that the current Milvus instance can hold. The default value is 65536.

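As a quick check against this limit, the capacity actually in use is the sum of shards × partitions over all collections. Using the example figures from this page (60 collections with 2 shards and 4 partitions each, plus 40 collections with 1 shard and 12 partitions each), a one-line shell calculation gives:

$ echo $((60 * 2 * 4 + 40 * 1 * 12))
# 960, well below the default maxGeneralCapacity of 65536
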
Calculating the number of collections

    -

All code examples on this page use PyMilvus 2.4.5. Upgrade your PyMilvus installation before running them.

    +

All code examples on this page use PyMilvus 2.4.8. Upgrade your PyMilvus installation before running them.

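If your installed version is older, a plain pip upgrade is usually enough; this sketch assumes a standard Python environment with pip available.

$ python3 -m pip install --upgrade pymilvus==2.4.8
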
1. Create a resource group.

      diff --git a/localization/v2.4.x/site/it/adminGuide/tls.json b/localization/v2.4.x/site/it/adminGuide/tls.json index 16834a893..649562e95 100644 --- a/localization/v2.4.x/site/it/adminGuide/tls.json +++ b/localization/v2.4.x/site/it/adminGuide/tls.json @@ -1 +1 @@ -{"codeList":["openssl version\n","sudo apt install openssl\n","mkdir cert && cd cert\ntouch openssl.cnf gen.sh\n","#\n# OpenSSL example configuration file.\n# This is mostly being used for generation of certificate requests.\n#\n\n# This definition stops the following lines choking if HOME isn't\n# defined.\nHOME = .\nRANDFILE = $ENV::HOME/.rnd\n\n# Extra OBJECT IDENTIFIER info:\n#oid_file = $ENV::HOME/.oid\noid_section = new_oids\n\n# To use this configuration file with the \"-extfile\" option of the\n# \"openssl x509\" utility, name here the section containing the\n# X.509v3 extensions to use:\n# extensions = \n# (Alternatively, use a configuration file that has only\n# X.509v3 extensions in its main [= default] section.)\n\n[ new_oids ]\n\n# We can add new OIDs in here for use by 'ca', 'req' and 'ts'.\n# Add a simple OID like this:\n# testoid1=1.2.3.4\n# Or use config file substitution like this:\n# testoid2=${testoid1}.5.6\n\n# Policies used by the TSA examples.\ntsa_policy1 = 1.2.3.4.1\ntsa_policy2 = 1.2.3.4.5.6\ntsa_policy3 = 1.2.3.4.5.7\n\n####################################################################\n[ ca ]\ndefault_ca = CA_default # The default ca section\n\n####################################################################\n[ CA_default ]\n\ndir = ./demoCA # Where everything is kept\ncerts = $dir/certs # Where the issued certs are kept\ncrl_dir = $dir/crl # Where the issued crl are kept\ndatabase = $dir/index.txt # database index file.\n#unique_subject = no # Set to 'no' to allow creation of\n # several ctificates with same subject.\nnew_certs_dir = $dir/newcerts # default place for new certs.\n\ncertificate = $dir/cacert.pem # The CA certificate\nserial = $dir/serial # The current serial number\ncrlnumber = $dir/crlnumber # the current crl number\n # must be commented out to leave a V1 CRL\ncrl = $dir/crl.pem # The current CRL\nprivate_key = $dir/private/cakey.pem# The private key\nRANDFILE = $dir/private/.rand # private random number file\n\nx509_extensions = usr_cert # The extentions to add to the cert\n\n# Comment out the following two lines for the \"traditional\"\n# (and highly broken) format.\nname_opt = ca_default # Subject Name options\ncert_opt = ca_default # Certificate field options\n\n# Extension copying option: use with caution.\ncopy_extensions = copy\n\n# Extensions to add to a CRL. 
Note: Netscape communicator chokes on V2 CRLs\n# so this is commented out by default to leave a V1 CRL.\n# crlnumber must also be commented out to leave a V1 CRL.\n# crl_extensions = crl_ext\n\ndefault_days = 365 # how long to certify for\ndefault_crl_days= 30 # how long before next CRL\ndefault_md = default # use public key default MD\npreserve = no # keep passed DN ordering\n\n# A few difference way of specifying how similar the request should look\n# For type CA, the listed attributes must be the same, and the optional\n# and supplied fields are just that :-)\npolicy = policy_match\n\n# For the CA policy\n[ policy_match ]\ncountryName = match\nstateOrProvinceName = match\norganizationName = match\norganizationalUnitName = optional\ncommonName = supplied\nemailAddress = optional\n\n# For the 'anything' policy\n# At this point in time, you must list all acceptable 'object'\n# types.\n[ policy_anything ]\ncountryName = optional\nstateOrProvinceName = optional\nlocalityName = optional\norganizationName = optional\norganizationalUnitName = optional\ncommonName = supplied\nemailAddress = optional\n\n####################################################################\n[ req ]\ndefault_bits = 2048\ndefault_keyfile = privkey.pem\ndistinguished_name = req_distinguished_name\nattributes = req_attributes\nx509_extensions = v3_ca # The extentions to add to the self signed cert\n\n# Passwords for private keys if not present they will be prompted for\n# input_password = secret\n# output_password = secret\n\n# This sets a mask for permitted string types. There are several options. \n# default: PrintableString, T61String, BMPString.\n# pkix : PrintableString, BMPString (PKIX recommendation before 2004)\n# utf8only: only UTF8Strings (PKIX recommendation after 2004).\n# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).\n# MASK:XXXX a literal mask value.\n# WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings.\nstring_mask = utf8only\n\nreq_extensions = v3_req # The extensions to add to a certificate request\n\n[ req_distinguished_name ]\ncountryName = Country Name (2 letter code)\ncountryName_default = AU\ncountryName_min = 2\ncountryName_max = 2\n\nstateOrProvinceName = State or Province Name (full name)\nstateOrProvinceName_default = Some-State\n\nlocalityName = Locality Name (eg, city)\n\n0.organizationName = Organization Name (eg, company)\n0.organizationName_default = Internet Widgits Pty Ltd\n\n# we can do this but it is not needed normally :-)\n#1.organizationName = Second Organization Name (eg, company)\n#1.organizationName_default = World Wide Web Pty Ltd\n\norganizationalUnitName = Organizational Unit Name (eg, section)\n#organizationalUnitName_default =\n\ncommonName = Common Name (e.g. server FQDN or YOUR name)\ncommonName_max = 64\n\nemailAddress = Email Address\nemailAddress_max = 64\n\n# SET-ex3 = SET extension number 3\n\n[ req_attributes ]\nchallengePassword = A challenge password\nchallengePassword_min = 4\nchallengePassword_max = 20\n\nunstructuredName = An optional company name\n\n[ usr_cert ]\n\n# These extensions are added when 'ca' signs a request.\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. 
If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This is required for TSA certificates.\n# extendedKeyUsage = critical,timeStamping\n\n[ v3_req ]\n\n# Extensions to add to a certificate request\n\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n\n[ v3_ca ]\n\n\n# Extensions for a typical CA\n\n\n# PKIX recommendation.\n\nsubjectKeyIdentifier=hash\n\nauthorityKeyIdentifier=keyid:always,issuer\n\n# This is what PKIX recommends but some broken software chokes on critical\n# extensions.\n#basicConstraints = critical,CA:true\n# So we do this instead.\nbasicConstraints = CA:true\n\n# Key usage: this is typical for a CA certificate. However since it will\n# prevent it being used as an test self-signed certificate it is best\n# left out by default.\n# keyUsage = cRLSign, keyCertSign\n\n# Some might want this also\n# nsCertType = sslCA, emailCA\n\n# Include email address in subject alt name: another PKIX recommendation\n# subjectAltName=email:copy\n# Copy issuer details\n# issuerAltName=issuer:copy\n\n# DER hex encoding of an extension: beware experts only!\n# obj=DER:02:03\n# Where 'obj' is a standard or added object\n# You can even override a supported extension:\n# basicConstraints= critical, DER:30:03:01:01:FF\n\n[ crl_ext ]\n\n# CRL extensions.\n# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.\n\n# issuerAltName=issuer:copy\nauthorityKeyIdentifier=keyid:always\n\n[ proxy_cert_ext ]\n# These extensions should be added when creating a proxy certificate\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. 
If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This really needs to be in place for it to be a proxy certificate.\nproxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo\n\n####################################################################\n[ tsa ]\n\ndefault_tsa = tsa_config1 # the default TSA section\n\n[ tsa_config1 ]\n\n# These are used by the TSA reply generation only.\ndir = ./demoCA # TSA root directory\nserial = $dir/tsaserial # The current serial number (mandatory)\ncrypto_device = builtin # OpenSSL engine to use for signing\nsigner_cert = $dir/tsacert.pem # The TSA signing certificate\n # (optional)\ncerts = $dir/cacert.pem # Certificate chain to include in reply\n # (optional)\nsigner_key = $dir/private/tsakey.pem # The TSA private key (optional)\n\ndefault_policy = tsa_policy1 # Policy if request did not specify it\n # (optional)\nother_policies = tsa_policy2, tsa_policy3 # acceptable policies (optional)\ndigests = md5, sha1 # Acceptable message digests (mandatory)\naccuracy = secs:1, millisecs:500, microsecs:100 # (optional)\nclock_precision_digits = 0 # number of digits after dot. 
(optional)\nordering = yes # Is ordering defined for timestamps?\n # (optional, default: no)\ntsa_name = yes # Must the TSA name be included in the reply?\n # (optional, default: no)\ness_cert_id_chain = no # Must the ESS cert id chain be included?\n # (optional, default: no)\n","#!/usr/bin/env sh\n# your variables\nCountry=\"CN\"\nState=\"Shanghai\"\nLocation=\"Shanghai\"\nOrganization=\"milvus\"\nOrganizational=\"milvus\"\nCommonName=\"localhost\"\n\necho \"generate ca.key\"\nopenssl genrsa -out ca.key 2048\n\necho \"generate ca.pem\"\nopenssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n\necho \"generate server SAN certificate\"\nopenssl genpkey -algorithm RSA -out server.key\nopenssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\necho \"generate client SAN certificate\"\nopenssl genpkey -algorithm RSA -out client.key\nopenssl req -new -nodes -key client.key -out client.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in client.csr -out client.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\n","chmod +x gen.sh\n./gen.sh\n","openssl genpkey -algorithm RSA -out ca.key\n","openssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n","openssl genpkey -algorithm RSA -out server.key\n","openssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\n","openssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n","tls:\n serverPemPath: /milvus/tls/server.pem\n serverKeyPath: /milvus/tls/server.key\n caPemPath: /milvus/tls/ca.pem\n\ncommon:\n security:\n tlsMode: 1\n","├── docker-compose.yml\n├── milvus.yaml\n└── tls\n ├── server.pem\n ├── server.key\n └── ca.pem\n"," standalone:\n container_name: milvus-standalone\n image: milvusdb/milvus:latest\n command: [\"milvus\", \"run\", \"standalone\"]\n security_opt:\n - seccomp:unconfined\n environment:\n ETCD_ENDPOINTS: etcd:2379\n MINIO_ADDRESS: minio:9000\n volumes:\n - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n - ${DOCKER_VOLUME_DIRECTORY:-.}/tls:/milvus/tls\n - ${DOCKER_VOLUME_DIRECTORY:-.}/milvus.yaml:/milvus/configs/milvus.yaml\n","sudo docker compose up -d\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri=\"http://localhost:19530\",\n secure=True,\n server_pem_path=\"path_to/server.pem\",\n server_name=\"localhost\"\n)\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri=\"http://localhost:19530\",\n secure=True,\n client_pem_path=\"path_to/client.pem\",\n client_key_path=\"path_to/client.key\",\n ca_pem_path=\"path_to/ca.pem\",\n server_name=\"localhost\"\n)\n"],"headingContent":"","anchorList":[{"label":"Crittografia in transito","href":"Encryption-in-Transit","type":1,"isActive":false},{"label":"Creare il proprio 
certificato","href":"Create-your-own-certificate","type":2,"isActive":false},{"label":"Configurazione di un server Milvus con TLS","href":"Set-up-a-Milvus-server-with-TLS","type":2,"isActive":false},{"label":"Connettersi al server Milvus con TLS","href":"Connect-to-the-Milvus-server-with-TLS","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["openssl version\n","sudo apt install openssl\n","mkdir cert && cd cert\ntouch openssl.cnf gen.sh\n","#\n# OpenSSL example configuration file.\n# This is mostly being used for generation of certificate requests.\n#\n\n# This definition stops the following lines choking if HOME isn't\n# defined.\nHOME = .\nRANDFILE = $ENV::HOME/.rnd\n\n# Extra OBJECT IDENTIFIER info:\n#oid_file = $ENV::HOME/.oid\noid_section = new_oids\n\n# To use this configuration file with the \"-extfile\" option of the\n# \"openssl x509\" utility, name here the section containing the\n# X.509v3 extensions to use:\n# extensions = \n# (Alternatively, use a configuration file that has only\n# X.509v3 extensions in its main [= default] section.)\n\n[ new_oids ]\n\n# We can add new OIDs in here for use by 'ca', 'req' and 'ts'.\n# Add a simple OID like this:\n# testoid1=1.2.3.4\n# Or use config file substitution like this:\n# testoid2=${testoid1}.5.6\n\n# Policies used by the TSA examples.\ntsa_policy1 = 1.2.3.4.1\ntsa_policy2 = 1.2.3.4.5.6\ntsa_policy3 = 1.2.3.4.5.7\n\n####################################################################\n[ ca ]\ndefault_ca = CA_default # The default ca section\n\n####################################################################\n[ CA_default ]\n\ndir = ./demoCA # Where everything is kept\ncerts = $dir/certs # Where the issued certs are kept\ncrl_dir = $dir/crl # Where the issued crl are kept\ndatabase = $dir/index.txt # database index file.\n#unique_subject = no # Set to 'no' to allow creation of\n # several ctificates with same subject.\nnew_certs_dir = $dir/newcerts # default place for new certs.\n\ncertificate = $dir/cacert.pem # The CA certificate\nserial = $dir/serial # The current serial number\ncrlnumber = $dir/crlnumber # the current crl number\n # must be commented out to leave a V1 CRL\ncrl = $dir/crl.pem # The current CRL\nprivate_key = $dir/private/cakey.pem# The private key\nRANDFILE = $dir/private/.rand # private random number file\n\nx509_extensions = usr_cert # The extentions to add to the cert\n\n# Comment out the following two lines for the \"traditional\"\n# (and highly broken) format.\nname_opt = ca_default # Subject Name options\ncert_opt = ca_default # Certificate field options\n\n# Extension copying option: use with caution.\ncopy_extensions = copy\n\n# Extensions to add to a CRL. 
Note: Netscape communicator chokes on V2 CRLs\n# so this is commented out by default to leave a V1 CRL.\n# crlnumber must also be commented out to leave a V1 CRL.\n# crl_extensions = crl_ext\n\ndefault_days = 365 # how long to certify for\ndefault_crl_days= 30 # how long before next CRL\ndefault_md = default # use public key default MD\npreserve = no # keep passed DN ordering\n\n# A few difference way of specifying how similar the request should look\n# For type CA, the listed attributes must be the same, and the optional\n# and supplied fields are just that :-)\npolicy = policy_match\n\n# For the CA policy\n[ policy_match ]\ncountryName = match\nstateOrProvinceName = match\norganizationName = match\norganizationalUnitName = optional\ncommonName = supplied\nemailAddress = optional\n\n# For the 'anything' policy\n# At this point in time, you must list all acceptable 'object'\n# types.\n[ policy_anything ]\ncountryName = optional\nstateOrProvinceName = optional\nlocalityName = optional\norganizationName = optional\norganizationalUnitName = optional\ncommonName = supplied\nemailAddress = optional\n\n####################################################################\n[ req ]\ndefault_bits = 2048\ndefault_keyfile = privkey.pem\ndistinguished_name = req_distinguished_name\nattributes = req_attributes\nx509_extensions = v3_ca # The extentions to add to the self signed cert\n\n# Passwords for private keys if not present they will be prompted for\n# input_password = secret\n# output_password = secret\n\n# This sets a mask for permitted string types. There are several options. \n# default: PrintableString, T61String, BMPString.\n# pkix : PrintableString, BMPString (PKIX recommendation before 2004)\n# utf8only: only UTF8Strings (PKIX recommendation after 2004).\n# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).\n# MASK:XXXX a literal mask value.\n# WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings.\nstring_mask = utf8only\n\nreq_extensions = v3_req # The extensions to add to a certificate request\n\n[ req_distinguished_name ]\ncountryName = Country Name (2 letter code)\ncountryName_default = AU\ncountryName_min = 2\ncountryName_max = 2\n\nstateOrProvinceName = State or Province Name (full name)\nstateOrProvinceName_default = Some-State\n\nlocalityName = Locality Name (eg, city)\n\n0.organizationName = Organization Name (eg, company)\n0.organizationName_default = Internet Widgits Pty Ltd\n\n# we can do this but it is not needed normally :-)\n#1.organizationName = Second Organization Name (eg, company)\n#1.organizationName_default = World Wide Web Pty Ltd\n\norganizationalUnitName = Organizational Unit Name (eg, section)\n#organizationalUnitName_default =\n\ncommonName = Common Name (e.g. server FQDN or YOUR name)\ncommonName_max = 64\n\nemailAddress = Email Address\nemailAddress_max = 64\n\n# SET-ex3 = SET extension number 3\n\n[ req_attributes ]\nchallengePassword = A challenge password\nchallengePassword_min = 4\nchallengePassword_max = 20\n\nunstructuredName = An optional company name\n\n[ usr_cert ]\n\n# These extensions are added when 'ca' signs a request.\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. 
If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This is required for TSA certificates.\n# extendedKeyUsage = critical,timeStamping\n\n[ v3_req ]\n\n# Extensions to add to a certificate request\n\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n\n[ v3_ca ]\n\n\n# Extensions for a typical CA\n\n\n# PKIX recommendation.\n\nsubjectKeyIdentifier=hash\n\nauthorityKeyIdentifier=keyid:always,issuer\n\n# This is what PKIX recommends but some broken software chokes on critical\n# extensions.\n#basicConstraints = critical,CA:true\n# So we do this instead.\nbasicConstraints = CA:true\n\n# Key usage: this is typical for a CA certificate. However since it will\n# prevent it being used as an test self-signed certificate it is best\n# left out by default.\n# keyUsage = cRLSign, keyCertSign\n\n# Some might want this also\n# nsCertType = sslCA, emailCA\n\n# Include email address in subject alt name: another PKIX recommendation\n# subjectAltName=email:copy\n# Copy issuer details\n# issuerAltName=issuer:copy\n\n# DER hex encoding of an extension: beware experts only!\n# obj=DER:02:03\n# Where 'obj' is a standard or added object\n# You can even override a supported extension:\n# basicConstraints= critical, DER:30:03:01:01:FF\n\n[ crl_ext ]\n\n# CRL extensions.\n# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.\n\n# issuerAltName=issuer:copy\nauthorityKeyIdentifier=keyid:always\n\n[ proxy_cert_ext ]\n# These extensions should be added when creating a proxy certificate\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. 
If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This really needs to be in place for it to be a proxy certificate.\nproxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo\n\n####################################################################\n[ tsa ]\n\ndefault_tsa = tsa_config1 # the default TSA section\n\n[ tsa_config1 ]\n\n# These are used by the TSA reply generation only.\ndir = ./demoCA # TSA root directory\nserial = $dir/tsaserial # The current serial number (mandatory)\ncrypto_device = builtin # OpenSSL engine to use for signing\nsigner_cert = $dir/tsacert.pem # The TSA signing certificate\n # (optional)\ncerts = $dir/cacert.pem # Certificate chain to include in reply\n # (optional)\nsigner_key = $dir/private/tsakey.pem # The TSA private key (optional)\n\ndefault_policy = tsa_policy1 # Policy if request did not specify it\n # (optional)\nother_policies = tsa_policy2, tsa_policy3 # acceptable policies (optional)\ndigests = md5, sha1 # Acceptable message digests (mandatory)\naccuracy = secs:1, millisecs:500, microsecs:100 # (optional)\nclock_precision_digits = 0 # number of digits after dot. 
(optional)\nordering = yes # Is ordering defined for timestamps?\n # (optional, default: no)\ntsa_name = yes # Must the TSA name be included in the reply?\n # (optional, default: no)\ness_cert_id_chain = no # Must the ESS cert id chain be included?\n # (optional, default: no)\n","#!/usr/bin/env sh\n# your variables\nCountry=\"CN\"\nState=\"Shanghai\"\nLocation=\"Shanghai\"\nOrganization=\"milvus\"\nOrganizational=\"milvus\"\nCommonName=\"localhost\"\n\necho \"generate ca.key\"\nopenssl genrsa -out ca.key 2048\n\necho \"generate ca.pem\"\nopenssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n\necho \"generate server SAN certificate\"\nopenssl genpkey -algorithm RSA -out server.key\nopenssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\necho \"generate client SAN certificate\"\nopenssl genpkey -algorithm RSA -out client.key\nopenssl req -new -nodes -key client.key -out client.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in client.csr -out client.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\n","chmod +x gen.sh\n./gen.sh\n","openssl genpkey -algorithm RSA -out ca.key\n","openssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n","openssl genpkey -algorithm RSA -out server.key\n","openssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\n","openssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n","tls:\n serverPemPath: /milvus/tls/server.pem\n serverKeyPath: /milvus/tls/server.key\n caPemPath: /milvus/tls/ca.pem\n\ncommon:\n security:\n tlsMode: 1\n","├── docker-compose.yml\n├── milvus.yaml\n└── tls\n ├── server.pem\n ├── server.key\n └── ca.pem\n"," standalone:\n container_name: milvus-standalone\n image: milvusdb/milvus:latest\n command: [\"milvus\", \"run\", \"standalone\"]\n security_opt:\n - seccomp:unconfined\n environment:\n ETCD_ENDPOINTS: etcd:2379\n MINIO_ADDRESS: minio:9000\n volumes:\n - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n - ${DOCKER_VOLUME_DIRECTORY:-.}/tls:/milvus/tls\n - ${DOCKER_VOLUME_DIRECTORY:-.}/milvus.yaml:/milvus/configs/milvus.yaml\n","sudo docker compose up -d\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri=\"https://localhost:19530\",\n secure=True,\n server_pem_path=\"path_to/server.pem\",\n server_name=\"localhost\"\n)\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri=\"https://localhost:19530\",\n secure=True,\n client_pem_path=\"path_to/client.pem\",\n client_key_path=\"path_to/client.key\",\n ca_pem_path=\"path_to/ca.pem\",\n server_name=\"localhost\"\n)\n","curl --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list\n","curl --cert path_to/client.pem --key path_to/client.key --cacert path_to/ca.pem 
https://localhost:19530/v2/vectordb/collections/list\n"],"headingContent":"Encryption in Transit","anchorList":[{"label":"Crittografia in transito","href":"Encryption-in-Transit","type":1,"isActive":false},{"label":"Creare il proprio certificato","href":"Create-your-own-certificate","type":2,"isActive":false},{"label":"Configurazione di un server Milvus con TLS","href":"Set-up-a-Milvus-server-with-TLS","type":2,"isActive":false},{"label":"Connettersi al server Milvus con TLS","href":"Connect-to-the-Milvus-server-with-TLS","type":2,"isActive":false},{"label":"Connettersi al server RESTful di Milvus con TLS","href":"Connect-to-the-Milvus-RESTful-server-with-TLS","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/adminGuide/tls.md b/localization/v2.4.x/site/it/adminGuide/tls.md index 66000b569..feaaf18e7 100644 --- a/localization/v2.4.x/site/it/adminGuide/tls.md +++ b/localization/v2.4.x/site/it/adminGuide/tls.md @@ -18,10 +18,10 @@ summary: Scoprite come attivare il proxy TLS in Milvus. d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

TLS (Transport Layer Security) is an encryption protocol that ensures communication security. The Milvus proxy uses one-way and two-way TLS authentication.

      -

This topic describes how to enable TLS proxy in Milvus.

      +

TLS (Transport Layer Security) is an encryption protocol that ensures communication security. The Milvus proxy uses one-way and two-way TLS authentication.

      +

This topic describes how to enable TLS in the Milvus proxy for both gRPC and RESTful traffic.

      -

TLS and user authentication are two distinct security approaches. If you have enabled both user authentication and TLS in your Milvus system, you will need to provide a username, a password, and certificate file paths. For information on how to enable user authentication, refer to Authenticate User Access.

      +

TLS and user authentication are two distinct security approaches. If you have enabled both user authentication and TLS in your Milvus system, you will need to provide a username, a password, and the certificate file path. For information on how to enable user authentication, refer to Authenticate User Access.

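For example, with both user authentication and one-way TLS enabled, a RESTful call must supply the CA certificate as well as the credentials. The sketch below assumes the default root:Milvus credentials and the ca.pem created later on this page.

$ curl --cacert path_to/ca.pem -H "Authorization: Bearer root:Milvus" https://localhost:19530/v2/vectordb/collections/list
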
Create your own certificate

    -

For more information, see example_tls1.py and example_tls2.py.

    +

For more information, see esempio_tls1.py and esempio_tls2.py.

    +

Connect to the Milvus RESTful server with TLS

For RESTful APIs, you can verify TLS using the curl command.

    +

One-way TLS connection

    curl --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list
    +
    +

Two-way TLS connection

    curl --cert path_to/client.pem --key path_to/client.key --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list
    +
    diff --git a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-docker.json b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-docker.json index b6471ac4c..69785760f 100644 --- a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-docker.json +++ b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-docker.json @@ -1 +1 @@ -{"codeList":["...\nrootcoord:\n container_name: milvus-rootcoord\n image: milvusdb/milvus:v2.4.9\n...\nproxy:\n container_name: milvus-proxy\n image: milvusdb/milvus:v2.4.9\n...\nquerycoord:\n container_name: milvus-querycoord\n image: milvusdb/milvus:v2.4.9 \n...\nquerynode:\n container_name: milvus-querynode\n image: milvusdb/milvus:v2.4.9\n...\nindexcoord:\n container_name: milvus-indexcoord\n image: milvusdb/milvus:v2.4.9\n...\nindexnode:\n container_name: milvus-indexnode\n image: milvusdb/milvus:v2.4.9 \n...\ndatacoord:\n container_name: milvus-datacoord\n image: milvusdb/milvus:v2.4.9 \n...\ndatanode:\n container_name: milvus-datanode\n image: milvusdb/milvus:v2.4.9\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n # Option: run/backup/rollback\n type: run\n runWithBackup: true\nconfig:\n sourceVersion: 2.1.4 # Specify your milvus version\n targetVersion: 2.4.9\n backupFilePath: /tmp/migration.bak\nmetastore:\n type: etcd\netcd:\n endpoints:\n - milvus-etcd:2379 # Use the etcd container name\n rootPath: by-dev # The root path where data is stored in etcd\n metaSubPath: meta\n kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvus/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","Update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"","anchorList":[{"label":"Aggiornamento del cluster Milvus con Docker Compose","href":"Upgrade-Milvus-Cluster-with-Docker-Compose","type":1,"isActive":false},{"label":"Aggiornare Milvus cambiando l'immagine","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrazione dei metadati","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"Cosa fare dopo","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["...\nrootcoord:\n container_name: milvus-rootcoord\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nproxy:\n container_name: milvus-proxy\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nquerycoord:\n container_name: milvus-querycoord\n image: milvusdb/milvus:v2.4.13-hotfix \n...\nquerynode:\n container_name: milvus-querynode\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nindexcoord:\n container_name: milvus-indexcoord\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nindexnode:\n container_name: milvus-indexnode\n image: milvusdb/milvus:v2.4.13-hotfix \n...\ndatacoord:\n container_name: milvus-datacoord\n image: milvusdb/milvus:v2.4.13-hotfix \n...\ndatanode:\n container_name: milvus-datanode\n image: milvusdb/milvus:v2.4.13-hotfix\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n # Option: run/backup/rollback\n type: run\n runWithBackup: true\nconfig:\n sourceVersion: 2.1.4 # Specify your milvus version\n targetVersion: 2.4.13-hotfix\n backupFilePath: /tmp/migration.bak\nmetastore:\n type: etcd\netcd:\n endpoints:\n - 
milvus-etcd:2379 # Use the etcd container name\n rootPath: by-dev # The root path where data is stored in etcd\n metaSubPath: meta\n kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvus/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","Update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"Upgrade Milvus Cluster with Docker Compose","anchorList":[{"label":"Aggiornamento del cluster Milvus con Docker Compose","href":"Upgrade-Milvus-Cluster-with-Docker-Compose","type":1,"isActive":false},{"label":"Aggiornare Milvus cambiando l'immagine","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrazione dei metadati","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"Cosa fare dopo","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-docker.md b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-docker.md index 0a1889a98..b1ff5bc11 100644 --- a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-docker.md +++ b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-docker.md @@ -20,7 +20,7 @@ title: Aggiornamento del cluster Milvus con Docker Compose >

This topic describes how to upgrade Milvus using Docker Compose.

    -

In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before any upgrade from v2.1.x to v2.4.9.

    +

In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before any upgrade from v2.1.x to v2.4.13-hotfix.

Upgrade Milvus by changing its image

• Run the following commands to perform the upgrade.

    docker compose down
    @@ -105,7 +105,7 @@ cmd:
       runWithBackup: true
     config:
       sourceVersion: 2.1.4   # Specify your milvus version
    -  targetVersion: 2.4.9
    +  targetVersion: 2.4.13-hotfix
       backupFilePath: /tmp/migration.bak
     metastore:
       type: etcd
    @@ -148,7 +148,7 @@ docker compose up -d
     
• If you are ready to deploy your cluster on clouds:
  • diff --git a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-helm.json b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-helm.json index 43fddf484..e1f85b406 100644 --- a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-helm.json +++ b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-helm.json @@ -1 +1 @@ -{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME CHART VERSION APP VERSION DESCRIPTION \nzilliztech/milvus 4.1.34 2.4.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.33 2.4.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.32 2.4.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.31 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.30 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.29 2.4.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.24 2.3.11 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.23 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.22 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.21 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.20 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.18 2.3.10 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.18 2.3.9 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.17 2.3.8 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.16 2.3.7 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.15 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.14 2.3.6 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.13 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.12 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.11 2.3.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.10 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.9 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.8 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.7 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.6 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.5 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.4 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.3 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.2 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.1 2.3.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.0 2.3.0 Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'\n","helm repo update\nhelm upgrade my-release zilliztech/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION \nnew-release default 1 2022-11-21 
15:41:25.51539 +0800 CST deployed milvus-3.2.18 2.1.4 \n","NAME READY STATUS RESTARTS AGE\nmy-release-etcd-0 1/1 Running 0 21m\nmy-release-etcd-1 1/1 Running 0 21m\nmy-release-etcd-2 1/1 Running 0 21m\nmy-release-milvus-datacoord-664c58798d-fl75s 1/1 Running 0 21m\nmy-release-milvus-datanode-5f75686c55-xfg2r 1/1 Running 0 21m\nmy-release-milvus-indexcoord-5f98b97589-2l48r 1/1 Running 0 21m\nmy-release-milvus-indexnode-857b4ddf98-vmd75 1/1 Running 0 21m\nmy-release-milvus-proxy-6c548f787f-scspp 1/1 Running 0 21m\nmy-release-milvus-querycoord-c454f44cd-dwmwq 1/1 Running 0 21m\nmy-release-milvus-querynode-76bb4946d-lbrz6 1/1 Running 0 21m\nmy-release-milvus-rootcoord-7764c5b686-62msm 1/1 Running 0 21m\nmy-release-minio-0 1/1 Running 0 21m\nmy-release-minio-1 1/1 Running 0 21m\nmy-release-minio-2 1/1 Running 0 21m\nmy-release-minio-3 1/1 Running 0 21m\nmy-release-pulsar-bookie-0 1/1 Running 0 21m\nmy-release-pulsar-bookie-1 1/1 Running 0 21m\nmy-release-pulsar-bookie-2 1/1 Running 0 21m\nmy-release-pulsar-bookie-init-tjxpj 0/1 Completed 0 21m\nmy-release-pulsar-broker-0 1/1 Running 0 21m\nmy-release-pulsar-proxy-0 1/1 Running 0 21m\nmy-release-pulsar-pulsar-init-c8vvc 0/1 Completed 0 21m\nmy-release-pulsar-recovery-0 1/1 Running 0 21m\nmy-release-pulsar-zookeeper-0 1/1 Running 0 21m\nmy-release-pulsar-zookeeper-1 1/1 Running 0 20m\nmy-release-pulsar-zookeeper-2 1/1 Running 0 20m\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9\n"],"headingContent":"","anchorList":[{"label":"Aggiornamento del cluster Milvus con i grafici Helm","href":"Upgrade-Milvus-Cluster-with-Helm-Chart","type":1,"isActive":false},{"label":"Controllare i grafici di Milvus Helm","href":"Check-Milvus-Helm-Chart","type":2,"isActive":false},{"label":"Eseguire un aggiornamento continuo","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Aggiornare Milvus con Helm","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"Migrazione dei metadati","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME CHART VERSION APP VERSION DESCRIPTION \nzilliztech/milvus 4.1.34 2.4.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.33 2.4.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.32 2.4.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.31 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.30 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.29 2.4.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.24 2.3.11 Milvus 
is an open-source vector database built ...\nzilliztech/milvus 4.1.23 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.22 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.21 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.20 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.18 2.3.10 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.18 2.3.9 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.17 2.3.8 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.16 2.3.7 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.15 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.14 2.3.6 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.13 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.12 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.11 2.3.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.10 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.9 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.8 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.7 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.6 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.5 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.4 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.3 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.2 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.1 2.3.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.0 2.3.0 Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'\n","helm repo update\nhelm upgrade my-release zilliztech/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION \nnew-release default 1 2022-11-21 15:41:25.51539 +0800 CST deployed milvus-3.2.18 2.1.4 \n","NAME READY STATUS RESTARTS AGE\nmy-release-etcd-0 1/1 Running 0 21m\nmy-release-etcd-1 1/1 Running 0 21m\nmy-release-etcd-2 1/1 Running 0 21m\nmy-release-milvus-datacoord-664c58798d-fl75s 1/1 Running 0 21m\nmy-release-milvus-datanode-5f75686c55-xfg2r 1/1 Running 0 21m\nmy-release-milvus-indexcoord-5f98b97589-2l48r 1/1 Running 0 21m\nmy-release-milvus-indexnode-857b4ddf98-vmd75 1/1 Running 0 21m\nmy-release-milvus-proxy-6c548f787f-scspp 1/1 Running 0 21m\nmy-release-milvus-querycoord-c454f44cd-dwmwq 1/1 Running 0 21m\nmy-release-milvus-querynode-76bb4946d-lbrz6 1/1 Running 0 21m\nmy-release-milvus-rootcoord-7764c5b686-62msm 1/1 Running 0 21m\nmy-release-minio-0 1/1 Running 0 21m\nmy-release-minio-1 1/1 Running 0 21m\nmy-release-minio-2 1/1 Running 0 21m\nmy-release-minio-3 1/1 Running 0 21m\nmy-release-pulsar-bookie-0 1/1 Running 0 21m\nmy-release-pulsar-bookie-1 1/1 Running 0 21m\nmy-release-pulsar-bookie-2 1/1 Running 0 21m\nmy-release-pulsar-bookie-init-tjxpj 0/1 Completed 0 21m\nmy-release-pulsar-broker-0 1/1 Running 0 21m\nmy-release-pulsar-proxy-0 1/1 Running 0 21m\nmy-release-pulsar-pulsar-init-c8vvc 
0/1 Completed 0 21m\nmy-release-pulsar-recovery-0 1/1 Running 0 21m\nmy-release-pulsar-zookeeper-0 1/1 Running 0 21m\nmy-release-pulsar-zookeeper-1 1/1 Running 0 20m\nmy-release-pulsar-zookeeper-2 1/1 Running 0 20m\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix\n"],"headingContent":"Upgrade Milvus Cluster with Helm Chart","anchorList":[{"label":"Aggiornamento del cluster Milvus con i grafici Helm","href":"Upgrade-Milvus-Cluster-with-Helm-Chart","type":1,"isActive":false},{"label":"Controllare i grafici di Milvus Helm","href":"Check-Milvus-Helm-Chart","type":2,"isActive":false},{"label":"Eseguire un aggiornamento continuo","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Aggiornare Milvus con Helm","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"Migrazione dei metadati","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-helm.md b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-helm.md index 8e8af9fd8..9076e61c7 100644 --- a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-helm.md +++ b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-helm.md @@ -5,7 +5,7 @@ order: 1 group: upgrade_milvus_cluster-operator.md related_key: upgrade Milvus Cluster summary: Scoprite come aggiornare il cluster Milvus con Helm Chart. -title: Aggiornamento del cluster Milvus con il diagramma di Helm +title: Aggiornamento del cluster Milvus con i grafici Helm ---

Upgrade Milvus Cluster with Helm Chart

You can choose the upgrade path for your Milvus as follows:

    -
- [Conduct a rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.9.
    +
- [Conduct a rolling upgrade](#condurre-un-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.13-hotfix.

Conduct a rolling upgrade

    1. The script hard-codes the upgrade order of the deployments and cannot be changed.
    2. The script uses kubectl patch to update the deployments and kubectl rollout status to watch their status (see the sketch after this list).
    3. The script uses kubectl patch to update the app.kubernetes.io/version label of the deployments to the one specified after the -t flag in the command.
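    For illustration, a minimal, hedged sketch of the kubectl operations the script performs for each deployment; the deployment name, container name, and namespace below are assumptions, not output of the actual script:

      # Patch the image of one deployment (names are hypothetical)
      kubectl -n default patch deployment my-release-milvus-proxy \
        -p '{"spec":{"template":{"spec":{"containers":[{"name":"proxy","image":"milvusdb/milvus:v2.4.13-hotfix"}]}}}}'

      # Block until the patched deployment has fully rolled out before moving on
      kubectl -n default rollout status deployment my-release-milvus-proxy

      # Update the version label to the value passed after -t
      kubectl -n default patch deployment my-release-milvus-proxy \
        -p '{"metadata":{"labels":{"app.kubernetes.io/version":"2.4.13-hotfix"}}}'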
    @@ -169,8 +169,8 @@ helm upgrade my-release zilliztech/milvus --reuse-values --version=

    As of Milvus 2.2.0, the metadata is incompatible with that in previous releases. The following examples assume an upgrade from Milvus 2.1.4 to Milvus 2.2.0.

    1. Check the Milvus version

    Run $ helm list to check your Milvus app version. You can see that APP VERSION is 2.1.4.

    NAME                NAMESPACE   REVISION    UPDATED                                 STATUS      CHART           APP VERSION    
     new-release         default     1           2022-11-21 15:41:25.51539 +0800 CST     deployed    milvus-3.2.18   2.1.4 
     
    @@ -203,7 +203,7 @@ my-release-pulsar-zookeeper-0
     my-release-pulsar-zookeeper-1    1/1    Running   0    20m
     my-release-pulsar-zookeeper-2    1/1    Running   0    20m
    3. Check the image tag

    Check the image tag for the pod my-release-milvus-proxy-6c548f787f-scspp. You can see that the release of your Milvus cluster is v2.1.4.

    $ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'
     # milvusdb/milvus:v2.1.4
     
    @@ -235,25 +235,25 @@ my-release-pulsar-zookeeper-2
  • Migrate the Milvus metadata.
  • Start the Milvus components with a new image.
    2. Upgrade Milvus from v2.1.x to v2.4.13-hotfix

    The following commands assume an upgrade of Milvus from v2.1.4 to v2.4.13-hotfix. Change them to the versions that fit your needs.

    1. Specify the Milvus instance name, the source Milvus version, and the target Milvus version.

      ./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix
       
    2. Specify the namespace with -n if your Milvus is not installed in the default K8s namespace.

      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix
       
    3. Specify the root path with -r if your Milvus is installed with the custom rootpath.

      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev
       
    4. Specify the image tag with -w if your Milvus is installed with a custom image.

      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix
       
    5. Set -d true if you want to automatically remove the migration pod after the migration is completed.

      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true
       
    6. Roll back and migrate again if the migration fails. After either run completes, you can verify the result as sketched below.

      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix
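    Once the migration pod finishes, a quick, hedged check that every component now runs the target image; the milvus namespace is an assumption carried over from the commands above:

      # List each pod together with the image of its first container
      kubectl -n milvus get pods \
        -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[0].image}{"\n"}{end}'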
       
    diff --git a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-operator.json b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-operator.json index 93d536fae..1bfacc7a6 100644 --- a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-operator.json +++ b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-operator.json @@ -1 +1 @@ -{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.9\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.9\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.9\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://github.com/zilliztech/milvus-operator/blob/main/config/samples/beta/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"","anchorList":[{"label":"Aggiornamento del cluster Milvus con Milvus Operator","href":"Upgrade-Milvus-Cluster-with-Milvus-Operator","type":1,"isActive":false},{"label":"Aggiornare l'operatore Milvus","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"Eseguire un aggiornamento continuo","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Aggiornare Milvus cambiando l'immagine","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrare i metadati","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n","kubectl apply -f 
milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.13-hotfix\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.13-hotfix\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.13-hotfix\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://github.com/zilliztech/milvus-operator/blob/main/config/samples/beta/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"Upgrade Milvus Cluster with Milvus Operator","anchorList":[{"label":"Aggiornamento del cluster Milvus con Milvus Operator","href":"Upgrade-Milvus-Cluster-with-Milvus-Operator","type":1,"isActive":false},{"label":"Aggiornare l'operatore Milvus","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"Eseguire un aggiornamento continuo","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Aggiornare Milvus cambiando l'immagine","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrare i metadati","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-operator.md b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-operator.md index 00d4311c9..2e14ed6b7 100644 --- a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-operator.md +++ b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_cluster-operator.md @@ -46,9 +46,9 @@ helm -n milvus-operator upgrade milvus-

    Once you have upgraded your Milvus operator to the latest version, you have the following choices:

    Conduct a rolling upgrade

    In this configuration file, set spec.components.enableRollingUpdate to true and set spec.components.image to your desired Milvus version.

    By default, Milvus conducts a rolling upgrade of the coordinators in an ordered way, replacing the coordinator pod images one after another. To reduce the upgrade time, set spec.components.imageUpdateMode to all so that Milvus replaces all pod images at the same time.
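    As a hedged sketch, you can apply the configuration above and watch the rollout; the file name and the label selector below are assumptions:

      # Apply the Milvus CR shown above (file name is an assumption)
      kubectl apply -f milvusupgrade.yml

      # Watch coordinator pods being replaced one after another (label selector is an assumption)
      kubectl get pods -l app.kubernetes.io/instance=my-release -w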

    @@ -88,7 +88,7 @@ spec:
       components:
         enableRollingUpdate: true
         imageUpdateMode: all
    -    image: milvusdb/milvus:v2.4.9
    +    image: milvusdb/milvus:v2.4.13-hotfix

    You can set spec.components.imageUpdateMode to rollingDowngrade to have Milvus replace the coordinator pod images with a lower version.

    apiVersion: milvus.io/v1beta1
    @@ -128,7 +128,7 @@ metadata:
     spec:
       # Omit other fields ...
       components:
    -   image: milvusdb/milvus:v2.4.9
    +   image: milvusdb/milvus:v2.4.13-hotfix
     

    Then run the following to perform the upgrade:

    kubectl apply -f milvusupgrade.yaml
    @@ -148,8 +148,8 @@ spec:
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

    As of Milvus 2.2.0, the metadata is incompatible with that in previous releases. The following examples assume an upgrade from Milvus 2.1.4 to Milvus 2.4.13-hotfix.

    1. Create a .yaml file for metadata migration.

    Create a metadata migration file. The following is an example. You need to specify name, sourceVersion, and targetVersion in the configuration file. The following example sets name to my-release-upgrade, sourceVersion to v2.1.4, and targetVersion to v2.4.13-hotfix. This means that your Milvus cluster will be upgraded from v2.1.4 to v2.4.13-hotfix.

    apiVersion: milvus.io/v1beta1
     kind: MilvusUpgrade
     metadata:
    @@ -159,9 +159,9 @@ spec:
         namespace: default
         name: my-release
       sourceVersion: "v2.1.4"
    -  targetVersion: "v2.4.9"
    +  targetVersion: "v2.4.13-hotfix"
       # below are some omit default values:
    -  # targetImage: "milvusdb/milvus:v2.4.9"
    +  # targetImage: "milvusdb/milvus:v2.4.13-hotfix"
       # toolImage: "milvusdb/meta-migration:v2.2.0"
       # operation: upgrade
       # rollbackIfFailed: true
    diff --git a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-docker.json b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-docker.json
    index 9ad761167..d94229432 100644
    --- a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-docker.json
    +++ b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-docker.json
    @@ -1 +1 @@
    -{"codeList":["...\nstandalone:\n  container_name: milvus-standalone\n  image: milvusdb/milvus:v2.4.9\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n  # Option: run/backup/rollback\n  type: run\n  runWithBackup: true\nconfig:\n  sourceVersion: 2.1.4   # Specify your milvus version\n  targetVersion: 2.4.9\n  backupFilePath: /tmp/migration.bak\nmetastore:\n  type: etcd\netcd:\n  endpoints:\n    - milvus-etcd:2379  # Use the etcd container name\n  rootPath: by-dev # The root path where data is stored in etcd\n  metaSubPath: meta\n  kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvusdb/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","// Run the following only after update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"","anchorList":[{"label":"Aggiornamento di Milvus Standalone con Docker Compose","href":"Upgrade-Milvus-Standalone-with-Docker-Compose","type":1,"isActive":false},{"label":"Aggiornare Milvus cambiando l'immagine","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrare i metadati","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"Cosa fare dopo","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["...\nstandalone:\n  container_name: milvus-standalone\n  image: milvusdb/milvus:v2.4.13-hotfix\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n  # Option: run/backup/rollback\n  type: run\n  runWithBackup: true\nconfig:\n  sourceVersion: 2.1.4   # Specify your milvus version\n  targetVersion: 2.4.13-hotfix\n  backupFilePath: /tmp/migration.bak\nmetastore:\n  type: etcd\netcd:\n  endpoints:\n    - milvus-etcd:2379  # Use the etcd container name\n  rootPath: by-dev # The root path where data is stored in etcd\n  metaSubPath: meta\n  kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvusdb/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","// Run the following only after update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"Upgrade Milvus Standalone with Docker Compose","anchorList":[{"label":"Aggiornamento di Milvus Standalone con Docker Compose","href":"Upgrade-Milvus-Standalone-with-Docker-Compose","type":1,"isActive":false},{"label":"Aggiornare Milvus cambiando l'immagine","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrare i metadati","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"Cosa fare dopo","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-docker.md b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-docker.md
    index 112b8cd14..dee4cd6b5 100644
    --- a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-docker.md
    +++ b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-docker.md
    @@ -24,9 +24,9 @@ title: Aggiornamento di Milvus Standalone con Docker Compose
           
         

    This topic describes how to upgrade your Milvus using Docker Compose.

    In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before any upgrade from v2.1.x to v2.4.13-hotfix.

    Due to security concerns, Milvus upgrades its MinIO to RELEASE.2023-03-20T20-16-18Z with the release of v2.2.5. Before any upgrades from previous Milvus Standalone releases installed with Docker Compose, you should create a Single-Node Single-Drive MinIO deployment and migrate the existing MinIO settings and content to the new deployment. For details, refer to this guide.
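    Purely as a hedged illustration of what a Single-Node Single-Drive MinIO deployment can look like; the container name, ports, credentials, and data path are assumptions, and you should follow the linked guide for the actual migration:

      # Hypothetical sketch: start a Single-Node Single-Drive MinIO
      docker run -d --name minio-snsd \
        -p 9000:9000 -p 9001:9001 \
        -e "MINIO_ROOT_USER=minioadmin" \
        -e "MINIO_ROOT_PASSWORD=minioadmin" \
        -v /mnt/minio-data:/data \
        minio/minio:RELEASE.2023-03-20T20-16-18Z server /data --console-address ":9001"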

    Upgrade Milvus by changing its image

  • Run the following commands to perform the upgrade.

    docker compose down
    @@ -83,7 +83,7 @@ cmd:
       runWithBackup: true
     config:
       sourceVersion: 2.1.4   # Specify your milvus version
    -  targetVersion: 2.4.9
    +  targetVersion: 2.4.13-hotfix
       backupFilePath: /tmp/migration.bak
     metastore:
       type: etcd
    @@ -126,7 +126,7 @@ docker compose up -d
     
  • If you are ready to deploy your cluster on clouds:
  • diff --git a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-helm.json b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-helm.json index 57729628a..b2382b47e 100644 --- a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-helm.json +++ b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-helm.json @@ -1 +1 @@ -{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME CHART VERSION APP VERSION DESCRIPTION \nzilliztech/milvus 4.1.34 2.4.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.33 2.4.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.32 2.4.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.31 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.30 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.29 2.4.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.24 2.3.11 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.23 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.22 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.21 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.20 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.18 2.3.10 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.18 2.3.9 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.17 2.3.8 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.16 2.3.7 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.15 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.14 2.3.6 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.13 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.12 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.11 2.3.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.10 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.9 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.8 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.7 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.6 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.5 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.4 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.3 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.2 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.1 2.3.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.0 2.3.0 Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'\n","helm repo update\nhelm upgrade my-release milvus/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION \nmy-release default 1 
2022-11-21 15:41:25.51539 +0800 CST deployed milvus-3.2.18 2.1.4\n","NAME READY STATUS RESTARTS AGE\nmy-release-etcd-0 1/1 Running 0 84s\nmy-release-milvus-standalone-75c599fffc-6rwlj 1/1 Running 0 84s\nmy-release-minio-744dd9586f-qngzv 1/1 Running 0 84s\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9\n"],"headingContent":"","anchorList":[{"label":"Aggiornamento di Milvus standalone con i grafici Helm","href":"Upgrade-Milvus-Standalone-with-Helm-Chart","type":1,"isActive":false},{"label":"Controllare la versione di Milvus","href":"Check-the-Milvus-version","type":2,"isActive":false},{"label":"Eseguire un aggiornamento continuo","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Aggiornare Milvus con Helm","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"Migrazione dei metadati","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME CHART VERSION APP VERSION DESCRIPTION \nzilliztech/milvus 4.1.34 2.4.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.33 2.4.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.32 2.4.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.31 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.30 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.29 2.4.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.24 2.3.11 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.23 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.22 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.21 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.20 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.18 2.3.10 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.18 2.3.9 Milvus is an open-source vector database built ... 
\nzilliztech/milvus 4.1.17 2.3.8 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.16 2.3.7 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.15 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.14 2.3.6 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.13 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.12 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.11 2.3.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.10 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.9 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.8 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.7 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.6 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.5 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.4 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.3 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.2 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.1 2.3.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.0 2.3.0 Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'\n","helm repo update\nhelm upgrade my-release milvus/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION \nmy-release default 1 2022-11-21 15:41:25.51539 +0800 CST deployed milvus-3.2.18 2.1.4\n","NAME READY STATUS RESTARTS AGE\nmy-release-etcd-0 1/1 Running 0 84s\nmy-release-milvus-standalone-75c599fffc-6rwlj 1/1 Running 0 84s\nmy-release-minio-744dd9586f-qngzv 1/1 Running 0 84s\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix\n"],"headingContent":"Upgrade Milvus Standalone with Helm Chart","anchorList":[{"label":"Aggiornamento di Milvus standalone con i grafici Helm","href":"Upgrade-Milvus-Standalone-with-Helm-Chart","type":1,"isActive":false},{"label":"Controllare la versione di Milvus","href":"Check-the-Milvus-version","type":2,"isActive":false},{"label":"Eseguire un aggiornamento continuo","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Aggiornare Milvus con Helm","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"Migrazione dei metadati","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git 
a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-helm.md b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-helm.md index f92b10747..59911537e 100644 --- a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-helm.md +++ b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-helm.md @@ -5,7 +5,7 @@ order: 1 group: upgrade_milvus_standalone-operator.md related_key: upgrade Milvus Standalone summary: Scoprite come aggiornare Milvus standalone con Helm Chart. -title: Aggiornamento di Milvus Standalone con Helm Chart +title: Aggiornamento di Milvus standalone con i grafici Helm ---

    Upgrade Milvus Standalone with Helm Chart

    You can choose the upgrade path for your Milvus as follows:

    - [Conduct a rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.13-hotfix.

    Conduct a rolling upgrade

      @@ -170,7 +170,7 @@ helm upgrade my-release milvus/milvus --reuse-values --version=

      As of Milvus 2.2.0, the metadata is incompatible with that in previous releases. The following examples assume an upgrade from Milvus 2.1.4 to Milvus 2.2.0.

      1. Check the Milvus version

      Run $ helm list to check your Milvus app version. You can see that APP VERSION is 2.1.4.

      NAME                NAMESPACE   REVISION    UPDATED                                 STATUS      CHART           APP VERSION     
       my-release          default     1           2022-11-21 15:41:25.51539 +0800 CST     deployed    milvus-3.2.18   2.1.4
      @@ -213,25 +213,25 @@ my-release-minio-744dd9586f-qngzv               1/1     Running   0          84s
       
    1. Migrate the Milvus metadata.
    2. Start the Milvus components with a new image.
    2. Upgrade Milvus from v2.1.x to v2.4.13-hotfix

    The following commands assume an upgrade of Milvus from v2.1.4 to v2.4.13-hotfix. Change them to the versions that fit your needs.

    1. Specify the Milvus instance name, the source Milvus version, and the target Milvus version.

      ./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix
       
    2. Specify the namespace with -n if your Milvus is not installed in the default K8s namespace.

      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix
       
    3. Specify the root path with -r if your Milvus is installed with the custom rootpath.

      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev
       
    4. Specify the image tag with -w if your Milvus is installed with a custom image.

      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix
       
    5. Set -d true if you want to automatically remove the migration pod after the migration is completed.

      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true
       
    6. Roll back and migrate again if the migration fails.

      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix
       
    diff --git a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-operator.json b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-operator.json index 68eeed516..ab739fdc0 100644 --- a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-operator.json +++ b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-operator.json @@ -1 +1 @@ -{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nlabels:\n app: milvus\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.9\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.9\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.9\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://raw.githubusercontent.com/zilliztech/milvus-operator/main/config/samples/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"","anchorList":[{"label":"Aggiornamento di Milvus standalone con Milvus Operator","href":"Upgrade-Milvus-Standalone-with-Milvus-Operator","type":1,"isActive":false},{"label":"Aggiornare l'operatore Milvus","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"Eseguire un aggiornamento continuo","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Aggiornare Milvus cambiando l'immagine","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrare i metadati","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: 
milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nlabels:\n app: milvus\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.13-hotfix\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.13-hotfix\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.13-hotfix\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://raw.githubusercontent.com/zilliztech/milvus-operator/main/config/samples/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"Upgrade Milvus Standalone with Milvus Operator","anchorList":[{"label":"Aggiornamento di Milvus standalone con Milvus Operator","href":"Upgrade-Milvus-Standalone-with-Milvus-Operator","type":1,"isActive":false},{"label":"Aggiornare l'operatore Milvus","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"Eseguire un aggiornamento continuo","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Aggiornare Milvus cambiando l'immagine","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrare i metadati","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-operator.md b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-operator.md index 4e6473f8e..9e27fb883 100644 --- a/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-operator.md +++ b/localization/v2.4.x/site/it/adminGuide/upgrade_milvus_standalone-operator.md @@ -5,7 +5,7 @@ order: 0 group: upgrade_milvus_standalone-operator.md related_key: upgrade Milvus Standalone summary: Scoprite come aggiornare Milvus standalone con Milvus operator. -title: Aggiornamento di Milvus Standalone con Milvus Operator +title: Aggiornamento di Milvus standalone con Milvus Operator ---

    Upgrade Milvus Standalone with Milvus Operator

    Once you have upgraded your Milvus operator to the latest version, you have the following choices:

    Conduct a rolling upgrade

    In this configuration file, set spec.components.enableRollingUpdate to true and set spec.components.image to your desired Milvus version.

    By default, Milvus conducts a rolling upgrade of the coordinators in an ordered way, replacing the coordinator pod images one after another. To reduce the upgrade time, set spec.components.imageUpdateMode to all so that Milvus replaces all pod images at the same time. You can monitor the progress as sketched below.
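    A minimal, hedged sketch for monitoring the rolling upgrade, reusing commands that appear elsewhere in this guide; the CR name my-release is an assumption:

      # Check the upgrade status reported by the operator
      kubectl describe milvus my-release

      # Watch pods being replaced during the rolling upgrade
      kubectl get pods -w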

    @@ -88,7 +88,7 @@ spec:
       components:
         enableRollingUpdate: true
         imageUpdateMode: all
    -    image: milvusdb/milvus:v2.4.9
    +    image: milvusdb/milvus:v2.4.13-hotfix

    You can set spec.components.imageUpdateMode to rollingDowngrade to have Milvus replace the coordinator pod images with a lower version.

    apiVersion: milvus.io/v1beta1
    @@ -130,7 +130,7 @@ labels:
     spec:
       # Omit other fields ...
       components:
    -   image: milvusdb/milvus:v2.4.9
    +   image: milvusdb/milvus:v2.4.13-hotfix
     

    Then run the following to perform the upgrade:

    kubectl apply -f milvusupgrade.yaml
    @@ -150,8 +150,8 @@ spec:
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

    As of Milvus 2.2.0, the metadata is incompatible with that in previous releases. The following examples assume an upgrade from Milvus 2.1.4 to Milvus v2.4.13-hotfix.

    1. Create a .yaml file for metadata migration.

    Create a metadata migration file. The following is an example. You need to specify name, sourceVersion, and targetVersion in the configuration file. The following example sets name to my-release-upgrade, sourceVersion to v2.1.4, and targetVersion to v2.4.13-hotfix. This means that your Milvus instance will be upgraded from v2.1.4 to v2.4.13-hotfix.

    apiVersion: milvus.io/v1beta1
     kind: MilvusUpgrade
     metadata:
    @@ -161,9 +161,9 @@ spec:
         namespace: default
         name: my-release
       sourceVersion: "v2.1.4"
    -  targetVersion: "v2.4.9"
    +  targetVersion: "v2.4.13-hotfix"
       # below are some omit default values:
    -  # targetImage: "milvusdb/milvus:v2.4.9"
    +  # targetImage: "milvusdb/milvus:v2.4.13-hotfix"
       # toolImage: "milvusdb/meta-migration:v2.2.0"
       # operation: upgrade
       # rollbackIfFailed: true
    diff --git a/localization/v2.4.x/site/it/embeddings/embed-with-cohere.json b/localization/v2.4.x/site/it/embeddings/embed-with-cohere.json
    index 40401b602..3ce35a476 100644
    --- a/localization/v2.4.x/site/it/embeddings/embed-with-cohere.json
    +++ b/localization/v2.4.x/site/it/embeddings/embed-with-cohere.json
    @@ -1 +1 @@
    -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","cohere_ef = CohereEmbeddingFunction(\n    model_name=\"embed-english-light-v3.0\",\n    api_key=\"YOUR_COHERE_API_KEY\",\n    input_type=\"search_document\",\n    embedding_types=[\"float\"]\n)\n","docs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = cohere_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", cohere_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 3.43322754e-02,  1.16252899e-03, -5.25207520e-02,  1.32846832e-03,\n       -6.80541992e-02,  6.10961914e-02, -7.06176758e-02,  1.48925781e-01,\n        1.54174805e-01,  1.98516846e-02,  2.43835449e-02,  3.55224609e-02,\n        1.82952881e-02,  7.57446289e-02, -2.40783691e-02,  4.40063477e-02,\n...\n        0.06359863, -0.01971436, -0.02253723,  0.00354195,  0.00222015,\n        0.00184727,  0.03408813, -0.00777817,  0.04919434,  0.01519775,\n       -0.02862549,  0.04760742, -0.07891846,  0.0124054 ], dtype=float32)]\nDim: 384 (384,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = cohere_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", cohere_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-1.33361816e-02,  9.79423523e-04, -7.28759766e-02, -1.93786621e-02,\n       -9.71679688e-02,  4.34875488e-02, -9.81445312e-02,  1.16882324e-01,\n        5.89904785e-02, -4.19921875e-02,  4.95910645e-02,  5.83496094e-02,\n        3.47595215e-02, -5.87463379e-03, -7.30514526e-03,  2.92816162e-02,\n...\n        0.00749969, -0.01192474,  0.02719116,  0.03347778,  0.07696533,\n        0.01409149,  0.00964355, -0.01681519, -0.0073204 ,  0.00043154,\n       -0.04577637,  0.03591919, -0.02807617, -0.04812622], dtype=float32)]\nDim 384 (384,)\n"],"headingContent":"","anchorList":[{"label":"Cohere","href":"Cohere","type":1,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import CohereEmbeddingFunction\n\ncohere_ef = CohereEmbeddingFunction(\n    model_name=\"embed-english-light-v3.0\",\n    api_key=\"YOUR_COHERE_API_KEY\",\n    input_type=\"search_document\",\n    embedding_types=[\"float\"]\n)\n","docs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = cohere_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", cohere_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 3.43322754e-02,  1.16252899e-03, -5.25207520e-02,  1.32846832e-03,\n       -6.80541992e-02,  6.10961914e-02, -7.06176758e-02,  1.48925781e-01,\n        1.54174805e-01,  1.98516846e-02,  2.43835449e-02,  3.55224609e-02,\n        1.82952881e-02,  7.57446289e-02, -2.40783691e-02,  4.40063477e-02,\n...\n        0.06359863, -0.01971436, -0.02253723,  0.00354195,  0.00222015,\n        0.00184727,  0.03408813, -0.00777817,  0.04919434,  0.01519775,\n       -0.02862549,  0.04760742, -0.07891846,  0.0124054 ], dtype=float32)]\nDim: 384 (384,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = cohere_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", cohere_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-1.33361816e-02,  9.79423523e-04, -7.28759766e-02, -1.93786621e-02,\n       -9.71679688e-02,  4.34875488e-02, -9.81445312e-02,  1.16882324e-01,\n        5.89904785e-02, -4.19921875e-02,  4.95910645e-02,  5.83496094e-02,\n        3.47595215e-02, -5.87463379e-03, -7.30514526e-03,  2.92816162e-02,\n...\n        0.00749969, -0.01192474,  0.02719116,  0.03347778,  0.07696533,\n        0.01409149,  0.00964355, -0.01681519, -0.0073204 ,  0.00043154,\n       -0.04577637,  0.03591919, -0.02807617, -0.04812622], dtype=float32)]\nDim 384 (384,)\n"],"headingContent":"Cohere","anchorList":[{"label":"Cohere","href":"Cohere","type":1,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/it/embeddings/embed-with-cohere.md b/localization/v2.4.x/site/it/embeddings/embed-with-cohere.md
    index 21dd430bc..6c9ec1ab4 100644
    --- a/localization/v2.4.x/site/it/embeddings/embed-with-cohere.md
    +++ b/localization/v2.4.x/site/it/embeddings/embed-with-cohere.md
    @@ -21,14 +21,16 @@ title: Incorporare Cohere
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

    Embedding models in Cohere are used to generate text embeddings, which are lists of floating-point numbers that capture semantic information about the text. These embeddings can be used for tasks such as text classification and semantic search.

    Milvus integrates with Cohere's embedding models via the CohereEmbeddingFunction class. This class handles the computation of embeddings and returns them in a format compatible with Milvus for indexing and searching.

    To use this feature, install the necessary dependencies:

    pip install --upgrade pymilvus
     pip install "pymilvus[model]"
     

    Then, instantiate the CohereEmbeddingFunction class:

    from pymilvus.model.dense import CohereEmbeddingFunction

    cohere_ef = CohereEmbeddingFunction(
         model_name="embed-english-light-v3.0",
         api_key="YOUR_COHERE_API_KEY",
         input_type="search_document",
    diff --git a/localization/v2.4.x/site/it/embeddings/embed-with-jina.json b/localization/v2.4.x/site/it/embeddings/embed-with-jina.json
    index 3ba04b590..4cee744ff 100644
    --- a/localization/v2.4.x/site/it/embeddings/embed-with-jina.json
    +++ b/localization/v2.4.x/site/it/embeddings/embed-with-jina.json
    @@ -1 +1 @@
    -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n    model_name=\"jina-embeddings-v2-base-en\", # Defaults to `jina-embeddings-v2-base-en`\n    api_key=JINAAI_API_KEY # Provide your Jina AI API key\n)\n","docs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = jina_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", jina_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([-4.88487840e-01, -4.28095880e-01,  4.90086500e-01, -1.63274320e-01,\n        3.43437800e-01,  3.21476880e-01,  2.83173790e-02, -3.10403670e-01,\n        4.76985040e-01, -1.77410420e-01, -3.84803180e-01, -2.19224200e-01,\n       -2.52898000e-01,  6.62411900e-02, -8.58173100e-01,  1.05221800e+00,\n...\n       -2.04462400e-01,  7.14229800e-01, -1.66823000e-01,  8.72551440e-01,\n        5.53560140e-01,  8.92506300e-01, -2.39408610e-01, -4.22413560e-01,\n       -3.19551350e-01,  5.59153850e-01,  2.44338100e-01, -8.60452100e-01])]\nDim: 768 (768,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = jina_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", jina_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-5.99164660e-01, -3.49827350e-01,  8.22405160e-01, -1.18632730e-01,\n        5.78107540e-01,  1.09789170e-01,  2.91604200e-01, -3.29306450e-01,\n        2.93779640e-01, -2.17880800e-01, -6.84535440e-01, -3.79752000e-01,\n       -3.47541800e-01,  9.20846100e-02, -6.13804400e-01,  6.31312800e-01,\n...\n       -1.84993740e-02,  9.38629150e-01,  2.74858470e-02,  1.09396360e+00,\n        3.96270750e-01,  7.44445800e-01, -1.95404050e-01, -6.08383200e-01,\n       -3.75076300e-01,  3.87512200e-01,  8.11889650e-01, -3.76407620e-01])]\nDim 768 (768,)\n"],"headingContent":"","anchorList":[{"label":"Jina AI","href":"Jina-AI","type":1,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n    model_name=\"jina-embeddings-v3\", # Defaults to `jina-embeddings-v3`\n    api_key=JINAAI_API_KEY, # Provide your Jina AI API key\n    task=\"retrieval.passage\", # Specify the task\n    dimensions=1024, # Defaults to 1024\n)\n","\n```python\ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = jina_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", jina_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([9.80641991e-02, -8.51697400e-02,  7.36531913e-02,  1.42558888e-02,\n       -2.23589484e-02,  1.68494112e-03, -3.50753777e-02, -3.11530549e-02,\n       -3.26012149e-02,  5.04568312e-03,  3.69836427e-02,  3.48948985e-02,\n        8.19722563e-03,  5.88679723e-02, -6.71099266e-03, -1.82369724e-02,\n...\n        2.48654783e-02,  3.43279652e-02, -1.66154150e-02, -9.90478322e-03,\n       -2.96043139e-03, -8.57473817e-03, -7.39028037e-04,  6.25024503e-03,\n       -1.08831357e-02, -4.00776342e-02,  3.25369164e-02, -1.42691191e-03])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = jina_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", jina_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([8.79201014e-03,  1.47551354e-02,  4.02722731e-02, -2.52991207e-02,\n        1.12719582e-02,  3.75947170e-02,  3.97946090e-02, -7.36681819e-02,\n       -2.17952449e-02, -1.16298944e-02, -6.83426252e-03, -5.12507409e-02,\n        5.26071340e-02,  6.75181448e-02,  3.92445624e-02, -1.40817231e-02,\n...\n        8.81703943e-03,  4.24629413e-02, -2.32944116e-02, -2.05193572e-02,\n       -3.22035812e-02,  2.81896023e-03,  3.85326855e-02,  3.64372656e-02,\n       -1.65050142e-02, -4.26847413e-02,  2.02664156e-02, -1.72684863e-02])]\nDim 1024 (1024,)\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n    model_name=\"jina-embeddings-v3\", # Defaults to `jina-embeddings-v3`\n    api_key=JINA_API_KEY, # Provide your Jina AI API key\n    task=\"text-matching\",\n    dimensions=1024, # Defaults to 1024\n)\n\ntexts = [\n    \"Follow the white rabbit.\",  # English\n    \"Sigue al conejo blanco.\",  # Spanish\n    \"Suis le lapin blanc.\",  # French\n    \"跟着白兔走。\",  # Chinese\n    \"اتبع الأرنب الأبيض.\",  # Arabic\n    \"Folge dem weißen Kaninchen.\",  # German\n]\n\nembeddings = jina_ef(texts)\n\n# Compute similarities\nprint(embeddings[0] @ embeddings[1].T)\n"],"headingContent":"Jina AI","anchorList":[{"label":"Jina AI","href":"Jina-AI","type":1,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/it/embeddings/embed-with-jina.md b/localization/v2.4.x/site/it/embeddings/embed-with-jina.md
    index 355347014..8b5f73d9e 100644
    --- a/localization/v2.4.x/site/it/embeddings/embed-with-jina.md
    +++ b/localization/v2.4.x/site/it/embeddings/embed-with-jina.md
    @@ -31,19 +31,36 @@ pip install "pymilvus[model]"
     
    from pymilvus.model.dense import JinaEmbeddingFunction
     
     jina_ef = JinaEmbeddingFunction(
    -    model_name="jina-embeddings-v2-base-en", # Defaults to `jina-embeddings-v2-base-en`
    -    api_key=JINAAI_API_KEY # Provide your Jina AI API key
    +    model_name="jina-embeddings-v3", # Defaults to `jina-embeddings-v3`
    +    api_key=JINAAI_API_KEY, # Provide your Jina AI API key
    +    task="retrieval.passage", # Specify the task
    +    dimensions=1024, # Defaults to 1024
     )
     

Parameters:

- model_name (string)

-  The name of the Jina AI embedding model to use for encoding. You can specify any of the available Jina AI embedding model names, for example jina-embeddings-v2-base-en, jina-embeddings-v2-small-en, etc. If you leave this parameter unspecified, jina-embeddings-v2-base-en will be used. For a list of available models, refer to Jina Embeddings.
+  The name of the Jina AI embedding model to use for encoding. You can specify any of the available Jina AI embedding model names, for example jina-embeddings-v3, jina-embeddings-v2-base-en, etc. If you leave this parameter unspecified, jina-embeddings-v3 will be used. For a list of available models, refer to Jina Embeddings.

- api_key (string)

  The API key for accessing the Jina AI API.

+- task (string)
+
+  The type of input passed to the model. Required for embedding models v3 and higher.
+
+  - "retrieval.passage": Used to encode large documents in retrieval tasks at indexing time.
+  - "retrieval.query": Used to encode user queries or questions in retrieval tasks.
+  - "classification": Used to encode text for text classification tasks.
+  - "text-matching": Used to encode text for similarity matching, such as measuring the similarity between two sentences.
+  - "clustering": Used for clustering or reranking tasks.
+
+- dimensions (int)
+
+  The number of dimensions the resulting output embeddings should have. Defaults to 1024. Only supported for embedding models v3 and higher.
+
+- late_chunking (bool)
+
+  This parameter controls whether to use the new chunking method Jina AI recently introduced for encoding a batch of sentences. Defaults to False. When set to True, the Jina AI API concatenates all sentences in the input field and feeds them to the model as a single string. Internally, the model embeds this long concatenated string and then performs late chunking, returning a list of embeddings that matches the size of the input list. A hedged sketch of enabling it follows this list.
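A minimal sketch of enabling late_chunking, assuming JINAAI_API_KEY is defined; the chunk texts reuse sentences from this guide and the exact output is not shown here:

```python
from pymilvus.model.dense import JinaEmbeddingFunction

jina_ef = JinaEmbeddingFunction(
    model_name="jina-embeddings-v3",
    api_key=JINAAI_API_KEY,   # your Jina AI API key
    task="retrieval.passage",
    late_chunking=True,       # embed the chunks as one string, then split late
)

# Consecutive chunks of one document: late chunking lets each returned
# embedding retain the context of the surrounding chunks.
chunks = [
    "Alan Turing was the first person to conduct substantial research in AI.",
    "Born in Maida Vale, London, Turing was raised in southern England.",
]
embeddings = jina_ef.encode_documents(chunks)
print(len(embeddings))  # one embedding per input chunk
```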

-To create embeddings for documents, use the encode_documents() method:
-
-docs = [
+To create embeddings for documents, use the encode_documents() method. This method is designed for document embeddings in asymmetric retrieval tasks, such as indexing documents for search or recommendation tasks. It uses retrieval.passage as the task.
+
+docs = [
         "Artificial intelligence was founded as an academic discipline in 1956.",
         "Alan Turing was the first person to conduct substantial research in AI.",
         "Born in Maida Vale, London, Turing was raised in southern England.",
    @@ -57,17 +74,17 @@ docs_embeddings = jina_ef.encode_documents(docs)
     print("Dim:", jina_ef.dim, docs_embeddings[0].shape)
     

The expected output is similar to the following:

    -
    Embeddings: [array([-4.88487840e-01, -4.28095880e-01,  4.90086500e-01, -1.63274320e-01,
    -        3.43437800e-01,  3.21476880e-01,  2.83173790e-02, -3.10403670e-01,
    -        4.76985040e-01, -1.77410420e-01, -3.84803180e-01, -2.19224200e-01,
    -       -2.52898000e-01,  6.62411900e-02, -8.58173100e-01,  1.05221800e+00,
    +
    Embeddings: [array([9.80641991e-02, -8.51697400e-02,  7.36531913e-02,  1.42558888e-02,
    +       -2.23589484e-02,  1.68494112e-03, -3.50753777e-02, -3.11530549e-02,
    +       -3.26012149e-02,  5.04568312e-03,  3.69836427e-02,  3.48948985e-02,
    +        8.19722563e-03,  5.88679723e-02, -6.71099266e-03, -1.82369724e-02,
     ...
    -       -2.04462400e-01,  7.14229800e-01, -1.66823000e-01,  8.72551440e-01,
    -        5.53560140e-01,  8.92506300e-01, -2.39408610e-01, -4.22413560e-01,
    -       -3.19551350e-01,  5.59153850e-01,  2.44338100e-01, -8.60452100e-01])]
    -Dim: 768 (768,)
    +        2.48654783e-02,  3.43279652e-02, -1.66154150e-02, -9.90478322e-03,
    +       -2.96043139e-03, -8.57473817e-03, -7.39028037e-04,  6.25024503e-03,
    +       -1.08831357e-02, -4.00776342e-02,  3.25369164e-02, -1.42691191e-03])]
    +Dim: 1024 (1024,)
     
    -

To create embeddings for queries, use the encode_queries() method:

    +

To create embeddings for queries, use the encode_queries() method. This method is designed for query embeddings in asymmetric retrieval tasks, such as search queries or questions. It uses retrieval.query as the task.

    queries = ["When was artificial intelligence founded", 
                "Where was Alan Turing born?"]
     
    @@ -76,14 +93,38 @@ query_embeddings = jina_ef.encode_queries(queries)
     print("Embeddings:", query_embeddings)
     print("Dim", jina_ef.dim, query_embeddings[0].shape)
     
    -

The expected output is similar to the following:

    -
    Embeddings: [array([-5.99164660e-01, -3.49827350e-01,  8.22405160e-01, -1.18632730e-01,
    -        5.78107540e-01,  1.09789170e-01,  2.91604200e-01, -3.29306450e-01,
    -        2.93779640e-01, -2.17880800e-01, -6.84535440e-01, -3.79752000e-01,
    -       -3.47541800e-01,  9.20846100e-02, -6.13804400e-01,  6.31312800e-01,
    +

The expected output is similar to the following:

    +
    Embeddings: [array([8.79201014e-03,  1.47551354e-02,  4.02722731e-02, -2.52991207e-02,
    +        1.12719582e-02,  3.75947170e-02,  3.97946090e-02, -7.36681819e-02,
    +       -2.17952449e-02, -1.16298944e-02, -6.83426252e-03, -5.12507409e-02,
    +        5.26071340e-02,  6.75181448e-02,  3.92445624e-02, -1.40817231e-02,
     ...
    -       -1.84993740e-02,  9.38629150e-01,  2.74858470e-02,  1.09396360e+00,
    -        3.96270750e-01,  7.44445800e-01, -1.95404050e-01, -6.08383200e-01,
    -       -3.75076300e-01,  3.87512200e-01,  8.11889650e-01, -3.76407620e-01])]
    -Dim 768 (768,)
    +        8.81703943e-03,  4.24629413e-02, -2.32944116e-02, -2.05193572e-02,
    +       -3.22035812e-02,  2.81896023e-03,  3.85326855e-02,  3.64372656e-02,
    +       -1.65050142e-02, -4.26847413e-02,  2.02664156e-02, -1.72684863e-02])]
    +Dim 1024 (1024,)
    +
    +

To create embeddings of inputs for similarity matching (such as STS or symmetric retrieval tasks), text classification, clustering, or reranking tasks, use the appropriate task parameter value when instantiating the JinaEmbeddingFunction class.

    +
    from pymilvus.model.dense import JinaEmbeddingFunction
    +
    +jina_ef = JinaEmbeddingFunction(
    +    model_name="jina-embeddings-v3", # Defaults to `jina-embeddings-v3`
    +    api_key=JINA_API_KEY, # Provide your Jina AI API key
    +    task="text-matching",
    +    dimensions=1024, # Defaults to 1024
    +)
    +
    +texts = [
    +    "Follow the white rabbit.",  # English
    +    "Sigue al conejo blanco.",  # Spanish
    +    "Suis le lapin blanc.",  # French
    +    "跟着白兔走。",  # Chinese
    +    "اتبع الأرنب الأبيض.",  # Arabic
    +    "Folge dem weißen Kaninchen.",  # German
    +]
    +
    +embeddings = jina_ef(texts)
    +
    +# Compute similarities
    +print(embeddings[0] @ embeddings[1].T)
     
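The printed value is the dot-product similarity between the English and Spanish sentences. To see all cross-lingual pairs at once, a hedged extension (assuming numpy is installed and the returned embeddings are numpy arrays):

```python
import numpy as np

matrix = np.stack(embeddings)  # shape: (6, dimensions)
print(matrix @ matrix.T)       # 6x6 pairwise similarity across the languages
```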
    diff --git a/localization/v2.4.x/site/it/embeddings/embed-with-voyage.json b/localization/v2.4.x/site/it/embeddings/embed-with-voyage.json index c5fe18415..ba7547304 100644 --- a/localization/v2.4.x/site/it/embeddings/embed-with-voyage.json +++ b/localization/v2.4.x/site/it/embeddings/embed-with-voyage.json @@ -1 +1 @@ -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import VoyageEmbeddingFunction\n\nvoyage_ef = VoyageEmbeddingFunction(\n model_name=\"voyage-lite-02-instruct\", # Defaults to `voyage-2`\n api_key=VOYAGE_API_KEY # Provide your Voyage API key\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = voyage_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", voyage_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 0.02582654, -0.00907086, -0.04604037, ..., -0.01227521,\n 0.04420955, -0.00038829]), array([ 0.03844212, -0.01597065, -0.03728884, ..., -0.02118733,\n 0.03349845, 0.0065346 ]), array([ 0.05143557, -0.01096631, -0.02690451, ..., -0.02416254,\n 0.07658645, 0.03064499])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = voyage_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", voyage_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([ 0.01733501, -0.0230672 , -0.05208827, ..., -0.00957995,\n 0.04493361, 0.01485138]), array([ 0.05937521, -0.00729363, -0.02184347, ..., -0.02107683,\n 0.05706626, 0.0263358 ])]\nDim 1024 (1024,)\n"],"headingContent":"","anchorList":[{"label":"Voyage","href":"Voyage","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import VoyageEmbeddingFunction\n\nvoyage_ef = VoyageEmbeddingFunction(\n model_name=\"voyage-3\", # Defaults to `voyage-3`\n api_key=VOYAGE_API_KEY # Provide your Voyage API key\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = voyage_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", voyage_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 0.02582654, -0.00907086, -0.04604037, ..., -0.01227521,\n 0.04420955, -0.00038829]), array([ 0.03844212, -0.01597065, -0.03728884, ..., -0.02118733,\n 0.03349845, 0.0065346 ]), array([ 0.05143557, -0.01096631, -0.02690451, ..., -0.02416254,\n 0.07658645, 0.03064499])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = voyage_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", voyage_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([ 0.01733501, -0.0230672 , -0.05208827, ..., -0.00957995,\n 0.04493361, 0.01485138]), array([ 0.05937521, -0.00729363, -0.02184347, ..., -0.02107683,\n 0.05706626, 0.0263358 ])]\nDim 1024 
(1024,)\n"],"headingContent":"Voyage","anchorList":[{"label":"Voyage","href":"Voyage","type":1,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.4.x/site/it/embeddings/embed-with-voyage.md b/localization/v2.4.x/site/it/embeddings/embed-with-voyage.md
index 35422cb2f..0b7448c41 100644
--- a/localization/v2.4.x/site/it/embeddings/embed-with-voyage.md
+++ b/localization/v2.4.x/site/it/embeddings/embed-with-voyage.md
@@ -21,7 +21,7 @@ title: Incorporare il viaggio
-

Milvus integrates with Voyage's models via the VoyageEmbeddingFunction class. This class provides methods for encoding documents and queries using the Voyage models and for returning the embeddings as dense vectors compatible with Milvus indexing. To use this functionality, you need to obtain an API key from Voyage by creating an account on its platform.

    +

Milvus integrates with Voyage's models through the VoyageEmbeddingFunction class. This class provides methods for encoding documents and queries using the Voyage models, returning the embeddings as dense vectors compatible with Milvus indexing. To use this functionality, you need to obtain an API key from Voyage by creating an account on its platform.

To use this feature, install the necessary dependencies:

    pip install --upgrade pymilvus
     pip install "pymilvus[model]"
    @@ -30,13 +30,13 @@ pip install "pymilvus[model]"
     
    from pymilvus.model.dense import VoyageEmbeddingFunction
     
     voyage_ef = VoyageEmbeddingFunction(
    -    model_name="voyage-lite-02-instruct", # Defaults to `voyage-2`
    +    model_name="voyage-3", # Defaults to `voyage-3`
         api_key=VOYAGE_API_KEY # Provide your Voyage API key
     )
     

Parameters:

-- model_name (string) The name of the Voyage model to use for encoding. You can specify any of the available Voyage model names, for example voyage-law-2, voyage-code-2, etc. If you leave this parameter unspecified, voyage-2 will be used. For a list of available models, refer to the official Voyage documentation.
+- model_name (string) The name of the Voyage model to use for encoding. You can specify any of the available Voyage model names, for example voyage-3-lite, voyage-finance-2, etc. If you leave this parameter unspecified, voyage-3 will be used. For a list of available models, refer to the official Voyage documentation.
 - api_key (string) The API key for accessing the Voyage API. For information on how to create an API key, refer to API Key and Python Client.

To create document embeddings, use the encode_documents() method:
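For reference, a minimal sketch of the call, mirroring the snippet in the code list above (voyage_ef is the instance created earlier; VOYAGE_API_KEY is assumed to be defined):

```python
docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Alan Turing was the first person to conduct substantial research in AI.",
]

docs_embeddings = voyage_ef.encode_documents(docs)
print("Dim:", voyage_ef.dim, docs_embeddings[0].shape)  # e.g. 1024 (1024,)
```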

    diff --git a/localization/v2.4.x/site/it/embeddings/embeddings.json b/localization/v2.4.x/site/it/embeddings/embeddings.json index a20467f4f..bfa7cc366 100644 --- a/localization/v2.4.x/site/it/embeddings/embeddings.json +++ b/localization/v2.4.x/site/it/embeddings/embeddings.json @@ -1 +1 @@ -{"codeList":["pip install \"pymilvus[model]\"\n","from pymilvus import model\n\n# This will download \"all-MiniLM-L6-v2\", a light weight model.\nef = model.DefaultEmbeddingFunction()\n\n# Data from which embeddings are to be generated \ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nembeddings = ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", ef.dim, embeddings[0].shape)\n","Embeddings: [array([-3.09392996e-02, -1.80662833e-02, 1.34775648e-02, 2.77156215e-02,\n -4.86349640e-03, -3.12581174e-02, -3.55921760e-02, 5.76934684e-03,\n 2.80773244e-03, 1.35783911e-01, 3.59678417e-02, 6.17732145e-02,\n...\n -4.61330153e-02, -4.85207550e-02, 3.13997865e-02, 7.82178566e-02,\n -4.75336798e-02, 5.21207601e-02, 9.04406682e-02, -5.36676683e-02],\n dtype=float32)]\nDim: 384 (384,)\n","from pymilvus.model.hybrid import BGEM3EmbeddingFunction\nfrom pymilvus import (\n utility,\n FieldSchema, CollectionSchema, DataType,\n Collection, AnnSearchRequest, RRFRanker, connections,\n)\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Who started AI research?\"\n\n# BGE-M3 model can embed texts as dense and sparse vectors.\n# It is included in the optional `model` module in pymilvus, to install it,\n# simply run \"pip install pymilvus[model]\".\n\nbge_m3_ef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\n\ndocs_embeddings = bge_m3_ef(docs)\nquery_embeddings = bge_m3_ef([query])\n","from pymilvus.model.sparse import BM25EmbeddingFunction\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Where was Turing born?\"\nbm25_ef = BM25EmbeddingFunction()\n\n# 2. fit the corpus to get BM25 model parameters on your documents.\nbm25_ef.fit(docs)\n\n# 3. store the fitted parameters to disk to expedite future processing.\nbm25_ef.save(\"bm25_params.json\")\n\n# 4. 
load the saved params\nnew_bm25_ef = BM25EmbeddingFunction()\nnew_bm25_ef.load(\"bm25_params.json\")\n\ndocs_embeddings = new_bm25_ef.encode_documents(docs)\nquery_embeddings = new_bm25_ef.encode_queries([query])\nprint(\"Dim:\", new_bm25_ef.dim, list(docs_embeddings)[0].shape)\n","Dim: 21 (1, 21)\n"],"headingContent":"","anchorList":[{"label":"Panoramica sull'incorporazione","href":"Embedding-Overview","type":1,"isActive":false},{"label":"Esempio 1: Usare la funzione di embedding predefinita per generare vettori densi","href":"Example-1-Use-default-embedding-function-to-generate-dense-vectors","type":2,"isActive":false},{"label":"Esempio 2: Generazione di vettori densi e radi in un'unica chiamata con il modello BGE M3","href":"Example-2-Generate-dense-and-sparse-vectors-in-one-call-with-BGE-M3-model","type":2,"isActive":false},{"label":"Esempio 3: Generare vettori sparsi usando il modello BM25","href":"Example-3-Generate--sparse-vectors-using-BM25-model","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["pip install \"pymilvus[model]\"\n","from pymilvus import model\n\n# This will download \"all-MiniLM-L6-v2\", a light weight model.\nef = model.DefaultEmbeddingFunction()\n\n# Data from which embeddings are to be generated \ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nembeddings = ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", ef.dim, embeddings[0].shape)\n","Embeddings: [array([-3.09392996e-02, -1.80662833e-02, 1.34775648e-02, 2.77156215e-02,\n -4.86349640e-03, -3.12581174e-02, -3.55921760e-02, 5.76934684e-03,\n 2.80773244e-03, 1.35783911e-01, 3.59678417e-02, 6.17732145e-02,\n...\n -4.61330153e-02, -4.85207550e-02, 3.13997865e-02, 7.82178566e-02,\n -4.75336798e-02, 5.21207601e-02, 9.04406682e-02, -5.36676683e-02],\n dtype=float32)]\nDim: 384 (384,)\n","from pymilvus.model.hybrid import BGEM3EmbeddingFunction\nfrom pymilvus import (\n utility,\n FieldSchema, CollectionSchema, DataType,\n Collection, AnnSearchRequest, RRFRanker, connections,\n)\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Who started AI research?\"\n\n# BGE-M3 model can embed texts as dense and sparse vectors.\n# It is included in the optional `model` module in pymilvus, to install it,\n# simply run \"pip install pymilvus[model]\".\n\nbge_m3_ef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\n\ndocs_embeddings = bge_m3_ef(docs)\nquery_embeddings = bge_m3_ef([query])\n","from pymilvus.model.sparse import BM25EmbeddingFunction\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Where was Turing born?\"\nbm25_ef = BM25EmbeddingFunction()\n\n# 2. fit the corpus to get BM25 model parameters on your documents.\nbm25_ef.fit(docs)\n\n# 3. 
store the fitted parameters to disk to expedite future processing.\nbm25_ef.save(\"bm25_params.json\")\n\n# 4. load the saved params\nnew_bm25_ef = BM25EmbeddingFunction()\nnew_bm25_ef.load(\"bm25_params.json\")\n\ndocs_embeddings = new_bm25_ef.encode_documents(docs)\nquery_embeddings = new_bm25_ef.encode_queries([query])\nprint(\"Dim:\", new_bm25_ef.dim, list(docs_embeddings)[0].shape)\n","Dim: 21 (1, 21)\n"],"headingContent":"Embedding Overview","anchorList":[{"label":"Panoramica sull'incorporazione","href":"Embedding-Overview","type":1,"isActive":false},{"label":"Esempio 1: Usare la funzione di embedding predefinita per generare vettori densi","href":"Example-1-Use-default-embedding-function-to-generate-dense-vectors","type":2,"isActive":false},{"label":"Esempio 2: Generazione di vettori densi e radi in un'unica chiamata con il modello BGE M3","href":"Example-2-Generate-dense-and-sparse-vectors-in-one-call-with-BGE-M3-model","type":2,"isActive":false},{"label":"Esempio 3: Generare vettori sparsi usando il modello BM25","href":"Example-3-Generate--sparse-vectors-using-BM25-model","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/embeddings/embeddings.md b/localization/v2.4.x/site/it/embeddings/embeddings.md index 3f1332074..d3c64f98c 100644 --- a/localization/v2.4.x/site/it/embeddings/embeddings.md +++ b/localization/v2.4.x/site/it/embeddings/embeddings.md @@ -40,6 +40,10 @@ title: Panoramica sull'incorporazione voyageaiDensoAPI jinaDensoAPI cohereDensoAPI +IstruttoreDensoAperto +Mistral AIDensoAPI +NomicDensoAPI +mGTEIbridoFornito in modo aperto

Example 1: Use the default embedding function to generate dense vectors

Still have questions?

You can:

- Check out Milvus on GitHub. Feel free to ask questions, share ideas, and help others.
diff --git a/localization/v2.4.x/site/it/faq/performance_faq.json b/localization/v2.4.x/site/it/faq/performance_faq.json
index ecdf4bf82..3e0392c65 100644
--- a/localization/v2.4.x/site/it/faq/performance_faq.json
+++ b/localization/v2.4.x/site/it/faq/performance_faq.json
@@ -1 +1 @@
-{"codeList":[],"headingContent":"","anchorList":[{"label":"FAQ sulle prestazioni","href":"Performance-FAQ","type":1,"isActive":false}]}
\ No newline at end of file
+{"codeList":[],"headingContent":"Performance FAQ","anchorList":[{"label":"FAQ sulle prestazioni","href":"Performance-FAQ","type":1,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.4.x/site/it/faq/performance_faq.md b/localization/v2.4.x/site/it/faq/performance_faq.md
index 552565db3..36b2985e6 100644
--- a/localization/v2.4.x/site/it/faq/performance_faq.md
+++ b/localization/v2.4.x/site/it/faq/performance_faq.md
@@ -29,11 +29,11 @@ title: FAQ sulle prestazioni
 Accuracy test
 Performance test

-

Why do queries sometimes take longer on smaller datasets?

Indexes reduce the time it takes to query a segment. If a segment has not been indexed, Milvus falls back to brute-force search on the raw data, drastically increasing query time.

+

Why do queries sometimes take longer on smaller datasets?

Query operations are performed on segments. Indexes reduce the time it takes to query a segment. If a segment has not been indexed, Milvus falls back to brute-force search on the raw data, drastically increasing query time.

As a result, querying a small dataset (collection) usually takes longer because no index has been built: the segment sizes have not reached the index-building threshold set by rootCoord.minSegmentSizeToEnableindex. Call create_index() to force Milvus to index segments that have reached the threshold but have not yet been indexed automatically, significantly improving query performance.
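For example, a minimal sketch with the PyMilvus ORM API (the connection URI, collection, field, and index parameters are illustrative, not from the FAQ):

```python
from pymilvus import Collection, connections

connections.connect(uri="http://localhost:19530")  # hypothetical endpoint

collection = Collection("my_collection")  # hypothetical collection name
# Force index building on segments that reached the threshold
# but were not yet indexed automatically.
collection.create_index(
    field_name="embedding",  # hypothetical vector field
    index_params={"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 128}},
)
```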

Which factors affect CPU usage?

CPU usage rises when Milvus is building indexes or running queries. In general, index building is CPU-intensive, except when using Annoy, which runs on a single thread.

When running queries, CPU usage is affected by nq and nprobe. When nq and nprobe are small, concurrency is low and CPU usage stays low.
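For context, a hedged sketch of where nq and nprobe enter a search call (collection and field names are illustrative); nq is simply the number of query vectors passed in data:

```python
from pymilvus import Collection, connections

connections.connect(uri="http://localhost:19530")  # hypothetical endpoint
collection = Collection("my_collection")           # hypothetical collection

results = collection.search(
    data=[[0.1] * 384, [0.2] * 384],  # nq = 2 query vectors
    anns_field="embedding",           # hypothetical vector field
    param={"metric_type": "L2", "params": {"nprobe": 16}},  # clusters probed per query
    limit=10,
)
```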

-

Does simultaneously inserting data and searching affect query performance?

Insert operations are not CPU-intensive. However, because new segments may not have reached the index-building threshold, Milvus falls back to brute-force search, significantly impacting query performance.

+

Does simultaneously inserting data and searching affect query performance?

Insert operations are not CPU-intensive. However, because new segments may not have reached the index-building threshold, Milvus falls back to brute-force search, significantly impacting query performance.

The rootcoord.minSegmentSizeToEnableIndex parameter determines the index-building threshold for a segment and defaults to 1024 rows. For more information, see System Configuration.

Is storage space released immediately after data is deleted in Milvus?

No, storage space is not released immediately when you delete data in Milvus. Although deleting data marks entities as "logically deleted", the actual space may not be freed right away. Here is why:

        @@ -44,7 +44,7 @@ title: FAQ sulle prestazioni

When choosing a consistency level, consider the trade-off between consistency and performance. For operations that require immediate visibility, use a "Strong" consistency level. For faster writes, prefer weaker consistency (data may not be immediately visible). For more information, see Consistency.
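As an illustration, a hedged sketch of requesting strong consistency for a single search with the PyMilvus ORM API (all names are illustrative):

```python
from pymilvus import Collection, connections

connections.connect(uri="http://localhost:19530")  # hypothetical endpoint
collection = Collection("my_collection")           # hypothetical collection

results = collection.search(
    data=[[0.1] * 384],
    anns_field="embedding",      # hypothetical vector field
    param={"metric_type": "L2"},
    limit=3,
    consistency_level="Strong",  # immediate visibility at some latency cost
)
```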

Can indexing a VARCHAR field improve deletion speed?

Indexing a VARCHAR field can speed up "Delete By Expression" operations, but only under certain conditions:

-- Inverted index: This index helps with IN or == expressions on non-primary-key VARCHAR fields.
+- Inverted index: This index helps with IN or == expressions on non-primary-key VARCHAR fields.
 - Trie index: This index helps with prefix queries (for example, LIKE prefix%) on non-primary VARCHAR fields.

However, indexing a VARCHAR field does not speed up:

diff --git a/localization/v2.4.x/site/it/faq/product_faq.json b/localization/v2.4.x/site/it/faq/product_faq.json
index 78ad23027..c904fe0e4 100644
--- a/localization/v2.4.x/site/it/faq/product_faq.json
+++ b/localization/v2.4.x/site/it/faq/product_faq.json
@@ -1 +1 @@
-{"codeList":["60 * 2 * 4 + 40 * 1 * 12 = 960\n"],"headingContent":"","anchorList":[{"label":"FAQ sul prodotto","href":"Product-FAQ","type":1,"isActive":false}]}
\ No newline at end of file
+{"codeList":["60 * 2 * 4 + 40 * 1 * 12 = 960\n"],"headingContent":"Product FAQ","anchorList":[{"label":"FAQ sul prodotto","href":"Product-FAQ","type":1,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.4.x/site/it/faq/product_faq.md b/localization/v2.4.x/site/it/faq/product_faq.md
index 449e3618d..1193efcf4 100644
--- a/localization/v2.4.x/site/it/faq/product_faq.md
+++ b/localization/v2.4.x/site/it/faq/product_faq.md
@@ -64,7 +64,7 @@ title: FAQ sul prodotto

To avoid this issue, try setting a larger nprobe and a smaller nlist and k.

For more information, see Vector Index.

What is the maximum vector dimension Milvus supports?

By default, Milvus can handle vectors with up to 32,768 dimensions. You can increase the value of Proxy.maxDimension to allow vectors of higher dimension.
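A hedged sketch of the corresponding milvus.yaml entry (the key is named in the FAQ; the value shown is just the default, for illustration):

```yaml
proxy:
  maxDimension: 32768  # raise this to accept higher-dimensional vectors
```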

-

Does Milvus support the Apple M1 CPU?

The current Milvus release does not support the Apple M1 CPU.

+

Does Milvus support the Apple M1 CPU?

The current Milvus release does not directly support the Apple M1 CPU. After Milvus 2.3, Milvus provides Docker images for the ARM64 architecture.

Which data types does Milvus support for the primary key field?

In the current release, Milvus supports both INT64 and strings.

Is Milvus scalable?

Yes. You can deploy a Milvus cluster with multiple nodes via Helm Chart on Kubernetes. See the Scaling Guide for further instructions.

Are queries performed in memory? What are incremental data and historical data?

Yes. When a query request arrives, Milvus searches both incremental data and historical data by loading them into memory. Incremental data resides in the growing segments, which are buffered in memory before they reach the threshold to be persisted to the storage engine, while historical data comes from the sealed segments that are stored in object storage. Incremental data and historical data together constitute the whole dataset to be searched.

diff --git a/localization/v2.4.x/site/it/getstarted/install_SDKs/install-java.json b/localization/v2.4.x/site/it/getstarted/install_SDKs/install-java.json
index e20e9328c..c2f8f3df3 100644
--- a/localization/v2.4.x/site/it/getstarted/install_SDKs/install-java.json
+++ b/localization/v2.4.x/site/it/getstarted/install_SDKs/install-java.json
@@ -1 +1 @@
-{"codeList":["\n io.milvus\n milvus-sdk-java\n 2.4.3\n\n","implementation 'io.milvus:milvus-sdk-java:2.4.3'\n"],"headingContent":"","anchorList":[{"label":"Installare l'SDK Milvus Java","href":"Install-Milvus-Java-SDK","type":1,"isActive":false},{"label":"Requisiti","href":"Requirement","type":2,"isActive":false},{"label":"Installare l'SDK Milvus Java","href":"Install-Milvus-Java-SDK","type":2,"isActive":false},{"label":"Cosa succede dopo","href":"Whats-next","type":2,"isActive":false}]}
\ No newline at end of file
+{"codeList":["\n io.milvus\n milvus-sdk-java\n 2.4.5\n\n","implementation 'io.milvus:milvus-sdk-java:2.4.5'\n"],"headingContent":"Install Milvus Java SDK","anchorList":[{"label":"Installare l'SDK Milvus Java","href":"Install-Milvus-Java-SDK","type":1,"isActive":false},{"label":"Requisiti","href":"Requirement","type":2,"isActive":false},{"label":"Installare l'SDK Milvus Java","href":"Install-Milvus-Java-SDK","type":2,"isActive":false},{"label":"Cosa succede dopo","href":"Whats-next","type":2,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.4.x/site/it/getstarted/install_SDKs/install-java.md b/localization/v2.4.x/site/it/getstarted/install_SDKs/install-java.md
index b4c75d2a6..6fe54099d 100644
--- a/localization/v2.4.x/site/it/getstarted/install_SDKs/install-java.md
+++ b/localization/v2.4.x/site/it/getstarted/install_SDKs/install-java.md
@@ -63,13 +63,13 @@ title: Installare l'SDK Milvus Java
        <dependency>
             <groupId>io.milvus</groupId>
             <artifactId>milvus-sdk-java</artifactId>
        -    <version>2.4.3</version>
        +    <version>2.4.5</version>
         </dependency>
         
- Gradle/Grails

-implementation 'io.milvus:milvus-sdk-java:2.4.3'
+implementation 'io.milvus:milvus-sdk-java:2.4.5'
         

What's next

PyMilvus is available in the Python Package Index.

It is recommended to install a PyMilvus version that matches the Milvus server version you installed. For more information, see the Release Notes.
-$ python3 -m pip install pymilvus==2.4.5
+$ python3 -m pip install pymilvus==2.4.8
         

Verify the installation

-Open In Colab
+Open In Colab
+GitHub Repository

Vectors, the output data format of neural network models, can effectively encode information and play a pivotal role in AI applications such as knowledge bases, semantic search, Retrieval-Augmented Generation (RAG), and more.

Milvus is an open-source vector database that suits AI applications of every size, from running a demo chatbot in a Jupyter notebook to building web-scale search that serves billions of users. In this guide, we will walk you through how to set up Milvus locally within minutes and use the Python client library to generate, store, and search vectors.
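As a taste of what that looks like, a minimal hedged sketch with MilvusClient and Milvus Lite (the file name, collection name, and 8-dimensional vectors are illustrative):

```python
from pymilvus import MilvusClient

client = MilvusClient("quickstart_demo.db")  # hypothetical local Milvus Lite file
client.create_collection(collection_name="demo", dimension=8)

client.insert(
    collection_name="demo",
    data=[
        {"id": 0, "vector": [0.1] * 8},
        {"id": 1, "vector": [0.2] * 8},
    ],
)
res = client.search(collection_name="demo", data=[[0.1] * 8], limit=1)
print(res)  # nearest neighbor of the query vector
```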

Install Milvus

Milvus provides a Docker Compose configuration file in the Milvus repository. To install Milvus using Docker Compose, simply run

        # Download the configuration file
        -$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml
        +$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml
         
         # Start Milvus
        -$ sudo docker compose up -d
        +$ sudo docker-compose up -d
         
         Creating milvus-etcd  ... done
         Creating milvus-minio ... done
        @@ -81,7 +81,7 @@ Creating milvus-standalone ... done
         

You can check whether the containers are up and running using the following command:

-$ sudo docker compose ps
+$ sudo docker-compose ps
     
           Name                     Command                  State                            Ports
     --------------------------------------------------------------------------------------------------------------------
    @@ -91,7 +91,7 @@ milvus-standalone   /tini -- milvus run standalone   Up             0.0.0.0:1953
     

You can stop and delete this container as follows

    # Stop Milvus
    -$ sudo docker compose down
    +$ sudo docker-compose down
     
     # Delete service data
     $ sudo rm -rf volumes
    diff --git a/localization/v2.4.x/site/it/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json b/localization/v2.4.x/site/it/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json
    index 832f969ca..2125e8166 100644
    --- a/localization/v2.4.x/site/it/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json
    +++ b/localization/v2.4.x/site/it/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json
    @@ -1 +1 @@
    -{"codeList":["$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml\n","...\nstandalone:\n  ...\n  deploy:\n    resources:\n      reservations:\n        devices:\n          - driver: nvidia\n            capabilities: [\"gpu\"]\n            device_ids: [\"0\"]\n...\n","...\nstandalone:\n  ...\n  deploy:\n    resources:\n      reservations:\n        devices:\n          - driver: nvidia\n            capabilities: [\"gpu\"]\n            device_ids: ['0', '1']\n...\n","$ sudo docker compose up -d\n\nCreating milvus-etcd  ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker compose ps\n\n      Name                     Command                  State                            Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd         etcd -advertise-client-url ...   Up             2379/tcp, 2380/tcp\nmilvus-minio        /usr/bin/docker-entrypoint ...   Up (healthy)   9000/tcp\nmilvus-standalone   /tini -- milvus run standalone   Up             0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","$ CUDA_VISIBLE_DEVICES=0 ./milvus run standalone\n","$ CUDA_VISIBLE_DEVICES=0,1 ./milvus run standalone\n","# Stop Milvus\n$ sudo docker compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n","docker cp :/milvus/configs/milvus.yaml milvus.yaml\n","vim milvus.yaml\n","...\ngpu:\n  initMemSize: 0\n  maxMemSize: 0\n...\n","docker cp milvus.yaml :/milvus/configs/milvus.yaml\n","docker stop \ndocker start \n"],"headingContent":"","anchorList":[{"label":"Esecuzione di Milvus con supporto GPU tramite Docker Compose","href":"Run-Milvus-with-GPU-Support-Using-Docker-Compose","type":1,"isActive":false},{"label":"Prerequisiti","href":"Prerequisites","type":2,"isActive":false},{"label":"Installare Milvus","href":"Install-Milvus","type":2,"isActive":false},{"label":"Configurare il pool di memoria","href":"Configure-memory-pool","type":2,"isActive":false},{"label":"Cosa succede ora","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml\n","...\nstandalone:\n  ...\n  deploy:\n    resources:\n      reservations:\n        devices:\n          - driver: nvidia\n            capabilities: [\"gpu\"]\n            device_ids: [\"0\"]\n...\n","...\nstandalone:\n  ...\n  deploy:\n    resources:\n      reservations:\n        devices:\n          - driver: nvidia\n            capabilities: [\"gpu\"]\n            device_ids: ['0', '1']\n...\n","$ sudo docker compose up -d\n\nCreating milvus-etcd  ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker compose ps\n\n      Name                     Command                  State                            Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd         etcd -advertise-client-url ...   Up             2379/tcp, 2380/tcp\nmilvus-minio        /usr/bin/docker-entrypoint ...   Up (healthy)   9000/tcp\nmilvus-standalone   /tini -- milvus run standalone   Up             0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","$ CUDA_VISIBLE_DEVICES=0 ./milvus run standalone\n","$ CUDA_VISIBLE_DEVICES=0,1 ./milvus run standalone\n","# Stop Milvus\n$ sudo docker compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n","docker cp :/milvus/configs/milvus.yaml milvus.yaml\n","vim milvus.yaml\n","...\ngpu:\n  initMemSize: 0\n  maxMemSize: 0\n...\n","docker cp milvus.yaml :/milvus/configs/milvus.yaml\n","docker stop \ndocker start \n"],"headingContent":"Run Milvus with GPU Support Using Docker Compose","anchorList":[{"label":"Esecuzione di Milvus con supporto GPU tramite Docker Compose","href":"Run-Milvus-with-GPU-Support-Using-Docker-Compose","type":1,"isActive":false},{"label":"Prerequisiti","href":"Prerequisites","type":2,"isActive":false},{"label":"Installare Milvus","href":"Install-Milvus","type":2,"isActive":false},{"label":"Configurare il pool di memoria","href":"Configure-memory-pool","type":2,"isActive":false},{"label":"Cosa succede ora","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/it/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md b/localization/v2.4.x/site/it/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md
    index a4f0c0021..1e6a9e33d 100644
    --- a/localization/v2.4.x/site/it/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md
    +++ b/localization/v2.4.x/site/it/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md
    @@ -3,7 +3,7 @@ id: install_standalone-docker-compose-gpu.md
     label: Standalone (Docker Compose)
     related_key: Kubernetes
     summary: Scoprite come installare il cluster Milvus su Kubernetes.
    -title: Eseguire Milvus con il supporto delle GPU usando Docker Compose
    +title: Esecuzione di Milvus con supporto GPU tramite Docker Compose
     ---
     

Running Milvus with GPU Support Using Docker Compose

To install Milvus with GPU support using Docker Compose, follow the steps below.

-

-1. Download and configure the YAML file

-Download milvus-standalone-docker-compose-gpu.yml and save it as docker-compose.yml manually, or with the following command.

-$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml
+
+1. Download and configure the YAML file
+
+Download milvus-standalone-docker-compose-gpu.yml and save it as docker-compose.yml manually, or with the following command.
+
+$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml
     

You need to make some changes to the environment variables of the standalone service in the YAML file as follows:
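The relevant snippet, reproduced from the release's compose file as given in the code list above, assigns GPU device 0 to the standalone service:

```yaml
...
standalone:
  ...
  deploy:
    resources:
      reservations:
        devices:
          - driver: nvidia
            capabilities: ["gpu"]
            device_ids: ["0"]
...
```

To assign multiple GPUs, list them instead, e.g. device_ids: ['0', '1'], as shown in the code list.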

diff --git a/localization/v2.4.x/site/it/integrations/evaluation_with_deepeval.md b/localization/v2.4.x/site/it/integrations/evaluation_with_deepeval.md
index 567955ed1..fe150ec69 100644
--- a/localization/v2.4.x/site/it/integrations/evaluation_with_deepeval.md
+++ b/localization/v2.4.x/site/it/integrations/evaluation_with_deepeval.md
@@ -20,7 +20,8 @@ title: Valutazione con DeepEval
-Open In Colab
+Open In Colab
+GitHub Repository

This guide demonstrates how to use DeepEval to evaluate a Retrieval-Augmented Generation (RAG) pipeline built on Milvus.

A RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

DeepEval is a framework that helps you evaluate RAG pipelines. There are existing tools and frameworks that help you build these pipelines, but evaluating them and quantifying their performance can be hard. This is where DeepEval comes in.

      @@ -366,9 +367,9 @@ result = evaluate(
      /Users/eureka/miniconda3/envs/zilliz/lib/python3.9/site-packages/deepeval/__init__.py:49: UserWarning: You are using deepeval version 1.1.6, however version 1.2.2 is available. You should consider upgrading via the "pip install --upgrade deepeval" command.
         warnings.warn(
       
-You're running DeepEval's latest Contextual Precision Metric! (using gpt-4o, strict=False, async_mode=True)...
-✨ You're running DeepEval's latest Contextual Recall Metric! (using gpt-4o, strict=False, async_mode=True)...
-✨ You're running DeepEval's latest Contextual Relevancy Metric! (using gpt-4o, strict=False, async_mode=True)...
+You're running DeepEval's latest Contextual Precision Metric! (using gpt-4o, strict=False, async_mode=True)...
+✨ You're running DeepEval's latest Contextual Recall Metric! (using gpt-4o, strict=False, async_mode=True)...
+✨ You're running DeepEval's latest Contextual Relevancy Metric! (using gpt-4o, strict=False, async_mode=True)...
      Event loop is already running. Applying nest_asyncio patch to allow async execution...
       
       
      @@ -422,7 +423,7 @@ result = evaluate(
           print_results=False,  # Change to True to see detailed metric results
       )
       
-You're running DeepEval's latest Answer Relevancy Metric! (using gpt-4o, strict=False, async_mode=True)...
+You're running DeepEval's latest Answer Relevancy Metric! (using gpt-4o, strict=False, async_mode=True)...
 ✨ You're running DeepEval's latest Faithfulness Metric! (using gpt-4o, strict=False, async_mode=True)...
      Event loop is already running. Applying nest_asyncio patch to allow async execution...
       
      diff --git a/localization/v2.4.x/site/it/integrations/evaluation_with_phoenix.md b/localization/v2.4.x/site/it/integrations/evaluation_with_phoenix.md
      index 7212a79c3..221cf337e 100644
      --- a/localization/v2.4.x/site/it/integrations/evaluation_with_phoenix.md
      +++ b/localization/v2.4.x/site/it/integrations/evaluation_with_phoenix.md
      @@ -20,7 +20,8 @@ title: Valutazione con Arize Pheonix
                 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
               >
             
      -    

      Open In Colab

      +

      Open In Colab +GitHub Repository

This guide demonstrates how to use Arize Phoenix to evaluate a Retrieval-Augmented Generation (RAG) pipeline built on Milvus.

A RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

Arize Phoenix is a framework that helps you evaluate RAG pipelines. There are existing tools and frameworks that help you build these pipelines, but evaluating them and quantifying their performance can be hard. This is where Arize Phoenix comes in.

      @@ -359,7 +360,7 @@ OpenAIInstrumentor().instrument()

-Alt Text
+Alt Text

      import nest_asyncio
       
      diff --git a/localization/v2.4.x/site/it/integrations/integrate_with_bentoml.json b/localization/v2.4.x/site/it/integrations/integrate_with_bentoml.json
      index 26f89e440..c7fae7f61 100644
      --- a/localization/v2.4.x/site/it/integrations/integrate_with_bentoml.json
      +++ b/localization/v2.4.x/site/it/integrations/integrate_with_bentoml.json
      @@ -1 +1 @@
      -{"codeList":["$ pip install -U pymilvus bentoml\n","import bentoml\n\nBENTO_EMBEDDING_MODEL_END_POINT = \"BENTO_EMBEDDING_MODEL_END_POINT\"\nBENTO_API_TOKEN = \"BENTO_API_TOKEN\"\n\nembedding_client = bentoml.SyncHTTPClient(\n    BENTO_EMBEDDING_MODEL_END_POINT, token=BENTO_API_TOKEN\n)\n","# naively chunk on newlines\ndef chunk_text(filename: str) -> list:\n    with open(filename, \"r\") as f:\n        text = f.read()\n    sentences = text.split(\"\\n\")\n    return sentences\n","import os\nimport requests\nimport urllib.request\n\n# set up the data source\nrepo = \"ytang07/bento_octo_milvus_RAG\"\ndirectory = \"data\"\nsave_dir = \"./city_data\"\napi_url = f\"https://api.github.com/repos/{repo}/contents/{directory}\"\n\n\nresponse = requests.get(api_url)\ndata = response.json()\n\nif not os.path.exists(save_dir):\n    os.makedirs(save_dir)\n\nfor item in data:\n    if item[\"type\"] == \"file\":\n        file_url = item[\"download_url\"]\n        file_path = os.path.join(save_dir, item[\"name\"])\n        urllib.request.urlretrieve(file_url, file_path)\n","# please upload your data directory under this file's folder\ncities = os.listdir(\"city_data\")\n# store chunked text for each of the cities in a list of dicts\ncity_chunks = []\nfor city in cities:\n    chunked = chunk_text(f\"city_data/{city}\")\n    cleaned = []\n    for chunk in chunked:\n        if len(chunk) > 7:\n            cleaned.append(chunk)\n    mapped = {\"city_name\": city.split(\".\")[0], \"chunks\": cleaned}\n    city_chunks.append(mapped)\n","def get_embeddings(texts: list) -> list:\n    if len(texts) > 25:\n        splits = [texts[x : x + 25] for x in range(0, len(texts), 25)]\n        embeddings = []\n        for split in splits:\n            embedding_split = embedding_client.encode(sentences=split)\n            embeddings += embedding_split\n        return embeddings\n    return embedding_client.encode(\n        sentences=texts,\n    )\n","entries = []\nfor city_dict in city_chunks:\n    # No need for the embeddings list if get_embeddings already returns a list of lists\n    embedding_list = get_embeddings(city_dict[\"chunks\"])  # returns a list of lists\n    # Now match texts with embeddings and city name\n    for i, embedding in enumerate(embedding_list):\n        entry = {\n            \"embedding\": embedding,\n            \"sentence\": city_dict[\"chunks\"][\n                i\n            ],  # Assume \"chunks\" has the corresponding texts for the embeddings\n            \"city\": city_dict[\"city_name\"],\n        }\n        entries.append(entry)\n    print(entries)\n","from pymilvus import MilvusClient\n\nCOLLECTION_NAME = \"Bento_Milvus_RAG\"  # random name for your collection\nDIMENSION = 384\n\n# Initialize a Milvus Lite client\nmilvus_client = MilvusClient(\"milvus_demo.db\")\n","from pymilvus import connections\n\nconnections.connect(uri=\"milvus_demo.db\")\n","from pymilvus import MilvusClient, DataType, Collection\n\n# Create schema\nschema = MilvusClient.create_schema(\n    auto_id=True,\n    enable_dynamic_field=True,\n)\n\n# 3.2. 
Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"embedding\", datatype=DataType.FLOAT_VECTOR, dim=DIMENSION)\n","# prepare index parameters\nindex_params = milvus_client.prepare_index_params()\n\n# add index\nindex_params.add_index(\n    field_name=\"embedding\",\n    index_type=\"AUTOINDEX\",  # use autoindex instead of other complex indexing method\n    metric_type=\"COSINE\",  # L2, COSINE, or IP\n)\n\n# create collection\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n    collection_name=COLLECTION_NAME, schema=schema, index_params=index_params\n)\n\n# Outside the loop, now you upsert all the entries at once\nmilvus_client.insert(collection_name=COLLECTION_NAME, data=entries)\n","BENTO_LLM_END_POINT = \"BENTO_LLM_END_POINT\"\n\nllm_client = bentoml.SyncHTTPClient(BENTO_LLM_END_POINT, token=BENTO_API_TOKEN)\n","def dorag(question: str, context: str):\n\n    prompt = (\n        f\"You are a helpful assistant. The user has a question. Answer the user question based only on the context: {context}. \\n\"\n        f\"The user question is {question}\"\n    )\n\n    results = llm_client.generate(\n        max_tokens=1024,\n        prompt=prompt,\n    )\n\n    res = \"\"\n    for result in results:\n        res += result\n\n    return res\n","question = \"What state is Cambridge in?\"\n\n\ndef ask_a_question(question):\n    embeddings = get_embeddings([question])\n    res = milvus_client.search(\n        collection_name=COLLECTION_NAME,\n        data=embeddings,  # search for the one (1) embedding returned as a list of lists\n        anns_field=\"embedding\",  # Search across embeddings\n        limit=5,  # get me the top 5 results\n        output_fields=[\"sentence\"],  # get the sentence/chunk and city\n    )\n\n    sentences = []\n    for hits in res:\n        for hit in hits:\n            print(hit)\n            sentences.append(hit[\"entity\"][\"sentence\"])\n    context = \". \".join(sentences)\n    return context\n\n\ncontext = ask_a_question(question=question)\nprint(context)\n","print(dorag(question=question, context=context))\n"],"headingContent":"","anchorList":[{"label":"Generazione Aumentata dal Recupero (RAG) con Milvus e BentoML","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-BentoML","type":1,"isActive":false},{"label":"Introduzione","href":"Introduction","type":2,"isActive":false},{"label":"Prima di iniziare","href":"Before-you-begin","type":2,"isActive":false},{"label":"Servire gli embeddings con BentoML/BentoCloud","href":"Serving-Embeddings-with-BentoMLBentoCloud","type":2,"isActive":false},{"label":"Inserire i dati in un database vettoriale per recuperarli","href":"Inserting-Data-into-a-Vector-Database-for-Retrieval","type":2,"isActive":false},{"label":"Creazione della raccolta Milvus Lite","href":"Creating-Your-Milvus-Lite-Collection","type":2,"isActive":false},{"label":"Configurare l'LLM per RAG","href":"Set-up-Your-LLM-for-RAG","type":2,"isActive":false},{"label":"Istruzioni LLM","href":"LLM-Instructions","type":2,"isActive":false},{"label":"Un esempio di RAG","href":"A-RAG-Example","type":2,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":["$ pip install -U pymilvus bentoml\n","import bentoml\n\nBENTO_EMBEDDING_MODEL_END_POINT = \"BENTO_EMBEDDING_MODEL_END_POINT\"\nBENTO_API_TOKEN = \"BENTO_API_TOKEN\"\n\nembedding_client = bentoml.SyncHTTPClient(\n    BENTO_EMBEDDING_MODEL_END_POINT, token=BENTO_API_TOKEN\n)\n","# naively chunk on newlines\ndef chunk_text(filename: str) -> list:\n    with open(filename, \"r\") as f:\n        text = f.read()\n    sentences = text.split(\"\\n\")\n    return sentences\n","import os\nimport requests\nimport urllib.request\n\n# set up the data source\nrepo = \"ytang07/bento_octo_milvus_RAG\"\ndirectory = \"data\"\nsave_dir = \"./city_data\"\napi_url = f\"https://api.github.com/repos/{repo}/contents/{directory}\"\n\n\nresponse = requests.get(api_url)\ndata = response.json()\n\nif not os.path.exists(save_dir):\n    os.makedirs(save_dir)\n\nfor item in data:\n    if item[\"type\"] == \"file\":\n        file_url = item[\"download_url\"]\n        file_path = os.path.join(save_dir, item[\"name\"])\n        urllib.request.urlretrieve(file_url, file_path)\n","# please upload your data directory under this file's folder\ncities = os.listdir(\"city_data\")\n# store chunked text for each of the cities in a list of dicts\ncity_chunks = []\nfor city in cities:\n    chunked = chunk_text(f\"city_data/{city}\")\n    cleaned = []\n    for chunk in chunked:\n        if len(chunk) > 7:\n            cleaned.append(chunk)\n    mapped = {\"city_name\": city.split(\".\")[0], \"chunks\": cleaned}\n    city_chunks.append(mapped)\n","def get_embeddings(texts: list) -> list:\n    if len(texts) > 25:\n        splits = [texts[x : x + 25] for x in range(0, len(texts), 25)]\n        embeddings = []\n        for split in splits:\n            embedding_split = embedding_client.encode(sentences=split)\n            embeddings += embedding_split\n        return embeddings\n    return embedding_client.encode(\n        sentences=texts,\n    )\n","entries = []\nfor city_dict in city_chunks:\n    # No need for the embeddings list if get_embeddings already returns a list of lists\n    embedding_list = get_embeddings(city_dict[\"chunks\"])  # returns a list of lists\n    # Now match texts with embeddings and city name\n    for i, embedding in enumerate(embedding_list):\n        entry = {\n            \"embedding\": embedding,\n            \"sentence\": city_dict[\"chunks\"][\n                i\n            ],  # Assume \"chunks\" has the corresponding texts for the embeddings\n            \"city\": city_dict[\"city_name\"],\n        }\n        entries.append(entry)\n    print(entries)\n","from pymilvus import MilvusClient\n\nCOLLECTION_NAME = \"Bento_Milvus_RAG\"  # random name for your collection\nDIMENSION = 384\n\n# Initialize a Milvus Lite client\nmilvus_client = MilvusClient(\"milvus_demo.db\")\n","from pymilvus import connections\n\nconnections.connect(uri=\"milvus_demo.db\")\n","from pymilvus import MilvusClient, DataType, Collection\n\n# Create schema\nschema = MilvusClient.create_schema(\n    auto_id=True,\n    enable_dynamic_field=True,\n)\n\n# 3.2. 
Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"embedding\", datatype=DataType.FLOAT_VECTOR, dim=DIMENSION)\n","# prepare index parameters\nindex_params = milvus_client.prepare_index_params()\n\n# add index\nindex_params.add_index(\n    field_name=\"embedding\",\n    index_type=\"AUTOINDEX\",  # use autoindex instead of other complex indexing method\n    metric_type=\"COSINE\",  # L2, COSINE, or IP\n)\n\n# create collection\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n    collection_name=COLLECTION_NAME, schema=schema, index_params=index_params\n)\n\n# Outside the loop, now you upsert all the entries at once\nmilvus_client.insert(collection_name=COLLECTION_NAME, data=entries)\n","BENTO_LLM_END_POINT = \"BENTO_LLM_END_POINT\"\n\nllm_client = bentoml.SyncHTTPClient(BENTO_LLM_END_POINT, token=BENTO_API_TOKEN)\n","def dorag(question: str, context: str):\n\n    prompt = (\n        f\"You are a helpful assistant. The user has a question. Answer the user question based only on the context: {context}. \\n\"\n        f\"The user question is {question}\"\n    )\n\n    results = llm_client.generate(\n        max_tokens=1024,\n        prompt=prompt,\n    )\n\n    res = \"\"\n    for result in results:\n        res += result\n\n    return res\n","question = \"What state is Cambridge in?\"\n\n\ndef ask_a_question(question):\n    embeddings = get_embeddings([question])\n    res = milvus_client.search(\n        collection_name=COLLECTION_NAME,\n        data=embeddings,  # search for the one (1) embedding returned as a list of lists\n        anns_field=\"embedding\",  # Search across embeddings\n        limit=5,  # get me the top 5 results\n        output_fields=[\"sentence\"],  # get the sentence/chunk and city\n    )\n\n    sentences = []\n    for hits in res:\n        for hit in hits:\n            print(hit)\n            sentences.append(hit[\"entity\"][\"sentence\"])\n    context = \". \".join(sentences)\n    return context\n\n\ncontext = ask_a_question(question=question)\nprint(context)\n","print(dorag(question=question, context=context))\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and BentoML","anchorList":[{"label":"Generazione Aumentata dal Recupero (RAG) con Milvus e BentoML","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-BentoML","type":1,"isActive":false},{"label":"Introduzione","href":"Introduction","type":2,"isActive":false},{"label":"Prima di iniziare","href":"Before-you-begin","type":2,"isActive":false},{"label":"Servire gli embeddings con BentoML/BentoCloud","href":"Serving-Embeddings-with-BentoMLBentoCloud","type":2,"isActive":false},{"label":"Inserire i dati in un database vettoriale per recuperarli","href":"Inserting-Data-into-a-Vector-Database-for-Retrieval","type":2,"isActive":false},{"label":"Creazione della raccolta Milvus Lite","href":"Creating-Your-Milvus-Lite-Collection","type":2,"isActive":false},{"label":"Configurare l'LLM per RAG","href":"Set-up-Your-LLM-for-RAG","type":2,"isActive":false},{"label":"Istruzioni LLM","href":"LLM-Instructions","type":2,"isActive":false},{"label":"Un esempio di RAG","href":"A-RAG-Example","type":2,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/it/integrations/integrate_with_bentoml.md b/localization/v2.4.x/site/it/integrations/integrate_with_bentoml.md
      index 3ddb3976a..4956864ed 100644
      --- a/localization/v2.4.x/site/it/integrations/integrate_with_bentoml.md
      +++ b/localization/v2.4.x/site/it/integrations/integrate_with_bentoml.md
      @@ -21,7 +21,8 @@ title: Generazione Aumentata dal Recupero (RAG) con Milvus e BentoML
                 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
               >
             
-    [Open In Colab badge]
+    [Open In Colab badge] [GitHub Repository badge]

Introduction

[Open In Colab badge]
+[Open In Colab badge] [GitHub Repository badge]

This guide shows how to build a Retrieval-Augmented Generation (RAG) system using CAMEL and Milvus.

A RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. It first retrieves relevant documents from a corpus using Milvus, then uses a generative model to generate new text based on the retrieved documents.

CAMEL is a multi-agent framework. Milvus is the world's most advanced open-source vector database, built to power embedding similarity search and AI applications.
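The retrieve-then-generate loop described above can be sketched with pymilvus alone. This is a minimal illustration, not the CAMEL integration itself: embed() and generate() are hypothetical placeholders for an embedding function and an LLM call, and the rag_docs collection is assumed to already exist and be populated.

from pymilvus import MilvusClient

client = MilvusClient("rag_demo.db")  # Milvus Lite local file (illustrative)

def retrieve(question, embed, top_k=5):
    # Embed the question and fetch the nearest chunks from the vector store.
    hits = client.search(
        collection_name="rag_docs",   # hypothetical, pre-populated collection
        data=[embed(question)],       # embed() is a placeholder embedding function
        limit=top_k,
        output_fields=["text"],
    )
    return ". ".join(hit["entity"]["text"] for hit in hits[0])

def rag_answer(question, embed, generate):
    # Ground the prompt in the retrieved context, then hand it to the LLM.
    context = retrieve(question, embed)
    prompt = f"Answer based only on the context: {context}\nQuestion: {question}"
    return generate(prompt)           # generate() is a placeholder LLM call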

      diff --git a/localization/v2.4.x/site/it/integrations/integrate_with_dspy.json b/localization/v2.4.x/site/it/integrations/integrate_with_dspy.json index 2cd9a437b..c25fe1fcc 100644 --- a/localization/v2.4.x/site/it/integrations/integrate_with_dspy.json +++ b/localization/v2.4.x/site/it/integrations/integrate_with_dspy.json @@ -1 +1 @@ -{"codeList":["$ pip install \"dspy-ai[milvus]\"\n$ pip install -U pymilvus\n","from dspy.datasets import HotPotQA\n\n# Load the dataset.\ndataset = HotPotQA(\n train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0\n)\n\n# Tell DSPy that the 'question' field is the input. Any other fields are labels and/or metadata.\ntrainset = [x.with_inputs(\"question\") for x in dataset.train]\ndevset = [x.with_inputs(\"question\") for x in dataset.dev]\n","import requests\nimport os\n\nos.environ[\"OPENAI_API_KEY\"] = \"\"\nMILVUS_URI = \"example.db\"\nMILVUS_TOKEN = \"\"\n\nfrom pymilvus import MilvusClient, DataType, Collection\nfrom dspy.retrieve.milvus_rm import openai_embedding_function\n\nclient = MilvusClient(uri=MILVUS_URI, token=MILVUS_TOKEN)\n\nif \"dspy_example\" not in client.list_collections():\n client.create_collection(\n collection_name=\"dspy_example\",\n overwrite=True,\n dimension=1536,\n primary_field_name=\"id\",\n vector_field_name=\"embedding\",\n id_type=\"int\",\n metric_type=\"IP\",\n max_length=65535,\n enable_dynamic=True,\n )\ntext = requests.get(\n \"https://raw.githubusercontent.com/wxywb/dspy_dataset_sample/master/sample_data.txt\"\n).text\n\nfor idx, passage in enumerate(text.split(\"\\n\")):\n if len(passage) == 0:\n continue\n client.insert(\n collection_name=\"dspy_example\",\n data=[\n {\n \"id\": idx,\n \"embedding\": openai_embedding_function(passage)[0],\n \"text\": passage,\n }\n ],\n )\n","from dspy.retrieve.milvus_rm import MilvusRM\nimport dspy\n\nretriever_model = MilvusRM(\n collection_name=\"dspy_example\",\n uri=MILVUS_URI,\n token=MILVUS_TOKEN, # ignore this if no token is required for Milvus connection\n embedding_function=openai_embedding_function,\n)\nturbo = dspy.OpenAI(model=\"gpt-3.5-turbo\")\ndspy.settings.configure(lm=turbo)\n","class GenerateAnswer(dspy.Signature):\n \"\"\"Answer questions with short factoid answers.\"\"\"\n\n context = dspy.InputField(desc=\"may contain relevant facts\")\n question = dspy.InputField()\n answer = dspy.OutputField(desc=\"often between 1 and 5 words\")\n","class RAG(dspy.Module):\n def __init__(self, rm):\n super().__init__()\n self.retrieve = rm\n\n # This signature indicates the task imposed on the COT module.\n self.generate_answer = dspy.ChainOfThought(GenerateAnswer)\n\n def forward(self, question):\n # Use milvus_rm to retrieve context for the question.\n context = self.retrieve(question).passages\n # COT module takes \"context, query\" and output \"answer\".\n prediction = self.generate_answer(context=context, question=question)\n return dspy.Prediction(\n context=[item.long_text for item in context], answer=prediction.answer\n )\n","rag = RAG(retriever_model)\nprint(rag(\"who write At My Window\").answer)\n","from dspy.evaluate.evaluate import Evaluate\nfrom dspy.datasets import HotPotQA\n\nevaluate_on_hotpotqa = Evaluate(\n devset=devset, num_threads=1, display_progress=False, display_table=5\n)\n\nmetric = dspy.evaluate.answer_exact_match\nscore = evaluate_on_hotpotqa(rag, metric=metric)\nprint(\"rag:\", score)\n","from dspy.teleprompt import BootstrapFewShot\n\n# Validation logic: check that the predicted answer is correct.# Also check that the 
retrieved context does contain that answer.\n\n\ndef validate_context_and_answer(example, pred, trace=None):\n answer_EM = dspy.evaluate.answer_exact_match(example, pred)\n answer_PM = dspy.evaluate.answer_passage_match(example, pred)\n return answer_EM and answer_PM\n\n\n# Set up a basic teleprompter, which will compile our RAG program.\nteleprompter = BootstrapFewShot(metric=validate_context_and_answer)\n\n# Compile!\ncompiled_rag = teleprompter.compile(rag, trainset=trainset)\n\n# Now compiled_rag is optimized and ready to answer your new question!\n# Now, let’s evaluate the compiled RAG program.\nscore = evaluate_on_hotpotqa(compiled_rag, metric=metric)\nprint(score)\nprint(\"compile_rag:\", score)\n"],"headingContent":"","anchorList":[{"label":"Integrare Milvus con DSPy","href":"Integrate-Milvus-with-DSPy","type":1,"isActive":false},{"label":"Cos'è DSPy","href":"What-is-DSPy","type":2,"isActive":false},{"label":"Vantaggi dell'uso di DSPy","href":"Benefits-of-using-DSPy","type":2,"isActive":false},{"label":"I moduli","href":"Modules","type":2,"isActive":false},{"label":"Perché Milvus in DSPy","href":"Why-Milvus-in-DSPy","type":2,"isActive":false},{"label":"Esempi","href":"Examples","type":2,"isActive":false},{"label":"Sintesi","href":"Summary","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install \"dspy-ai[milvus]\"\n$ pip install -U pymilvus\n","from dspy.datasets import HotPotQA\n\n# Load the dataset.\ndataset = HotPotQA(\n train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0\n)\n\n# Tell DSPy that the 'question' field is the input. Any other fields are labels and/or metadata.\ntrainset = [x.with_inputs(\"question\") for x in dataset.train]\ndevset = [x.with_inputs(\"question\") for x in dataset.dev]\n","import requests\nimport os\n\nos.environ[\"OPENAI_API_KEY\"] = \"\"\nMILVUS_URI = \"example.db\"\nMILVUS_TOKEN = \"\"\n\nfrom pymilvus import MilvusClient, DataType, Collection\nfrom dspy.retrieve.milvus_rm import openai_embedding_function\n\nclient = MilvusClient(uri=MILVUS_URI, token=MILVUS_TOKEN)\n\nif \"dspy_example\" not in client.list_collections():\n client.create_collection(\n collection_name=\"dspy_example\",\n overwrite=True,\n dimension=1536,\n primary_field_name=\"id\",\n vector_field_name=\"embedding\",\n id_type=\"int\",\n metric_type=\"IP\",\n max_length=65535,\n enable_dynamic=True,\n )\ntext = requests.get(\n \"https://raw.githubusercontent.com/wxywb/dspy_dataset_sample/master/sample_data.txt\"\n).text\n\nfor idx, passage in enumerate(text.split(\"\\n\")):\n if len(passage) == 0:\n continue\n client.insert(\n collection_name=\"dspy_example\",\n data=[\n {\n \"id\": idx,\n \"embedding\": openai_embedding_function(passage)[0],\n \"text\": passage,\n }\n ],\n )\n","from dspy.retrieve.milvus_rm import MilvusRM\nimport dspy\n\nretriever_model = MilvusRM(\n collection_name=\"dspy_example\",\n uri=MILVUS_URI,\n token=MILVUS_TOKEN, # ignore this if no token is required for Milvus connection\n embedding_function=openai_embedding_function,\n)\nturbo = dspy.OpenAI(model=\"gpt-3.5-turbo\")\ndspy.settings.configure(lm=turbo)\n","class GenerateAnswer(dspy.Signature):\n \"\"\"Answer questions with short factoid answers.\"\"\"\n\n context = dspy.InputField(desc=\"may contain relevant facts\")\n question = dspy.InputField()\n answer = dspy.OutputField(desc=\"often between 1 and 5 words\")\n","class RAG(dspy.Module):\n def __init__(self, rm):\n super().__init__()\n self.retrieve = rm\n\n # This signature indicates the task imposed on 
the COT module.\n self.generate_answer = dspy.ChainOfThought(GenerateAnswer)\n\n def forward(self, question):\n # Use milvus_rm to retrieve context for the question.\n context = self.retrieve(question).passages\n # COT module takes \"context, query\" and output \"answer\".\n prediction = self.generate_answer(context=context, question=question)\n return dspy.Prediction(\n context=[item.long_text for item in context], answer=prediction.answer\n )\n","rag = RAG(retriever_model)\nprint(rag(\"who write At My Window\").answer)\n","from dspy.evaluate.evaluate import Evaluate\nfrom dspy.datasets import HotPotQA\n\nevaluate_on_hotpotqa = Evaluate(\n devset=devset, num_threads=1, display_progress=False, display_table=5\n)\n\nmetric = dspy.evaluate.answer_exact_match\nscore = evaluate_on_hotpotqa(rag, metric=metric)\nprint(\"rag:\", score)\n","from dspy.teleprompt import BootstrapFewShot\n\n# Validation logic: check that the predicted answer is correct.# Also check that the retrieved context does contain that answer.\n\n\ndef validate_context_and_answer(example, pred, trace=None):\n answer_EM = dspy.evaluate.answer_exact_match(example, pred)\n answer_PM = dspy.evaluate.answer_passage_match(example, pred)\n return answer_EM and answer_PM\n\n\n# Set up a basic teleprompter, which will compile our RAG program.\nteleprompter = BootstrapFewShot(metric=validate_context_and_answer)\n\n# Compile!\ncompiled_rag = teleprompter.compile(rag, trainset=trainset)\n\n# Now compiled_rag is optimized and ready to answer your new question!\n# Now, let’s evaluate the compiled RAG program.\nscore = evaluate_on_hotpotqa(compiled_rag, metric=metric)\nprint(score)\nprint(\"compile_rag:\", score)\n"],"headingContent":"Integrate Milvus with DSPy","anchorList":[{"label":"Integrare Milvus con DSPy","href":"Integrate-Milvus-with-DSPy","type":1,"isActive":false},{"label":"Cos'è DSPy","href":"What-is-DSPy","type":2,"isActive":false},{"label":"Vantaggi dell'uso di DSPy","href":"Benefits-of-using-DSPy","type":2,"isActive":false},{"label":"I moduli","href":"Modules","type":2,"isActive":false},{"label":"Perché Milvus in DSPy","href":"Why-Milvus-in-DSPy","type":2,"isActive":false},{"label":"Esempi","href":"Examples","type":2,"isActive":false},{"label":"Sintesi","href":"Summary","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/integrations/integrate_with_dspy.md b/localization/v2.4.x/site/it/integrations/integrate_with_dspy.md index d171f1c5c..6ec5adaef 100644 --- a/localization/v2.4.x/site/it/integrations/integrate_with_dspy.md +++ b/localization/v2.4.x/site/it/integrations/integrate_with_dspy.md @@ -20,7 +20,8 @@ title: Integrare Milvus con DSPy d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

[Open In Colab badge]
+[Open In Colab badge] [GitHub Repository badge]

What is DSPy

-DSPy, introduced by the Stanford NLP Group, is a groundbreaking programmatic framework designed to optimize prompts and weights within language models, particularly valuable in scenarios where large language models (LLMs) are integrated across several stages of a pipeline. Unlike conventional prompt-engineering techniques, which are based on manual creation and modification, DSPy adopts a learning-driven approach. By assimilating question-answer examples, DSPy dynamically generates optimized prompts tailored to specific tasks. This innovative methodology allows entire pipelines to be reassembled seamlessly, eliminating the need for continuous manual prompt adjustments. DSPy's Pythonic syntax offers a variety of composable and declarative modules, simplifying the instruction of LLMs.

+DSPy, introduced by the Stanford NLP Group, is a groundbreaking programmatic framework designed to optimize prompts and weights within language models, particularly valuable in scenarios where large language models (LLMs) are integrated across several stages of a pipeline. Unlike conventional prompt-engineering techniques, which rely on manual creation and modification, DSPy adopts a learning-driven approach. By assimilating question-answer examples, DSPy dynamically generates optimized prompts tailored to specific tasks. This innovative methodology allows entire pipelines to be reassembled seamlessly, eliminating the need for continuous manual prompt adjustments. DSPy's Pythonic syntax offers a variety of composable and declarative modules, simplifying the instruction of LLMs.
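The "composable and declarative modules" mentioned above look like the following minimal sketch, mirroring the signature/module pattern in the code payload earlier in this diff; actually calling qa() additionally requires a language model configured via dspy.settings.

import dspy

# Declarative signature: describe the I/O contract instead of hand-writing a prompt.
class BasicQA(dspy.Signature):
    """Answer questions with short factoid answers."""

    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")

# A module turns the signature into an executable, optimizable component.
qa = dspy.ChainOfThought(BasicQA)
# qa(question="...").answer would run once an LM is configured via dspy.settings.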

Benefits of using DSPy

Pipeline optimization

After defining the program, the next step is compilation. This process updates the parameters of each module to improve performance. The compilation process depends on three critical factors:

-• Training set: For this demonstration, we will use the 20 question-answer examples from our training data set.
+• Training set: For this demonstration, we will use the 20 question-answer examples from our training set.
• Validation metric: We define a simple validate_context_and_answer metric. This metric verifies the accuracy of the predicted answer and ensures that the retrieved context includes the answer.
-• Specific optimizer (teleprompter): DSPy's compiler incorporates several teleprompters designed to optimize programs effectively.
+• Specific optimizer (teleprompter): DSPy's compiler incorporates multiple teleprompters designed to optimize programs effectively.
      from dspy.teleprompt import BootstrapFewShot
       
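Putting the three factors together, compilation is a single call. The following continues from the import above and assumes rag, trainset, and validate_context_and_answer are defined as in the code payload earlier in this diff:

# Teleprompter driven by the validation metric described above.
teleprompter = BootstrapFewShot(metric=validate_context_and_answer)

# Compile: bootstrap few-shot demonstrations from the training set and
# return an optimized copy of the RAG program.
compiled_rag = teleprompter.compile(rag, trainset=trainset)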
      diff --git a/localization/v2.4.x/site/it/integrations/integrate_with_haystack.json b/localization/v2.4.x/site/it/integrations/integrate_with_haystack.json
      index f0755c35a..d825b3b88 100644
      --- a/localization/v2.4.x/site/it/integrations/integrate_with_haystack.json
      +++ b/localization/v2.4.x/site/it/integrations/integrate_with_haystack.json
      @@ -1 +1 @@
      -{"codeList":["! pip install --upgrade --quiet pymilvus milvus-haystack markdown-it-py mdit_plain\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","import os\nimport urllib.request\n\nurl = \"https://www.gutenberg.org/cache/epub/7785/pg7785.txt\"\nfile_path = \"./davinci.txt\"\n\nif not os.path.exists(file_path):\n    urllib.request.urlretrieve(url, file_path)\n","from haystack import Pipeline\nfrom haystack.components.converters import MarkdownToDocument\nfrom haystack.components.embedders import OpenAIDocumentEmbedder, OpenAITextEmbedder\nfrom haystack.components.preprocessors import DocumentSplitter\nfrom haystack.components.writers import DocumentWriter\nfrom haystack.utils import Secret\n\nfrom milvus_haystack import MilvusDocumentStore\nfrom milvus_haystack.milvus_embedding_retriever import MilvusEmbeddingRetriever\n\n\ndocument_store = MilvusDocumentStore(\n    connection_args={\"uri\": \"./milvus.db\"},\n    # connection_args={\"uri\": \"http://localhost:19530\"},\n    # connection_args={\"uri\": YOUR_ZILLIZ_CLOUD_URI, \"token\": Secret.from_env_var(\"ZILLIZ_CLOUD_API_KEY\")},\n    drop_old=True,\n)\n","indexing_pipeline = Pipeline()\nindexing_pipeline.add_component(\"converter\", MarkdownToDocument())\nindexing_pipeline.add_component(\n    \"splitter\", DocumentSplitter(split_by=\"sentence\", split_length=2)\n)\nindexing_pipeline.add_component(\"embedder\", OpenAIDocumentEmbedder())\nindexing_pipeline.add_component(\"writer\", DocumentWriter(document_store))\nindexing_pipeline.connect(\"converter\", \"splitter\")\nindexing_pipeline.connect(\"splitter\", \"embedder\")\nindexing_pipeline.connect(\"embedder\", \"writer\")\nindexing_pipeline.run({\"converter\": {\"sources\": [file_path]}})\n\nprint(\"Number of documents:\", document_store.count_documents())\n","question = 'Where is the painting \"Warrior\" currently stored?'\n\nretrieval_pipeline = Pipeline()\nretrieval_pipeline.add_component(\"embedder\", OpenAITextEmbedder())\nretrieval_pipeline.add_component(\n    \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nretrieval_pipeline.connect(\"embedder\", \"retriever\")\n\nretrieval_results = retrieval_pipeline.run({\"embedder\": {\"text\": question}})\n\nfor doc in retrieval_results[\"retriever\"][\"documents\"]:\n    print(doc.content)\n    print(\"-\" * 10)\n","from haystack.utils import Secret\nfrom haystack.components.builders import PromptBuilder\nfrom haystack.components.generators import OpenAIGenerator\n\nprompt_template = \"\"\"Answer the following query based on the provided context. 
If the context does\n                     not include an answer, reply with 'I don't know'.\\n\n                     Query: {{query}}\n                     Documents:\n                     {% for doc in documents %}\n                        {{ doc.content }}\n                     {% endfor %}\n                     Answer:\n                  \"\"\"\n\nrag_pipeline = Pipeline()\nrag_pipeline.add_component(\"text_embedder\", OpenAITextEmbedder())\nrag_pipeline.add_component(\n    \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nrag_pipeline.add_component(\"prompt_builder\", PromptBuilder(template=prompt_template))\nrag_pipeline.add_component(\n    \"generator\",\n    OpenAIGenerator(\n        api_key=Secret.from_token(os.getenv(\"OPENAI_API_KEY\")),\n        generation_kwargs={\"temperature\": 0},\n    ),\n)\nrag_pipeline.connect(\"text_embedder.embedding\", \"retriever.query_embedding\")\nrag_pipeline.connect(\"retriever.documents\", \"prompt_builder.documents\")\nrag_pipeline.connect(\"prompt_builder\", \"generator\")\n\nresults = rag_pipeline.run(\n    {\n        \"text_embedder\": {\"text\": question},\n        \"prompt_builder\": {\"query\": question},\n    }\n)\nprint(\"RAG answer:\", results[\"generator\"][\"replies\"][0])\n"],"headingContent":"","anchorList":[{"label":"Generazione Aumentata dal Recupero (RAG) con Milvus e Haystack","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Haystack","type":1,"isActive":false},{"label":"Prerequisiti","href":"Prerequisites","type":2,"isActive":false},{"label":"Preparare i dati","href":"Prepare-the-data","type":2,"isActive":false},{"label":"Creare la pipeline di indicizzazione","href":"Create-the-indexing-Pipeline","type":2,"isActive":false},{"label":"Creare la pipeline di recupero","href":"Create-the-retrieval-pipeline","type":2,"isActive":false},{"label":"Creare la pipeline RAG","href":"Create-the-RAG-pipeline","type":2,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":["! pip install --upgrade --quiet pymilvus milvus-haystack markdown-it-py mdit_plain\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","import os\nimport urllib.request\n\nurl = \"https://www.gutenberg.org/cache/epub/7785/pg7785.txt\"\nfile_path = \"./davinci.txt\"\n\nif not os.path.exists(file_path):\n    urllib.request.urlretrieve(url, file_path)\n","from haystack import Pipeline\nfrom haystack.components.converters import MarkdownToDocument\nfrom haystack.components.embedders import OpenAIDocumentEmbedder, OpenAITextEmbedder\nfrom haystack.components.preprocessors import DocumentSplitter\nfrom haystack.components.writers import DocumentWriter\nfrom haystack.utils import Secret\n\nfrom milvus_haystack import MilvusDocumentStore\nfrom milvus_haystack.milvus_embedding_retriever import MilvusEmbeddingRetriever\n\n\ndocument_store = MilvusDocumentStore(\n    connection_args={\"uri\": \"./milvus.db\"},\n    # connection_args={\"uri\": \"http://localhost:19530\"},\n    # connection_args={\"uri\": YOUR_ZILLIZ_CLOUD_URI, \"token\": Secret.from_env_var(\"ZILLIZ_CLOUD_API_KEY\")},\n    drop_old=True,\n)\n","indexing_pipeline = Pipeline()\nindexing_pipeline.add_component(\"converter\", MarkdownToDocument())\nindexing_pipeline.add_component(\n    \"splitter\", DocumentSplitter(split_by=\"sentence\", split_length=2)\n)\nindexing_pipeline.add_component(\"embedder\", OpenAIDocumentEmbedder())\nindexing_pipeline.add_component(\"writer\", DocumentWriter(document_store))\nindexing_pipeline.connect(\"converter\", \"splitter\")\nindexing_pipeline.connect(\"splitter\", \"embedder\")\nindexing_pipeline.connect(\"embedder\", \"writer\")\nindexing_pipeline.run({\"converter\": {\"sources\": [file_path]}})\n\nprint(\"Number of documents:\", document_store.count_documents())\n","question = 'Where is the painting \"Warrior\" currently stored?'\n\nretrieval_pipeline = Pipeline()\nretrieval_pipeline.add_component(\"embedder\", OpenAITextEmbedder())\nretrieval_pipeline.add_component(\n    \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nretrieval_pipeline.connect(\"embedder\", \"retriever\")\n\nretrieval_results = retrieval_pipeline.run({\"embedder\": {\"text\": question}})\n\nfor doc in retrieval_results[\"retriever\"][\"documents\"]:\n    print(doc.content)\n    print(\"-\" * 10)\n","from haystack.utils import Secret\nfrom haystack.components.builders import PromptBuilder\nfrom haystack.components.generators import OpenAIGenerator\n\nprompt_template = \"\"\"Answer the following query based on the provided context. 
If the context does\n                     not include an answer, reply with 'I don't know'.\\n\n                     Query: {{query}}\n                     Documents:\n                     {% for doc in documents %}\n                        {{ doc.content }}\n                     {% endfor %}\n                     Answer:\n                  \"\"\"\n\nrag_pipeline = Pipeline()\nrag_pipeline.add_component(\"text_embedder\", OpenAITextEmbedder())\nrag_pipeline.add_component(\n    \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nrag_pipeline.add_component(\"prompt_builder\", PromptBuilder(template=prompt_template))\nrag_pipeline.add_component(\n    \"generator\",\n    OpenAIGenerator(\n        api_key=Secret.from_token(os.getenv(\"OPENAI_API_KEY\")),\n        generation_kwargs={\"temperature\": 0},\n    ),\n)\nrag_pipeline.connect(\"text_embedder.embedding\", \"retriever.query_embedding\")\nrag_pipeline.connect(\"retriever.documents\", \"prompt_builder.documents\")\nrag_pipeline.connect(\"prompt_builder\", \"generator\")\n\nresults = rag_pipeline.run(\n    {\n        \"text_embedder\": {\"text\": question},\n        \"prompt_builder\": {\"query\": question},\n    }\n)\nprint(\"RAG answer:\", results[\"generator\"][\"replies\"][0])\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and Haystack","anchorList":[{"label":"Generazione Aumentata dal Recupero (RAG) con Milvus e Haystack","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Haystack","type":1,"isActive":false},{"label":"Prerequisiti","href":"Prerequisites","type":2,"isActive":false},{"label":"Preparare i dati","href":"Prepare-the-data","type":2,"isActive":false},{"label":"Creare la pipeline di indicizzazione","href":"Create-the-indexing-Pipeline","type":2,"isActive":false},{"label":"Creare la pipeline di recupero","href":"Create-the-retrieval-pipeline","type":2,"isActive":false},{"label":"Creare la pipeline RAG","href":"Create-the-RAG-pipeline","type":2,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/it/integrations/integrate_with_haystack.md b/localization/v2.4.x/site/it/integrations/integrate_with_haystack.md
      index 813a63ff0..081307c5e 100644
      --- a/localization/v2.4.x/site/it/integrations/integrate_with_haystack.md
      +++ b/localization/v2.4.x/site/it/integrations/integrate_with_haystack.md
      @@ -20,7 +20,8 @@ title: Generazione Aumentata dal Recupero (RAG) con Milvus e Haystack
                 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
               >
             
-    [Open In Colab badge]
+    [Open In Colab badge] [GitHub Repository badge]

This guide shows how to build a Retrieval-Augmented Generation (RAG) system using Haystack and Milvus.

A RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. It first retrieves relevant documents from a corpus using Milvus, then uses a generative model to generate new text based on the retrieved documents.

Haystack is deepset's open-source Python framework for building custom applications with large language models (LLMs). Milvus is the world's most advanced open-source vector database, built to power similarity search and AI applications.
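As a minimal sketch of how these pieces fit, the retrieval half of such a pipeline wires an embedder into a Milvus-backed retriever, using the same components as the code payload above; the local ./milvus.db file and the question are illustrative, and an OPENAI_API_KEY must be set.

from haystack import Pipeline
from haystack.components.embedders import OpenAITextEmbedder
from milvus_haystack import MilvusDocumentStore
from milvus_haystack.milvus_embedding_retriever import MilvusEmbeddingRetriever

document_store = MilvusDocumentStore(connection_args={"uri": "./milvus.db"})

# Embed the query, then fetch the top-3 most similar documents from Milvus.
retrieval = Pipeline()
retrieval.add_component("embedder", OpenAITextEmbedder())
retrieval.add_component(
    "retriever", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)
)
retrieval.connect("embedder", "retriever")

results = retrieval.run({"embedder": {"text": 'Where is the painting "Warrior" stored?'}})
for doc in results["retriever"]["documents"]:
    print(doc.content)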

      diff --git a/localization/v2.4.x/site/it/integrations/integrate_with_hugging-face.json b/localization/v2.4.x/site/it/integrations/integrate_with_hugging-face.json index 3853735d3..34705ea17 100644 --- a/localization/v2.4.x/site/it/integrations/integrate_with_hugging-face.json +++ b/localization/v2.4.x/site/it/integrations/integrate_with_hugging-face.json @@ -1 +1 @@ -{"codeList":["$ pip install --upgrade pymilvus transformers datasets torch\n","from datasets import load_dataset\n\n\nDATASET = \"squad\" # Name of dataset from HuggingFace Datasets\nINSERT_RATIO = 0.001 # Ratio of example dataset to be inserted\n\ndata = load_dataset(DATASET, split=\"validation\")\n# Generates a fixed subset. To generate a random subset, remove the seed.\ndata = data.train_test_split(test_size=INSERT_RATIO, seed=42)[\"test\"]\n# Clean up the data structure in the dataset.\ndata = data.map(\n lambda val: {\"answer\": val[\"answers\"][\"text\"][0]},\n remove_columns=[\"id\", \"answers\", \"context\"],\n)\n\n# View summary of example data\nprint(data)\n","from transformers import AutoTokenizer, AutoModel\nimport torch\n\nMODEL = (\n \"sentence-transformers/all-MiniLM-L6-v2\" # Name of model from HuggingFace Models\n)\nINFERENCE_BATCH_SIZE = 64 # Batch size of model inference\n\n# Load tokenizer & model from HuggingFace Hub\ntokenizer = AutoTokenizer.from_pretrained(MODEL)\nmodel = AutoModel.from_pretrained(MODEL)\n\n\ndef encode_text(batch):\n # Tokenize sentences\n encoded_input = tokenizer(\n batch[\"question\"], padding=True, truncation=True, return_tensors=\"pt\"\n )\n\n # Compute token embeddings\n with torch.no_grad():\n model_output = model(**encoded_input)\n\n # Perform pooling\n token_embeddings = model_output[0]\n attention_mask = encoded_input[\"attention_mask\"]\n input_mask_expanded = (\n attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n )\n sentence_embeddings = torch.sum(\n token_embeddings * input_mask_expanded, 1\n ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n\n # Normalize embeddings\n batch[\"question_embedding\"] = torch.nn.functional.normalize(\n sentence_embeddings, p=2, dim=1\n )\n return batch\n\n\ndata = data.map(encode_text, batched=True, batch_size=INFERENCE_BATCH_SIZE)\ndata_list = data.to_list()\n","from pymilvus import MilvusClient\n\n\nMILVUS_URI = \"./huggingface_milvus_test.db\" # Connection URI\nCOLLECTION_NAME = \"huggingface_test\" # Collection name\nDIMENSION = 384 # Embedding dimension depending on model\n\nmilvus_client = MilvusClient(MILVUS_URI)\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n collection_name=COLLECTION_NAME,\n dimension=DIMENSION,\n auto_id=True, # Enable auto id\n enable_dynamic_field=True, # Enable dynamic fields\n vector_field_name=\"question_embedding\", # Map vector field name and embedding column in dataset\n consistency_level=\"Strong\", # To enable search with latest data\n)\n","milvus_client.insert(collection_name=COLLECTION_NAME, data=data_list)\n","questions = {\n \"question\": [\n \"What is LGM?\",\n \"When did Massachusetts first mandate that children be educated in schools?\",\n ]\n}\n\n# Generate question embeddings\nquestion_embeddings = [v.tolist() for v in encode_text(questions)[\"question_embedding\"]]\n\n# Search across Milvus\nsearch_results = milvus_client.search(\n collection_name=COLLECTION_NAME,\n data=question_embeddings,\n limit=3, # How many search results to output\n 
output_fields=[\"answer\", \"question\"], # Include these fields in search results\n)\n\n# Print out results\nfor q, res in zip(questions[\"question\"], search_results):\n print(\"Question:\", q)\n for r in res:\n print(\n {\n \"answer\": r[\"entity\"][\"answer\"],\n \"score\": r[\"distance\"],\n \"original question\": r[\"entity\"][\"question\"],\n }\n )\n print(\"\\n\")\n"],"headingContent":"","anchorList":[{"label":"Risposta alle domande con Milvus e Hugging Face","href":"Question-Answering-Using-Milvus-and-Hugging-Face","type":1,"isActive":false},{"label":"Prima di iniziare","href":"Before-you-begin","type":2,"isActive":false},{"label":"Preparare i dati","href":"Prepare-data","type":2,"isActive":false},{"label":"Inserire i dati","href":"Insert-data","type":2,"isActive":false},{"label":"Porre domande","href":"Ask-questions","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install --upgrade pymilvus transformers datasets torch\n","from datasets import load_dataset\n\n\nDATASET = \"squad\" # Name of dataset from HuggingFace Datasets\nINSERT_RATIO = 0.001 # Ratio of example dataset to be inserted\n\ndata = load_dataset(DATASET, split=\"validation\")\n# Generates a fixed subset. To generate a random subset, remove the seed.\ndata = data.train_test_split(test_size=INSERT_RATIO, seed=42)[\"test\"]\n# Clean up the data structure in the dataset.\ndata = data.map(\n lambda val: {\"answer\": val[\"answers\"][\"text\"][0]},\n remove_columns=[\"id\", \"answers\", \"context\"],\n)\n\n# View summary of example data\nprint(data)\n","from transformers import AutoTokenizer, AutoModel\nimport torch\n\nMODEL = (\n \"sentence-transformers/all-MiniLM-L6-v2\" # Name of model from HuggingFace Models\n)\nINFERENCE_BATCH_SIZE = 64 # Batch size of model inference\n\n# Load tokenizer & model from HuggingFace Hub\ntokenizer = AutoTokenizer.from_pretrained(MODEL)\nmodel = AutoModel.from_pretrained(MODEL)\n\n\ndef encode_text(batch):\n # Tokenize sentences\n encoded_input = tokenizer(\n batch[\"question\"], padding=True, truncation=True, return_tensors=\"pt\"\n )\n\n # Compute token embeddings\n with torch.no_grad():\n model_output = model(**encoded_input)\n\n # Perform pooling\n token_embeddings = model_output[0]\n attention_mask = encoded_input[\"attention_mask\"]\n input_mask_expanded = (\n attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n )\n sentence_embeddings = torch.sum(\n token_embeddings * input_mask_expanded, 1\n ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n\n # Normalize embeddings\n batch[\"question_embedding\"] = torch.nn.functional.normalize(\n sentence_embeddings, p=2, dim=1\n )\n return batch\n\n\ndata = data.map(encode_text, batched=True, batch_size=INFERENCE_BATCH_SIZE)\ndata_list = data.to_list()\n","from pymilvus import MilvusClient\n\n\nMILVUS_URI = \"./huggingface_milvus_test.db\" # Connection URI\nCOLLECTION_NAME = \"huggingface_test\" # Collection name\nDIMENSION = 384 # Embedding dimension depending on model\n\nmilvus_client = MilvusClient(MILVUS_URI)\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n collection_name=COLLECTION_NAME,\n dimension=DIMENSION,\n auto_id=True, # Enable auto id\n enable_dynamic_field=True, # Enable dynamic fields\n vector_field_name=\"question_embedding\", # Map vector field name and embedding column in dataset\n consistency_level=\"Strong\", # To enable search with latest 
data\n)\n","milvus_client.insert(collection_name=COLLECTION_NAME, data=data_list)\n","questions = {\n \"question\": [\n \"What is LGM?\",\n \"When did Massachusetts first mandate that children be educated in schools?\",\n ]\n}\n\n# Generate question embeddings\nquestion_embeddings = [v.tolist() for v in encode_text(questions)[\"question_embedding\"]]\n\n# Search across Milvus\nsearch_results = milvus_client.search(\n collection_name=COLLECTION_NAME,\n data=question_embeddings,\n limit=3, # How many search results to output\n output_fields=[\"answer\", \"question\"], # Include these fields in search results\n)\n\n# Print out results\nfor q, res in zip(questions[\"question\"], search_results):\n print(\"Question:\", q)\n for r in res:\n print(\n {\n \"answer\": r[\"entity\"][\"answer\"],\n \"score\": r[\"distance\"],\n \"original question\": r[\"entity\"][\"question\"],\n }\n )\n print(\"\\n\")\n"],"headingContent":"Question Answering Using Milvus and Hugging Face","anchorList":[{"label":"Risposta alle domande con Milvus e Hugging Face","href":"Question-Answering-Using-Milvus-and-Hugging-Face","type":1,"isActive":false},{"label":"Prima di iniziare","href":"Before-you-begin","type":2,"isActive":false},{"label":"Preparare i dati","href":"Prepare-data","type":2,"isActive":false},{"label":"Inserire i dati","href":"Insert-data","type":2,"isActive":false},{"label":"Porre domande","href":"Ask-questions","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/integrations/integrate_with_hugging-face.md b/localization/v2.4.x/site/it/integrations/integrate_with_hugging-face.md index 64366bea4..823ea5133 100644 --- a/localization/v2.4.x/site/it/integrations/integrate_with_hugging-face.md +++ b/localization/v2.4.x/site/it/integrations/integrate_with_hugging-face.md @@ -5,7 +5,7 @@ summary: >- utilizzando Hugging Face come caricatore di dati e generatore di incorporazioni per l'elaborazione dei dati e Milvus come database vettoriale per la ricerca semantica. -title: Risposta alle domande con Milvus e la faccia abbracciata +title: Risposta alle domande con Milvus e Hugging Face ---

Question Answering with Milvus and Hugging Face

[Open In Colab badge]
+[Open In Colab badge] [GitHub Repository badge]

A question answering system based on semantic search works by finding the most similar question in a set of question-answer pairs for a given query. Once the most similar question is identified, the corresponding answer from the dataset is treated as the answer to the query. This approach relies on semantic similarity measures to determine similarity between questions and retrieve relevant answers.

This tutorial shows how to build a question answering system using Hugging Face as the data loader and embedding generator for data processing, and Milvus as the vector database for semantic search.
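A minimal sketch of this nearest-question lookup, reusing the collection created in the code payload above; embed() is a hypothetical stand-in for the encode_text() helper defined there.

from pymilvus import MilvusClient

milvus_client = MilvusClient("./huggingface_milvus_test.db")  # file from the payload above

# Answer a new question by returning the answer stored with the most similar question.
hits = milvus_client.search(
    collection_name="huggingface_test",   # collection created in the payload above
    data=[embed("What is LGM?")],         # embed() stands in for the encode_text() helper
    limit=1,
    output_fields=["answer", "question"],
)
print(hits[0][0]["entity"]["answer"])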

Before you begin

[Open In Colab badge]
+[Open In Colab badge] [GitHub Repository badge]

This guide shows how to use Jina AI embeddings and Milvus to perform similarity search and retrieval tasks.

Who is Jina AI

Jina AI, founded in 2020 in Berlin, is a pioneering AI company that aims to revolutionize the future of artificial intelligence through its research foundation. Specializing in multimodal AI, Jina AI aims to empower businesses and developers to harness the power of multimodal data for value creation and cost savings through its integrated suite of components, including embeddings, rerankers, prompt ops, and core infrastructure. Jina AI's cutting-edge embeddings deliver top-tier performance, with an 8192-token-length model ideal for comprehensive data representation. With multilingual support and seamless integration with leading platforms such as OpenAI, these embeddings facilitate cross-lingual applications.

Milvus and Jina AI embeddings

-To store and search these encodings efficiently for speed and scale, dedicated infrastructure designed for this purpose is required. Milvus is a widely known, advanced open-source vector database capable of handling large-scale vector data. Milvus enables fast and accurate vector (embedding) search according to a wide range of metrics. Its scalability allows it to seamlessly handle massive volumes of image data, ensuring high-performance search operations even as datasets grow.

+To store and search these embeddings efficiently for speed and scale, dedicated infrastructure designed for this purpose is required. Milvus is a widely known, advanced open-source vector database capable of handling large-scale vector data. Milvus enables fast and accurate vector (embedding) search according to a wide range of metrics. Its scalability allows it to seamlessly handle massive volumes of image data, ensuring high-performance search operations even as datasets grow.

Examples

Jina AI's core embedding model excels at understanding detailed text, making it ideal for semantic search and content classification; it also supports advanced sentiment analysis, text summarization, and personalized recommendation systems.

-from pymilvus.model.dense import JinaEmbeddingFunction
+from pymilvus.model.dense import JinaEmbeddingFunction
       
       jina_api_key = "<YOUR_JINA_API_KEY>"
      -ef = JinaEmbeddingFunction("jina-embeddings-v2-base-en", jina_api_key)
      +ef = JinaEmbeddingFunction(
      +    "jina-embeddings-v3", 
      +    jina_api_key,
      +    task="retrieval.passage",
      +    dimensions=1024
      +)
       
       query = "what is information retrieval?"
       doc = "Information retrieval is the process of finding relevant information from a large collection of data or documents."
       
      -qvecs = ef.encode_queries([query])
      -dvecs = ef.encode_documents([doc])
      +qvecs = ef.encode_queries([query])  # This method uses `retrieval.query` as the task
      +dvecs = ef.encode_documents([doc])  # This method uses `retrieval.passage` as the task
       

Bilingual embeddings

[Open In Colab badge]
+[Open In Colab badge] [GitHub Repository badge]

This guide shows how to build a Retrieval-Augmented Generation (RAG) system using LlamaIndex and Milvus.

A RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. It first retrieves relevant documents from a corpus using Milvus, then uses a generative model to generate new text based on the retrieved documents.

LlamaIndex is a simple, flexible data framework for connecting custom data sources to large language models (LLMs). Milvus is the world's most advanced open-source vector database, built to power similarity search and AI applications.
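A minimal sketch of that flow with LlamaIndex's Milvus integration, assuming the llama-index and llama-index-vector-stores-milvus packages plus a configured LLM/embedding backend; the paths and dim=1536 (OpenAI's default embedding size) are illustrative.

from llama_index.core import SimpleDirectoryReader, StorageContext, VectorStoreIndex
from llama_index.vector_stores.milvus import MilvusVectorStore

# Store embeddings in a local Milvus Lite file; dim must match the embedding model.
vector_store = MilvusVectorStore(uri="./milvus_llamaindex.db", dim=1536, overwrite=True)
storage_context = StorageContext.from_defaults(vector_store=vector_store)

documents = SimpleDirectoryReader("./data/").load_data()
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)

# Query: retrieve the nearest chunks from Milvus, then let the LLM synthesize an answer.
print(index.as_query_engine().query("What does the corpus say about Milvus?"))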

      diff --git a/localization/v2.4.x/site/it/integrations/integrate_with_openai.json b/localization/v2.4.x/site/it/integrations/integrate_with_openai.json index ce2d9c910..a7d296366 100644 --- a/localization/v2.4.x/site/it/integrations/integrate_with_openai.json +++ b/localization/v2.4.x/site/it/integrations/integrate_with_openai.json @@ -1 +1 @@ -{"codeList":["pip install --upgrade openai pymilvus\n","from openai import OpenAI\nfrom pymilvus import MilvusClient\n\nMODEL_NAME = \"text-embedding-3-small\" # Which model to use, please check https://platform.openai.com/docs/guides/embeddings for available models\nDIMENSION = 1536 # Dimension of vector embedding\n\n# Connect to OpenAI with API Key.\nopenai_client = OpenAI(api_key=\"\")\n\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = [\n vec.embedding\n for vec in openai_client.embeddings.create(input=docs, model=MODEL_NAME).data\n]\n\n# Prepare data to be stored in Milvus vector database.\n# We can store the id, vector representation, raw text and labels such as \"subject\" in this case in Milvus.\ndata = [\n {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n for i in range(len(docs))\n]\n\n\n# Connect to Milvus, all data is stored in a local file named \"milvus_openai_demo.db\"\n# in current directory. You can also connect to a remote Milvus server following this\n# instruction: https://milvus.io/docs/install_standalone-docker.md.\nmilvus_client = MilvusClient(uri=\"milvus_openai_demo.db\")\nCOLLECTION_NAME = \"demo_collection\" # Milvus collection name\n# Create a collection to store the vectors and text.\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\n# Insert all data into Milvus vector database.\nres = milvus_client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res[\"insert_count\"])\n","queries = [\"When was artificial intelligence founded?\"]\n\nquery_vectors = [\n vec.embedding\n for vec in openai_client.embeddings.create(input=queries, model=MODEL_NAME).data\n]\n\nres = milvus_client.search(\n collection_name=COLLECTION_NAME, # target collection\n data=query_vectors, # query vectors\n limit=2, # number of returned entities\n output_fields=[\"text\", \"subject\"], # specifies fields to be returned\n)\n\nfor q in queries:\n print(\"Query:\", q)\n for result in res:\n print(result)\n print(\"\\n\")\n","[\n {\n \"id\": 0,\n \"distance\": -0.772376537322998,\n \"entity\": {\n \"text\": \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"subject\": \"history\",\n },\n },\n {\n \"id\": 1,\n \"distance\": -0.58596271276474,\n \"entity\": {\n \"text\": \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"subject\": \"history\",\n },\n },\n]\n"],"headingContent":"","anchorList":[{"label":"Ricerca semantica con Milvus e OpenAI","href":"Semantic-Search-with-Milvus-and-OpenAI","type":1,"isActive":false},{"label":"Come iniziare","href":"Getting-started","type":2,"isActive":false},{"label":"Ricerca di titoli di libri con OpenAI e Milvus","href":"Searching-book-titles-with-OpenAI--Milvus","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["pip 
install --upgrade openai pymilvus\n","from openai import OpenAI\nfrom pymilvus import MilvusClient\n\nMODEL_NAME = \"text-embedding-3-small\" # Which model to use, please check https://platform.openai.com/docs/guides/embeddings for available models\nDIMENSION = 1536 # Dimension of vector embedding\n\n# Connect to OpenAI with API Key.\nopenai_client = OpenAI(api_key=\"\")\n\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = [\n vec.embedding\n for vec in openai_client.embeddings.create(input=docs, model=MODEL_NAME).data\n]\n\n# Prepare data to be stored in Milvus vector database.\n# We can store the id, vector representation, raw text and labels such as \"subject\" in this case in Milvus.\ndata = [\n {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n for i in range(len(docs))\n]\n\n\n# Connect to Milvus, all data is stored in a local file named \"milvus_openai_demo.db\"\n# in current directory. You can also connect to a remote Milvus server following this\n# instruction: https://milvus.io/docs/install_standalone-docker.md.\nmilvus_client = MilvusClient(uri=\"milvus_openai_demo.db\")\nCOLLECTION_NAME = \"demo_collection\" # Milvus collection name\n# Create a collection to store the vectors and text.\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\n# Insert all data into Milvus vector database.\nres = milvus_client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res[\"insert_count\"])\n","queries = [\"When was artificial intelligence founded?\"]\n\nquery_vectors = [\n vec.embedding\n for vec in openai_client.embeddings.create(input=queries, model=MODEL_NAME).data\n]\n\nres = milvus_client.search(\n collection_name=COLLECTION_NAME, # target collection\n data=query_vectors, # query vectors\n limit=2, # number of returned entities\n output_fields=[\"text\", \"subject\"], # specifies fields to be returned\n)\n\nfor q in queries:\n print(\"Query:\", q)\n for result in res:\n print(result)\n print(\"\\n\")\n","[\n {\n \"id\": 0,\n \"distance\": -0.772376537322998,\n \"entity\": {\n \"text\": \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"subject\": \"history\",\n },\n },\n {\n \"id\": 1,\n \"distance\": -0.58596271276474,\n \"entity\": {\n \"text\": \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"subject\": \"history\",\n },\n },\n]\n"],"headingContent":"Semantic Search with Milvus and OpenAI","anchorList":[{"label":"Ricerca semantica con Milvus e OpenAI","href":"Semantic-Search-with-Milvus-and-OpenAI","type":1,"isActive":false},{"label":"Come iniziare","href":"Getting-started","type":2,"isActive":false},{"label":"Ricerca di titoli di libri con OpenAI e Milvus","href":"Searching-book-titles-with-OpenAI--Milvus","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/integrations/integrate_with_openai.md b/localization/v2.4.x/site/it/integrations/integrate_with_openai.md index e499a2693..8d53bf323 100644 --- a/localization/v2.4.x/site/it/integrations/integrate_with_openai.md +++ b/localization/v2.4.x/site/it/integrations/integrate_with_openai.md @@ -20,8 +20,9 @@ summary: >- 
d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

[Open In Colab badge]

-This guide shows how OpenAI's Embedding API can be used with the Milvus vector database to perform semantic search over text.

+[Open In Colab badge] [GitHub Repository badge]

+This guide shows how OpenAI's Embedding API can be used with the Milvus vector database to perform semantic search over text.

Getting started

[Open In Colab badge]
+[Open In Colab badge] [GitHub Repository badge]

This guide shows how to use Ragas to evaluate a Retrieval-Augmented Generation (RAG) pipeline built on Milvus.

A RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. It first retrieves relevant documents from a corpus using Milvus, then uses a generative model to generate new text based on the retrieved documents.

Ragas is a framework that helps evaluate RAG pipelines. There are existing tools and frameworks that help build these pipelines, but evaluating and quantifying their performance can be hard. This is where Ragas (RAG Assessment) comes in.
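A minimal sketch of a Ragas evaluation, assuming the ragas package and an OpenAI API key for the default judge LLM; the single record and its column names follow the classic Ragas schema (question, contexts, answer).

from datasets import Dataset
from ragas import evaluate
from ragas.metrics import answer_relevancy, faithfulness

# One evaluation record: the question, the contexts Milvus retrieved, and the answer.
ds = Dataset.from_dict(
    {
        "question": ["What state is Cambridge in?"],
        "contexts": [["Cambridge is a city in Massachusetts."]],
        "answer": ["Cambridge is in Massachusetts."],
    }
)

# Each metric is scored by a judge LLM, so this call makes API requests.
print(evaluate(ds, metrics=[faithfulness, answer_relevancy]))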

      diff --git a/localization/v2.4.x/site/it/integrations/integrate_with_vanna.json b/localization/v2.4.x/site/it/integrations/integrate_with_vanna.json index 5a7ccd3b4..22b3e26c6 100644 --- a/localization/v2.4.x/site/it/integrations/integrate_with_vanna.json +++ b/localization/v2.4.x/site/it/integrations/integrate_with_vanna.json @@ -1 +1 @@ -{"codeList":["$ pip install \"vanna[milvus,openai]\"\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","from pymilvus import MilvusClient, model\nfrom vanna.milvus import Milvus_VectorStore\nfrom vanna.openai import OpenAI_Chat\n\n\nclass VannaMilvus(Milvus_VectorStore, OpenAI_Chat):\n def __init__(self, config=None):\n Milvus_VectorStore.__init__(self, config=config)\n OpenAI_Chat.__init__(self, config=config)\n","milvus_uri = \"./milvus_vanna.db\"\n\nmilvus_client = MilvusClient(uri=milvus_uri)\n\nvn_milvus = VannaMilvus(\n config={\n \"api_key\": os.getenv(\"OPENAI_API_KEY\"),\n \"model\": \"gpt-3.5-turbo\",\n \"milvus_client\": milvus_client,\n \"embedding_function\": model.DefaultEmbeddingFunction(),\n \"n_results\": 2, # The number of results to return from Milvus semantic search.\n }\n)\n","import sqlite3\n\nsqlite_path = \"./my-database.sqlite\"\nsql_connect = sqlite3.connect(sqlite_path)\nc = sql_connect.cursor()\n\ninit_sqls = \"\"\"\nCREATE TABLE IF NOT EXISTS Customer (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Name TEXT NOT NULL,\n Company TEXT NOT NULL,\n City TEXT NOT NULL,\n Phone TEXT NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS Company (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Name TEXT NOT NULL,\n Industry TEXT NOT NULL,\n Location TEXT NOT NULL,\n EmployeeCount INTEGER NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS User (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Username TEXT NOT NULL UNIQUE,\n Email TEXT NOT NULL UNIQUE\n);\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('John Doe', 'ABC Corp', 'New York', '123-456-7890');\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('Jane Smith', 'XYZ Inc', 'Los Angeles', '098-765-4321');\n\nINSERT INTO Company (Name, Industry, Location, EmployeeCount)\nVALUES ('ABC Corp', 'cutting-edge technology', 'New York', 100);\n\nINSERT INTO User (Username, Email)\nVALUES ('johndoe123', 'johndoe123@example.com');\n\"\"\"\n\nfor sql in init_sqls.split(\";\"):\n c.execute(sql)\n\nsql_connect.commit()\n\n# Connect to the SQLite database\nvn_milvus.connect_to_sqlite(sqlite_path)\n","# If there exists training data, we should remove it before training.\nexisting_training_data = vn_milvus.get_training_data()\nif len(existing_training_data) > 0:\n for _, training_data in existing_training_data.iterrows():\n vn_milvus.remove_training_data(training_data[\"id\"])\n\n# Get the DDL of the SQLite database\ndf_ddl = vn_milvus.run_sql(\"SELECT type, sql FROM sqlite_master WHERE sql is not null\")\n\n# Train the model on the DDL data\nfor ddl in df_ddl[\"sql\"].to_list():\n vn_milvus.train(ddl=ddl)\n","# Add documentation about your business terminology or definitions.\nvn_milvus.train(\n documentation=\"ABC Corp specializes in cutting-edge technology solutions and innovation.\"\n)\nvn_milvus.train(\n documentation=\"XYZ Inc is a global leader in manufacturing and supply chain management.\"\n)\n\n# You can also add SQL queries to your training data.\nvn_milvus.train(sql=\"SELECT * FROM Customer WHERE Name = 'John Doe'\")\n","training_data = vn_milvus.get_training_data()\ntraining_data\n","sql = vn_milvus.generate_sql(\"what is the phone number of John 
Doe?\")\nvn_milvus.run_sql(sql)\n","sql = vn_milvus.generate_sql(\"which customer works for a manufacturing corporation?\")\nvn_milvus.run_sql(sql)\n","sql_connect.close()\nmilvus_client.close()\n\nos.remove(sqlite_path)\nif os.path.exists(milvus_uri):\n os.remove(milvus_uri)\n"],"headingContent":"","anchorList":[{"label":"Scrivere SQL con Vanna e Milvus","href":"Write-SQL-with-Vanna-and-Milvus","type":1,"isActive":false},{"label":"Prerequisiti","href":"Prerequisites","type":2,"isActive":false},{"label":"Preparazione dei dati","href":"Data-preparation","type":2,"isActive":false},{"label":"Addestramento con i dati","href":"Train-with-data","type":2,"isActive":false},{"label":"Generare SQL ed eseguirli","href":"Generate-SQLs-and-execute-them","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install \"vanna[milvus,openai]\"\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","from pymilvus import MilvusClient, model\nfrom vanna.milvus import Milvus_VectorStore\nfrom vanna.openai import OpenAI_Chat\n\n\nclass VannaMilvus(Milvus_VectorStore, OpenAI_Chat):\n def __init__(self, config=None):\n Milvus_VectorStore.__init__(self, config=config)\n OpenAI_Chat.__init__(self, config=config)\n","milvus_uri = \"./milvus_vanna.db\"\n\nmilvus_client = MilvusClient(uri=milvus_uri)\n\nvn_milvus = VannaMilvus(\n config={\n \"api_key\": os.getenv(\"OPENAI_API_KEY\"),\n \"model\": \"gpt-3.5-turbo\",\n \"milvus_client\": milvus_client,\n \"embedding_function\": model.DefaultEmbeddingFunction(),\n \"n_results\": 2, # The number of results to return from Milvus semantic search.\n }\n)\n","import sqlite3\n\nsqlite_path = \"./my-database.sqlite\"\nsql_connect = sqlite3.connect(sqlite_path)\nc = sql_connect.cursor()\n\ninit_sqls = \"\"\"\nCREATE TABLE IF NOT EXISTS Customer (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Name TEXT NOT NULL,\n Company TEXT NOT NULL,\n City TEXT NOT NULL,\n Phone TEXT NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS Company (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Name TEXT NOT NULL,\n Industry TEXT NOT NULL,\n Location TEXT NOT NULL,\n EmployeeCount INTEGER NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS User (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Username TEXT NOT NULL UNIQUE,\n Email TEXT NOT NULL UNIQUE\n);\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('John Doe', 'ABC Corp', 'New York', '123-456-7890');\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('Jane Smith', 'XYZ Inc', 'Los Angeles', '098-765-4321');\n\nINSERT INTO Company (Name, Industry, Location, EmployeeCount)\nVALUES ('ABC Corp', 'cutting-edge technology', 'New York', 100);\n\nINSERT INTO User (Username, Email)\nVALUES ('johndoe123', 'johndoe123@example.com');\n\"\"\"\n\nfor sql in init_sqls.split(\";\"):\n c.execute(sql)\n\nsql_connect.commit()\n\n# Connect to the SQLite database\nvn_milvus.connect_to_sqlite(sqlite_path)\n","# If there exists training data, we should remove it before training.\nexisting_training_data = vn_milvus.get_training_data()\nif len(existing_training_data) > 0:\n for _, training_data in existing_training_data.iterrows():\n vn_milvus.remove_training_data(training_data[\"id\"])\n\n# Get the DDL of the SQLite database\ndf_ddl = vn_milvus.run_sql(\"SELECT type, sql FROM sqlite_master WHERE sql is not null\")\n\n# Train the model on the DDL data\nfor ddl in df_ddl[\"sql\"].to_list():\n vn_milvus.train(ddl=ddl)\n","# Add documentation about your business terminology or definitions.\nvn_milvus.train(\n documentation=\"ABC Corp 
specializes in cutting-edge technology solutions and innovation.\"\n)\nvn_milvus.train(\n documentation=\"XYZ Inc is a global leader in manufacturing and supply chain management.\"\n)\n\n# You can also add SQL queries to your training data.\nvn_milvus.train(sql=\"SELECT * FROM Customer WHERE Name = 'John Doe'\")\n","training_data = vn_milvus.get_training_data()\ntraining_data\n","sql = vn_milvus.generate_sql(\"what is the phone number of John Doe?\")\nvn_milvus.run_sql(sql)\n","sql = vn_milvus.generate_sql(\"which customer works for a manufacturing corporation?\")\nvn_milvus.run_sql(sql)\n","sql_connect.close()\nmilvus_client.close()\n\nos.remove(sqlite_path)\nif os.path.exists(milvus_uri):\n os.remove(milvus_uri)\n"],"headingContent":"Write SQL with Vanna and Milvus","anchorList":[{"label":"Scrivere SQL con Vanna e Milvus","href":"Write-SQL-with-Vanna-and-Milvus","type":1,"isActive":false},{"label":"Prerequisiti","href":"Prerequisites","type":2,"isActive":false},{"label":"Preparazione dei dati","href":"Data-preparation","type":2,"isActive":false},{"label":"Addestramento con i dati","href":"Train-with-data","type":2,"isActive":false},{"label":"Generare SQL ed eseguirli","href":"Generate-SQLs-and-execute-them","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/integrations/integrate_with_vanna.md b/localization/v2.4.x/site/it/integrations/integrate_with_vanna.md index 6607a9780..efa6c1175 100644 --- a/localization/v2.4.x/site/it/integrations/integrate_with_vanna.md +++ b/localization/v2.4.x/site/it/integrations/integrate_with_vanna.md @@ -20,7 +20,9 @@ title: Scrivere SQL con Vanna e Milvus d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

      Vanna è un framework RAG (Retrieval-Augmented Generation) open-source in Python per la generazione di SQL e funzionalità correlate. Milvus è il database vettoriale open-source più avanzato al mondo, costruito per alimentare la ricerca di similarità e le applicazioni di intelligenza artificiale.

      +

      Open In Colab +GitHub Repository

      +

      Vanna è un framework RAG (Retrieval-Augmented Generation) open-source in Python per la generazione di SQL e funzionalità correlate. Milvus è il database vettoriale open-source più avanzato al mondo, costruito per alimentare la ricerca di similarità e le applicazioni di intelligenza artificiale.

Vanna funziona in due semplici fasi: si addestra un "modello" RAG sui propri dati e poi si pongono domande in linguaggio naturale, che restituiscono query SQL pronte per essere eseguite sul database. Questa guida mostra come utilizzare Vanna per generare ed eseguire query SQL basate sui dati memorizzati in un database.
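A titolo illustrativo, le due fasi si riducono a poche chiamate. Di seguito uno schizzo minimo basato sul codice di questo tutorial: si assume che `vn_milvus` sia l'istanza `VannaMilvus` configurata più avanti e che il database contenga la tabella `Customer`.

```python
# Fase 1: addestrare il "modello" RAG su DDL, documentazione o SQL di esempio
vn_milvus.train(ddl="CREATE TABLE Customer (ID INTEGER PRIMARY KEY, Name TEXT)")
vn_milvus.train(
    documentation="ABC Corp specializes in cutting-edge technology solutions."
)

# Fase 2: porre una domanda in linguaggio naturale e ottenere la SQL da eseguire
sql = vn_milvus.generate_sql("what is the phone number of John Doe?")
vn_milvus.run_sql(sql)
```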

      Prerequisiti

      Open In Colab

      -

      Questa guida illustra come l'API di embedding di VoyageAI possa essere utilizzata con il database vettoriale Milvus per condurre una ricerca semantica sul testo.

      +

      Open In Colab +GitHub Repository

      +

Questa guida illustra come l'API di embedding di VoyageAI possa essere utilizzata con il database vettoriale Milvus per condurre una ricerca semantica sul testo.

      Come iniziare

      Open In Colab

      +

      Open In Colab +GitHub Repository

      Questa guida mostra come costruire un sistema di Retrieval-Augmented Generation (RAG) utilizzando LangChain e Milvus.

      Il sistema RAG combina un sistema di recupero con un modello generativo per generare nuovo testo sulla base di un prompt dato. Il sistema recupera prima i documenti rilevanti da un corpus utilizzando Milvus e poi utilizza un modello generativo per generare nuovo testo sulla base dei documenti recuperati.

      LangChain è un framework per lo sviluppo di applicazioni basate su modelli linguistici di grandi dimensioni (LLM). Milvus è il database vettoriale open-source più avanzato al mondo, costruito per alimentare le applicazioni di ricerca di similarità e di intelligenza artificiale.
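Come riferimento rapido, ecco lo scheletro della fase di recupero. È uno schizzo minimo, che assume i pacchetti `langchain-milvus` e `langchain-openai`, una chiave OpenAI configurata e un file Milvus Lite locale ipotetico:

```python
from langchain_milvus import Milvus
from langchain_openai import OpenAIEmbeddings

# Indicizza alcuni testi di esempio in Milvus (qui Milvus Lite su file locale)
vectorstore = Milvus.from_texts(
    texts=["Milvus is a vector database.", "LangChain orchestrates LLM apps."],
    embedding=OpenAIEmbeddings(),
    connection_args={"uri": "./milvus_demo.db"},
)

# Recupera i documenti più rilevanti da passare poi al modello generativo
retriever = vectorstore.as_retriever(search_kwargs={"k": 2})
docs = retriever.invoke("What is Milvus?")
print([d.page_content for d in docs])
```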

      diff --git a/localization/v2.4.x/site/it/menuStructure/it.json b/localization/v2.4.x/site/it/menuStructure/it.json index 757764bcf..93633a316 100644 --- a/localization/v2.4.x/site/it/menuStructure/it.json +++ b/localization/v2.4.x/site/it/menuStructure/it.json @@ -1391,7 +1391,7 @@ "children": [] }, { - "label": "Dificare", + "label": "Dify", "id": "dify_with_milvus.md", "order": 6, "children": [] diff --git a/localization/v2.4.x/site/it/reference/architecture/architecture_overview.json b/localization/v2.4.x/site/it/reference/architecture/architecture_overview.json index 7e525af45..4464f3d1c 100644 --- a/localization/v2.4.x/site/it/reference/architecture/architecture_overview.json +++ b/localization/v2.4.x/site/it/reference/architecture/architecture_overview.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"","anchorList":[{"label":"Panoramica dell'architettura di Milvus","href":"Milvus-Architecture-Overview","type":1,"isActive":false},{"label":"Cosa c'è dopo","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Milvus Architecture Overview","anchorList":[{"label":"Panoramica dell'architettura di Milvus","href":"Milvus-Architecture-Overview","type":1,"isActive":false},{"label":"Cosa c'è dopo","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/reference/architecture/architecture_overview.md b/localization/v2.4.x/site/it/reference/architecture/architecture_overview.md index 9851df5b2..ad8156162 100644 --- a/localization/v2.4.x/site/it/reference/architecture/architecture_overview.md +++ b/localization/v2.4.x/site/it/reference/architecture/architecture_overview.md @@ -3,7 +3,7 @@ id: architecture_overview.md summary: >- Milvus offre un database vettoriale veloce, affidabile e stabile, costruito appositamente per la ricerca di similarità e l'intelligenza artificiale. -title: Panoramica dell'architettura Milvus +title: Panoramica dell'architettura di Milvus ---

      Panoramica dell'architettura di Milvus

      Costruito sulla base delle più diffuse librerie di ricerca vettoriale, tra cui Faiss, HNSW, DiskANN, SCANN e altre, Milvus è stato progettato per la ricerca di similarità su insiemi di dati vettoriali densi, contenenti milioni, miliardi o addirittura trilioni di vettori. Prima di procedere, è bene familiarizzare con i principi di base dell'embedding retrieval.

      -

      Milvus supporta anche lo sharding dei dati, l'ingestione di dati in streaming, lo schema dinamico, la ricerca combinata di dati vettoriali e scalari, la ricerca ibrida e multi-vettore, il vettore sparse e molte altre funzioni avanzate. La piattaforma offre prestazioni on demand e può essere ottimizzata per adattarsi a qualsiasi scenario di embedding retrieval. Si consiglia di distribuire Milvus utilizzando Kubernetes per ottenere disponibilità ed elasticità ottimali.

      +

Milvus supporta anche lo sharding dei dati, l'ingestione di dati in streaming, lo schema dinamico, la ricerca di dati vettoriali e scalari, la ricerca multivettoriale e ibrida, i vettori sparsi e molte altre funzioni avanzate. La piattaforma offre prestazioni on demand e può essere ottimizzata per adattarsi a qualsiasi scenario di embedding retrieval. Si consiglia di distribuire Milvus utilizzando Kubernetes per ottenere disponibilità ed elasticità ottimali.

      Milvus adotta un'architettura di storage condiviso che prevede la disaggregazione dello storage e dell'elaborazione e la scalabilità orizzontale dei nodi di elaborazione. Seguendo il principio della disaggregazione del piano dati e del piano di controllo, Milvus comprende quattro livelli: livello di accesso, servizio di coordinamento, nodo lavoratore e storage. Questi livelli sono indipendenti l'uno dall'altro per quanto riguarda la scalabilità o il disaster recovery.

      Architecture_diagram - Diagramma_architettura

      + Diagramma_di_architettura

      +

      Secondo la figura, le interfacce possono essere classificate nelle seguenti categorie:

      +
        +
      • DDL / DCL: createCollection / createPartition / dropCollection / dropPartition / hasCollection / hasPartition
      • +
      • DML / Produce: insert / delete / upsert
      • +
• DQL: search / query
      • +
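Per orientarsi, le tre categorie corrispondono a chiamate distinte dell'SDK Python. Uno schizzo minimo con `MilvusClient` (nome della collection e dati ipotetici):

```python
from pymilvus import MilvusClient

client = MilvusClient(uri="./milvus_demo.db")

# DDL / DCL: gestione di collection e partizioni
client.create_collection(collection_name="demo", dimension=8)
print(client.has_collection(collection_name="demo"))

# DML / Produce: inserimento, cancellazione, upsert
client.insert(collection_name="demo", data=[{"id": 1, "vector": [0.1] * 8}])

# DQL: ricerca vettoriale (search) e interrogazione scalare (query)
print(client.search(collection_name="demo", data=[[0.1] * 8], limit=1))
print(client.query(collection_name="demo", filter="id == 1"))
```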

      Cosa c'è dopo

      diff --git a/localization/v2.4.x/site/it/reference/disk_index.json b/localization/v2.4.x/site/it/reference/disk_index.json index 53214f024..000fa8182 100644 --- a/localization/v2.4.x/site/it/reference/disk_index.json +++ b/localization/v2.4.x/site/it/reference/disk_index.json @@ -1 +1 @@ -{"codeList":["...\nDiskIndex:\n MaxDegree: 56\n SearchListSize: 100\n PQCodeBugetGBRatio: 0.125\n SearchCacheBudgetGBRatio: 0.125\n BeamWidthRatio: 4.0\n...\n"],"headingContent":"","anchorList":[{"label":"Indice su disco","href":"On-disk-Index","type":1,"isActive":false},{"label":"Prerequisiti","href":"Prerequisites","type":2,"isActive":false},{"label":"Limiti","href":"Limits","type":2,"isActive":false},{"label":"Impostazioni dell'indice e della ricerca","href":"Index-and-search-settings","type":2,"isActive":false},{"label":"Configurazioni di Milvus relative a DiskANN","href":"DiskANN-related-Milvus-configurations","type":2,"isActive":false},{"label":"Risoluzione dei problemi","href":"Troubleshooting","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["...\nDiskIndex:\n MaxDegree: 56\n SearchListSize: 100\n PQCodeBugetGBRatio: 0.125\n SearchCacheBudgetGBRatio: 0.125\n BeamWidthRatio: 4.0\n...\n"],"headingContent":"On-disk Index","anchorList":[{"label":"Indice su disco","href":"On-disk-Index","type":1,"isActive":false},{"label":"Prerequisiti","href":"Prerequisites","type":2,"isActive":false},{"label":"Limiti","href":"Limits","type":2,"isActive":false},{"label":"Impostazioni dell'indice e della ricerca","href":"Index-and-search-settings","type":2,"isActive":false},{"label":"Configurazioni di Milvus relative a DiskANN","href":"DiskANN-related-Milvus-configurations","type":2,"isActive":false},{"label":"Risoluzione dei problemi","href":"Troubleshooting","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/reference/disk_index.md b/localization/v2.4.x/site/it/reference/disk_index.md index 6555a2b0b..be1075183 100644 --- a/localization/v2.4.x/site/it/reference/disk_index.md +++ b/localization/v2.4.x/site/it/reference/disk_index.md @@ -68,7 +68,7 @@ Attualmente, un campo vettoriale supporta solo un tipo di indice. Milvus cancell

      Per utilizzare DiskANN, assicurarsi che

      • Usare solo vettori float con almeno 1 dimensione nei dati.
      • -
      • Utilizzare solo la distanza euclidea (L2) o il prodotto interno (IP) per misurare la distanza tra i vettori.
      • +
      • Utilizzare solo la distanza euclidea (L2), il prodotto interno (IP) o COSINE per misurare la distanza tra i vettori.
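Soddisfatti questi requisiti, l'indice si crea come qualsiasi altro indice vettoriale. Uno schizzo minimo con l'SDK Python, assumendo un'istanza Milvus in esecuzione (URI ipotetico) e una collection `demo` già esistente con un campo float vector chiamato `vector`:

```python
from pymilvus import connections, Collection

connections.connect(uri="http://localhost:19530")

# Indice DiskANN sul campo vettoriale; metric_type può essere "L2", "IP" o "COSINE"
index_params = {
    "index_type": "DISKANN",
    "metric_type": "L2",
    "params": {},
}
Collection("demo").create_index(field_name="vector", index_params=index_params)
```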

      Impostazioni dell'indice e della ricerca

      Le repliche in memoria sono organizzate come gruppi di repliche. Ogni gruppo di repliche contiene repliche shard. Ogni replica shard ha una replica streaming e una replica storica che corrispondono ai segmenti in crescita e sigillati nello shard (ad esempio, il canale DML).

      - An illustration of how in-memory replica works + An illustration of how in-memory replica works Un'illustrazione del funzionamento della replica in-memory

      Gruppo di replica

      Un gruppo di replica è costituito da più nodi di query responsabili della gestione dei dati storici e delle repliche.

      Replica shard

      Una replica shard consiste in una replica streaming e in una replica storica, entrambe appartenenti allo stesso shard. Il numero di repliche shard in un gruppo di repliche è determinato dal numero di shard in una raccolta specifica.
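In pratica, il numero di gruppi di repliche si sceglie al momento del caricamento della collection. Uno schizzo minimo con l'SDK Python (collection `demo` e URI ipotetici, in un cluster con un numero sufficiente di query node):

```python
from pymilvus import connections, Collection

connections.connect(uri="http://localhost:19530")

# Carica la collection con due repliche in memoria: ogni gruppo di repliche
# può servire le ricerche in modo indipendente, aumentando throughput e resilienza
collection = Collection("demo")
collection.load(replica_number=2)
```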

      diff --git a/localization/v2.4.x/site/it/release_notes.json b/localization/v2.4.x/site/it/release_notes.json index 5575f5dfd..12909ff6d 100644 --- a/localization/v2.4.x/site/it/release_notes.json +++ b/localization/v2.4.x/site/it/release_notes.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"Release Notes","anchorList":[{"label":"Note di rilascio","href":"Release-Notes","type":1,"isActive":false},{"label":"v2.4.11","href":"v2411","type":2,"isActive":false},{"label":"v2.4.10","href":"v2410","type":2,"isActive":false},{"label":"v2.4.9","href":"v249","type":2,"isActive":false},{"label":"v2.4.8","href":"v248","type":2,"isActive":false},{"label":"v2.4.6","href":"v246","type":2,"isActive":false},{"label":"v2.4.5","href":"v245","type":2,"isActive":false},{"label":"v2.4.4","href":"v244","type":2,"isActive":false},{"label":"v2.4.3","href":"v243","type":2,"isActive":false},{"label":"v2.4.1","href":"v241","type":2,"isActive":false},{"label":"v2.4.0","href":"v240","type":2,"isActive":false},{"label":"v2.4.0-rc.1","href":"v240-rc1","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Release Notes","anchorList":[{"label":"Note di rilascio","href":"Release-Notes","type":1,"isActive":false},{"label":"v2.4.13-fisso caldo","href":"v2413-hotfix","type":2,"isActive":false},{"label":"[Deprecato] v2.4.13","href":"Deprecated-v2413","type":2,"isActive":false},{"label":"v2.4.12","href":"v2412","type":2,"isActive":false},{"label":"v2.4.11","href":"v2411","type":2,"isActive":false},{"label":"v2.4.10","href":"v2410","type":2,"isActive":false},{"label":"v2.4.9","href":"v249","type":2,"isActive":false},{"label":"v2.4.8","href":"v248","type":2,"isActive":false},{"label":"v2.4.6","href":"v246","type":2,"isActive":false},{"label":"v2.4.5","href":"v245","type":2,"isActive":false},{"label":"v2.4.4","href":"v244","type":2,"isActive":false},{"label":"v2.4.3","href":"v243","type":2,"isActive":false},{"label":"v2.4.1","href":"v241","type":2,"isActive":false},{"label":"v2.4.0","href":"v240","type":2,"isActive":false},{"label":"v2.4.0-rc.1","href":"v240-rc1","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/release_notes.md b/localization/v2.4.x/site/it/release_notes.md index de26d64d9..acaeaa1b1 100644 --- a/localization/v2.4.x/site/it/release_notes.md +++ b/localization/v2.4.x/site/it/release_notes.md @@ -19,6 +19,163 @@ title: Note di rilascio >

      Scoprite le novità di Milvus! Questa pagina riassume le nuove funzionalità, i miglioramenti, i problemi noti e le correzioni di bug di ogni versione. In questa sezione è possibile trovare le note di rilascio per ogni versione rilasciata dopo la v2.4.0. Si consiglia di visitare regolarmente questa pagina per conoscere gli aggiornamenti.

      +

v2.4.13-hotfix

      Data di rilascio: 17 ottobre 2024

| Versione di Milvus | Versione dell'SDK Python | Versione dell'SDK Java | Versione SDK Node.js |
| --- | --- | --- | --- |
| 2.4.13-hotfix | 2.4.8 | 2.4.5 | 2.4.9 |

Milvus v2.4.13-hotfix risolve un problema critico specifico della versione 2.4.13, a causa del quale Milvus potrebbe non riuscire a recuperare le informazioni sulla raccolta dopo un riavvio se tutte le istantanee MetaKV sono state eliminate dalla garbage collection (#36933). Si consiglia agli utenti che attualmente utilizzano la versione 2.4.13 di eseguire l'aggiornamento alla versione 2.4.13-hotfix il prima possibile per evitare potenziali interruzioni.

      +

      Correzioni critiche

        +
      • Carica la chiave originale se il timestamp è MaxTimestamp(#36935)
      • +
      +

      [Deprecato] v2.4.13

      Data di rilascio: 12 ottobre 2024

| Versione di Milvus | Versione dell'SDK Python | Versione dell'SDK Java | Versione SDK Node.js |
| --- | --- | --- | --- |
| 2.4.13 | 2.4.8 | 2.4.5 | 2.4.9 |

      Milvus 2.4.13 introduce il carico dinamico delle repliche, consentendo agli utenti di regolare il numero di repliche della collezione senza dover rilasciare e ricaricare la collezione. Questa versione risolve anche diversi bug critici relativi all'importazione in blocco, all'analisi delle espressioni, al bilanciamento del carico e al recupero dei guasti. Inoltre, sono stati apportati miglioramenti significativi all'utilizzo delle risorse MMAP e alle prestazioni di importazione, migliorando l'efficienza complessiva del sistema. Si consiglia vivamente di passare a questa versione per migliorare le prestazioni e la stabilità.

      +

      Caratteristiche

        +
      • Regolazione dinamica delle repliche per le collezioni caricate(#36417)
      • +
      • MMAP vettoriale sparse in tipi di segmento crescenti(#36565)
      • +
      +

      Correzioni di bug

        +
      • Corretto un problema di prestazioni del flush (#36741)
      • +
      • Corretto un bug con le espressioni JSON in "[]"(#36722)
      • +
      • Rimossi i vicini se il target compatto non è indicizzato(#36694)
      • +
      • Migliorate le prestazioni di Rocksmq quando il canale è pieno(#36618)
      • +
      • Corretto un problema per cui gli errori durante l'impacchettamento non venivano differiti (#36665)
      • +
      • Risolta una perdita di memoria per i segmenti importati nel gestore dei segmenti(#36631)
      • +
• Saltati gli health check non necessari per i nodi di query nel proxy (#36553)
      • +
      • Risolto un problema di overflow con le espressioni di termine(#36534)
      • +
      • Registrato l'ID del nodo prima di assegnare i compiti per evitare un'errata allocazione dei compiti (#36493)
      • +
      • Risolti i problemi di corsa ai dati nella compattazione del clustering (#36499)
      • +
      • Aggiunto un controllo per la lunghezza massima dell'array di stringhe dopo la corrispondenza dei tipi (#36497)
      • +
      • Risolte condizioni di gara in modalità mix o standalone (#36459)
      • +
      • Corretto lo sbilanciamento dei segmenti dopo ripetute operazioni di caricamento e rilascio(#36543)
      • +
• Corretto un caso limite in cui i segmenti non potevano essere spostati da un nodo in fase di arresto (#36475)
      • +
      • Aggiornate correttamente le informazioni sui segmenti anche se alcuni segmenti erano mancanti(#36729)
      • +
      • Impedito che le transazioni etcd superino il limite massimo nello snapshot KV (#36773)
      • +
      +

      Miglioramenti

        +
      • Migliorata la stima delle risorse MMAP:
          +
        • Migliorato il codice relativo a MMAP in column.h(#36521)
        • +
        • Raffinata la stima delle risorse durante il caricamento delle raccolte(#36728)
        • +
      • +
      • Miglioramenti delle prestazioni:
          +
        • Migliorata l'efficienza dell'analisi delle espressioni convertendo Unicode in ASCII(#36676).
        • +
        • Abilitata la produzione parallela di messaggi per più argomenti(#36462)
        • +
        • Ridotto l'overhead della CPU nel calcolo della dimensione del file indice(#36580)
        • +
        • Recuperato il tipo di messaggio dall'intestazione per ridurre al minimo l'unmarshalling (#36454)
        • +
        • Ottimizzata la politica di selezione delle repliche basata sul carico di lavoro (#36384)
        • +
      • +
      • Suddivisione dei messaggi delle attività di cancellazione per rientrare nei limiti di dimensione massima dei messaggi(#36574)
      • +
      • Aggiunto un nuovo URL RESTful per descrivere i lavori di importazione(#36754)
      • +
      • Ottimizzata la pianificazione delle importazioni e aggiunta una metrica del costo del tempo(#36684)
      • +
      • Aggiunto il log dei rapporti di bilanciamento per il bilanciatore del coordinatore delle query(#36749)
      • +
      • Passato all'uso di una configurazione GC comune (#36670)
      • +
      • Aggiunto l'interruttore della politica di streaming forward per il delegatore(#36712)
      • +
      • Abilitata la compattazione manuale per le raccolte senza indici(#36581)
      • +
      • Abilitato il bilanciamento del carico sui nodi di query con capacità di memoria variabile(#36625)
      • +
• Uniformato l'uso di maiuscole/minuscole per le etichette in entrata usando metrics.label(#36616)
      • +
      • Rese idempotenti le operazioni di trasferimento di canali/segmenti (#36552)
      • +
      • Aggiunte metriche per monitorare il throughput delle importazioni e il conteggio delle righe importate(#36588)
      • +
      • Impedita la creazione di oggetti timer multipli nei target(#36573)
      • +
      • Aggiornata la versione delle espressioni e la formattazione della risposta HTTP per le espressioni(#36467)
      • +
      • Migliorata la garbage collection in KV snapshot (#36793)
      • +
      • Aggiunto il supporto per l'esecuzione di metodi con parametri contestuali(#36798)
      • +
      +

      v2.4.12

      Data di rilascio: 26 settembre 2024

| Versione di Milvus | Versione dell'SDK Python | Versione dell'SDK Java | Versione SDK Node.js |
| --- | --- | --- | --- |
| 2.4.12 | 2.4.7 | 2.4.4 | 2.4.9 |

      Milvus 2.4.12 introduce miglioramenti significativi e correzioni di bug critici. Questa versione risolve i problemi di duplicazione dei dati e migliora la velocità di recupero dei guasti, in particolare quando si gestisce un gran numero di cancellazioni. Tuttavia, persiste un problema noto per cui il recupero dei guasti può essere lento quando si eliminano grandi quantità di dati. Stiamo lavorando attivamente per risolvere questo problema.

      +

      Miglioramenti

        +
      • Implementato l'arresto graduale per il gestore del flowgraph(#36358).
      • +
      • Disabilitato il controllo degli indici per i campi vettoriali non caricati(#36280)
      • +
      • Filtrati i record di cancellazione non colpiti durante il caricamento delta (#36272)
      • +
      • Migliorata la gestione degli errori per le eccezioni di std::stoi(#36296)
      • +
• Vietato l'uso di parole chiave come nomi di campo o nomi di campo dinamici (#36108)
      • +
      • Aggiunte metriche per la cancellazione di voci nei segmenti L0(#36227)
      • +
      • Implementata la politica di inoltro L0 per supportare il caricamento remoto(#36208)
      • +
• Aggiunto il controllo del caricamento del campo ANN nel proxy (#36194)
      • +
      • Abilitato il supporto per le righe vuote sparse (#36061)
      • +
      • Corretta una vulnerabilità di sicurezza(#36156)
      • +
      • Implementato un gestore di statistiche per le metriche delle dimensioni di richiesta/risposta(#36118)
      • +
      • Corretta la stima delle dimensioni per i dati codificati degli array (#36379)
      • +
      +

      Correzioni di bug

        +
      • Risolti gli errori del tipo di metrica per le raccolte con due campi vettoriali(#36473)
      • +
      • Corretti i problemi di buffering lungo che causano errori di ricezione della coda di messaggi(#36425)
      • +
      • Implementato il corretto ritorno di compact-to-segments dopo il supporto della divisione (#36429)
      • +
      • Risolti i problemi di corsa ai dati con la goroutine di controllo dell'ID del nodo (#36377)
      • +
      • Rimosso il controllo del tipo di elemento (#36324)
      • +
      • Risolti i problemi di accesso concorrente per i segmenti in crescita e sigillati(#36288)
      • +
      • Implementato un futuro blocco statico (#36333)
      • +
      • Corretto l'uso degli offset in HybridSearch(#36287, #36253)
      • +
      • Risolte le perdite di segmenti/canali sporchi su QueryNode(#36259)
      • +
      • Corretta la gestione della duplicazione delle chiavi primarie(#36274)
      • +
      • Applicata l'impostazione del tipo di metrica nelle richieste di ricerca(#36279)
      • +
      • Corretto il problema di cancellazione della metrica stored_index_files_size(#36161)
      • +
      • Corretto il comportamento del gruppo di privilegi di lettura per l'accesso globale all'API(#36145)
      • +

      v2.4.11

      • Corretto un problema per cui i dati di cancellazione andavano persi a causa dell'omissione di segmenti durante la compattazione L0.[#33980, #34363]
      • Corretto un problema per cui i messaggi di cancellazione non venivano inoltrati a causa di una gestione errata dell'ambito dei dati.(#35313)
      • -
      • Risolta un'eccezione SIGBUS che si verificava a causa dell'uso non corretto di mmap.[#34455, #34530]
      • +
      • Risolta un'eccezione SIGBUS che si verificava a causa di un uso non corretto di mmap.[#34455, #34530]
      • Corretti gli arresti anomali causati da espressioni di ricerca illegali.(#35307)
• Corretto un problema per cui il watch del DataNode falliva a causa di un'impostazione errata del timeout nel contesto del watch (#35017)
      @@ -441,7 +598,7 @@ title: Note di rilascio
    • Assicurato un riscaldamento asincrono più fluido grazie alla correzione di un problema di blocco di stato.(#33687)
    • Risolto un bug che poteva causare risultati mancanti negli iteratori delle query.(#33506)
    • Corretto un bug che poteva causare una dimensione non uniforme dei segmenti di importazione (#33634).
    • -
    • Corretta la gestione della dimensione dei dati per i tipi bf16, fp16 e vettori binari(#33488).
    • +
    • Corretta la gestione delle dimensioni dei dati non corrette per i tipi bf16, fp16 e vettori binari(#33488).
    • Migliorata la stabilità risolvendo potenziali problemi con il compattatore L0(#33564)
    • Assicurato che gli aggiornamenti della configurazione dinamica siano riflessi correttamente nella cache.(#33590)
    • Migliorata l'accuratezza della metrica RootCoordQuotaStates (#33601)
@@ -570,7 +727,7 @@ title: Note di rilascio
    • Utilizzata la capacità di TestLocations per accelerare la scrittura e la compattazione(#32948)
    • Ottimizzato il pool di parser dei piani per evitare ricicli inutili (#32869)
    • Migliorata la velocità di caricamento (#32898)
    • -
    • Utilizzato il livello di coerenza predefinito della raccolta per restv2(#32956)
    • +
    • Utilizzato il livello di consistenza predefinito della raccolta per restv2(#32956)
    • Aggiunta la risposta ai costi per l'API Rest (#32620)
    • Abilitata la politica di bilanciamento esclusivo dei canali (#32911)
    • Esposta l'API describedatabase nel proxy (#32732)
@@ -627,7 +784,7 @@ title: Note di rilascio
    • Inizializzato il logger degli accessi prima dell'inizializzazione del server(#32976)
    • Il compattatore è in grado di cancellare i segmenti vuoti(#32821)
    • Riempito il numero di voce deltalog e l'intervallo di tempo nelle compattazioni l0(#33004)
    • -
    • Corretto l'arresto anomalo del proxy a causa della corsa dei dati della cache del leader dello shard(#32971)
    • +
    • Corretto l'arresto anomalo del proxy dovuto alla corsa dei dati della cache del leader dello shard(#32971)
    • Corretta l'unità di tempo per la metrica dell'indice di carico (#32935)
    • Corretto il problema per cui il segmento sul nodo di query in arresto non poteva essere rilasciato con successo(#32929)
    • Corretta la stima delle risorse dell'indice(#32842)
@@ -662,7 +819,7 @@ title: Note di rilascio

      Data di rilascio: 6 maggio 2024

@@ -689,10 +846,10 @@ title: Note di rilascio
    • Aggiunta l'etichetta db alle metriche per le operazioni di cancellazione e inserimento in blocco (#32611)
    • Aggiunta la logica per saltare la verifica attraverso la configurazione per le colonne AutoID e PartitionKey (#32592)
    • Raffinati gli errori relativi all'autenticazione(#32253)
    • -
    • Raffinati i log degli errori per AllocSegmentID in DataCoord(#32351, #32335)
    • +
    • Affinati i log degli errori per AllocSegmentID in DataCoord(#32351, #32335)
    • Rimosse le metriche duplicate(#32380, #32308) e ripulite quelle inutilizzate(#32404, #32515).
    • Aggiunta opzione di configurazione per controllare se imporre l'attivazione della funzione partitionKey (#32433)
    • -
    • Aggiunta un'opzione di configurazione per controllare la quantità massima di dati che possono essere inseriti in una singola richiesta(#32433).
    • +
    • Aggiunta un'opzione di configurazione per controllare la quantità massima di dati che possono essere inseriti in una singola richiesta (#32433).
    • Parallelizzare l'operazione applyDelete a livello di segmento per accelerare l'elaborazione dei messaggi Delete da parte del delegatore(#32291)
    • Usato l'indice(#32232, #32505, #32533, #32595) e l'add cache(#32580) per accelerare le operazioni di filtraggio frequenti in QueryCoord.
    • Riscritta la struttura dei dati(#32273) e rifattorizzato il codice(#32389) per accelerare le operazioni più comuni in DataCoord.
@@ -767,7 +924,7 @@ title: Note di rilascio
    • Aggiornamento della sicurezza per il server nats (#32023)
    • Memorizzati i file degli indici invertiti in un percorso di archiviazione locale di QueryNode invece che in /tmp(#32210)
    • Risolte le perdite di memoria dei datacoord per collectionInfo(#32243)
    • -
    • Risolti bug relativi a fp16/bf16 che potevano causare panico di sistema(#31677, #31841, #32196)
    • +
    • Risolti i bug relativi a fp16/bf16 che potevano causare panico di sistema(#31677, #31841, #32196)
    • Risolti problemi con la ricerca per raggruppamento che restituiva risultati insufficienti(#32151)
    • Adattamento della ricerca con iteratori per gestire in modo più efficace gli offset nella fase di riduzione e garantire risultati adeguati con "reduceStopForBest" abilitato(#32088)
    • diff --git a/localization/v2.4.x/site/it/tutorials/build-rag-with-milvus.json b/localization/v2.4.x/site/it/tutorials/build-rag-with-milvus.json index e26704274..777981665 100644 --- a/localization/v2.4.x/site/it/tutorials/build-rag-with-milvus.json +++ b/localization/v2.4.x/site/it/tutorials/build-rag-with-milvus.json @@ -1 +1 @@ -{"codeList":["$ pip install --upgrade pymilvus openai requests tqdm\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","$ wget https://github.com/milvus-io/milvus-docs/releases/download/v2.4.6-preview/milvus_docs_2.4.x_en.zip\n$ unzip -q milvus_docs_2.4.x_en.zip -d milvus_docs\n","from glob import glob\n\ntext_lines = []\n\nfor file_path in glob(\"milvus_docs/en/faq/*.md\", recursive=True):\n with open(file_path, \"r\") as file:\n file_text = file.read()\n\n text_lines += file_text.split(\"# \")\n","from openai import OpenAI\n\nopenai_client = OpenAI()\n","def emb_text(text):\n return (\n openai_client.embeddings.create(input=text, model=\"text-embedding-3-small\")\n .data[0]\n .embedding\n )\n","test_embedding = emb_text(\"This is a test\")\nembedding_dim = len(test_embedding)\nprint(embedding_dim)\nprint(test_embedding[:10])\n","from pymilvus import MilvusClient\n\nmilvus_client = MilvusClient(uri=\"./milvus_demo.db\")\n\ncollection_name = \"my_rag_collection\"\n","if milvus_client.has_collection(collection_name):\n milvus_client.drop_collection(collection_name)\n","milvus_client.create_collection(\n collection_name=collection_name,\n dimension=embedding_dim,\n metric_type=\"IP\", # Inner product distance\n consistency_level=\"Strong\", # Strong consistency level\n)\n","from tqdm import tqdm\n\ndata = []\n\nfor i, line in enumerate(tqdm(text_lines, desc=\"Creating embeddings\")):\n data.append({\"id\": i, \"vector\": emb_text(line), \"text\": line})\n\nmilvus_client.insert(collection_name=collection_name, data=data)\n","question = \"How is data stored in milvus?\"\n","search_res = milvus_client.search(\n collection_name=collection_name,\n data=[\n emb_text(question)\n ], # Use the `emb_text` function to convert the question to an embedding vector\n limit=3, # Return top 3 results\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Inner product distance\n output_fields=[\"text\"], # Return the text field\n)\n","import json\n\nretrieved_lines_with_distances = [\n (res[\"entity\"][\"text\"], res[\"distance\"]) for res in search_res[0]\n]\nprint(json.dumps(retrieved_lines_with_distances, indent=4))\n","context = \"\\n\".join(\n [line_with_distance[0] for line_with_distance in retrieved_lines_with_distances]\n)\n","SYSTEM_PROMPT = \"\"\"\nHuman: You are an AI assistant. 
You are able to find answers to the questions from the contextual passage snippets provided.\n\"\"\"\nUSER_PROMPT = f\"\"\"\nUse the following pieces of information enclosed in tags to provide an answer to the question enclosed in tags.\n\n{context}\n\n\n{question}\n\n\"\"\"\n","response = openai_client.chat.completions.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n {\"role\": \"user\", \"content\": USER_PROMPT},\n ],\n)\nprint(response.choices[0].message.content)\n"],"headingContent":"","anchorList":[{"label":"Costruire RAG con Milvus","href":"Build-RAG-with-Milvus","type":1,"isActive":false},{"label":"Preparazione","href":"Preparation","type":2,"isActive":false},{"label":"Caricare i dati in Milvus","href":"Load-data-into-Milvus","type":2,"isActive":false},{"label":"Costruire la RAG","href":"Build-RAG","type":2,"isActive":false},{"label":"Distribuzione rapida","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install --upgrade pymilvus openai requests tqdm\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","$ wget https://github.com/milvus-io/milvus-docs/releases/download/v2.4.6-preview/milvus_docs_2.4.x_en.zip\n$ unzip -q milvus_docs_2.4.x_en.zip -d milvus_docs\n","from glob import glob\n\ntext_lines = []\n\nfor file_path in glob(\"milvus_docs/en/faq/*.md\", recursive=True):\n with open(file_path, \"r\") as file:\n file_text = file.read()\n\n text_lines += file_text.split(\"# \")\n","from openai import OpenAI\n\nopenai_client = OpenAI()\n","def emb_text(text):\n return (\n openai_client.embeddings.create(input=text, model=\"text-embedding-3-small\")\n .data[0]\n .embedding\n )\n","test_embedding = emb_text(\"This is a test\")\nembedding_dim = len(test_embedding)\nprint(embedding_dim)\nprint(test_embedding[:10])\n","from pymilvus import MilvusClient\n\nmilvus_client = MilvusClient(uri=\"./milvus_demo.db\")\n\ncollection_name = \"my_rag_collection\"\n","if milvus_client.has_collection(collection_name):\n milvus_client.drop_collection(collection_name)\n","milvus_client.create_collection(\n collection_name=collection_name,\n dimension=embedding_dim,\n metric_type=\"IP\", # Inner product distance\n consistency_level=\"Strong\", # Strong consistency level\n)\n","from tqdm import tqdm\n\ndata = []\n\nfor i, line in enumerate(tqdm(text_lines, desc=\"Creating embeddings\")):\n data.append({\"id\": i, \"vector\": emb_text(line), \"text\": line})\n\nmilvus_client.insert(collection_name=collection_name, data=data)\n","question = \"How is data stored in milvus?\"\n","search_res = milvus_client.search(\n collection_name=collection_name,\n data=[\n emb_text(question)\n ], # Use the `emb_text` function to convert the question to an embedding vector\n limit=3, # Return top 3 results\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Inner product distance\n output_fields=[\"text\"], # Return the text field\n)\n","import json\n\nretrieved_lines_with_distances = [\n (res[\"entity\"][\"text\"], res[\"distance\"]) for res in search_res[0]\n]\nprint(json.dumps(retrieved_lines_with_distances, indent=4))\n","context = \"\\n\".join(\n [line_with_distance[0] for line_with_distance in retrieved_lines_with_distances]\n)\n","SYSTEM_PROMPT = \"\"\"\nHuman: You are an AI assistant. 
You are able to find answers to the questions from the contextual passage snippets provided.\n\"\"\"\nUSER_PROMPT = f\"\"\"\nUse the following pieces of information enclosed in tags to provide an answer to the question enclosed in tags.\n\n{context}\n\n\n{question}\n\n\"\"\"\n","response = openai_client.chat.completions.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n {\"role\": \"user\", \"content\": USER_PROMPT},\n ],\n)\nprint(response.choices[0].message.content)\n"],"headingContent":"Build RAG with Milvus","anchorList":[{"label":"Costruire RAG con Milvus","href":"Build-RAG-with-Milvus","type":1,"isActive":false},{"label":"Preparazione","href":"Preparation","type":2,"isActive":false},{"label":"Caricare i dati in Milvus","href":"Load-data-into-Milvus","type":2,"isActive":false},{"label":"Costruire la RAG","href":"Build-RAG","type":2,"isActive":false},{"label":"Distribuzione rapida","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/tutorials/build-rag-with-milvus.md b/localization/v2.4.x/site/it/tutorials/build-rag-with-milvus.md index ea43dbfc4..63c2ce587 100644 --- a/localization/v2.4.x/site/it/tutorials/build-rag-with-milvus.md +++ b/localization/v2.4.x/site/it/tutorials/build-rag-with-milvus.md @@ -18,7 +18,8 @@ title: Costruire RAG con Milvus d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

      Open In Colab

      +

      Open In Colab +GitHub Repository

      In questo tutorial vi mostreremo come costruire una pipeline RAG (Retrieval-Augmented Generation) con Milvus.

      Il sistema RAG combina un sistema di recupero con un modello generativo per generare nuovo testo in base a un prompt dato. Il sistema recupera prima i documenti rilevanti da un corpus utilizzando Milvus e poi utilizza un modello generativo per generare un nuovo testo basato sui documenti recuperati.
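In concreto, la pipeline si riduce a due passaggi, come nello schizzo seguente basato sul codice di questo tutorial (si assume che `emb_text`, `milvus_client` e `openai_client` siano definiti come nelle sezioni successive):

```python
question = "How is data stored in milvus?"

# 1) Recupero: cerca in Milvus i passaggi più simili alla domanda
hits = milvus_client.search(
    collection_name="my_rag_collection",
    data=[emb_text(question)],
    limit=3,
    output_fields=["text"],
)[0]
context = "\n".join(hit["entity"]["text"] for hit in hits)

# 2) Generazione: passa contesto e domanda al modello generativo
response = openai_client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": f"{context}\n\nQuestion: {question}"}],
)
print(response.choices[0].message.content)
```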

      @@ -100,7 +101,7 @@ embedding_dim = len(test_embedding) d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

      Creare la raccolta

      from pymilvus import MilvusClient
      +    

      Creare la collezione

      from pymilvus import MilvusClient
       
       milvus_client = MilvusClient(uri="./milvus_demo.db")
       
      @@ -119,7 +120,7 @@ collection_name = "my_rag_collection"
           milvus_client.drop_collection(collection_name)
       

      Creare una nuova raccolta con i parametri specificati.

      -

      Se non si specifica alcun campo, Milvus creerà automaticamente un campo predefinito id per la chiave primaria e un campo vector per memorizzare i dati vettoriali. Un campo JSON riservato viene utilizzato per memorizzare campi non definiti da schemi e i loro valori.

      +

Se non si specifica alcun campo, Milvus creerà automaticamente un campo predefinito id per la chiave primaria e un campo vector per memorizzare i dati vettoriali. Un campo JSON riservato viene utilizzato per memorizzare campi non definiti dallo schema e i loro valori.

      milvus_client.create_collection(
           collection_name=collection_name,
           dimension=embedding_dim,
      diff --git a/localization/v2.4.x/site/it/tutorials/graph_rag_with_milvus.md b/localization/v2.4.x/site/it/tutorials/graph_rag_with_milvus.md
      index a00b93209..cc2bd7957 100644
      --- a/localization/v2.4.x/site/it/tutorials/graph_rag_with_milvus.md
      +++ b/localization/v2.4.x/site/it/tutorials/graph_rag_with_milvus.md
      @@ -18,7 +18,8 @@ title: Grafico RAG con Milvus
                 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
               >
             
      -    

      Open In Colab

      +

      Open In Colab +GitHub Repository

      L'applicazione diffusa di modelli linguistici di grandi dimensioni evidenzia l'importanza di migliorare l'accuratezza e la pertinenza delle loro risposte. La Retrieval-Augmented Generation (RAG) arricchisce i modelli con basi di conoscenza esterne, fornendo maggiori informazioni contestuali e mitigando problemi come l'allucinazione e la conoscenza insufficiente. Tuttavia, affidarsi esclusivamente a semplici paradigmi RAG ha i suoi limiti, soprattutto quando si tratta di relazioni complesse tra entità e domande multi-hop, dove il modello spesso fatica a fornire risposte accurate.

      L'introduzione dei grafi di conoscenza (KG) nel sistema RAG offre una nuova soluzione. I KG presentano le entità e le loro relazioni in modo strutturato, fornendo informazioni di recupero più precise e aiutando RAG a gestire meglio le attività di risposta a domande complesse. KG-RAG è ancora in fase iniziale e non esiste un consenso su come recuperare efficacemente entità e relazioni dai KG o su come integrare la ricerca di similarità vettoriale con le strutture a grafo.

In questo notebook, introduciamo un approccio semplice ma potente per migliorare notevolmente le prestazioni in questo scenario. Si tratta di un semplice paradigma RAG con recupero a più vie e successivo reranking, ma implementa il Graph RAG in modo logico e raggiunge prestazioni all'avanguardia nella gestione di domande multi-hop. Vediamo come viene implementato.

      diff --git a/localization/v2.4.x/site/it/tutorials/hybrid_search_with_milvus.json b/localization/v2.4.x/site/it/tutorials/hybrid_search_with_milvus.json index f304bc3cf..166a16625 100644 --- a/localization/v2.4.x/site/it/tutorials/hybrid_search_with_milvus.json +++ b/localization/v2.4.x/site/it/tutorials/hybrid_search_with_milvus.json @@ -1 +1 @@ -{"codeList":["$ pip install --upgrade pymilvus \"pymilvus[model]\"\n","# Run this cell to download the dataset\n$ wget http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv\n","import pandas as pd\n\nfile_path = \"quora_duplicate_questions.tsv\"\ndf = pd.read_csv(file_path, sep=\"\\t\")\nquestions = set()\nfor _, row in df.iterrows():\n obj = row.to_dict()\n questions.add(obj[\"question1\"][:512])\n questions.add(obj[\"question2\"][:512])\n if len(questions) > 500: # Skip this if you want to use the full dataset\n break\n\ndocs = list(questions)\n\n# example question\nprint(docs[0])\n","from milvus_model.hybrid import BGEM3EmbeddingFunction\n\nef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\ndense_dim = ef.dim[\"dense\"]\n\n# Generate embeddings using BGE-M3 model\ndocs_embeddings = ef(docs)\n","from pymilvus import (\n connections,\n utility,\n FieldSchema,\n CollectionSchema,\n DataType,\n Collection,\n)\n\n# Connect to Milvus given URI\nconnections.connect(uri=\"./milvus.db\")\n\n# Specify the data schema for the new Collection\nfields = [\n # Use auto generated id as primary key\n FieldSchema(\n name=\"pk\", dtype=DataType.VARCHAR, is_primary=True, auto_id=True, max_length=100\n ),\n # Store the original text to retrieve based on semantically distance\n FieldSchema(name=\"text\", dtype=DataType.VARCHAR, max_length=512),\n # Milvus now supports both sparse and dense vectors,\n # we can store each in a separate field to conduct hybrid search on both vectors\n FieldSchema(name=\"sparse_vector\", dtype=DataType.SPARSE_FLOAT_VECTOR),\n FieldSchema(name=\"dense_vector\", dtype=DataType.FLOAT_VECTOR, dim=dense_dim),\n]\nschema = CollectionSchema(fields)\n\n# Create collection (drop the old one if exists)\ncol_name = \"hybrid_demo\"\nif utility.has_collection(col_name):\n Collection(col_name).drop()\ncol = Collection(col_name, schema, consistency_level=\"Strong\")\n\n# To make vector search efficient, we need to create indices for the vector fields\nsparse_index = {\"index_type\": \"SPARSE_INVERTED_INDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"sparse_vector\", sparse_index)\ndense_index = {\"index_type\": \"AUTOINDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"dense_vector\", dense_index)\ncol.load()\n","# For efficiency, we insert 50 records in each small batch\nfor i in range(0, len(docs), 50):\n batched_entities = [\n docs[i : i + 50],\n docs_embeddings[\"sparse\"][i : i + 50],\n docs_embeddings[\"dense\"][i : i + 50],\n ]\n col.insert(batched_entities)\nprint(\"Number of entities inserted:\", col.num_entities)\n","# Enter your search query\nquery = input(\"Enter your search query: \")\nprint(query)\n\n# Generate embeddings for the query\nquery_embeddings = ef([query])\n# print(query_embeddings)\n","from pymilvus import (\n AnnSearchRequest,\n WeightedRanker,\n)\n\n\ndef dense_search(col, query_dense_embedding, limit=10):\n search_params = {\"metric_type\": \"IP\", \"params\": {}}\n res = col.search(\n [query_dense_embedding],\n anns_field=\"dense_vector\",\n limit=limit,\n output_fields=[\"text\"],\n param=search_params,\n )[0]\n return [hit.get(\"text\") for hit in res]\n\n\ndef sparse_search(col, 
query_sparse_embedding, limit=10):\n search_params = {\n \"metric_type\": \"IP\",\n \"params\": {},\n }\n res = col.search(\n [query_sparse_embedding],\n anns_field=\"sparse_vector\",\n limit=limit,\n output_fields=[\"text\"],\n param=search_params,\n )[0]\n return [hit.get(\"text\") for hit in res]\n\n\ndef hybrid_search(\n col,\n query_dense_embedding,\n query_sparse_embedding,\n sparse_weight=1.0,\n dense_weight=1.0,\n limit=10,\n):\n dense_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n dense_req = AnnSearchRequest(\n [query_dense_embedding], \"dense_vector\", dense_search_params, limit=limit\n )\n sparse_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n sparse_req = AnnSearchRequest(\n [query_sparse_embedding], \"sparse_vector\", sparse_search_params, limit=limit\n )\n rerank = WeightedRanker(sparse_weight, dense_weight)\n res = col.hybrid_search(\n [sparse_req, dense_req], rerank=rerank, limit=limit, output_fields=[\"text\"]\n )[0]\n return [hit.get(\"text\") for hit in res]\n","dense_results = dense_search(col, query_embeddings[\"dense\"][0])\nsparse_results = sparse_search(col, query_embeddings[\"sparse\"][0])\nhybrid_results = hybrid_search(\n col,\n query_embeddings[\"dense\"][0],\n query_embeddings[\"sparse\"][0],\n sparse_weight=0.7,\n dense_weight=1.0,\n)\n","def doc_text_formatting(ef, query, docs):\n tokenizer = ef.model.tokenizer\n query_tokens_ids = tokenizer.encode(query, return_offsets_mapping=True)\n query_tokens = tokenizer.convert_ids_to_tokens(query_tokens_ids)\n formatted_texts = []\n\n for doc in docs:\n ldx = 0\n landmarks = []\n encoding = tokenizer.encode_plus(doc, return_offsets_mapping=True)\n tokens = tokenizer.convert_ids_to_tokens(encoding[\"input_ids\"])[1:-1]\n offsets = encoding[\"offset_mapping\"][1:-1]\n for token, (start, end) in zip(tokens, offsets):\n if token in query_tokens:\n if len(landmarks) != 0 and start == landmarks[-1]:\n landmarks[-1] = end\n else:\n landmarks.append(start)\n landmarks.append(end)\n close = False\n formatted_text = \"\"\n for i, c in enumerate(doc):\n if ldx == len(landmarks):\n pass\n elif i == landmarks[ldx]:\n if close:\n formatted_text += \"\"\n else:\n formatted_text += \"\"\n close = not close\n ldx = ldx + 1\n formatted_text += c\n if close is True:\n formatted_text += \"\"\n formatted_texts.append(formatted_text)\n return formatted_texts\n","from IPython.display import Markdown, display\n\n# Dense search results\ndisplay(Markdown(\"**Dense Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, dense_results)\nfor result in dense_results:\n display(Markdown(result))\n\n# Sparse search results\ndisplay(Markdown(\"\\n**Sparse Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, sparse_results)\nfor result in formatted_results:\n display(Markdown(result))\n\n# Hybrid search results\ndisplay(Markdown(\"\\n**Hybrid Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, hybrid_results)\nfor result in formatted_results:\n display(Markdown(result))\n"],"headingContent":"","anchorList":[{"label":"Ricerca ibrida con Milvus","href":"Hybrid-Search-with-Milvus","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install --upgrade pymilvus \"pymilvus[model]\"\n","# Run this cell to download the dataset\n$ wget http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv\n","import pandas as pd\n\nfile_path = \"quora_duplicate_questions.tsv\"\ndf = pd.read_csv(file_path, sep=\"\\t\")\nquestions = set()\nfor _, row in 
df.iterrows():\n obj = row.to_dict()\n questions.add(obj[\"question1\"][:512])\n questions.add(obj[\"question2\"][:512])\n if len(questions) > 500: # Skip this if you want to use the full dataset\n break\n\ndocs = list(questions)\n\n# example question\nprint(docs[0])\n","from milvus_model.hybrid import BGEM3EmbeddingFunction\n\nef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\ndense_dim = ef.dim[\"dense\"]\n\n# Generate embeddings using BGE-M3 model\ndocs_embeddings = ef(docs)\n","from pymilvus import (\n connections,\n utility,\n FieldSchema,\n CollectionSchema,\n DataType,\n Collection,\n)\n\n# Connect to Milvus given URI\nconnections.connect(uri=\"./milvus.db\")\n\n# Specify the data schema for the new Collection\nfields = [\n # Use auto generated id as primary key\n FieldSchema(\n name=\"pk\", dtype=DataType.VARCHAR, is_primary=True, auto_id=True, max_length=100\n ),\n # Store the original text to retrieve based on semantically distance\n FieldSchema(name=\"text\", dtype=DataType.VARCHAR, max_length=512),\n # Milvus now supports both sparse and dense vectors,\n # we can store each in a separate field to conduct hybrid search on both vectors\n FieldSchema(name=\"sparse_vector\", dtype=DataType.SPARSE_FLOAT_VECTOR),\n FieldSchema(name=\"dense_vector\", dtype=DataType.FLOAT_VECTOR, dim=dense_dim),\n]\nschema = CollectionSchema(fields)\n\n# Create collection (drop the old one if exists)\ncol_name = \"hybrid_demo\"\nif utility.has_collection(col_name):\n Collection(col_name).drop()\ncol = Collection(col_name, schema, consistency_level=\"Strong\")\n\n# To make vector search efficient, we need to create indices for the vector fields\nsparse_index = {\"index_type\": \"SPARSE_INVERTED_INDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"sparse_vector\", sparse_index)\ndense_index = {\"index_type\": \"AUTOINDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"dense_vector\", dense_index)\ncol.load()\n","# For efficiency, we insert 50 records in each small batch\nfor i in range(0, len(docs), 50):\n batched_entities = [\n docs[i : i + 50],\n docs_embeddings[\"sparse\"][i : i + 50],\n docs_embeddings[\"dense\"][i : i + 50],\n ]\n col.insert(batched_entities)\nprint(\"Number of entities inserted:\", col.num_entities)\n","# Enter your search query\nquery = input(\"Enter your search query: \")\nprint(query)\n\n# Generate embeddings for the query\nquery_embeddings = ef([query])\n# print(query_embeddings)\n","from pymilvus import (\n AnnSearchRequest,\n WeightedRanker,\n)\n\n\ndef dense_search(col, query_dense_embedding, limit=10):\n search_params = {\"metric_type\": \"IP\", \"params\": {}}\n res = col.search(\n [query_dense_embedding],\n anns_field=\"dense_vector\",\n limit=limit,\n output_fields=[\"text\"],\n param=search_params,\n )[0]\n return [hit.get(\"text\") for hit in res]\n\n\ndef sparse_search(col, query_sparse_embedding, limit=10):\n search_params = {\n \"metric_type\": \"IP\",\n \"params\": {},\n }\n res = col.search(\n [query_sparse_embedding],\n anns_field=\"sparse_vector\",\n limit=limit,\n output_fields=[\"text\"],\n param=search_params,\n )[0]\n return [hit.get(\"text\") for hit in res]\n\n\ndef hybrid_search(\n col,\n query_dense_embedding,\n query_sparse_embedding,\n sparse_weight=1.0,\n dense_weight=1.0,\n limit=10,\n):\n dense_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n dense_req = AnnSearchRequest(\n [query_dense_embedding], \"dense_vector\", dense_search_params, limit=limit\n )\n sparse_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n 
sparse_req = AnnSearchRequest(\n [query_sparse_embedding], \"sparse_vector\", sparse_search_params, limit=limit\n )\n rerank = WeightedRanker(sparse_weight, dense_weight)\n res = col.hybrid_search(\n [sparse_req, dense_req], rerank=rerank, limit=limit, output_fields=[\"text\"]\n )[0]\n return [hit.get(\"text\") for hit in res]\n","dense_results = dense_search(col, query_embeddings[\"dense\"][0])\nsparse_results = sparse_search(col, query_embeddings[\"sparse\"]._getrow(0))\nhybrid_results = hybrid_search(\n col,\n query_embeddings[\"dense\"][0],\n query_embeddings[\"sparse\"]._getrow(0),\n sparse_weight=0.7,\n dense_weight=1.0,\n)\n","def doc_text_formatting(ef, query, docs):\n tokenizer = ef.model.tokenizer\n query_tokens_ids = tokenizer.encode(query, return_offsets_mapping=True)\n query_tokens = tokenizer.convert_ids_to_tokens(query_tokens_ids)\n formatted_texts = []\n\n for doc in docs:\n ldx = 0\n landmarks = []\n encoding = tokenizer.encode_plus(doc, return_offsets_mapping=True)\n tokens = tokenizer.convert_ids_to_tokens(encoding[\"input_ids\"])[1:-1]\n offsets = encoding[\"offset_mapping\"][1:-1]\n for token, (start, end) in zip(tokens, offsets):\n if token in query_tokens:\n if len(landmarks) != 0 and start == landmarks[-1]:\n landmarks[-1] = end\n else:\n landmarks.append(start)\n landmarks.append(end)\n close = False\n formatted_text = \"\"\n for i, c in enumerate(doc):\n if ldx == len(landmarks):\n pass\n elif i == landmarks[ldx]:\n if close:\n formatted_text += \"\"\n else:\n formatted_text += \"\"\n close = not close\n ldx = ldx + 1\n formatted_text += c\n if close is True:\n formatted_text += \"\"\n formatted_texts.append(formatted_text)\n return formatted_texts\n","from IPython.display import Markdown, display\n\n# Dense search results\ndisplay(Markdown(\"**Dense Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, dense_results)\nfor result in dense_results:\n display(Markdown(result))\n\n# Sparse search results\ndisplay(Markdown(\"\\n**Sparse Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, sparse_results)\nfor result in formatted_results:\n display(Markdown(result))\n\n# Hybrid search results\ndisplay(Markdown(\"\\n**Hybrid Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, hybrid_results)\nfor result in formatted_results:\n display(Markdown(result))\n"],"headingContent":"Hybrid Search with Milvus","anchorList":[{"label":"Ricerca ibrida con Milvus","href":"Hybrid-Search-with-Milvus","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/tutorials/hybrid_search_with_milvus.md b/localization/v2.4.x/site/it/tutorials/hybrid_search_with_milvus.md index 7312cc5d7..c57d5803c 100644 --- a/localization/v2.4.x/site/it/tutorials/hybrid_search_with_milvus.md +++ b/localization/v2.4.x/site/it/tutorials/hybrid_search_with_milvus.md @@ -18,7 +18,8 @@ title: Ricerca ibrida con Milvus d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

      Open In Colab

      +

      Open In Colab +GitHub Repository

In questa esercitazione dimostreremo come condurre una ricerca ibrida con Milvus e il modello BGE-M3. Il modello BGE-M3 può convertire il testo in vettori densi e sparsi. Milvus supporta la memorizzazione di entrambi i tipi di vettori in un'unica raccolta, consentendo una ricerca ibrida che migliora la rilevanza dei risultati.

Milvus supporta metodi di recupero densi, sparsi e ibridi:

      @@ -201,11 +202,11 @@ def dense_search(col,

      Eseguiamo tre diverse ricerche con le funzioni definite:

      dense_results = dense_search(col, query_embeddings["dense"][0])
      -sparse_results = sparse_search(col, query_embeddings["sparse"][0])
      +sparse_results = sparse_search(col, query_embeddings["sparse"]._getrow(0))
       hybrid_results = hybrid_search(
           col,
           query_embeddings["dense"][0],
      -    query_embeddings["sparse"][0],
      +    query_embeddings["sparse"]._getrow(0),
           sparse_weight=0.7,
           dense_weight=1.0,
       )
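La modifica qui sopra riflette il fatto che le embedding sparse restituite da BGE-M3 sono una matrice sparsa SciPy: per interrogare Milvus serve una singola riga (una sotto-matrice 1×N), non un elemento ottenuto per semplice indicizzazione di lista. Uno schizzo minimo con dati ipotetici:

```python
from scipy.sparse import csr_matrix

# Due embedding sparse ipotetiche di dimensione 4
sparse = csr_matrix([[0.0, 0.5, 0.0, 0.1], [0.2, 0.0, 0.3, 0.0]])

# Estrae la prima riga come matrice sparsa 1x4, analogamente a _getrow(0) qui sopra
row = sparse[[0], :]
print(row.shape)  # (1, 4)
```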
      @@ -309,4 +310,4 @@ formatted_results = doc_text_formatting(ef, query, hybrid_results)
       

      Quale attività è meglio avviare a Hyderabad?

      Di quale matematica ha bisogno un principiante per capire gli algoritmi della programmazione informatica? Quali libri sugli algoritmi sono adatti a un principiante assoluto?

How do you make life suit you and stop life from abusing you mentally and emotionally?

      -

Quick Deploy

To learn how to start an online demo with this tutorial, please refer to the example application.

      +

Quick Deploy

To learn how to start an online demo with this tutorial, please refer to the example application.

      diff --git a/localization/v2.4.x/site/it/tutorials/image_similarity_search.json b/localization/v2.4.x/site/it/tutorials/image_similarity_search.json index 32c2310dd..76bdf12e4 100644 --- a/localization/v2.4.x/site/it/tutorials/image_similarity_search.json +++ b/localization/v2.4.x/site/it/tutorials/image_similarity_search.json @@ -1 +1 @@ -{"codeList":["!wget https://github.com/milvus-io/pymilvus-assets/releases/download/imagedata/reverse_image_search.zip\n!unzip -q -o reverse_image_search.zip\n","$ pip install pymilvus --upgrade\n$ pip install timm\n","import torch\nfrom PIL import Image\nimport timm\nfrom sklearn.preprocessing import normalize\nfrom timm.data import resolve_data_config\nfrom timm.data.transforms_factory import create_transform\n\n\nclass FeatureExtractor:\n def __init__(self, modelname):\n # Load the pre-trained model\n self.model = timm.create_model(\n modelname, pretrained=True, num_classes=0, global_pool=\"avg\"\n )\n self.model.eval()\n\n # Get the input size required by the model\n self.input_size = self.model.default_cfg[\"input_size\"]\n\n config = resolve_data_config({}, model=modelname)\n # Get the preprocessing function provided by TIMM for the model\n self.preprocess = create_transform(**config)\n\n def __call__(self, imagepath):\n # Preprocess the input image\n input_image = Image.open(imagepath).convert(\"RGB\") # Convert to RGB if needed\n input_image = self.preprocess(input_image)\n\n # Convert the image to a PyTorch tensor and add a batch dimension\n input_tensor = input_image.unsqueeze(0)\n\n # Perform inference\n with torch.no_grad():\n output = self.model(input_tensor)\n\n # Extract the feature vector\n feature_vector = output.squeeze().numpy()\n\n return normalize(feature_vector.reshape(1, -1), norm=\"l2\").flatten()\n","from pymilvus import MilvusClient\n\n# Set up a Milvus client\nclient = MilvusClient(uri=\"example.db\")\n# Create a collection in quick setup mode\nif client.has_collection(collection_name=\"image_embeddings\"):\n client.drop_collection(collection_name=\"image_embeddings\")\nclient.create_collection(\n collection_name=\"image_embeddings\",\n vector_field_name=\"vector\",\n dimension=512,\n auto_id=True,\n enable_dynamic_field=True,\n metric_type=\"COSINE\",\n)\n","import os\n\nextractor = FeatureExtractor(\"resnet34\")\n\nroot = \"./train\"\ninsert = True\nif insert is True:\n for dirpath, foldername, filenames in os.walk(root):\n for filename in filenames:\n if filename.endswith(\".JPEG\"):\n filepath = dirpath + \"/\" + filename\n image_embedding = extractor(filepath)\n client.insert(\n \"image_embeddings\",\n {\"vector\": image_embedding, \"filename\": filepath},\n )\n","from IPython.display import display\n\nquery_image = \"./test/Afghan_hound/n02088094_4261.JPEG\"\n\nresults = client.search(\n \"image_embeddings\",\n data=[extractor(query_image)],\n output_fields=[\"filename\"],\n search_params={\"metric_type\": \"COSINE\"},\n)\nimages = []\nfor result in results:\n for hit in result[:10]:\n filename = hit[\"entity\"][\"filename\"]\n img = Image.open(filename)\n img = img.resize((150, 150))\n images.append(img)\n\nwidth = 150 * 5\nheight = 150 * 2\nconcatenated_image = Image.new(\"RGB\", (width, height))\n\nfor idx, img in enumerate(images):\n x = idx % 5\n y = idx // 5\n concatenated_image.paste(img, (x * 150, y * 150))\ndisplay(\"query\")\ndisplay(Image.open(query_image).resize((150, 150)))\ndisplay(\"results\")\ndisplay(concatenated_image)\n"],"headingContent":"","anchorList":[{"label":"Ricerca di immagini con 
Milvus","href":"Image-Search-with-Milvus","type":1,"isActive":false},{"label":"Preparazione del set di dati","href":"Dataset-Preparation","type":2,"isActive":false},{"label":"Requisiti preliminari","href":"Prequisites","type":2,"isActive":false},{"label":"Definire l'estrattore di funzioni","href":"Define-the-Feature-Extractor","type":2,"isActive":false},{"label":"Creare una raccolta Milvus","href":"Create-a-Milvus-Collection","type":2,"isActive":false},{"label":"Inserire gli embeddings in Milvus","href":"Insert-the-Embeddings-to-Milvus","type":2,"isActive":false},{"label":"Distribuzione rapida","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["!wget https://github.com/milvus-io/pymilvus-assets/releases/download/imagedata/reverse_image_search.zip\n!unzip -q -o reverse_image_search.zip\n","$ pip install pymilvus --upgrade\n$ pip install timm\n","import torch\nfrom PIL import Image\nimport timm\nfrom sklearn.preprocessing import normalize\nfrom timm.data import resolve_data_config\nfrom timm.data.transforms_factory import create_transform\n\n\nclass FeatureExtractor:\n def __init__(self, modelname):\n # Load the pre-trained model\n self.model = timm.create_model(\n modelname, pretrained=True, num_classes=0, global_pool=\"avg\"\n )\n self.model.eval()\n\n # Get the input size required by the model\n self.input_size = self.model.default_cfg[\"input_size\"]\n\n config = resolve_data_config({}, model=modelname)\n # Get the preprocessing function provided by TIMM for the model\n self.preprocess = create_transform(**config)\n\n def __call__(self, imagepath):\n # Preprocess the input image\n input_image = Image.open(imagepath).convert(\"RGB\") # Convert to RGB if needed\n input_image = self.preprocess(input_image)\n\n # Convert the image to a PyTorch tensor and add a batch dimension\n input_tensor = input_image.unsqueeze(0)\n\n # Perform inference\n with torch.no_grad():\n output = self.model(input_tensor)\n\n # Extract the feature vector\n feature_vector = output.squeeze().numpy()\n\n return normalize(feature_vector.reshape(1, -1), norm=\"l2\").flatten()\n","from pymilvus import MilvusClient\n\n# Set up a Milvus client\nclient = MilvusClient(uri=\"example.db\")\n# Create a collection in quick setup mode\nif client.has_collection(collection_name=\"image_embeddings\"):\n client.drop_collection(collection_name=\"image_embeddings\")\nclient.create_collection(\n collection_name=\"image_embeddings\",\n vector_field_name=\"vector\",\n dimension=512,\n auto_id=True,\n enable_dynamic_field=True,\n metric_type=\"COSINE\",\n)\n","import os\n\nextractor = FeatureExtractor(\"resnet34\")\n\nroot = \"./train\"\ninsert = True\nif insert is True:\n for dirpath, foldername, filenames in os.walk(root):\n for filename in filenames:\n if filename.endswith(\".JPEG\"):\n filepath = dirpath + \"/\" + filename\n image_embedding = extractor(filepath)\n client.insert(\n \"image_embeddings\",\n {\"vector\": image_embedding, \"filename\": filepath},\n )\n","from IPython.display import display\n\nquery_image = \"./test/Afghan_hound/n02088094_4261.JPEG\"\n\nresults = client.search(\n \"image_embeddings\",\n data=[extractor(query_image)],\n output_fields=[\"filename\"],\n search_params={\"metric_type\": \"COSINE\"},\n)\nimages = []\nfor result in results:\n for hit in result[:10]:\n filename = hit[\"entity\"][\"filename\"]\n img = Image.open(filename)\n img = img.resize((150, 150))\n images.append(img)\n\nwidth = 150 * 5\nheight = 150 * 2\nconcatenated_image = Image.new(\"RGB\", (width, 
height))\n\nfor idx, img in enumerate(images):\n x = idx % 5\n y = idx // 5\n concatenated_image.paste(img, (x * 150, y * 150))\ndisplay(\"query\")\ndisplay(Image.open(query_image).resize((150, 150)))\ndisplay(\"results\")\ndisplay(concatenated_image)\n"],"headingContent":"Image Search with Milvus","anchorList":[{"label":"Ricerca di immagini con Milvus","href":"Image-Search-with-Milvus","type":1,"isActive":false},{"label":"Preparazione del set di dati","href":"Dataset-Preparation","type":2,"isActive":false},{"label":"Requisiti preliminari","href":"Prequisites","type":2,"isActive":false},{"label":"Definire l'estrattore di funzioni","href":"Define-the-Feature-Extractor","type":2,"isActive":false},{"label":"Creare una raccolta Milvus","href":"Create-a-Milvus-Collection","type":2,"isActive":false},{"label":"Inserire gli embeddings in Milvus","href":"Insert-the-Embeddings-to-Milvus","type":2,"isActive":false},{"label":"Distribuzione rapida","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/tutorials/image_similarity_search.md b/localization/v2.4.x/site/it/tutorials/image_similarity_search.md index 0165fcc01..e4fcb385f 100644 --- a/localization/v2.4.x/site/it/tutorials/image_similarity_search.md +++ b/localization/v2.4.x/site/it/tutorials/image_similarity_search.md @@ -1,7 +1,7 @@ --- id: image_similarity_search.md summary: ricerca di immagini con Milvus -title: Ricerca immagini con Milvus +title: Ricerca di immagini con Milvus ---

Image Search with Milvus

      Open In Colab

      +

      Open In Colab +GitHub Repository

In this notebook, we will show you how to use Milvus to search for similar images in a dataset. To demonstrate this, we will use a subset of the ImageNet dataset and search for an image of an Afghan hound.
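At a high level, the flow is: embed every gallery image with a timm backbone, store the vectors in Milvus, then embed the query image and run a vector search. Below is a condensed sketch reusing the `FeatureExtractor` class and the `image_embeddings` collection defined later on this page; the query path comes from the downloaded dataset.

```python
# Condensed reverse-image-search flow; FeatureExtractor and the
# "image_embeddings" collection are defined in the full tutorial below.
from pymilvus import MilvusClient

client = MilvusClient(uri="example.db")
extractor = FeatureExtractor("resnet34")

results = client.search(
    "image_embeddings",
    data=[extractor("./test/Afghan_hound/n02088094_4261.JPEG")],
    output_fields=["filename"],
    search_params={"metric_type": "COSINE"},
)
for hit in results[0][:3]:
    # Each hit carries the stored filename and its cosine similarity score.
    print(hit["entity"]["filename"], hit["distance"])
```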

Dataset Preparation

To learn how to start an online demo with this tutorial, please refer to the example application.

      +

To learn how to start an online demo with this tutorial, please refer to the example application.

      diff --git a/localization/v2.4.x/site/it/tutorials/multimodal_rag_with_milvus.json b/localization/v2.4.x/site/it/tutorials/multimodal_rag_with_milvus.json index e62885ebf..90a2b8069 100644 --- a/localization/v2.4.x/site/it/tutorials/multimodal_rag_with_milvus.json +++ b/localization/v2.4.x/site/it/tutorials/multimodal_rag_with_milvus.json @@ -1 +1 @@ -{"codeList":["$ pip install --upgrade pymilvus openai datasets opencv-python timm einops ftfy peft tqdm\n","$ git clone https://github.com/FlagOpen/FlagEmbedding.git\n$ pip install -e FlagEmbedding\n","$ wget https://github.com/milvus-io/bootcamp/releases/download/data/amazon_reviews_2023_subset.tar.gz\n$ tar -xzf amazon_reviews_2023_subset.tar.gz\n","$ wget https://huggingface.co/BAAI/bge-visualized/resolve/main/Visualized_base_en_v1.5.pth\n","import torch\nfrom FlagEmbedding.visual.modeling import Visualized_BGE\n\n\nclass Encoder:\n def __init__(self, model_name: str, model_path: str):\n self.model = Visualized_BGE(model_name_bge=model_name, model_weight=model_path)\n self.model.eval()\n\n def encode_query(self, image_path: str, text: str) -> list[float]:\n with torch.no_grad():\n query_emb = self.model.encode(image=image_path, text=text)\n return query_emb.tolist()[0]\n\n def encode_image(self, image_path: str) -> list[float]:\n with torch.no_grad():\n query_emb = self.model.encode(image=image_path)\n return query_emb.tolist()[0]\n\n\nmodel_name = \"BAAI/bge-base-en-v1.5\"\nmodel_path = \"./Visualized_base_en_v1.5.pth\" # Change to your own value if using a different model path\nencoder = Encoder(model_name, model_path)\n","import os\nfrom tqdm import tqdm\nfrom glob import glob\n\n\n# Generate embeddings for the image dataset\ndata_dir = (\n \"./images_folder\" # Change to your own value if using a different data directory\n)\nimage_list = glob(\n os.path.join(data_dir, \"images\", \"*.jpg\")\n) # We will only use images ending with \".jpg\"\nimage_dict = {}\nfor image_path in tqdm(image_list, desc=\"Generating image embeddings: \"):\n try:\n image_dict[image_path] = encoder.encode_image(image_path)\n except Exception as e:\n print(f\"Failed to generate embedding for {image_path}. 
Skipped.\")\n continue\nprint(\"Number of encoded images:\", len(image_dict))\n","from pymilvus import MilvusClient\n\n\ndim = len(list(image_dict.values())[0])\ncollection_name = \"multimodal_rag_demo\"\n\n# Connect to Milvus client given URI\nmilvus_client = MilvusClient(uri=\"./milvus_demo.db\")\n\n# Create Milvus Collection\n# By default, vector field name is \"vector\"\nmilvus_client.create_collection(\n collection_name=collection_name,\n auto_id=True,\n dimension=dim,\n enable_dynamic_field=True,\n)\n\n# Insert data into collection\nmilvus_client.insert(\n collection_name=collection_name,\n data=[{\"image_path\": k, \"vector\": v} for k, v in image_dict.items()],\n)\n","query_image = os.path.join(\n data_dir, \"leopard.jpg\"\n) # Change to your own query image path\nquery_text = \"phone case with this image theme\"\n\n# Generate query embedding given image and text instructions\nquery_vec = encoder.encode_query(image_path=query_image, text=query_text)\n\nsearch_results = milvus_client.search(\n collection_name=collection_name,\n data=[query_vec],\n output_fields=[\"image_path\"],\n limit=9, # Max number of search results to return\n search_params={\"metric_type\": \"COSINE\", \"params\": {}}, # Search parameters\n)[0]\n\nretrieved_images = [hit.get(\"entity\").get(\"image_path\") for hit in search_results]\nprint(retrieved_images)\n","import numpy as np\nimport cv2\n\nimg_height = 300\nimg_width = 300\nrow_count = 3\n\n\ndef create_panoramic_view(query_image_path: str, retrieved_images: list) -> np.ndarray:\n \"\"\"\n creates a 5x5 panoramic view image from a list of images\n\n args:\n images: list of images to be combined\n\n returns:\n np.ndarray: the panoramic view image\n \"\"\"\n panoramic_width = img_width * row_count\n panoramic_height = img_height * row_count\n panoramic_image = np.full(\n (panoramic_height, panoramic_width, 3), 255, dtype=np.uint8\n )\n\n # create and resize the query image with a blue border\n query_image_null = np.full((panoramic_height, img_width, 3), 255, dtype=np.uint8)\n query_image = Image.open(query_image_path).convert(\"RGB\")\n query_array = np.array(query_image)[:, :, ::-1]\n resized_image = cv2.resize(query_array, (img_width, img_height))\n\n border_size = 10\n blue = (255, 0, 0) # blue color in BGR\n bordered_query_image = cv2.copyMakeBorder(\n resized_image,\n border_size,\n border_size,\n border_size,\n border_size,\n cv2.BORDER_CONSTANT,\n value=blue,\n )\n\n query_image_null[img_height * 2 : img_height * 3, 0:img_width] = cv2.resize(\n bordered_query_image, (img_width, img_height)\n )\n\n # add text \"query\" below the query image\n text = \"query\"\n font_scale = 1\n font_thickness = 2\n text_org = (10, img_height * 3 + 30)\n cv2.putText(\n query_image_null,\n text,\n text_org,\n cv2.FONT_HERSHEY_SIMPLEX,\n font_scale,\n blue,\n font_thickness,\n cv2.LINE_AA,\n )\n\n # combine the rest of the images into the panoramic view\n retrieved_imgs = [\n np.array(Image.open(img).convert(\"RGB\"))[:, :, ::-1] for img in retrieved_images\n ]\n for i, image in enumerate(retrieved_imgs):\n image = cv2.resize(image, (img_width - 4, img_height - 4))\n row = i // row_count\n col = i % row_count\n start_row = row * img_height\n start_col = col * img_width\n\n border_size = 2\n bordered_image = cv2.copyMakeBorder(\n image,\n border_size,\n border_size,\n border_size,\n border_size,\n cv2.BORDER_CONSTANT,\n value=(0, 0, 0),\n )\n panoramic_image[\n start_row : start_row + img_height, start_col : start_col + img_width\n ] = bordered_image\n\n # add red index 
numbers to each image\n text = str(i)\n org = (start_col + 50, start_row + 30)\n (font_width, font_height), baseline = cv2.getTextSize(\n text, cv2.FONT_HERSHEY_SIMPLEX, 1, 2\n )\n\n top_left = (org[0] - 48, start_row + 2)\n bottom_right = (org[0] - 48 + font_width + 5, org[1] + baseline + 5)\n\n cv2.rectangle(\n panoramic_image, top_left, bottom_right, (255, 255, 255), cv2.FILLED\n )\n cv2.putText(\n panoramic_image,\n text,\n (start_col + 10, start_row + 30),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1,\n (0, 0, 255),\n 2,\n cv2.LINE_AA,\n )\n\n # combine the query image with the panoramic view\n panoramic_image = np.hstack([query_image_null, panoramic_image])\n return panoramic_image\n","from PIL import Image\n\ncombined_image_path = os.path.join(data_dir, \"combined_image.jpg\")\npanoramic_image = create_panoramic_view(query_image, retrieved_images)\ncv2.imwrite(combined_image_path, panoramic_image)\n\ncombined_image = Image.open(combined_image_path)\nshow_combined_image = combined_image.resize((300, 300))\nshow_combined_image.show()\n","import requests\nimport base64\n\nopenai_api_key = \"sk-***\" # Change to your OpenAI API Key\n\n\ndef generate_ranking_explanation(\n combined_image_path: str, caption: str, infos: dict = None\n) -> tuple[list[int], str]:\n with open(combined_image_path, \"rb\") as image_file:\n base64_image = base64.b64encode(image_file.read()).decode(\"utf-8\")\n\n information = (\n \"You are responsible for ranking results for a Composed Image Retrieval. \"\n \"The user retrieves an image with an 'instruction' indicating their retrieval intent. \"\n \"For example, if the user queries a red car with the instruction 'change this car to blue,' a similar type of car in blue would be ranked higher in the results. \"\n \"Now you would receive instruction and query image with blue border. Every item has its red index number in its top left. Do not misunderstand it. \"\n f\"User instruction: {caption} \\n\\n\"\n )\n\n # add additional information for each image\n if infos:\n for i, info in enumerate(infos[\"product\"]):\n information += f\"{i}. {info}\\n\"\n\n information += (\n \"Provide a new ranked list of indices from most suitable to least suitable, followed by an explanation for the top 1 most suitable item only. 
\"\n \"The format of the response has to be 'Ranked list: []' with the indices in brackets as integers, followed by 'Reasons:' plus the explanation why this most fit user's query intent.\"\n )\n\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": f\"Bearer {openai_api_key}\",\n }\n\n payload = {\n \"model\": \"gpt-4o\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"text\", \"text\": information},\n {\n \"type\": \"image_url\",\n \"image_url\": {\"url\": f\"data:image/jpeg;base64,{base64_image}\"},\n },\n ],\n }\n ],\n \"max_tokens\": 300,\n }\n\n response = requests.post(\n \"https://api.openai.com/v1/chat/completions\", headers=headers, json=payload\n )\n result = response.json()[\"choices\"][0][\"message\"][\"content\"]\n\n # parse the ranked indices from the response\n start_idx = result.find(\"[\")\n end_idx = result.find(\"]\")\n ranked_indices_str = result[start_idx + 1 : end_idx].split(\",\")\n ranked_indices = [int(index.strip()) for index in ranked_indices_str]\n\n # extract explanation\n explanation = result[end_idx + 1 :].strip()\n\n return ranked_indices, explanation\n","ranked_indices, explanation = generate_ranking_explanation(\n combined_image_path, query_text\n)\n","print(explanation)\n\nbest_index = ranked_indices[0]\nbest_img = Image.open(retrieved_images[best_index])\nbest_img = best_img.resize((150, 150))\nbest_img.show()\n"],"headingContent":"","anchorList":[{"label":"RAG multimodale con Milvus","href":"Multimodal-RAG-with-Milvus","type":1,"isActive":false},{"label":"Preparazione","href":"Preparation","type":2,"isActive":false},{"label":"Caricare i dati","href":"Load-Data","type":2,"isActive":false},{"label":"Ricerca multimodale con Reranker generativo","href":"Multimodal-Search-with-Generative-Reranker","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install --upgrade pymilvus openai datasets opencv-python timm einops ftfy peft tqdm\n","$ git clone https://github.com/FlagOpen/FlagEmbedding.git\n$ pip install -e FlagEmbedding\n","$ wget https://github.com/milvus-io/bootcamp/releases/download/data/amazon_reviews_2023_subset.tar.gz\n$ tar -xzf amazon_reviews_2023_subset.tar.gz\n","$ wget https://huggingface.co/BAAI/bge-visualized/resolve/main/Visualized_base_en_v1.5.pth\n","import torch\nfrom FlagEmbedding.visual.modeling import Visualized_BGE\n\n\nclass Encoder:\n def __init__(self, model_name: str, model_path: str):\n self.model = Visualized_BGE(model_name_bge=model_name, model_weight=model_path)\n self.model.eval()\n\n def encode_query(self, image_path: str, text: str) -> list[float]:\n with torch.no_grad():\n query_emb = self.model.encode(image=image_path, text=text)\n return query_emb.tolist()[0]\n\n def encode_image(self, image_path: str) -> list[float]:\n with torch.no_grad():\n query_emb = self.model.encode(image=image_path)\n return query_emb.tolist()[0]\n\n\nmodel_name = \"BAAI/bge-base-en-v1.5\"\nmodel_path = \"./Visualized_base_en_v1.5.pth\" # Change to your own value if using a different model path\nencoder = Encoder(model_name, model_path)\n","import os\nfrom tqdm import tqdm\nfrom glob import glob\n\n\n# Generate embeddings for the image dataset\ndata_dir = (\n \"./images_folder\" # Change to your own value if using a different data directory\n)\nimage_list = glob(\n os.path.join(data_dir, \"images\", \"*.jpg\")\n) # We will only use images ending with \".jpg\"\nimage_dict = {}\nfor image_path in tqdm(image_list, desc=\"Generating image embeddings: \"):\n try:\n 
image_dict[image_path] = encoder.encode_image(image_path)\n except Exception as e:\n print(f\"Failed to generate embedding for {image_path}. Skipped.\")\n continue\nprint(\"Number of encoded images:\", len(image_dict))\n","from pymilvus import MilvusClient\n\n\ndim = len(list(image_dict.values())[0])\ncollection_name = \"multimodal_rag_demo\"\n\n# Connect to Milvus client given URI\nmilvus_client = MilvusClient(uri=\"./milvus_demo.db\")\n\n# Create Milvus Collection\n# By default, vector field name is \"vector\"\nmilvus_client.create_collection(\n collection_name=collection_name,\n auto_id=True,\n dimension=dim,\n enable_dynamic_field=True,\n)\n\n# Insert data into collection\nmilvus_client.insert(\n collection_name=collection_name,\n data=[{\"image_path\": k, \"vector\": v} for k, v in image_dict.items()],\n)\n","query_image = os.path.join(\n data_dir, \"leopard.jpg\"\n) # Change to your own query image path\nquery_text = \"phone case with this image theme\"\n\n# Generate query embedding given image and text instructions\nquery_vec = encoder.encode_query(image_path=query_image, text=query_text)\n\nsearch_results = milvus_client.search(\n collection_name=collection_name,\n data=[query_vec],\n output_fields=[\"image_path\"],\n limit=9, # Max number of search results to return\n search_params={\"metric_type\": \"COSINE\", \"params\": {}}, # Search parameters\n)[0]\n\nretrieved_images = [hit.get(\"entity\").get(\"image_path\") for hit in search_results]\nprint(retrieved_images)\n","import numpy as np\nimport cv2\n\nimg_height = 300\nimg_width = 300\nrow_count = 3\n\n\ndef create_panoramic_view(query_image_path: str, retrieved_images: list) -> np.ndarray:\n \"\"\"\n creates a 5x5 panoramic view image from a list of images\n\n args:\n images: list of images to be combined\n\n returns:\n np.ndarray: the panoramic view image\n \"\"\"\n panoramic_width = img_width * row_count\n panoramic_height = img_height * row_count\n panoramic_image = np.full(\n (panoramic_height, panoramic_width, 3), 255, dtype=np.uint8\n )\n\n # create and resize the query image with a blue border\n query_image_null = np.full((panoramic_height, img_width, 3), 255, dtype=np.uint8)\n query_image = Image.open(query_image_path).convert(\"RGB\")\n query_array = np.array(query_image)[:, :, ::-1]\n resized_image = cv2.resize(query_array, (img_width, img_height))\n\n border_size = 10\n blue = (255, 0, 0) # blue color in BGR\n bordered_query_image = cv2.copyMakeBorder(\n resized_image,\n border_size,\n border_size,\n border_size,\n border_size,\n cv2.BORDER_CONSTANT,\n value=blue,\n )\n\n query_image_null[img_height * 2 : img_height * 3, 0:img_width] = cv2.resize(\n bordered_query_image, (img_width, img_height)\n )\n\n # add text \"query\" below the query image\n text = \"query\"\n font_scale = 1\n font_thickness = 2\n text_org = (10, img_height * 3 + 30)\n cv2.putText(\n query_image_null,\n text,\n text_org,\n cv2.FONT_HERSHEY_SIMPLEX,\n font_scale,\n blue,\n font_thickness,\n cv2.LINE_AA,\n )\n\n # combine the rest of the images into the panoramic view\n retrieved_imgs = [\n np.array(Image.open(img).convert(\"RGB\"))[:, :, ::-1] for img in retrieved_images\n ]\n for i, image in enumerate(retrieved_imgs):\n image = cv2.resize(image, (img_width - 4, img_height - 4))\n row = i // row_count\n col = i % row_count\n start_row = row * img_height\n start_col = col * img_width\n\n border_size = 2\n bordered_image = cv2.copyMakeBorder(\n image,\n border_size,\n border_size,\n border_size,\n border_size,\n cv2.BORDER_CONSTANT,\n value=(0, 0, 
0),\n )\n panoramic_image[\n start_row : start_row + img_height, start_col : start_col + img_width\n ] = bordered_image\n\n # add red index numbers to each image\n text = str(i)\n org = (start_col + 50, start_row + 30)\n (font_width, font_height), baseline = cv2.getTextSize(\n text, cv2.FONT_HERSHEY_SIMPLEX, 1, 2\n )\n\n top_left = (org[0] - 48, start_row + 2)\n bottom_right = (org[0] - 48 + font_width + 5, org[1] + baseline + 5)\n\n cv2.rectangle(\n panoramic_image, top_left, bottom_right, (255, 255, 255), cv2.FILLED\n )\n cv2.putText(\n panoramic_image,\n text,\n (start_col + 10, start_row + 30),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1,\n (0, 0, 255),\n 2,\n cv2.LINE_AA,\n )\n\n # combine the query image with the panoramic view\n panoramic_image = np.hstack([query_image_null, panoramic_image])\n return panoramic_image\n","from PIL import Image\n\ncombined_image_path = os.path.join(data_dir, \"combined_image.jpg\")\npanoramic_image = create_panoramic_view(query_image, retrieved_images)\ncv2.imwrite(combined_image_path, panoramic_image)\n\ncombined_image = Image.open(combined_image_path)\nshow_combined_image = combined_image.resize((300, 300))\nshow_combined_image.show()\n","import requests\nimport base64\n\nopenai_api_key = \"sk-***\" # Change to your OpenAI API Key\n\n\ndef generate_ranking_explanation(\n combined_image_path: str, caption: str, infos: dict = None\n) -> tuple[list[int], str]:\n with open(combined_image_path, \"rb\") as image_file:\n base64_image = base64.b64encode(image_file.read()).decode(\"utf-8\")\n\n information = (\n \"You are responsible for ranking results for a Composed Image Retrieval. \"\n \"The user retrieves an image with an 'instruction' indicating their retrieval intent. \"\n \"For example, if the user queries a red car with the instruction 'change this car to blue,' a similar type of car in blue would be ranked higher in the results. \"\n \"Now you would receive instruction and query image with blue border. Every item has its red index number in its top left. Do not misunderstand it. \"\n f\"User instruction: {caption} \\n\\n\"\n )\n\n # add additional information for each image\n if infos:\n for i, info in enumerate(infos[\"product\"]):\n information += f\"{i}. {info}\\n\"\n\n information += (\n \"Provide a new ranked list of indices from most suitable to least suitable, followed by an explanation for the top 1 most suitable item only. 
\"\n \"The format of the response has to be 'Ranked list: []' with the indices in brackets as integers, followed by 'Reasons:' plus the explanation why this most fit user's query intent.\"\n )\n\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": f\"Bearer {openai_api_key}\",\n }\n\n payload = {\n \"model\": \"gpt-4o\",\n \"messages\": [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"text\", \"text\": information},\n {\n \"type\": \"image_url\",\n \"image_url\": {\"url\": f\"data:image/jpeg;base64,{base64_image}\"},\n },\n ],\n }\n ],\n \"max_tokens\": 300,\n }\n\n response = requests.post(\n \"https://api.openai.com/v1/chat/completions\", headers=headers, json=payload\n )\n result = response.json()[\"choices\"][0][\"message\"][\"content\"]\n\n # parse the ranked indices from the response\n start_idx = result.find(\"[\")\n end_idx = result.find(\"]\")\n ranked_indices_str = result[start_idx + 1 : end_idx].split(\",\")\n ranked_indices = [int(index.strip()) for index in ranked_indices_str]\n\n # extract explanation\n explanation = result[end_idx + 1 :].strip()\n\n return ranked_indices, explanation\n","ranked_indices, explanation = generate_ranking_explanation(\n combined_image_path, query_text\n)\n","print(explanation)\n\nbest_index = ranked_indices[0]\nbest_img = Image.open(retrieved_images[best_index])\nbest_img = best_img.resize((150, 150))\nbest_img.show()\n"],"headingContent":"Multimodal RAG with Milvus","anchorList":[{"label":"RAG multimodale con Milvus","href":"Multimodal-RAG-with-Milvus","type":1,"isActive":false},{"label":"Preparazione","href":"Preparation","type":2,"isActive":false},{"label":"Caricare i dati","href":"Load-Data","type":2,"isActive":false},{"label":"Ricerca multimodale con Reranker generativo","href":"Multimodal-Search-with-Generative-Reranker","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/tutorials/multimodal_rag_with_milvus.md b/localization/v2.4.x/site/it/tutorials/multimodal_rag_with_milvus.md index 28273ae50..ae8e250d2 100644 --- a/localization/v2.4.x/site/it/tutorials/multimodal_rag_with_milvus.md +++ b/localization/v2.4.x/site/it/tutorials/multimodal_rag_with_milvus.md @@ -18,7 +18,8 @@ title: RAG multimodale con Milvus d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

      Open In Colab

      +

      Open In Colab +GitHub Repository

This tutorial showcases multimodal RAG powered by Milvus, the Visualized BGE model, and GPT-4o. With this system, users can upload an image and edit text instructions, which are processed by BGE's composed retrieval model to search for candidate images. GPT-4o then acts as a reranker, selecting the most suitable image and providing the rationale behind the choice. This powerful combination enables a seamless and intuitive image search experience, leveraging Milvus for efficient retrieval, the BGE model for precise image processing and matching, and GPT-4o for advanced reranking.
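Condensed to its essence, the retrieve-then-rerank loop from this tutorial looks as follows; `encoder`, `milvus_client`, `create_panoramic_view`, and `generate_ranking_explanation` are the helpers defined later on this page, and the query image and instruction are the tutorial's examples.

```python
# 1) Composed retrieval: encode (image + instruction) into one query vector.
query_vec = encoder.encode_query(
    image_path="./images_folder/leopard.jpg",
    text="phone case with this image theme",
)
hits = milvus_client.search(
    collection_name="multimodal_rag_demo",
    data=[query_vec],
    output_fields=["image_path"],
    limit=9,
    search_params={"metric_type": "COSINE", "params": {}},
)[0]
retrieved_images = [hit.get("entity").get("image_path") for hit in hits]

# 2) Generative reranking: GPT-4o inspects a panoramic image of all
#    candidates (built by create_panoramic_view) and explains the top pick.
ranked_indices, explanation = generate_ranking_explanation(
    combined_image_path, "phone case with this image theme"
)
```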

      @@ -124,7 +125,7 @@ image_dict = {} Number of encoded images: 900
      -

Insert into Milvus

Insert the images with their corresponding paths and embeddings into the Milvus collection.

      +

Insert into Milvus

Insert the images with their corresponding paths and embeddings into the Milvus collection.
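The corresponding call from this tutorial's code listing inserts one entity per image, keeping the file path as a dynamic field alongside the vector:

```python
# Insert one entity per image: its embedding plus the originating file path.
milvus_client.insert(
    collection_name=collection_name,
    data=[{"image_path": k, "vector": v} for k, v in image_dict.items()],
)
```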

As for the argument of MilvusClient:
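This tutorial passes a local file path, which stores everything via Milvus Lite; the same argument can instead point at a self-hosted server or a Zilliz Cloud cluster. A sketch of the options (the endpoint and key below are placeholders):

```python
from pymilvus import MilvusClient

# Local file: the most convenient option; data is handled by Milvus Lite.
client = MilvusClient(uri="./milvus_demo.db")

# Self-hosted Milvus: point at the server address instead (placeholder URI).
# client = MilvusClient(uri="http://localhost:19530")

# Zilliz Cloud: use the cluster endpoint and an API key (placeholders).
# client = MilvusClient(uri="<cluster-endpoint>", token="<api-key>")
```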

        diff --git a/localization/v2.4.x/site/it/tutorials/tutorials-overview.json b/localization/v2.4.x/site/it/tutorials/tutorials-overview.json index af4c8529a..ff5b08069 100644 --- a/localization/v2.4.x/site/it/tutorials/tutorials-overview.json +++ b/localization/v2.4.x/site/it/tutorials/tutorials-overview.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"","anchorList":[{"label":"Panoramica dei tutorial","href":"Tutorials-Overview","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Tutorials Overview","anchorList":[{"label":"Panoramica dei tutorial","href":"Tutorials-Overview","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/tutorials/tutorials-overview.md b/localization/v2.4.x/site/it/tutorials/tutorials-overview.md index 884e08c25..2dea4fb14 100644 --- a/localization/v2.4.x/site/it/tutorials/tutorials-overview.md +++ b/localization/v2.4.x/site/it/tutorials/tutorials-overview.md @@ -1,7 +1,7 @@ --- id: tutorials-overview.md summary: Questa pagina fornisce un elenco di esercitazioni per interagire con Milvus. -title: Panoramica delle esercitazioni +title: Panoramica dei tutorial ---

Tutorials Overview

| Milvus version | Python SDK version | Java SDK version | Node.js SDK version |
| --- | --- | --- | --- |
| 2.4.1 | 2.4.1 | 2.4.0 | 2.4.2 |
      diff --git a/localization/v2.4.x/site/it/userGuide/clustering-compaction.json b/localization/v2.4.x/site/it/userGuide/clustering-compaction.json index 7c97b101f..25d294f36 100644 --- a/localization/v2.4.x/site/it/userGuide/clustering-compaction.json +++ b/localization/v2.4.x/site/it/userGuide/clustering-compaction.json @@ -1 +1 @@ -{"codeList":["dataCoord:\n compaction:\n clustering:\n enable: true \n autoEnable: false \n triggerInterval: 600 \n minInterval: 3600 \n maxInterval: 259200 \n newDataSizeThreshold: 512m \n timeout: 7200\n \nqueryNode:\n enableSegmentPrune: true \n\ndatanode:\n clusteringCompaction:\n memoryBufferRatio: 0.1 \n workPoolSize: 8 \ncommon:\n usePartitionKeyAsClusteringKey: true \n","default_fields = [\n FieldSchema(name=\"id\", dtype=DataType.INT64, is_primary=True),\n FieldSchema(name=\"key\", dtype=DataType.INT64, is_clustering_key=True),\n FieldSchema(name=\"var\", dtype=DataType.VARCHAR, max_length=1000, is_primary=False),\n FieldSchema(name=\"embeddings\", dtype=DataType.FLOAT_VECTOR, dim=dim)\n]\n\ndefault_schema = CollectionSchema(\n fields=default_fields, \n description=\"test clustering-key collection\"\n)\n\ncoll1 = Collection(name=\"clustering_test\", schema=default_schema)\n","coll1.compact(is_clustering=True)\ncoll1.get_compaction_state(is_clustering=True)\ncoll1.wait_for_compaction_completed(is_clustering=True)\n"],"headingContent":"","anchorList":[{"label":"Compattazione dei cluster","href":"Clustering-Compaction","type":1,"isActive":false},{"label":"Panoramica","href":"Overview","type":2,"isActive":false},{"label":"Utilizzare la compattazione del clustering","href":"Use-Clustering-Compaction","type":2,"isActive":false},{"label":"Configurazione della raccolta","href":"Collection-Configuration","type":2,"isActive":false},{"label":"Attivare la compattazione del clustering","href":"Trigger-Clustering-Compaction","type":2,"isActive":false},{"label":"Le migliori pratiche","href":"Best-practices","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["dataCoord:\n compaction:\n clustering:\n enable: true \n autoEnable: false \n triggerInterval: 600 \n minInterval: 3600 \n maxInterval: 259200 \n newDataSizeThreshold: 512m \n timeout: 7200\n \nqueryNode:\n enableSegmentPrune: true \n\ndatanode:\n clusteringCompaction:\n memoryBufferRatio: 0.1 \n workPoolSize: 8 \ncommon:\n usePartitionKeyAsClusteringKey: true \n","default_fields = [\n FieldSchema(name=\"id\", dtype=DataType.INT64, is_primary=True),\n FieldSchema(name=\"key\", dtype=DataType.INT64, is_clustering_key=True),\n FieldSchema(name=\"var\", dtype=DataType.VARCHAR, max_length=1000, is_primary=False),\n FieldSchema(name=\"embeddings\", dtype=DataType.FLOAT_VECTOR, dim=dim)\n]\n\ndefault_schema = CollectionSchema(\n fields=default_fields, \n description=\"test clustering-key collection\"\n)\n\ncoll1 = Collection(name=\"clustering_test\", schema=default_schema)\n","coll1.compact(is_clustering=True)\ncoll1.get_compaction_state(is_clustering=True)\ncoll1.wait_for_compaction_completed(is_clustering=True)\n"],"headingContent":"Clustering Compaction","anchorList":[{"label":"Compattazione dei cluster","href":"Clustering-Compaction","type":1,"isActive":false},{"label":"Panoramica","href":"Overview","type":2,"isActive":false},{"label":"Utilizzare la compattazione del clustering","href":"Use-Clustering-Compaction","type":2,"isActive":false},{"label":"Attivare la compattazione del clustering","href":"Trigger-Clustering-Compaction","type":2,"isActive":false},{"label":"Le migliori 
pratiche","href":"Best-practices","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/userGuide/clustering-compaction.md b/localization/v2.4.x/site/it/userGuide/clustering-compaction.md index 8b0037ebb..93d61e2ec 100644 --- a/localization/v2.4.x/site/it/userGuide/clustering-compaction.md +++ b/localization/v2.4.x/site/it/userGuide/clustering-compaction.md @@ -1,6 +1,6 @@ --- id: clustering-compaction.md -title: Compattazione del clustering +title: Compattazione dei cluster related_key: 'clustering, compaction' summary: >- La compattazione del clustering è progettata per migliorare le prestazioni di @@ -99,8 +99,8 @@ common: enableSpecifica se abilitare la compattazione del clustering.
Set this value to true if you need to enable this feature for every collection that has a clustering key. (Default: false)

| Parameter | Description | Default |
| --- | --- | --- |
| autoEnable | Specifies whether to enable automatic compaction. Setting this to true means Milvus compacts collections that have a clustering key at the specified intervals. | false |
| triggerInterval | Specifies the interval in milliseconds at which Milvus starts clustering compaction. This parameter is valid only when autoEnable is set to true. | - |
-| minInterval | Specifies the minimum interval in milliseconds. This parameter is valid only when autoEnable is set to true. Setting it to an integer greater than triggerInterval avoids repeated compactions within a short period. | - |
-| maxInterval | Specifies the maximum interval in milliseconds. This parameter is valid only when autoEnable is set to true. When Milvus detects that a collection has not been compacted for longer than this period, it forces a clustering compaction. | - |
+| minInterval | Specifies the minimum interval in seconds. This parameter is valid only when autoEnable is set to true. Setting it to an integer greater than triggerInterval avoids repeated compactions within a short period. | - |
+| maxInterval | Specifies the maximum interval in seconds. This parameter is valid only when autoEnable is set to true. When Milvus detects that a collection has not been clustering-compacted for longer than this period, it forces a clustering compaction. | - |
| newDataSizeThreshold | Specifies the upper threshold for triggering clustering compaction. This parameter is valid only when autoEnable is set to true. When Milvus detects that the data volume in a collection exceeds this value, it starts a clustering compaction process. | - |
| timeout | Specifies the timeout duration for clustering compaction. A clustering compaction fails if its execution time exceeds this value. | - |

@@ -139,22 +139,7 @@ common:

To apply the above changes to your Milvus cluster, follow the steps in Configure Milvus with Helm and Configure Milvus with Milvus Operator.

    -

Collection Configuration

For clustering compaction on a specific collection, select a scalar field in the collection as the clustering key.

    +

Collection Configuration

For clustering compaction on a specific collection, select a scalar field in the collection as the clustering key.

    default_fields = [
         FieldSchema(name="id", dtype=DataType.INT64, is_primary=True),
         FieldSchema(name="key", dtype=DataType.INT64, is_clustering_key=True),
    diff --git a/localization/v2.4.x/site/it/userGuide/insert-update-delete.json b/localization/v2.4.x/site/it/userGuide/insert-update-delete.json
    index 0c019cea4..2150a655a 100644
    --- a/localization/v2.4.x/site/it/userGuide/insert-update-delete.json
    +++ b/localization/v2.4.x/site/it/userGuide/insert-update-delete.json
    @@ -1 +1 @@
    -{"codeList":["from pymilvus import MilvusClient\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n    uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n    collection_name=\"quick_setup\",\n    dimension=5,\n    metric_type=\"IP\"\n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n    .uri(CLUSTER_ENDPOINT)\n    .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n    .collectionName(\"quick_setup\")\n    .dimension(5)\n    .metricType(\"IP\")\n    .build();\n\nclient.createCollection(quickSetupReq);\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n    collection_name: \"quick_setup\",\n    dimension: 5,\n    metric_type: \"IP\"\n});  \n","# 3. Insert some data\ndata=[\n    {\"id\": 0, \"vector\": [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592], \"color\": \"pink_8682\"},\n    {\"id\": 1, \"vector\": [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104], \"color\": \"red_7025\"},\n    {\"id\": 2, \"vector\": [0.43742130801983836, -0.5597502546264526, 0.6457887650909682, 0.7894058910881185, 0.20785793220625592], \"color\": \"orange_6781\"},\n    {\"id\": 3, \"vector\": [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345], \"color\": \"pink_9298\"},\n    {\"id\": 4, \"vector\": [0.4452349528804562, -0.8757026943054742, 0.8220779437047674, 0.46406290649483184, 0.30337481143159106], \"color\": \"red_4794\"},\n    {\"id\": 5, \"vector\": [0.985825131989184, -0.8144651566660419, 0.6299267002202009, 0.1206906911183383, -0.1446277761879955], \"color\": \"yellow_4222\"},\n    {\"id\": 6, \"vector\": [0.8371977790571115, -0.015764369584852833, -0.31062937026679327, -0.562666951622192, -0.8984947637863987], \"color\": \"red_9392\"},\n    {\"id\": 7, \"vector\": [-0.33445148015177995, -0.2567135004164067, 0.8987539745369246, 0.9402995886420709, 0.5378064918413052], \"color\": \"grey_8510\"},\n    {\"id\": 8, \"vector\": [0.39524717779832685, 0.4000257286739164, -0.5890507376891594, -0.8650502298996872, -0.6140360785406336], \"color\": \"white_9381\"},\n    {\"id\": 9, \"vector\": [0.5718280481994695, 0.24070317428066512, -0.3737913482606834, -0.06726932177492717, -0.6980531615588608], \"color\": \"purple_4976\"}\n]\n\nres = client.insert(\n    collection_name=\"quick_setup\",\n    data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"insert_count\": 10,\n#     \"ids\": [\n#         0,\n#         1,\n#         2,\n#         3,\n#         4,\n#         5,\n#         6,\n#         7,\n#         8,\n#         9\n#     ]\n# }\n","import java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp;\n\n// 3. 
Insert some data\nList data = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 0L, \"vector\", Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f), \"color\", \"pink_8682\")),\n    new JSONObject(Map.of(\"id\", 1L, \"vector\", Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f), \"color\", \"red_7025\")),\n    new JSONObject(Map.of(\"id\", 2L, \"vector\", Arrays.asList(0.43742130801983836f, -0.5597502546264526f, 0.6457887650909682f, 0.7894058910881185f, 0.20785793220625592f), \"color\", \"orange_6781\")),\n    new JSONObject(Map.of(\"id\", 3L, \"vector\", Arrays.asList(0.3172005263489739f, 0.9719044792798428f, -0.36981146090600725f, -0.4860894583077995f, 0.95791889146345f), \"color\", \"pink_9298\")),\n    new JSONObject(Map.of(\"id\", 4L, \"vector\", Arrays.asList(0.4452349528804562f, -0.8757026943054742f, 0.8220779437047674f, 0.46406290649483184f, 0.30337481143159106f), \"color\", \"red_4794\")),\n    new JSONObject(Map.of(\"id\", 5L, \"vector\", Arrays.asList(0.985825131989184f, -0.8144651566660419f, 0.6299267002202009f, 0.1206906911183383f, -0.1446277761879955f), \"color\", \"yellow_4222\")),\n    new JSONObject(Map.of(\"id\", 6L, \"vector\", Arrays.asList(0.8371977790571115f, -0.015764369584852833f, -0.31062937026679327f, -0.562666951622192f, -0.8984947637863987f), \"color\", \"red_9392\")),\n    new JSONObject(Map.of(\"id\", 7L, \"vector\", Arrays.asList(-0.33445148015177995f, -0.2567135004164067f, 0.8987539745369246f, 0.9402995886420709f, 0.5378064918413052f), \"color\", \"grey_8510\")),\n    new JSONObject(Map.of(\"id\", 8L, \"vector\", Arrays.asList(0.39524717779832685f, 0.4000257286739164f, -0.5890507376891594f, -0.8650502298996872f, -0.6140360785406336f), \"color\", \"white_9381\")),\n    new JSONObject(Map.of(\"id\", 9L, \"vector\", Arrays.asList(0.5718280481994695f, 0.24070317428066512f, -0.3737913482606834f, -0.06726932177492717f, -0.6980531615588608f), \"color\", \"purple_4976\"))\n);\n\nInsertReq insertReq = InsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 10}\n","// 3. 
Insert some data\n\nvar data = [\n    {id: 0, vector: [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592], color: \"pink_8682\"},\n    {id: 1, vector: [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104], color: \"red_7025\"},\n    {id: 2, vector: [0.43742130801983836, -0.5597502546264526, 0.6457887650909682, 0.7894058910881185, 0.20785793220625592], color: \"orange_6781\"},\n    {id: 3, vector: [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345], color: \"pink_9298\"},\n    {id: 4, vector: [0.4452349528804562, -0.8757026943054742, 0.8220779437047674, 0.46406290649483184, 0.30337481143159106], color: \"red_4794\"},\n    {id: 5, vector: [0.985825131989184, -0.8144651566660419, 0.6299267002202009, 0.1206906911183383, -0.1446277761879955], color: \"yellow_4222\"},\n    {id: 6, vector: [0.8371977790571115, -0.015764369584852833, -0.31062937026679327, -0.562666951622192, -0.8984947637863987], color: \"red_9392\"},\n    {id: 7, vector: [-0.33445148015177995, -0.2567135004164067, 0.8987539745369246, 0.9402995886420709, 0.5378064918413052], color: \"grey_8510\"},\n    {id: 8, vector: [0.39524717779832685, 0.4000257286739164, -0.5890507376891594, -0.8650502298996872, -0.6140360785406336], color: \"white_9381\"},\n    {id: 9, vector: [0.5718280481994695, 0.24070317428066512, -0.3737913482606834, -0.06726932177492717, -0.6980531615588608], color: \"purple_4976\"}        \n]\n\nvar res = await client.insert({\n    collection_name: \"quick_setup\",\n    data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 10\n// \n","# 4. Insert some more data into a specific partition\ndata=[\n    {\"id\": 10, \"vector\": [-0.5570353903748935, -0.8997887893201304, -0.7123782431855732, -0.6298990746450119, 0.6699215060604258], \"color\": \"red_1202\"},\n    {\"id\": 11, \"vector\": [0.6319019033373907, 0.6821488267878275, 0.8552303045704168, 0.36929791364943054, -0.14152860714878068], \"color\": \"blue_4150\"},\n    {\"id\": 12, \"vector\": [0.9483947484855766, -0.32294203351925344, 0.9759290319978025, 0.8262982148666174, -0.8351194181285713], \"color\": \"orange_4590\"},\n    {\"id\": 13, \"vector\": [-0.5449109892498731, 0.043511240563786524, -0.25105249484790804, -0.012030655265886425, -0.0010987671273892108], \"color\": \"pink_9619\"},\n    {\"id\": 14, \"vector\": [0.6603339372951424, -0.10866551787442225, -0.9435597754324891, 0.8230244263466688, -0.7986720938400362], \"color\": \"orange_4863\"},\n    {\"id\": 15, \"vector\": [-0.8825129181091456, -0.9204557711667729, -0.935350065513425, 0.5484069690287079, 0.24448151140671204], \"color\": \"orange_7984\"},\n    {\"id\": 16, \"vector\": [0.6285586391568163, 0.5389064528263487, -0.3163366239905099, 0.22036279378888013, 0.15077052220816167], \"color\": \"blue_9010\"},\n    {\"id\": 17, \"vector\": [-0.20151825016059233, -0.905239387635804, 0.6749305353372479, -0.7324272081377843, -0.33007998971889263], \"color\": \"blue_4521\"},\n    {\"id\": 18, \"vector\": [0.2432286610792349, 0.01785636564206139, -0.651356982731391, -0.35848148851027895, -0.7387383128324057], \"color\": \"orange_2529\"},\n    {\"id\": 19, \"vector\": [0.055512329053363674, 0.7100266349039421, 0.4956956543575197, 0.24541352586717702, 0.4209030729923515], \"color\": \"red_9437\"}\n]\n\nclient.create_partition(\n    collection_name=\"quick_setup\",\n    partition_name=\"partitionA\"\n)\n\nres = client.insert(\n    
collection_name=\"quick_setup\",\n    data=data,\n    partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"insert_count\": 10,\n#     \"ids\": [\n#         10,\n#         11,\n#         12,\n#         13,\n#         14,\n#         15,\n#         16,\n#         17,\n#         18,\n#         19\n#     ]\n# }\n","// 4. Insert some more data into a specific partition\ndata = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 10L, \"vector\", Arrays.asList(-0.5570353903748935f, -0.8997887893201304f, -0.7123782431855732f, -0.6298990746450119f, 0.6699215060604258f), \"color\", \"red_1202\")),\n    new JSONObject(Map.of(\"id\", 11L, \"vector\", Arrays.asList(0.6319019033373907f, 0.6821488267878275f, 0.8552303045704168f, 0.36929791364943054f, -0.14152860714878068f), \"color\", \"blue_4150\")),\n    new JSONObject(Map.of(\"id\", 12L, \"vector\", Arrays.asList(0.9483947484855766f, -0.32294203351925344f, 0.9759290319978025f, 0.8262982148666174f, -0.8351194181285713f), \"color\", \"orange_4590\")),\n    new JSONObject(Map.of(\"id\", 13L, \"vector\", Arrays.asList(-0.5449109892498731f, 0.043511240563786524f, -0.25105249484790804f, -0.012030655265886425f, -0.0010987671273892108f), \"color\", \"pink_9619\")),\n    new JSONObject(Map.of(\"id\", 14L, \"vector\", Arrays.asList(0.6603339372951424f, -0.10866551787442225f, -0.9435597754324891f, 0.8230244263466688f, -0.7986720938400362f), \"color\", \"orange_4863\")),\n    new JSONObject(Map.of(\"id\", 15L, \"vector\", Arrays.asList(-0.8825129181091456f, -0.9204557711667729f, -0.935350065513425f, 0.5484069690287079f, 0.24448151140671204f), \"color\", \"orange_7984\")),\n    new JSONObject(Map.of(\"id\", 16L, \"vector\", Arrays.asList(0.6285586391568163f, 0.5389064528263487f, -0.3163366239905099f, 0.22036279378888013f, 0.15077052220816167f), \"color\", \"blue_9010\")),\n    new JSONObject(Map.of(\"id\", 17L, \"vector\", Arrays.asList(-0.20151825016059233f, -0.905239387635804f, 0.6749305353372479f, -0.7324272081377843f, -0.33007998971889263f), \"color\", \"blue_4521\")),\n    new JSONObject(Map.of(\"id\", 18L, \"vector\", Arrays.asList(0.2432286610792349f, 0.01785636564206139f, -0.651356982731391f, -0.35848148851027895f, -0.7387383128324057f), \"color\", \"orange_2529\")),\n    new JSONObject(Map.of(\"id\", 19L, \"vector\", Arrays.asList(0.055512329053363674f, 0.7100266349039421f, 0.4956956543575197f, 0.24541352586717702f, 0.4209030729923515f), \"color\", \"red_9437\"))\n);\n\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n    .collectionName(\"quick_setup\")\n    .partitionName(\"partitionA\")\n    .build();\n\nclient.createPartition(createPartitionReq);\n\ninsertReq = InsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .partitionName(\"partitionA\")\n    .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 10}\n","// 4. 
Insert some more data into a specific partition\ndata = [\n    {id: 10, vector: [-0.5570353903748935, -0.8997887893201304, -0.7123782431855732, -0.6298990746450119, 0.6699215060604258], color: \"red_1202\"},\n    {id: 11, vector: [0.6319019033373907, 0.6821488267878275, 0.8552303045704168, 0.36929791364943054, -0.14152860714878068], color: \"blue_4150\"},\n    {id: 12, vector: [0.9483947484855766, -0.32294203351925344, 0.9759290319978025, 0.8262982148666174, -0.8351194181285713], color: \"orange_4590\"},\n    {id: 13, vector: [-0.5449109892498731, 0.043511240563786524, -0.25105249484790804, -0.012030655265886425, -0.0010987671273892108], color: \"pink_9619\"},\n    {id: 14, vector: [0.6603339372951424, -0.10866551787442225, -0.9435597754324891, 0.8230244263466688, -0.7986720938400362], color: \"orange_4863\"},\n    {id: 15, vector: [-0.8825129181091456, -0.9204557711667729, -0.935350065513425, 0.5484069690287079, 0.24448151140671204], color: \"orange_7984\"},\n    {id: 16, vector: [0.6285586391568163, 0.5389064528263487, -0.3163366239905099, 0.22036279378888013, 0.15077052220816167], color: \"blue_9010\"},\n    {id: 17, vector: [-0.20151825016059233, -0.905239387635804, 0.6749305353372479, -0.7324272081377843, -0.33007998971889263], color: \"blue_4521\"},\n    {id: 18, vector: [0.2432286610792349, 0.01785636564206139, -0.651356982731391, -0.35848148851027895, -0.7387383128324057], color: \"orange_2529\"},\n    {id: 19, vector: [0.055512329053363674, 0.7100266349039421, 0.4956956543575197, 0.24541352586717702, 0.4209030729923515], color: \"red_9437\"}\n]\n\nawait client.createPartition({\n    collection_name: \"quick_setup\",\n    partition_name: \"partitionA\"\n})\n\nres = await client.insert({\n    collection_name: \"quick_setup\",\n    data: data,\n    partition_name: \"partitionA\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 10\n// \n","# 5. 
Upsert some data\ndata=[\n    {\"id\": 0, \"vector\": [-0.619954382375778, 0.4479436794798608, -0.17493894838751745, -0.4248030059917294, -0.8648452746018911], \"color\": \"black_9898\"},\n    {\"id\": 1, \"vector\": [0.4762662251462588, -0.6942502138717026, -0.4490002642657902, -0.628696575798281, 0.9660395877041965], \"color\": \"red_7319\"},\n    {\"id\": 2, \"vector\": [-0.8864122635045097, 0.9260170474445351, 0.801326976181461, 0.6383943392381306, 0.7563037341572827], \"color\": \"white_6465\"},\n    {\"id\": 3, \"vector\": [0.14594326235891586, -0.3775407299900644, -0.3765479013078812, 0.20612075380355122, 0.4902678929632145], \"color\": \"orange_7580\"},\n    {\"id\": 4, \"vector\": [0.4548498669607359, -0.887610217681605, 0.5655081329910452, 0.19220509387904117, 0.016513983433433577], \"color\": \"red_3314\"},\n    {\"id\": 5, \"vector\": [0.11755001847051827, -0.7295149788999611, 0.2608115847524266, -0.1719167007897875, 0.7417611743754855], \"color\": \"black_9955\"},\n    {\"id\": 6, \"vector\": [0.9363032158314308, 0.030699901477745373, 0.8365910312319647, 0.7823840208444011, 0.2625222076909237], \"color\": \"yellow_2461\"},\n    {\"id\": 7, \"vector\": [0.0754823906014721, -0.6390658668265143, 0.5610517334334937, -0.8986261118798251, 0.9372056764266794], \"color\": \"white_5015\"},\n    {\"id\": 8, \"vector\": [-0.3038434006935904, 0.1279149203380523, 0.503958664270957, -0.2622661156746988, 0.7407627307791929], \"color\": \"purple_6414\"},\n    {\"id\": 9, \"vector\": [-0.7125086947677588, -0.8050968321012257, -0.32608864121785786, 0.3255654958645424, 0.26227968923834233], \"color\": \"brown_7231\"}\n]\n\nres = client.upsert(\n    collection_name='quick_setup',\n    data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"upsert_count\": 10\n# }\n","// 5. 
Upsert some data\ndata = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 0L, \"vector\", Arrays.asList(-0.619954382375778f, 0.4479436794798608f, -0.17493894838751745f, -0.4248030059917294f, -0.8648452746018911f), \"color\", \"black_9898\")),\n    new JSONObject(Map.of(\"id\", 1L, \"vector\", Arrays.asList(0.4762662251462588f, -0.6942502138717026f, -0.4490002642657902f, -0.628696575798281f, 0.9660395877041965f), \"color\", \"red_7319\")),\n    new JSONObject(Map.of(\"id\", 2L, \"vector\", Arrays.asList(-0.8864122635045097f, 0.9260170474445351f, 0.801326976181461f, 0.6383943392381306f, 0.7563037341572827f), \"color\", \"white_6465\")),\n    new JSONObject(Map.of(\"id\", 3L, \"vector\", Arrays.asList(0.14594326235891586f, -0.3775407299900644f, -0.3765479013078812f, 0.20612075380355122f, 0.4902678929632145f), \"color\", \"orange_7580\")),\n    new JSONObject(Map.of(\"id\", 4L, \"vector\", Arrays.asList(0.4548498669607359f, -0.887610217681605f, 0.5655081329910452f, 0.19220509387904117f, 0.016513983433433577f), \"color\", \"red_3314\")),\n    new JSONObject(Map.of(\"id\", 5L, \"vector\", Arrays.asList(0.11755001847051827f, -0.7295149788999611f, 0.2608115847524266f, -0.1719167007897875f, 0.7417611743754855f), \"color\", \"black_9955\")),\n    new JSONObject(Map.of(\"id\", 6L, \"vector\", Arrays.asList(0.9363032158314308f, 0.030699901477745373f, 0.8365910312319647f, 0.7823840208444011f, 0.2625222076909237f), \"color\", \"yellow_2461\")),\n    new JSONObject(Map.of(\"id\", 7L, \"vector\", Arrays.asList(0.0754823906014721f, -0.6390658668265143f, 0.5610517334334937f, -0.8986261118798251f, 0.9372056764266794f), \"color\", \"white_5015\")),\n    new JSONObject(Map.of(\"id\", 8L, \"vector\", Arrays.asList(-0.3038434006935904f, 0.1279149203380523f, 0.503958664270957f, -0.2622661156746988f, 0.7407627307791929f), \"color\", \"purple_6414\")),\n    new JSONObject(Map.of(\"id\", 9L, \"vector\", Arrays.asList(-0.7125086947677588f, -0.8050968321012257f, -0.32608864121785786f, 0.3255654958645424f, 0.26227968923834233f), \"color\", \"brown_7231\"))\n);\n\nUpsertReq upsertReq = UpsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .build();\n\nUpsertResp upsertResp = client.upsert(upsertReq);\n\nSystem.out.println(JSONObject.toJSON(upsertResp));\n\n// Output:\n// {\"upsertCnt\": 10}\n","// 5. 
Upsert some data\ndata = [\n    {id: 0, vector: [-0.619954382375778, 0.4479436794798608, -0.17493894838751745, -0.4248030059917294, -0.8648452746018911], color: \"black_9898\"},\n    {id: 1, vector: [0.4762662251462588, -0.6942502138717026, -0.4490002642657902, -0.628696575798281, 0.9660395877041965], color: \"red_7319\"},\n    {id: 2, vector: [-0.8864122635045097, 0.9260170474445351, 0.801326976181461, 0.6383943392381306, 0.7563037341572827], color: \"white_6465\"},\n    {id: 3, vector: [0.14594326235891586, -0.3775407299900644, -0.3765479013078812, 0.20612075380355122, 0.4902678929632145], color: \"orange_7580\"},\n    {id: 4, vector: [0.4548498669607359, -0.887610217681605, 0.5655081329910452, 0.19220509387904117, 0.016513983433433577], color: \"red_3314\"},\n    {id: 5, vector: [0.11755001847051827, -0.7295149788999611, 0.2608115847524266, -0.1719167007897875, 0.7417611743754855], color: \"black_9955\"},\n    {id: 6, vector: [0.9363032158314308, 0.030699901477745373, 0.8365910312319647, 0.7823840208444011, 0.2625222076909237], color: \"yellow_2461\"},\n    {id: 7, vector: [0.0754823906014721, -0.6390658668265143, 0.5610517334334937, -0.8986261118798251, 0.9372056764266794], color: \"white_5015\"},\n    {id: 8, vector: [-0.3038434006935904, 0.1279149203380523, 0.503958664270957, -0.2622661156746988, 0.7407627307791929], color: \"purple_6414\"},\n    {id: 9, vector: [-0.7125086947677588, -0.8050968321012257, -0.32608864121785786, 0.3255654958645424, 0.26227968923834233], color: \"brown_7231\"}\n]\n\nres = await client.upsert({\n    collection_name: \"quick_setup\",\n    data: data,\n})\n\nconsole.log(res.upsert_cnt)\n\n// Output\n// \n// 10\n// \n","# 6. Upsert data in partitions\ndata=[\n    {\"id\": 10, \"vector\": [0.06998888224297328, 0.8582816610326578, -0.9657938677934292, 0.6527905683627726, -0.8668460657158576], \"color\": \"black_3651\"},\n    {\"id\": 11, \"vector\": [0.6060703043917468, -0.3765080534566074, -0.7710758854987239, 0.36993888322346136, 0.5507513364206531], \"color\": \"grey_2049\"},\n    {\"id\": 12, \"vector\": [-0.9041813104515337, -0.9610546012461163, 0.20033003106083358, 0.11842506351635174, 0.8327356724591011], \"color\": \"blue_6168\"},\n    {\"id\": 13, \"vector\": [0.3202914977909075, -0.7279137773695252, -0.04747830871620273, 0.8266053056909548, 0.8277957187455489], \"color\": \"blue_1672\"},\n    {\"id\": 14, \"vector\": [0.2975811497890859, 0.2946936202691086, 0.5399463833894609, 0.8385334966677529, -0.4450543984655133], \"color\": \"pink_1601\"},\n    {\"id\": 15, \"vector\": [-0.04697464305600074, -0.08509022265734134, 0.9067184632552001, -0.2281912685064822, -0.9747503428652762], \"color\": \"yellow_9925\"},\n    {\"id\": 16, \"vector\": [-0.9363075919673911, -0.8153981031085669, 0.7943039120490902, -0.2093886809842529, 0.0771191335807897], \"color\": \"orange_9872\"},\n    {\"id\": 17, \"vector\": [-0.050451522820639916, 0.18931572752321935, 0.7522886192190488, -0.9071793089474034, 0.6032647330692296], \"color\": \"red_6450\"},\n    {\"id\": 18, \"vector\": [-0.9181544231141592, 0.6700755998126806, -0.014174674636136642, 0.6325780463623432, -0.49662222164032976], \"color\": \"purple_7392\"},\n    {\"id\": 19, \"vector\": [0.11426945899602536, 0.6089190684002581, -0.5842735738352236, 0.057050610092692855, -0.035163433018196244], \"color\": \"pink_4996\"}\n]\n\nres = client.upsert(\n    collection_name=\"quick_setup\",\n    data=data,\n    partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"upsert_count\": 10\n# }\n","import 
io.milvus.v2.service.vector.request.UpsertReq;\nimport io.milvus.v2.service.vector.response.UpsertResp;\n\n// 6. Upsert data in parition\n\ndata = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 10L, \"vector\", Arrays.asList(0.06998888224297328f, 0.8582816610326578f, -0.9657938677934292f, 0.6527905683627726f, -0.8668460657158576f), \"color\", \"black_3651\")),\n    new JSONObject(Map.of(\"id\", 11L, \"vector\", Arrays.asList(0.6060703043917468f, -0.3765080534566074f, -0.7710758854987239f, 0.36993888322346136f, 0.5507513364206531f), \"color\", \"grey_2049\")),\n    new JSONObject(Map.of(\"id\", 12L, \"vector\", Arrays.asList(-0.9041813104515337f, -0.9610546012461163f, 0.20033003106083358f, 0.11842506351635174f, 0.8327356724591011f), \"color\", \"blue_6168\")),\n    new JSONObject(Map.of(\"id\", 13L, \"vector\", Arrays.asList(0.3202914977909075f, -0.7279137773695252f, -0.04747830871620273f, 0.8266053056909548f, 0.8277957187455489f), \"color\", \"blue_1672\")),\n    new JSONObject(Map.of(\"id\", 14L, \"vector\", Arrays.asList(0.2975811497890859f, 0.2946936202691086f, 0.5399463833894609f, 0.8385334966677529f, -0.4450543984655133f), \"color\", \"pink_1601\")),\n    new JSONObject(Map.of(\"id\", 15L, \"vector\", Arrays.asList(-0.04697464305600074f, -0.08509022265734134f, 0.9067184632552001f, -0.2281912685064822f, -0.9747503428652762f), \"color\", \"yellow_9925\")),\n    new JSONObject(Map.of(\"id\", 16L, \"vector\", Arrays.asList(-0.9363075919673911f, -0.8153981031085669f, 0.7943039120490902f, -0.2093886809842529f, 0.0771191335807897f), \"color\", \"orange_9872\")),\n    new JSONObject(Map.of(\"id\", 17L, \"vector\", Arrays.asList(-0.050451522820639916f, 0.18931572752321935f, 0.7522886192190488f, -0.9071793089474034f, 0.6032647330692296f), \"color\", \"red_6450\")),\n    new JSONObject(Map.of(\"id\", 18L, \"vector\", Arrays.asList(-0.9181544231141592f, 0.6700755998126806f, -0.014174674636136642f, 0.6325780463623432f, -0.49662222164032976f), \"color\", \"purple_7392\")),\n    new JSONObject(Map.of(\"id\", 19L, \"vector\", Arrays.asList(0.11426945899602536f, 0.6089190684002581f, -0.5842735738352236f, 0.057050610092692855f, -0.035163433018196244f), \"color\", \"pink_4996\"))\n);\n\nupsertReq = UpsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .partitionName(\"partitionA\")\n    .build();\n\nupsertResp = client.upsert(upsertReq);\n\nSystem.out.println(JSONObject.toJSON(upsertResp));\n\n// Output:\n// {\"upsertCnt\": 10}\n","// 6. 
Upsert data in partitions\ndata = [\n    {id: 10, vector: [0.06998888224297328, 0.8582816610326578, -0.9657938677934292, 0.6527905683627726, -0.8668460657158576], color: \"black_3651\"},\n    {id: 11, vector: [0.6060703043917468, -0.3765080534566074, -0.7710758854987239, 0.36993888322346136, 0.5507513364206531], color: \"grey_2049\"},\n    {id: 12, vector: [-0.9041813104515337, -0.9610546012461163, 0.20033003106083358, 0.11842506351635174, 0.8327356724591011], color: \"blue_6168\"},\n    {id: 13, vector: [0.3202914977909075, -0.7279137773695252, -0.04747830871620273, 0.8266053056909548, 0.8277957187455489], color: \"blue_1672\"},\n    {id: 14, vector: [0.2975811497890859, 0.2946936202691086, 0.5399463833894609, 0.8385334966677529, -0.4450543984655133], color: \"pink_1601\"},\n    {id: 15, vector: [-0.04697464305600074, -0.08509022265734134, 0.9067184632552001, -0.2281912685064822, -0.9747503428652762], color: \"yellow_9925\"},\n    {id: 16, vector: [-0.9363075919673911, -0.8153981031085669, 0.7943039120490902, -0.2093886809842529, 0.0771191335807897], color: \"orange_9872\"},\n    {id: 17, vector: [-0.050451522820639916, 0.18931572752321935, 0.7522886192190488, -0.9071793089474034, 0.6032647330692296], color: \"red_6450\"},\n    {id: 18, vector: [-0.9181544231141592, 0.6700755998126806, -0.014174674636136642, 0.6325780463623432, -0.49662222164032976], color: \"purple_7392\"},\n    {id: 19, vector: [0.11426945899602536, 0.6089190684002581, -0.5842735738352236, 0.057050610092692855, -0.035163433018196244], color: \"pink_4996\"}\n]\n\nres = await client.upsert({\n    collection_name: \"quick_setup\",\n    data: data,\n    partition_name: \"partitionA\"\n})\n\nconsole.log(res.upsert_cnt)\n\n// Output\n// \n// 10\n// \n","# 7. Delete entities\nres = client.delete(\n    collection_name=\"quick_setup\",\n    filter=\"id in [4,5,6]\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"delete_count\": 3\n# }\n","import io.milvus.v2.service.vector.request.DeleteReq;\nimport io.milvus.v2.service.vector.response.DeleteResp;\n\n\n// 7. Delete entities\n\nDeleteReq deleteReq = DeleteReq.builder()\n    .collectionName(\"quick_setup\")\n    .filter(\"id in [4, 5, 6]\")\n    .build();\n\nDeleteResp deleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 3}\n","// 7. 
Delete entities\nres = await client.delete({\n    collection_name: \"quick_setup\",\n    filter: \"id in [4,5,6]\"\n})\n\nconsole.log(res.delete_cnt)\n\n// Output\n// \n// 3\n// \n","res = client.delete(\n    collection_name=\"quick_setup\",\n    ids=[18, 19],\n    partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"delete_count\": 2\n# }\n","deleteReq = DeleteReq.builder()\n    .collectionName(\"quick_setup\")\n    .ids(Arrays.asList(18L, 19L))\n    .partitionName(\"partitionA\")\n    .build();\n\ndeleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 2}\n","res = await client.delete({\n    collection_name: \"quick_setup\",\n    ids: [18, 19],\n    partition_name: \"partitionA\"\n})\n\nconsole.log(res.delete_cnt)\n\n// Output\n// \n// 2\n// \n","res = client.delete(\ncollection_name='quick_setup',\npartition_name='partitionA',\nfilter='color like \"blue%\"'\n)\n\nprint(\"Entities deleted from partitionA: \", res['delete_count'])\n\n# Output:\n# Entities deleted from partitionA:  3\n","deleteReq = DeleteReq.builder()\n    .collectionName(\"quick_setup\")\n    .filter('color like \"blue%\"')\n    .partitionName(\"partitionA\")\n    .build();\n\ndeleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 3}\n","res = await client.delete({\ncollection_name: \"quick_setup\",\npartition_name: \"partitionA\",\nfilter: 'color like \"blue%\"'\n})\n\nconsole.log(\"Entities deleted from partitionA: \" + res.delete_cnt)\n\n// Output:\n// Entities deleted from partitionA: 3\n"],"headingContent":"","anchorList":[{"label":"Inserimento, upsert e cancellazione","href":"Insert-Upsert--Delete","type":1,"isActive":false},{"label":"Prima di iniziare","href":"Before-you-start","type":2,"isActive":false},{"label":"Panoramica","href":"Overview","type":2,"isActive":false},{"label":"Preparazione","href":"Preparations","type":2,"isActive":false},{"label":"Inserire le entità","href":"Insert-entities","type":2,"isActive":false},{"label":"Aggiungere entità","href":"Upsert-entities","type":2,"isActive":false},{"label":"Eliminare le entità","href":"Delete-entities","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["from pymilvus import MilvusClient\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n    uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n    collection_name=\"quick_setup\",\n    dimension=5,\n    metric_type=\"IP\"\n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n    .uri(CLUSTER_ENDPOINT)\n    .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n    .collectionName(\"quick_setup\")\n    .dimension(5)\n    .metricType(\"IP\")\n    .build();\n\nclient.createCollection(quickSetupReq);\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n    collection_name: \"quick_setup\",\n    dimension: 5,\n    metric_type: \"IP\"\n});  \n","# 3. Insert some data\ndata=[\n    {\"id\": 0, \"vector\": [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592], \"color\": \"pink_8682\"},\n    {\"id\": 1, \"vector\": [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104], \"color\": \"red_7025\"},\n    {\"id\": 2, \"vector\": [0.43742130801983836, -0.5597502546264526, 0.6457887650909682, 0.7894058910881185, 0.20785793220625592], \"color\": \"orange_6781\"},\n    {\"id\": 3, \"vector\": [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345], \"color\": \"pink_9298\"},\n    {\"id\": 4, \"vector\": [0.4452349528804562, -0.8757026943054742, 0.8220779437047674, 0.46406290649483184, 0.30337481143159106], \"color\": \"red_4794\"},\n    {\"id\": 5, \"vector\": [0.985825131989184, -0.8144651566660419, 0.6299267002202009, 0.1206906911183383, -0.1446277761879955], \"color\": \"yellow_4222\"},\n    {\"id\": 6, \"vector\": [0.8371977790571115, -0.015764369584852833, -0.31062937026679327, -0.562666951622192, -0.8984947637863987], \"color\": \"red_9392\"},\n    {\"id\": 7, \"vector\": [-0.33445148015177995, -0.2567135004164067, 0.8987539745369246, 0.9402995886420709, 0.5378064918413052], \"color\": \"grey_8510\"},\n    {\"id\": 8, \"vector\": [0.39524717779832685, 0.4000257286739164, -0.5890507376891594, -0.8650502298996872, -0.6140360785406336], \"color\": \"white_9381\"},\n    {\"id\": 9, \"vector\": [0.5718280481994695, 0.24070317428066512, -0.3737913482606834, -0.06726932177492717, -0.6980531615588608], \"color\": \"purple_4976\"}\n]\n\nres = client.insert(\n    collection_name=\"quick_setup\",\n    data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"insert_count\": 10,\n#     \"ids\": [\n#         0,\n#         1,\n#         2,\n#         3,\n#         4,\n#         5,\n#         6,\n#         7,\n#         8,\n#         9\n#     ]\n# }\n","import java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp;\n\n// 3. 
Insert some data\nList data = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 0L, \"vector\", Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f), \"color\", \"pink_8682\")),\n    new JSONObject(Map.of(\"id\", 1L, \"vector\", Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f), \"color\", \"red_7025\")),\n    new JSONObject(Map.of(\"id\", 2L, \"vector\", Arrays.asList(0.43742130801983836f, -0.5597502546264526f, 0.6457887650909682f, 0.7894058910881185f, 0.20785793220625592f), \"color\", \"orange_6781\")),\n    new JSONObject(Map.of(\"id\", 3L, \"vector\", Arrays.asList(0.3172005263489739f, 0.9719044792798428f, -0.36981146090600725f, -0.4860894583077995f, 0.95791889146345f), \"color\", \"pink_9298\")),\n    new JSONObject(Map.of(\"id\", 4L, \"vector\", Arrays.asList(0.4452349528804562f, -0.8757026943054742f, 0.8220779437047674f, 0.46406290649483184f, 0.30337481143159106f), \"color\", \"red_4794\")),\n    new JSONObject(Map.of(\"id\", 5L, \"vector\", Arrays.asList(0.985825131989184f, -0.8144651566660419f, 0.6299267002202009f, 0.1206906911183383f, -0.1446277761879955f), \"color\", \"yellow_4222\")),\n    new JSONObject(Map.of(\"id\", 6L, \"vector\", Arrays.asList(0.8371977790571115f, -0.015764369584852833f, -0.31062937026679327f, -0.562666951622192f, -0.8984947637863987f), \"color\", \"red_9392\")),\n    new JSONObject(Map.of(\"id\", 7L, \"vector\", Arrays.asList(-0.33445148015177995f, -0.2567135004164067f, 0.8987539745369246f, 0.9402995886420709f, 0.5378064918413052f), \"color\", \"grey_8510\")),\n    new JSONObject(Map.of(\"id\", 8L, \"vector\", Arrays.asList(0.39524717779832685f, 0.4000257286739164f, -0.5890507376891594f, -0.8650502298996872f, -0.6140360785406336f), \"color\", \"white_9381\")),\n    new JSONObject(Map.of(\"id\", 9L, \"vector\", Arrays.asList(0.5718280481994695f, 0.24070317428066512f, -0.3737913482606834f, -0.06726932177492717f, -0.6980531615588608f), \"color\", \"purple_4976\"))\n);\n\nInsertReq insertReq = InsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 10}\n","// 3. 
Insert some data\n\nvar data = [\n    {id: 0, vector: [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592], color: \"pink_8682\"},\n    {id: 1, vector: [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104], color: \"red_7025\"},\n    {id: 2, vector: [0.43742130801983836, -0.5597502546264526, 0.6457887650909682, 0.7894058910881185, 0.20785793220625592], color: \"orange_6781\"},\n    {id: 3, vector: [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345], color: \"pink_9298\"},\n    {id: 4, vector: [0.4452349528804562, -0.8757026943054742, 0.8220779437047674, 0.46406290649483184, 0.30337481143159106], color: \"red_4794\"},\n    {id: 5, vector: [0.985825131989184, -0.8144651566660419, 0.6299267002202009, 0.1206906911183383, -0.1446277761879955], color: \"yellow_4222\"},\n    {id: 6, vector: [0.8371977790571115, -0.015764369584852833, -0.31062937026679327, -0.562666951622192, -0.8984947637863987], color: \"red_9392\"},\n    {id: 7, vector: [-0.33445148015177995, -0.2567135004164067, 0.8987539745369246, 0.9402995886420709, 0.5378064918413052], color: \"grey_8510\"},\n    {id: 8, vector: [0.39524717779832685, 0.4000257286739164, -0.5890507376891594, -0.8650502298996872, -0.6140360785406336], color: \"white_9381\"},\n    {id: 9, vector: [0.5718280481994695, 0.24070317428066512, -0.3737913482606834, -0.06726932177492717, -0.6980531615588608], color: \"purple_4976\"}        \n]\n\nvar res = await client.insert({\n    collection_name: \"quick_setup\",\n    data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 10\n// \n","# 4. Insert some more data into a specific partition\ndata=[\n    {\"id\": 10, \"vector\": [-0.5570353903748935, -0.8997887893201304, -0.7123782431855732, -0.6298990746450119, 0.6699215060604258], \"color\": \"red_1202\"},\n    {\"id\": 11, \"vector\": [0.6319019033373907, 0.6821488267878275, 0.8552303045704168, 0.36929791364943054, -0.14152860714878068], \"color\": \"blue_4150\"},\n    {\"id\": 12, \"vector\": [0.9483947484855766, -0.32294203351925344, 0.9759290319978025, 0.8262982148666174, -0.8351194181285713], \"color\": \"orange_4590\"},\n    {\"id\": 13, \"vector\": [-0.5449109892498731, 0.043511240563786524, -0.25105249484790804, -0.012030655265886425, -0.0010987671273892108], \"color\": \"pink_9619\"},\n    {\"id\": 14, \"vector\": [0.6603339372951424, -0.10866551787442225, -0.9435597754324891, 0.8230244263466688, -0.7986720938400362], \"color\": \"orange_4863\"},\n    {\"id\": 15, \"vector\": [-0.8825129181091456, -0.9204557711667729, -0.935350065513425, 0.5484069690287079, 0.24448151140671204], \"color\": \"orange_7984\"},\n    {\"id\": 16, \"vector\": [0.6285586391568163, 0.5389064528263487, -0.3163366239905099, 0.22036279378888013, 0.15077052220816167], \"color\": \"blue_9010\"},\n    {\"id\": 17, \"vector\": [-0.20151825016059233, -0.905239387635804, 0.6749305353372479, -0.7324272081377843, -0.33007998971889263], \"color\": \"blue_4521\"},\n    {\"id\": 18, \"vector\": [0.2432286610792349, 0.01785636564206139, -0.651356982731391, -0.35848148851027895, -0.7387383128324057], \"color\": \"orange_2529\"},\n    {\"id\": 19, \"vector\": [0.055512329053363674, 0.7100266349039421, 0.4956956543575197, 0.24541352586717702, 0.4209030729923515], \"color\": \"red_9437\"}\n]\n\nclient.create_partition(\n    collection_name=\"quick_setup\",\n    partition_name=\"partitionA\"\n)\n\nres = client.insert(\n    
collection_name=\"quick_setup\",\n    data=data,\n    partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"insert_count\": 10,\n#     \"ids\": [\n#         10,\n#         11,\n#         12,\n#         13,\n#         14,\n#         15,\n#         16,\n#         17,\n#         18,\n#         19\n#     ]\n# }\n","// 4. Insert some more data into a specific partition\ndata = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 10L, \"vector\", Arrays.asList(-0.5570353903748935f, -0.8997887893201304f, -0.7123782431855732f, -0.6298990746450119f, 0.6699215060604258f), \"color\", \"red_1202\")),\n    new JSONObject(Map.of(\"id\", 11L, \"vector\", Arrays.asList(0.6319019033373907f, 0.6821488267878275f, 0.8552303045704168f, 0.36929791364943054f, -0.14152860714878068f), \"color\", \"blue_4150\")),\n    new JSONObject(Map.of(\"id\", 12L, \"vector\", Arrays.asList(0.9483947484855766f, -0.32294203351925344f, 0.9759290319978025f, 0.8262982148666174f, -0.8351194181285713f), \"color\", \"orange_4590\")),\n    new JSONObject(Map.of(\"id\", 13L, \"vector\", Arrays.asList(-0.5449109892498731f, 0.043511240563786524f, -0.25105249484790804f, -0.012030655265886425f, -0.0010987671273892108f), \"color\", \"pink_9619\")),\n    new JSONObject(Map.of(\"id\", 14L, \"vector\", Arrays.asList(0.6603339372951424f, -0.10866551787442225f, -0.9435597754324891f, 0.8230244263466688f, -0.7986720938400362f), \"color\", \"orange_4863\")),\n    new JSONObject(Map.of(\"id\", 15L, \"vector\", Arrays.asList(-0.8825129181091456f, -0.9204557711667729f, -0.935350065513425f, 0.5484069690287079f, 0.24448151140671204f), \"color\", \"orange_7984\")),\n    new JSONObject(Map.of(\"id\", 16L, \"vector\", Arrays.asList(0.6285586391568163f, 0.5389064528263487f, -0.3163366239905099f, 0.22036279378888013f, 0.15077052220816167f), \"color\", \"blue_9010\")),\n    new JSONObject(Map.of(\"id\", 17L, \"vector\", Arrays.asList(-0.20151825016059233f, -0.905239387635804f, 0.6749305353372479f, -0.7324272081377843f, -0.33007998971889263f), \"color\", \"blue_4521\")),\n    new JSONObject(Map.of(\"id\", 18L, \"vector\", Arrays.asList(0.2432286610792349f, 0.01785636564206139f, -0.651356982731391f, -0.35848148851027895f, -0.7387383128324057f), \"color\", \"orange_2529\")),\n    new JSONObject(Map.of(\"id\", 19L, \"vector\", Arrays.asList(0.055512329053363674f, 0.7100266349039421f, 0.4956956543575197f, 0.24541352586717702f, 0.4209030729923515f), \"color\", \"red_9437\"))\n);\n\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n    .collectionName(\"quick_setup\")\n    .partitionName(\"partitionA\")\n    .build();\n\nclient.createPartition(createPartitionReq);\n\ninsertReq = InsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .partitionName(\"partitionA\")\n    .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 10}\n","// 4. 
Insert some more data into a specific partition\ndata = [\n    {id: 10, vector: [-0.5570353903748935, -0.8997887893201304, -0.7123782431855732, -0.6298990746450119, 0.6699215060604258], color: \"red_1202\"},\n    {id: 11, vector: [0.6319019033373907, 0.6821488267878275, 0.8552303045704168, 0.36929791364943054, -0.14152860714878068], color: \"blue_4150\"},\n    {id: 12, vector: [0.9483947484855766, -0.32294203351925344, 0.9759290319978025, 0.8262982148666174, -0.8351194181285713], color: \"orange_4590\"},\n    {id: 13, vector: [-0.5449109892498731, 0.043511240563786524, -0.25105249484790804, -0.012030655265886425, -0.0010987671273892108], color: \"pink_9619\"},\n    {id: 14, vector: [0.6603339372951424, -0.10866551787442225, -0.9435597754324891, 0.8230244263466688, -0.7986720938400362], color: \"orange_4863\"},\n    {id: 15, vector: [-0.8825129181091456, -0.9204557711667729, -0.935350065513425, 0.5484069690287079, 0.24448151140671204], color: \"orange_7984\"},\n    {id: 16, vector: [0.6285586391568163, 0.5389064528263487, -0.3163366239905099, 0.22036279378888013, 0.15077052220816167], color: \"blue_9010\"},\n    {id: 17, vector: [-0.20151825016059233, -0.905239387635804, 0.6749305353372479, -0.7324272081377843, -0.33007998971889263], color: \"blue_4521\"},\n    {id: 18, vector: [0.2432286610792349, 0.01785636564206139, -0.651356982731391, -0.35848148851027895, -0.7387383128324057], color: \"orange_2529\"},\n    {id: 19, vector: [0.055512329053363674, 0.7100266349039421, 0.4956956543575197, 0.24541352586717702, 0.4209030729923515], color: \"red_9437\"}\n]\n\nawait client.createPartition({\n    collection_name: \"quick_setup\",\n    partition_name: \"partitionA\"\n})\n\nres = await client.insert({\n    collection_name: \"quick_setup\",\n    data: data,\n    partition_name: \"partitionA\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 10\n// \n","# 5. 
Upsert some data\ndata=[\n    {\"id\": 0, \"vector\": [-0.619954382375778, 0.4479436794798608, -0.17493894838751745, -0.4248030059917294, -0.8648452746018911], \"color\": \"black_9898\"},\n    {\"id\": 1, \"vector\": [0.4762662251462588, -0.6942502138717026, -0.4490002642657902, -0.628696575798281, 0.9660395877041965], \"color\": \"red_7319\"},\n    {\"id\": 2, \"vector\": [-0.8864122635045097, 0.9260170474445351, 0.801326976181461, 0.6383943392381306, 0.7563037341572827], \"color\": \"white_6465\"},\n    {\"id\": 3, \"vector\": [0.14594326235891586, -0.3775407299900644, -0.3765479013078812, 0.20612075380355122, 0.4902678929632145], \"color\": \"orange_7580\"},\n    {\"id\": 4, \"vector\": [0.4548498669607359, -0.887610217681605, 0.5655081329910452, 0.19220509387904117, 0.016513983433433577], \"color\": \"red_3314\"},\n    {\"id\": 5, \"vector\": [0.11755001847051827, -0.7295149788999611, 0.2608115847524266, -0.1719167007897875, 0.7417611743754855], \"color\": \"black_9955\"},\n    {\"id\": 6, \"vector\": [0.9363032158314308, 0.030699901477745373, 0.8365910312319647, 0.7823840208444011, 0.2625222076909237], \"color\": \"yellow_2461\"},\n    {\"id\": 7, \"vector\": [0.0754823906014721, -0.6390658668265143, 0.5610517334334937, -0.8986261118798251, 0.9372056764266794], \"color\": \"white_5015\"},\n    {\"id\": 8, \"vector\": [-0.3038434006935904, 0.1279149203380523, 0.503958664270957, -0.2622661156746988, 0.7407627307791929], \"color\": \"purple_6414\"},\n    {\"id\": 9, \"vector\": [-0.7125086947677588, -0.8050968321012257, -0.32608864121785786, 0.3255654958645424, 0.26227968923834233], \"color\": \"brown_7231\"}\n]\n\nres = client.upsert(\n    collection_name='quick_setup',\n    data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"upsert_count\": 10\n# }\n","// 5. 
Upsert some data\ndata = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 0L, \"vector\", Arrays.asList(-0.619954382375778f, 0.4479436794798608f, -0.17493894838751745f, -0.4248030059917294f, -0.8648452746018911f), \"color\", \"black_9898\")),\n    new JSONObject(Map.of(\"id\", 1L, \"vector\", Arrays.asList(0.4762662251462588f, -0.6942502138717026f, -0.4490002642657902f, -0.628696575798281f, 0.9660395877041965f), \"color\", \"red_7319\")),\n    new JSONObject(Map.of(\"id\", 2L, \"vector\", Arrays.asList(-0.8864122635045097f, 0.9260170474445351f, 0.801326976181461f, 0.6383943392381306f, 0.7563037341572827f), \"color\", \"white_6465\")),\n    new JSONObject(Map.of(\"id\", 3L, \"vector\", Arrays.asList(0.14594326235891586f, -0.3775407299900644f, -0.3765479013078812f, 0.20612075380355122f, 0.4902678929632145f), \"color\", \"orange_7580\")),\n    new JSONObject(Map.of(\"id\", 4L, \"vector\", Arrays.asList(0.4548498669607359f, -0.887610217681605f, 0.5655081329910452f, 0.19220509387904117f, 0.016513983433433577f), \"color\", \"red_3314\")),\n    new JSONObject(Map.of(\"id\", 5L, \"vector\", Arrays.asList(0.11755001847051827f, -0.7295149788999611f, 0.2608115847524266f, -0.1719167007897875f, 0.7417611743754855f), \"color\", \"black_9955\")),\n    new JSONObject(Map.of(\"id\", 6L, \"vector\", Arrays.asList(0.9363032158314308f, 0.030699901477745373f, 0.8365910312319647f, 0.7823840208444011f, 0.2625222076909237f), \"color\", \"yellow_2461\")),\n    new JSONObject(Map.of(\"id\", 7L, \"vector\", Arrays.asList(0.0754823906014721f, -0.6390658668265143f, 0.5610517334334937f, -0.8986261118798251f, 0.9372056764266794f), \"color\", \"white_5015\")),\n    new JSONObject(Map.of(\"id\", 8L, \"vector\", Arrays.asList(-0.3038434006935904f, 0.1279149203380523f, 0.503958664270957f, -0.2622661156746988f, 0.7407627307791929f), \"color\", \"purple_6414\")),\n    new JSONObject(Map.of(\"id\", 9L, \"vector\", Arrays.asList(-0.7125086947677588f, -0.8050968321012257f, -0.32608864121785786f, 0.3255654958645424f, 0.26227968923834233f), \"color\", \"brown_7231\"))\n);\n\nUpsertReq upsertReq = UpsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .build();\n\nUpsertResp upsertResp = client.upsert(upsertReq);\n\nSystem.out.println(JSONObject.toJSON(upsertResp));\n\n// Output:\n// {\"upsertCnt\": 10}\n","// 5. 
Upsert some data\ndata = [\n    {id: 0, vector: [-0.619954382375778, 0.4479436794798608, -0.17493894838751745, -0.4248030059917294, -0.8648452746018911], color: \"black_9898\"},\n    {id: 1, vector: [0.4762662251462588, -0.6942502138717026, -0.4490002642657902, -0.628696575798281, 0.9660395877041965], color: \"red_7319\"},\n    {id: 2, vector: [-0.8864122635045097, 0.9260170474445351, 0.801326976181461, 0.6383943392381306, 0.7563037341572827], color: \"white_6465\"},\n    {id: 3, vector: [0.14594326235891586, -0.3775407299900644, -0.3765479013078812, 0.20612075380355122, 0.4902678929632145], color: \"orange_7580\"},\n    {id: 4, vector: [0.4548498669607359, -0.887610217681605, 0.5655081329910452, 0.19220509387904117, 0.016513983433433577], color: \"red_3314\"},\n    {id: 5, vector: [0.11755001847051827, -0.7295149788999611, 0.2608115847524266, -0.1719167007897875, 0.7417611743754855], color: \"black_9955\"},\n    {id: 6, vector: [0.9363032158314308, 0.030699901477745373, 0.8365910312319647, 0.7823840208444011, 0.2625222076909237], color: \"yellow_2461\"},\n    {id: 7, vector: [0.0754823906014721, -0.6390658668265143, 0.5610517334334937, -0.8986261118798251, 0.9372056764266794], color: \"white_5015\"},\n    {id: 8, vector: [-0.3038434006935904, 0.1279149203380523, 0.503958664270957, -0.2622661156746988, 0.7407627307791929], color: \"purple_6414\"},\n    {id: 9, vector: [-0.7125086947677588, -0.8050968321012257, -0.32608864121785786, 0.3255654958645424, 0.26227968923834233], color: \"brown_7231\"}\n]\n\nres = await client.upsert({\n    collection_name: \"quick_setup\",\n    data: data,\n})\n\nconsole.log(res.upsert_cnt)\n\n// Output\n// \n// 10\n// \n","# 6. Upsert data in partitions\ndata=[\n    {\"id\": 10, \"vector\": [0.06998888224297328, 0.8582816610326578, -0.9657938677934292, 0.6527905683627726, -0.8668460657158576], \"color\": \"black_3651\"},\n    {\"id\": 11, \"vector\": [0.6060703043917468, -0.3765080534566074, -0.7710758854987239, 0.36993888322346136, 0.5507513364206531], \"color\": \"grey_2049\"},\n    {\"id\": 12, \"vector\": [-0.9041813104515337, -0.9610546012461163, 0.20033003106083358, 0.11842506351635174, 0.8327356724591011], \"color\": \"blue_6168\"},\n    {\"id\": 13, \"vector\": [0.3202914977909075, -0.7279137773695252, -0.04747830871620273, 0.8266053056909548, 0.8277957187455489], \"color\": \"blue_1672\"},\n    {\"id\": 14, \"vector\": [0.2975811497890859, 0.2946936202691086, 0.5399463833894609, 0.8385334966677529, -0.4450543984655133], \"color\": \"pink_1601\"},\n    {\"id\": 15, \"vector\": [-0.04697464305600074, -0.08509022265734134, 0.9067184632552001, -0.2281912685064822, -0.9747503428652762], \"color\": \"yellow_9925\"},\n    {\"id\": 16, \"vector\": [-0.9363075919673911, -0.8153981031085669, 0.7943039120490902, -0.2093886809842529, 0.0771191335807897], \"color\": \"orange_9872\"},\n    {\"id\": 17, \"vector\": [-0.050451522820639916, 0.18931572752321935, 0.7522886192190488, -0.9071793089474034, 0.6032647330692296], \"color\": \"red_6450\"},\n    {\"id\": 18, \"vector\": [-0.9181544231141592, 0.6700755998126806, -0.014174674636136642, 0.6325780463623432, -0.49662222164032976], \"color\": \"purple_7392\"},\n    {\"id\": 19, \"vector\": [0.11426945899602536, 0.6089190684002581, -0.5842735738352236, 0.057050610092692855, -0.035163433018196244], \"color\": \"pink_4996\"}\n]\n\nres = client.upsert(\n    collection_name=\"quick_setup\",\n    data=data,\n    partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"upsert_count\": 10\n# }\n","import 
io.milvus.v2.service.vector.request.UpsertReq;\nimport io.milvus.v2.service.vector.response.UpsertResp;\n\n// 6. Upsert data in parition\n\ndata = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 10L, \"vector\", Arrays.asList(0.06998888224297328f, 0.8582816610326578f, -0.9657938677934292f, 0.6527905683627726f, -0.8668460657158576f), \"color\", \"black_3651\")),\n    new JSONObject(Map.of(\"id\", 11L, \"vector\", Arrays.asList(0.6060703043917468f, -0.3765080534566074f, -0.7710758854987239f, 0.36993888322346136f, 0.5507513364206531f), \"color\", \"grey_2049\")),\n    new JSONObject(Map.of(\"id\", 12L, \"vector\", Arrays.asList(-0.9041813104515337f, -0.9610546012461163f, 0.20033003106083358f, 0.11842506351635174f, 0.8327356724591011f), \"color\", \"blue_6168\")),\n    new JSONObject(Map.of(\"id\", 13L, \"vector\", Arrays.asList(0.3202914977909075f, -0.7279137773695252f, -0.04747830871620273f, 0.8266053056909548f, 0.8277957187455489f), \"color\", \"blue_1672\")),\n    new JSONObject(Map.of(\"id\", 14L, \"vector\", Arrays.asList(0.2975811497890859f, 0.2946936202691086f, 0.5399463833894609f, 0.8385334966677529f, -0.4450543984655133f), \"color\", \"pink_1601\")),\n    new JSONObject(Map.of(\"id\", 15L, \"vector\", Arrays.asList(-0.04697464305600074f, -0.08509022265734134f, 0.9067184632552001f, -0.2281912685064822f, -0.9747503428652762f), \"color\", \"yellow_9925\")),\n    new JSONObject(Map.of(\"id\", 16L, \"vector\", Arrays.asList(-0.9363075919673911f, -0.8153981031085669f, 0.7943039120490902f, -0.2093886809842529f, 0.0771191335807897f), \"color\", \"orange_9872\")),\n    new JSONObject(Map.of(\"id\", 17L, \"vector\", Arrays.asList(-0.050451522820639916f, 0.18931572752321935f, 0.7522886192190488f, -0.9071793089474034f, 0.6032647330692296f), \"color\", \"red_6450\")),\n    new JSONObject(Map.of(\"id\", 18L, \"vector\", Arrays.asList(-0.9181544231141592f, 0.6700755998126806f, -0.014174674636136642f, 0.6325780463623432f, -0.49662222164032976f), \"color\", \"purple_7392\")),\n    new JSONObject(Map.of(\"id\", 19L, \"vector\", Arrays.asList(0.11426945899602536f, 0.6089190684002581f, -0.5842735738352236f, 0.057050610092692855f, -0.035163433018196244f), \"color\", \"pink_4996\"))\n);\n\nupsertReq = UpsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .partitionName(\"partitionA\")\n    .build();\n\nupsertResp = client.upsert(upsertReq);\n\nSystem.out.println(JSONObject.toJSON(upsertResp));\n\n// Output:\n// {\"upsertCnt\": 10}\n","// 6. 
Upsert data in partitions\ndata = [\n    {id: 10, vector: [0.06998888224297328, 0.8582816610326578, -0.9657938677934292, 0.6527905683627726, -0.8668460657158576], color: \"black_3651\"},\n    {id: 11, vector: [0.6060703043917468, -0.3765080534566074, -0.7710758854987239, 0.36993888322346136, 0.5507513364206531], color: \"grey_2049\"},\n    {id: 12, vector: [-0.9041813104515337, -0.9610546012461163, 0.20033003106083358, 0.11842506351635174, 0.8327356724591011], color: \"blue_6168\"},\n    {id: 13, vector: [0.3202914977909075, -0.7279137773695252, -0.04747830871620273, 0.8266053056909548, 0.8277957187455489], color: \"blue_1672\"},\n    {id: 14, vector: [0.2975811497890859, 0.2946936202691086, 0.5399463833894609, 0.8385334966677529, -0.4450543984655133], color: \"pink_1601\"},\n    {id: 15, vector: [-0.04697464305600074, -0.08509022265734134, 0.9067184632552001, -0.2281912685064822, -0.9747503428652762], color: \"yellow_9925\"},\n    {id: 16, vector: [-0.9363075919673911, -0.8153981031085669, 0.7943039120490902, -0.2093886809842529, 0.0771191335807897], color: \"orange_9872\"},\n    {id: 17, vector: [-0.050451522820639916, 0.18931572752321935, 0.7522886192190488, -0.9071793089474034, 0.6032647330692296], color: \"red_6450\"},\n    {id: 18, vector: [-0.9181544231141592, 0.6700755998126806, -0.014174674636136642, 0.6325780463623432, -0.49662222164032976], color: \"purple_7392\"},\n    {id: 19, vector: [0.11426945899602536, 0.6089190684002581, -0.5842735738352236, 0.057050610092692855, -0.035163433018196244], color: \"pink_4996\"}\n]\n\nres = await client.upsert({\n    collection_name: \"quick_setup\",\n    data: data,\n    partition_name: \"partitionA\"\n})\n\nconsole.log(res.upsert_cnt)\n\n// Output\n// \n// 10\n// \n","# 7. Delete entities\nres = client.delete(\n    collection_name=\"quick_setup\",\n    filter=\"id in [4,5,6]\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"delete_count\": 3\n# }\n","import io.milvus.v2.service.vector.request.DeleteReq;\nimport io.milvus.v2.service.vector.response.DeleteResp;\n\n\n// 7. Delete entities\n\nDeleteReq deleteReq = DeleteReq.builder()\n    .collectionName(\"quick_setup\")\n    .filter(\"id in [4, 5, 6]\")\n    .build();\n\nDeleteResp deleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 3}\n","// 7. 
Delete entities\nres = await client.delete({\n    collection_name: \"quick_setup\",\n    filter: \"id in [4,5,6]\"\n})\n\nconsole.log(res.delete_cnt)\n\n// Output\n// \n// 3\n// \n","res = client.delete(\n    collection_name=\"quick_setup\",\n    ids=[18, 19],\n    partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"delete_count\": 2\n# }\n","deleteReq = DeleteReq.builder()\n    .collectionName(\"quick_setup\")\n    .ids(Arrays.asList(18L, 19L))\n    .partitionName(\"partitionA\")\n    .build();\n\ndeleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 2}\n","res = await client.delete({\n    collection_name: \"quick_setup\",\n    ids: [18, 19],\n    partition_name: \"partitionA\"\n})\n\nconsole.log(res.delete_cnt)\n\n// Output\n// \n// 2\n// \n","res = client.delete(\ncollection_name='quick_setup',\npartition_name='partitionA',\nfilter='color like \"blue%\"'\n)\n\nprint(\"Entities deleted from partitionA: \", res['delete_count'])\n\n# Output:\n# Entities deleted from partitionA:  3\n","deleteReq = DeleteReq.builder()\n    .collectionName(\"quick_setup\")\n    .filter('color like \"blue%\"')\n    .partitionName(\"partitionA\")\n    .build();\n\ndeleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 3}\n","res = await client.delete({\ncollection_name: \"quick_setup\",\npartition_name: \"partitionA\",\nfilter: 'color like \"blue%\"'\n})\n\nconsole.log(\"Entities deleted from partitionA: \" + res.delete_cnt)\n\n// Output:\n// Entities deleted from partitionA: 3\n"],"headingContent":"Insert, Upsert & Delete","anchorList":[{"label":"Inserimento, upsert e cancellazione","href":"Insert-Upsert--Delete","type":1,"isActive":false},{"label":"Prima di iniziare","href":"Before-you-start","type":2,"isActive":false},{"label":"Panoramica","href":"Overview","type":2,"isActive":false},{"label":"Preparazione","href":"Preparations","type":2,"isActive":false},{"label":"Inserire le entità","href":"Insert-entities","type":2,"isActive":false},{"label":"Aggiungere entità","href":"Upsert-entities","type":2,"isActive":false},{"label":"Eliminare le entità","href":"Delete-entities","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/it/userGuide/insert-update-delete.md b/localization/v2.4.x/site/it/userGuide/insert-update-delete.md
    index a8085bfcb..501930152 100644
    --- a/localization/v2.4.x/site/it/userGuide/insert-update-delete.md
    +++ b/localization/v2.4.x/site/it/userGuide/insert-update-delete.md
    @@ -3,7 +3,7 @@ id: insert-update-delete.md
     summary: >-
  This guide walks you through data manipulation operations within a
  collection, including insertion, upsertion, and deletion.
-title: 'Insert, insert, and delete'
+title: 'Insert, upsert, and delete'
     ---
     

Insert, Upsert & Delete

During the process of loading a collection, Milvus loads the collection's index file into memory. Conversely, when releasing a collection, Milvus unloads the index file from memory. Before conducting searches in a collection, make sure that the collection is loaded.
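For a concrete picture, here is a minimal pymilvus sketch of that check (assumptions: a local Milvus at the default endpoint and an existing collection named quick_setup, as in the snippets elsewhere in this change):

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")

# Load the collection's index file into memory, then confirm the state before searching
client.load_collection(collection_name="quick_setup")
print(client.get_load_state(collection_name="quick_setup"))

# Expected output (indicative):
# {"state": "<LoadState: Loaded>"}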

Load a collection

-To load a collection, use the load_collection() method, specifying the collection name. You can also set replica_number to determine the number of in-memory replicas of data segments to create on query nodes when the collection is loaded.

+To load a collection, use the load_collection() method, specifying the collection name. You can also set replica_number to determine how many in-memory replicas of data segments to create on query nodes when the collection is loaded.

• Milvus Standalone: The maximum allowed value for replica_number is 1.
• Milvus Cluster: The maximum value must not exceed the queryNode.replicas value set in your Milvus configurations. For further details, refer to Query Node-related Configurations. A load call with a replica count is sketched right after this list.
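As a rough illustration of the replica_number parameter described above (a sketch only: the client comes from the earlier snippets, and the value shown assumes Milvus Standalone):

client.load_collection(
    collection_name="quick_setup",
    replica_number=1  # Max 1 on Standalone; stay within queryNode.replicas on a cluster
)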
@@ -1593,8 +1593,35 @@ $ curl -X POST "http://# } # }
-Release a collection

-To release a collection, use the release_collection() method, specifying the collection name.

+Load a collection partially (Public Preview)

+This feature is currently in public preview. The API and functionality may change in the future.

+Upon receiving your load request, Milvus loads all the vector field indexes and all the scalar field data into memory. If some fields are not to be involved in searches and queries, you can exclude them from loading to reduce memory usage and improve search performance.

+# 7. Load the collection
+client.load_collection(
+    collection_name="customized_setup_2",
+    load_fields=["my_id", "my_vector"], # Load only the specified fields
+    skip_load_dynamic_field=True # Skip loading the dynamic field
+)
    +
    +res = client.get_load_state(
    +    collection_name="customized_setup_2"
    +)
    +
    +print(res)
    +
    +# Output
    +#
    +# {
    +#     "state": "<LoadState: Loaded>"
    +# }
    +

Note that only the fields listed in load_fields can be used as filtering conditions and output fields in searches and queries. You should always include the primary key in the list. The field names excluded from loading will not be available for filtering or output.

+You can use skip_load_dynamic_field=True to skip loading the dynamic field. Milvus treats the dynamic field as a single field, so all the keys in the dynamic field will be included or excluded together.

+Release a collection

+To release a collection, use the release_collection() method, specifying the collection name.

To release a collection, use the releaseCollection() method, specifying the collection name.
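A brief sketch of the release call followed by a state check (the names reuse the pymilvus snippets above; the printed state is indicative):

client.release_collection(collection_name="customized_setup_2")

res = client.get_load_state(collection_name="customized_setup_2")
print(res)

# Expected output (indicative):
# {"state": "<LoadState: NotLoad>"}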

    @@ -2323,7 +2350,7 @@ collection.set_properties( } )
-Set MMAP

-This configures the memory mapping (MMAP) property for the collection, which determines whether the data is memory-mapped to improve query performance. For more information, refer to Configure memory mapping .

+Set MMAP

+This configures the memory mapping (MMAP) property for the collection, which determines whether the data is memory-mapped to improve query performance. For more information, refer to Configure memory mapping.

Before setting the MMAP property, release the collection first. Otherwise, an error will occur.
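For illustration, a sketch using the pymilvus ORM Collection; the mmap.enabled property key follows the memory-mapping documentation referenced above, and the collection name is an assumption:

from pymilvus import Collection, connections

connections.connect(uri="http://localhost:19530")

collection = Collection("quick_setup")  # assumed existing collection
collection.release()  # release first; setting MMAP on a loaded collection raises an error
collection.set_properties(properties={"mmap.enabled": True})  # enable memory mapping
collection.load()  # reload so the property takes effect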

    @@ -2345,7 +2372,7 @@ collection.set_properties( } )
-Drop a collection

    import io.milvus.v2.common.IndexParam;
    @@ -331,6 +332,10 @@ res = await client.index_params
      An IndexParams object containing a list of IndexParam objects.
+
+      sync
+      Controls how the index is built in relation to the client's request. Valid values:
• True (default): The client waits until the index is fully built before returning. This means you will not get a response until the process completes.
• False: The client returns right after the request is received, and the index is being built in the background. To find out whether index creation is complete, use the describe_index() method, as in the sketch after this list.
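A rough sketch of the asynchronous path described above (the collection, field, and index names are assumptions; sync is the parameter this change documents, and the client is the pymilvus MilvusClient from the earlier snippets):

index_params = client.prepare_index_params()
index_params.add_index(
    field_name="my_vector",  # assumed vector field
    index_type="IVF_FLAT",
    metric_type="IP",
    params={"nlist": 128}
)

# Return immediately and build the index in the background
client.create_index(
    collection_name="customized_setup",
    index_params=index_params,
    sync=False
)

# Check later whether the build has finished
res = client.describe_index(
    collection_name="customized_setup",
    index_name="my_vector"  # by default the index is named after the field
)
print(res)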
    + diff --git a/localization/v2.4.x/site/it/userGuide/manage-partitions.json b/localization/v2.4.x/site/it/userGuide/manage-partitions.json index f3d7cfe91..bd01bedcc 100644 --- a/localization/v2.4.x/site/it/userGuide/manage-partitions.json +++ b/localization/v2.4.x/site/it/userGuide/manage-partitions.json @@ -1 +1 @@ -{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n)\n\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .build();\n\nclient.createCollection(quickSetupReq);\n","const address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n}); \n","# 3. List partitions\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\"]\n","import io.milvus.v2.service.partition.request.ListPartitionsReq;\n\n// 3. List all partitions in the collection\nListPartitionsReq listPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nList partitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\"_default\"]\n","// 3. List partitions\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default' ]\n// \n","# 4. Create more partitions\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\", \"partitionB\"]\n","import io.milvus.v2.service.partition.request.CreatePartitionReq;\n\n// 4. Create more partitions\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\ncreatePartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\nlistPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\npartitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\n// \"_default\",\n// \"partitionA\",\n// \"partitionB\"\n// ]\n","// 4. 
Create more partitions\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default', 'partitionA', 'partitionB' ]\n// \n","# 5. Check whether a partition exists\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\nprint(res)\n\n# Output\n#\n# True\n\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionC\"\n)\nprint(res)\n\n# Output\n#\n# False\n","import io.milvus.v2.service.partition.request.HasPartitionReq;\n\n// 5. Check whether a partition exists\nHasPartitionReq hasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nboolean exists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// true\n\nhasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionC\")\n .build();\n\nexists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// false\n","// 5. Check whether a partition exists\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// true\n// \n\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionC\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// false\n// \n","# Release the collection\nclient.release_collection(collection_name=\"quick_setup\")\n\n# Check the load status\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionB\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.collection.request.ReleaseCollectionReq;\nimport io.milvus.v2.service.partition.request.LoadPartitionsReq;\nimport io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 6. 
Load a partition independantly\n// 6.1 Release the collection\nReleaseCollectionReq releaseCollectionReq = ReleaseCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nclient.releaseCollection(releaseCollectionReq);\n\n// 6.2 Load partitionA\nLoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\nThread.sleep(3000);\n\n// 6.3 Check the load status of the collection and its partitions\nGetLoadStateReq getLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 6. Load a partition indenpendantly\nawait client.releaseCollection({\n collection_name: \"quick_setup\"\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n\nawait client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nawait sleep(3000)\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n//\n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n 
.collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\", \"partitionB\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n","# 7. Release a partition\nclient.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 7. Release a partition\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 7. Release a partition\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n","client.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"_default\", \"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","# 8. 
Drop a partition\nclient.drop_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\"]\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"_default\", \"partitionA\", \"partitionB\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"_default\", \"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// {\n// status: {\n// error_code: 'Success',\n// reason: '',\n// code: 0,\n// retriable: false,\n// detail: ''\n// },\n// state: 'LoadStateNotLoad'\n// }\n// \n"],"headingContent":"","anchorList":[{"label":"Gestire le partizioni","href":"Manage-Partitions","type":1,"isActive":false},{"label":"Panoramica","href":"Overview","type":2,"isActive":false},{"label":"Preparazione","href":"Preparations","type":2,"isActive":false},{"label":"Elencare le partizioni","href":"List-Partitions","type":2,"isActive":false},{"label":"Creare partizioni","href":"Create-Partitions","type":2,"isActive":false},{"label":"Verifica di una partizione specifica","href":"Check-for-a-Specific-Partition","type":2,"isActive":false},{"label":"Caricare e rilasciare partizioni","href":"Load--Release-Partitions","type":2,"isActive":false},{"label":"Rilasciare partizioni","href":"Drop-Partitions","type":2,"isActive":false},{"label":"DOMANDE FREQUENTI","href":"FAQ","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n)\n\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .build();\n\nclient.createCollection(quickSetupReq);\n","const address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n}); \n","# 3. List partitions\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\"]\n","import io.milvus.v2.service.partition.request.ListPartitionsReq;\n\n// 3. 
List all partitions in the collection\nListPartitionsReq listPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nList partitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\"_default\"]\n","// 3. List partitions\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default' ]\n// \n","# 4. Create more partitions\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\", \"partitionB\"]\n","import io.milvus.v2.service.partition.request.CreatePartitionReq;\n\n// 4. Create more partitions\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\ncreatePartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\nlistPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\npartitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\n// \"_default\",\n// \"partitionA\",\n// \"partitionB\"\n// ]\n","// 4. Create more partitions\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default', 'partitionA', 'partitionB' ]\n// \n","# 5. Check whether a partition exists\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\nprint(res)\n\n# Output\n#\n# True\n\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionC\"\n)\nprint(res)\n\n# Output\n#\n# False\n","import io.milvus.v2.service.partition.request.HasPartitionReq;\n\n// 5. Check whether a partition exists\nHasPartitionReq hasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nboolean exists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// true\n\nhasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionC\")\n .build();\n\nexists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// false\n","// 5. 
Check whether a partition exists\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// true\n// \n\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionC\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// false\n// \n","# Release the collection\nclient.release_collection(collection_name=\"quick_setup\")\n\n# Check the load status\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"<LoadState: NotLoad>\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"<LoadState: NotLoad>\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionB\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"<LoadState: NotLoad>\"\n# }\n\n","import io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.collection.request.ReleaseCollectionReq;\nimport io.milvus.v2.service.partition.request.LoadPartitionsReq;\nimport io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 6. Load a partition independently\n// 6.1 Release the collection\nReleaseCollectionReq releaseCollectionReq = ReleaseCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nclient.releaseCollection(releaseCollectionReq);\n\n// 6.2 Load partitionA\nLoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\nThread.sleep(3000);\n\n// 6.3 Check the load status of the collection and its partitions\nGetLoadStateReq getLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 6. 
Load a partition independently\nawait client.releaseCollection({\n collection_name: \"quick_setup\"\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n\nawait client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nawait sleep(3000)\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"<LoadState: Loaded>\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n//\n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_state(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"<LoadState: Loaded>\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"<LoadState: Loaded>\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\", \"partitionB\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"],\n load_fields=[\"id\", \"vector\"],\n 
skip_load_dynamic_field=True\n)\n","# 7. Release a partition\nclient.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"<LoadState: NotLoad>\"\n# }\n\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 7. Release a partition\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 7. Release a partition\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n","client.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"_default\", \"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_state(\n collection_name=\"quick_setup\",\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"<LoadState: NotLoad>\"\n# }\n","# 8. Drop a partition\nclient.drop_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\"]\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"_default\", \"partitionA\", \"partitionB\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"_default\", \"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// {\n// status: {\n// error_code: 'Success',\n// reason: '',\n// code: 0,\n// retriable: false,\n// detail: ''\n// },\n// state: 'LoadStateNotLoad'\n// }\n// \n"],"headingContent":"Manage Partitions","anchorList":[{"label":"Gestire le partizioni","href":"Manage-Partitions","type":1,"isActive":false},{"label":"Panoramica","href":"Overview","type":2,"isActive":false},{"label":"Preparazione","href":"Preparations","type":2,"isActive":false},{"label":"Elencare le partizioni","href":"List-Partitions","type":2,"isActive":false},{"label":"Creare partizioni","href":"Create-Partitions","type":2,"isActive":false},{"label":"Verifica di una partizione specifica","href":"Check-for-a-Specific-Partition","type":2,"isActive":false},{"label":"Caricare e rilasciare partizioni","href":"Load--Release-Partitions","type":2,"isActive":false},{"label":"Rilasciare partizioni","href":"Drop-Partitions","type":2,"isActive":false},{"label":"DOMANDE FREQUENTI","href":"FAQ","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/userGuide/manage-partitions.md 
b/localization/v2.4.x/site/it/userGuide/manage-partitions.md index ca4e79c91..289518c22 100644 --- a/localization/v2.4.x/site/it/userGuide/manage-partitions.md +++ b/localization/v2.4.x/site/it/userGuide/manage-partitions.md @@ -1,7 +1,6 @@ --- id: manage-partitions.md title: Gestire le partizioni -summary: '' ---

    Manage Partitions +

    To load specific fields in one or more partitions, proceed as follows:

    +
    client.load_partitions(
    +    collection_name="quick_setup",
    +    partition_names=["partitionA"],
    +    load_fields=["id", "vector"],
    +    skip_load_dynamic_field=True
    +)
    +
    +

    Note that only the fields listed in load_fields can be used as filtering conditions and output fields in searches and queries. You should always include the primary key in the list. Field names excluded from loading will not be available for filtering or output.

    +

    You can use skip_load_dynamic_field=True to skip loading the dynamic field. Milvus treats the dynamic field as a single field, so all keys in the dynamic field will be included or excluded together.
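
    To make the constraint concrete, here is a minimal follow-up sketch (assuming the load_partitions call above, with load_fields=["id", "vector"], has completed; the query vector is a placeholder):

    # Hypothetical search against the partially loaded partition.
    res = client.search(
        collection_name="quick_setup",
        partition_names=["partitionA"],
        data=[[0.1, 0.2, 0.3, 0.4, 0.5]],
        limit=3,
        filter="id >= 0",         # allowed: "id" is listed in load_fields
        output_fields=["id"]      # allowed: "id" is listed in load_fields
    )
    # Filtering or outputting a field that was not loaded (e.g. a dynamic-field
    # key skipped via skip_load_dynamic_field=True) would fail here.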

    Release Partitions

    -

    To release all partitions of a collection, simply invoke release_collection(). To release specific partitions of a collection, use release_partitions().

    +

    To release all partitions of a collection, simply call release_collection(). To release specific partitions of a collection, use release_partitions().

    To release all partitions of a collection, simply call releaseCollection(). To release specific partitions of a collection, use releasePartitions().
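
    For reference, a minimal Python sketch of the release-and-verify pattern, using the same client and quick_setup collection as above:

    # Release only partitionA, then confirm it is no longer loaded.
    client.release_partitions(
        collection_name="quick_setup",
        partition_names=["partitionA"]
    )

    res = client.get_load_state(
        collection_name="quick_setup",
        partition_name="partitionA"
    )
    print(res)
    # Expected output: {'state': <LoadState: NotLoad>}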

    @@ -930,10 +939,10 @@ res = await client.

    rootCoord.maxPartitionNum. For more details, refer to the System Configurations section.

    +

    By default, Milvus allows a maximum of 1,024 partitions to be created. You can adjust the maximum number of partitions by configuring rootCoord.maxPartitionNum. For more details, refer to the System Configurations section.

  • How can I tell the difference between partitions and partition keys?

    Partitions are physical storage units, whereas partition keys are logical concepts that automatically assign data to specific partitions based on a designated column.

    -

    For example, in Milvus, if you have a collection with a partition key defined as the color field, the system automatically assigns data to partitions based on the hashed values of the color field of each entity. This automatic process relieves the user of the responsibility to manually specify the partition when inserting or searching data.

    +

    In Milvus, for instance, if you have a collection with a partition key defined as the color field, the system automatically assigns data to partitions based on the hashed values of the color field of each entity. This automatic process relieves the user of the responsibility to manually specify the partition when inserting or searching data.

    On the other hand, when you create partitions manually, you need to assign data to each partition based on the partition-key criteria. If you have a collection with a color field, you would manually assign entities whose color value is red to partition A, and entities whose color value is blue to partition B. This manual management requires more effort.

    In short, both partitions and partition keys are used to optimize data computation and improve query efficiency. It is essential to recognize that enabling a partition key means surrendering control over the manual management of partition data insertion and loading, as these processes are fully automated and handled by Milvus.
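
    The contrast is easiest to see side by side. Below is a minimal, illustrative Python sketch (assumptions: a local Milvus at http://localhost:19530, the hypothetical collection name keyed_setup, placeholder row values and ids; index creation and loading are omitted):

    from pymilvus import MilvusClient, DataType
    import random

    client = MilvusClient(uri="http://localhost:19530")

    # Manual partitions: you create the partition and route every batch yourself.
    client.create_partition(collection_name="quick_setup", partition_name="red")
    red_rows = [
        {"id": i, "vector": [random.uniform(-1, 1) for _ in range(5)], "color": "red"}
        for i in range(2000, 2010)
    ]
    client.insert(collection_name="quick_setup", data=red_rows, partition_name="red")

    # Partition key: declare the key field once; Milvus hashes it and routes rows.
    schema = MilvusClient.create_schema(auto_id=False, enable_dynamic_field=True)
    schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
    schema.add_field(field_name="vector", datatype=DataType.FLOAT_VECTOR, dim=5)
    schema.add_field(field_name="color", datatype=DataType.VARCHAR, max_length=64, is_partition_key=True)
    client.create_collection(collection_name="keyed_setup", schema=schema)

    mixed_rows = [
        {"id": i, "vector": [random.uniform(-1, 1) for _ in range(5)],
         "color": random.choice(["red", "blue"])}
        for i in range(10)
    ]
    # No partition_name argument: Milvus routes each row by hashing its color value.
    client.insert(collection_name="keyed_setup", data=mixed_rows)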

  • diff --git a/localization/v2.4.x/site/it/userGuide/search-query-get/single-vector-search.json b/localization/v2.4.x/site/it/userGuide/search-query-get/single-vector-search.json index a4ccdbdc5..f855cbf67 100644 --- a/localization/v2.4.x/site/it/userGuide/search-query-get/single-vector-search.json +++ b/localization/v2.4.x/site/it/userGuide/search-query-get/single-vector-search.json @@ -1 +1 @@ -{"codeList":["# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=CLUSTER_ENDPOINT,\n token=TOKEN \n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n metric_type=\"IP\"\n)\n\n# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"color_tag\": f\"{current_color}_{str(random.randint(1000, 9999))}\"\n })\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n\n# 6.1 Create partitions \nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"red\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"blue\"\n)\n\n# 6.1 Insert data into partitions\nred_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"red\", \"color_tag\": f\"red_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\nblue_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"blue\", \"color_tag\": f\"blue_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=red_data,\n partition_name=\"red\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=blue_data,\n partition_name=\"blue\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Random;\n\nimport com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp; \n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig); \n\n// 2. 
Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .metricType(\"IP\")\n .build();\n\nclient.createCollection(quickSetupReq);\n\nGetLoadStateReq loadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(loadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\n// 3. Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n\n// 6.1. Create a partition\nCreatePartitionReq partitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"red\")\n .build();\n\nclient.createPartition(partitionReq);\n\npartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"blue\")\n .build();\n\nclient.createPartition(partitionReq);\n\n// 6.2 Insert data into the partition\ndata = new ArrayList<>();\n\nfor (int i=1000; i<1500; i++) {\n Random rand = new Random();\n String current_color = \"red\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n} \n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"red\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n\ndata = new ArrayList<>();\n\nfor (int i=1500; i<2000; i++) {\n Random rand = new Random();\n String current_color = \"blue\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"blue\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. 
Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n metric_type: \"IP\"\n}); \n\n// 3. Insert randomly generated vectors\nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor (let i = 0; i < 1000; i++) {\n current_color = colors[Math.floor(Math.random() * colors.length)]\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n color_tag: `${current_color}_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nvar res = await client.insert({\n collection_name: \"quick_setup\",\n data: data\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"red\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"blue\"\n})\n\n// 6.1 Insert data into partitions\nvar red_data = []\nvar blue_data = []\n\nfor (let i = 1000; i < 1500; i++) {\n red_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"red\",\n color_tag: `red_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nfor (let i = 1500; i < 2000; i++) {\n blue_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"blue\",\n color_tag: `blue_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: red_data,\n partition_name: \"red\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: blue_data,\n partition_name: \"blue\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n","# Single vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n # Replace with your query vector\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\n# Convert the output to a formatted JSON string\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 4. Single vector search\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(3) // The number of results to return\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 4. 
Single vector search\nvar query_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 3, // The number of results to return\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {}\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {}\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {}\n },\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {}\n },\n {\n \"id\": 2,\n \"distance\": 0.5928734540939331,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [[\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\n \"vector\": [\n 0.87928146,\n 0.05324632,\n 0.6312755,\n 0.28005534,\n 0.9542448\n ],\n \"id\": 455\n }\n }\n]]}\n","[\n { score: 1.7463608980178833, id: '854' },\n { score: 1.744946002960205, id: '425' },\n { score: 1.7258622646331787, id: '718' }\n]\n","# Bulk-vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104],\n [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345]\n ], # Replace with your query vectors\n limit=2, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 5. Batch vector search\nquery_vectors = Arrays.asList(\n Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f),\n Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f)\n);\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(2)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 5. 
Batch vector search\nvar query_vectors = [\n [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104]\n]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: query_vectors,\n limit: 2,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 1,\n \"distance\": 1.3017789125442505,\n \"entity\": {}\n },\n {\n \"id\": 7,\n \"distance\": 1.2419954538345337,\n \"entity\": {}\n }\n ], # Result set 1\n [\n {\n \"id\": 3,\n \"distance\": 2.3358664512634277,\n \"entity\": {}\n },\n {\n \"id\": 8,\n \"distance\": 0.5642921924591064,\n \"entity\": {}\n }\n ] # Result set 2\n]\n","// Two sets of vectors are returned as expected\n\n{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n }\n ],\n [\n {\n \"score\": 1.8654699,\n \"fields\": {\n \"vector\": [\n 0.4671427,\n 0.8378432,\n 0.98844475,\n 0.82763994,\n 0.9729997\n ],\n \"id\": 638\n }\n },\n {\n \"score\": 1.8581753,\n \"fields\": {\n \"vector\": [\n 0.735541,\n 0.60140246,\n 0.86730254,\n 0.93152493,\n 0.98603314\n ],\n \"id\": 855\n }\n }\n ]\n]}\n","[\n [\n { score: 2.3590476512908936, id: '854' },\n { score: 2.2896690368652344, id: '59' }\n [\n { score: 2.664059638977051, id: '59' },\n { score: 2.59483003616333, id: '854' }\n ]\n]\n","# 6.2 Search within a partition\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"red\"]\n)\n\nprint(res)\n","// 6.3 Search within partitions\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"red\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 6.2 Search within partitions\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"red\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 16,\n \"distance\": 0.9200337529182434,\n \"entity\": {}\n },\n {\n \"id\": 14,\n \"distance\": 0.4505271911621094,\n \"entity\": {}\n },\n {\n \"id\": 15,\n \"distance\": 0.19924677908420563,\n \"entity\": {}\n },\n {\n \"id\": 17,\n \"distance\": 0.0075093843042850494,\n \"entity\": {}\n },\n {\n \"id\": 13,\n \"distance\": -0.14609718322753906,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1677284,\n \"fields\": {\n \"vector\": [\n 0.9986977,\n 0.17964739,\n 0.49086612,\n 0.23155272,\n 0.98438674\n ],\n \"id\": 1435\n }\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\n \"vector\": [\n 0.6952647,\n 0.13417172,\n 0.91045254,\n 0.119336545,\n 0.9338931\n ],\n \"id\": 1291\n }\n },\n {\n \"score\": 
1.0969629,\n \"fields\": {\n \"vector\": [\n 0.3363194,\n 0.028906643,\n 0.6675426,\n 0.030419827,\n 0.9735209\n ],\n \"id\": 1168\n }\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\n \"vector\": [\n 0.9980543,\n 0.36063594,\n 0.66427994,\n 0.17359233,\n 0.94954175\n ],\n \"id\": 1164\n }\n },\n {\n \"score\": 1.0584627,\n \"fields\": {\n \"vector\": [\n 0.7187005,\n 0.12674773,\n 0.987718,\n 0.3110777,\n 0.86093885\n ],\n \"id\": 1085\n }\n }\n ],\n [\n {\n \"score\": 1.8030131,\n \"fields\": {\n \"vector\": [\n 0.59726167,\n 0.7054632,\n 0.9573117,\n 0.94529945,\n 0.8664103\n ],\n \"id\": 1203\n }\n },\n {\n \"score\": 1.7728865,\n \"fields\": {\n \"vector\": [\n 0.6672442,\n 0.60448086,\n 0.9325822,\n 0.80272985,\n 0.8861626\n ],\n \"id\": 1448\n }\n },\n {\n \"score\": 1.7536311,\n \"fields\": {\n \"vector\": [\n 0.59663296,\n 0.77831805,\n 0.8578314,\n 0.88818026,\n 0.9030075\n ],\n \"id\": 1010\n }\n },\n {\n \"score\": 1.7520742,\n \"fields\": {\n \"vector\": [\n 0.854198,\n 0.72294194,\n 0.9245805,\n 0.86126596,\n 0.7969224\n ],\n \"id\": 1219\n }\n },\n {\n \"score\": 1.7452049,\n \"fields\": {\n \"vector\": [\n 0.96419,\n 0.943535,\n 0.87611496,\n 0.8268136,\n 0.79786557\n ],\n \"id\": 1149\n }\n }\n ]\n]}\n","[\n { score: 3.0258803367614746, id: '1201' },\n { score: 3.004319190979004, id: '1458' },\n { score: 2.880324363708496, id: '1187' },\n { score: 2.8246407508850098, id: '1347' },\n { score: 2.797295093536377, id: '1406' }\n]\n","res = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"blue\"]\n)\n\nprint(res)\n","searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"blue\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","res = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"blue\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 20,\n \"distance\": 2.363696813583374,\n \"entity\": {}\n },\n {\n \"id\": 26,\n \"distance\": 1.0665391683578491,\n \"entity\": {}\n },\n {\n \"id\": 23,\n \"distance\": 1.066049575805664,\n \"entity\": {}\n },\n {\n \"id\": 29,\n \"distance\": 0.8353596925735474,\n \"entity\": {}\n },\n {\n \"id\": 28,\n \"distance\": 0.7484277486801147,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1628494,\n \"fields\": {\n \"vector\": [\n 0.7442872,\n 0.046407282,\n 0.71031404,\n 0.3544345,\n 0.9819991\n ],\n \"id\": 1992\n }\n },\n {\n \"score\": 1.1470042,\n \"fields\": {\n \"vector\": [\n 0.5505825,\n 0.04367262,\n 0.9985836,\n 0.18922359,\n 0.93255126\n ],\n \"id\": 1977\n }\n },\n {\n \"score\": 1.1450152,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.1439825,\n \"fields\": {\n \"vector\": [\n 0.9253267,\n 0.15890503,\n 0.7999555,\n 0.19126713,\n 0.898583\n ],\n \"id\": 1552\n }\n },\n {\n \"score\": 1.1029172,\n \"fields\": {\n \"vector\": [\n 0.95661926,\n 0.18777144,\n 0.38115507,\n 0.14323527,\n 0.93137646\n ],\n \"id\": 1823\n }\n }\n ],\n [\n {\n \"score\": 1.8005109,\n \"fields\": {\n \"vector\": [\n 0.5953582,\n 0.7794224,\n 0.9388869,\n 0.79825854,\n 0.9197286\n ],\n \"id\": 1888\n }\n },\n {\n \"score\": 1.7714822,\n \"fields\": {\n \"vector\": [\n 0.56805456,\n 
0.89422905,\n 0.88187534,\n 0.914824,\n 0.8944365\n ],\n \"id\": 1648\n }\n },\n {\n \"score\": 1.7561421,\n \"fields\": {\n \"vector\": [\n 0.83421993,\n 0.39865613,\n 0.92319834,\n 0.42695504,\n 0.96633124\n ],\n \"id\": 1688\n }\n },\n {\n \"score\": 1.7553532,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.7543385,\n \"fields\": {\n \"vector\": [\n 0.16542226,\n 0.38248396,\n 0.9888778,\n 0.80913955,\n 0.9501492\n ],\n \"id\": 1544\n }\n }\n ]\n]}\n","[\n { score: 2.8421106338500977, id: '1745' },\n { score: 2.838560104370117, id: '1782' },\n { score: 2.8134000301361084, id: '1511' },\n { score: 2.718268871307373, id: '1679' },\n { score: 2.7014894485473633, id: '1597' }\n]\n","# Search with output fields\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"] # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 7. Search with output fields\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 7. Search with output fields\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n output_fields: [\"color\"],\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {\n \"color\": \"pink_8682\"\n }\n },\n {\n \"id\": 16,\n \"distance\": 1.0159327983856201,\n \"entity\": {\n \"color\": \"yellow_1496\"\n }\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {}\n }\n ]\n]}\n","\n[\n { score: 3.036271572113037, id: '59', color: 'orange' },\n { score: 3.0267879962921143, id: '1745', color: 'blue' },\n { score: 3.0069446563720703, id: '854', color: 'black' },\n { score: 2.984386682510376, id: '718', color: 'black' },\n { score: 2.916019916534424, id: '425', color: 'purple' }\n]\n","# Search with filter\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. 
number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"red%\"'\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color_tag like \\\"red%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"red%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n },\n {\n \"id\": 6,\n \"distance\": -0.4113418459892273,\n \"entity\": {\n \"color\": \"red_9392\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1677284,\n \"fields\": {\"color_tag\": \"red_9030\"}\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\"color_tag\": \"red_3744\"}\n },\n {\n \"score\": 1.0969629,\n \"fields\": {\"color_tag\": \"red_4168\"}\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\"color_tag\": \"red_9678\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'red_8904' },\n { score: 2.491129159927368, id: '425', color_tag: 'purple_8212' },\n { score: 2.4889798164367676, id: '1458', color_tag: 'red_6891' },\n { score: 2.42964243888855, id: '724', color_tag: 'black_9885' },\n { score: 2.4004223346710205, id: '854', color_tag: 'black_5990' }\n]\n","# Infix match on color field\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"%ll%\"' # Filter on color field, infix match on \"ll\"\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color like \\\"%ll%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. 
Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"%ll%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {\n \"color\": \"yellow_4222\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"yellow_4222\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'yellow_4222' }\n]\n","# Conduct a range search\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\n \"radius\": 0.8, # Radius of the search circle\n \"range_filter\": 1.0 # Range filter to filter out vectors that are not within the search circle\n }\n}\n\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=3, # Max. number of search results to return\n search_params=search_params, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 9. Range search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .searchParams(Map.of(\"radius\", 0.1, \"range\", 1.0))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 9. 
Range search\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n params: {\n radius: 0.1,\n range: 1.0\n },\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\"color_tag\": \"green_2052\"}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\"color_tag\": \"purple_3709\"}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {\"color_tag\": \"black_1646\"}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {\"color_tag\": \"green_4853\"}\n }\n ]\n]}\n","[\n { score: 2.3387961387634277, id: '718', color_tag: 'black_7154' },\n { score: 2.3352415561676025, id: '1745', color_tag: 'blue_8741' },\n { score: 2.290485382080078, id: '1408', color_tag: 'red_2324' },\n { score: 2.285870313644409, id: '854', color_tag: 'black_5990' },\n { score: 2.2593345642089844, id: '1309', color_tag: 'red_8458' }\n]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Group search results\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=[[0.14529211512077012, 0.9147257273453546, 0.7965055218724449, 0.7009258593102812, 0.5605206522382088]], # Query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=10, # Max. number of search results to return\n group_by_field=\"doc_id\", # Group results by document ID\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\n\nprint(doc_ids)\n","[5, 10, 1, 7, 9, 6, 3, 4, 8, 2]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Search without `group_by_field`\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=query_passage_vector, # Replace with your query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=10, # Max. 
number of search results to return\n # group_by_field=\"doc_id\", # Group results by document ID\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\n\nprint(doc_ids)\n","[1, 10, 3, 10, 1, 9, 4, 4, 8, 6]\n","# In normal cases, you do not need to set search parameters manually\n# Except for range searches.\nsearch_parameters = {\n 'metric_type': 'L2',\n 'params': {\n 'nprobe': 10,\n 'level': 1,\n 'radius': 1.0\n 'range_filter': 0.8\n }\n}\n"],"headingContent":"","anchorList":[{"label":"Ricerca per singolo vettore","href":"Single-Vector-Search","type":1,"isActive":false},{"label":"Panoramica","href":"Overview","type":2,"isActive":false},{"label":"Preparazioni","href":"Preparations","type":2,"isActive":false},{"label":"Ricerca di base","href":"Basic-search","type":2,"isActive":false},{"label":"Ricerca filtrata","href":"Filtered-search","type":2,"isActive":false},{"label":"Ricerca per intervallo","href":"Range-search","type":2,"isActive":false},{"label":"Ricerca per raggruppamento","href":"Grouping-search","type":2,"isActive":false},{"label":"Parametri di ricerca","href":"Search-parameters","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=CLUSTER_ENDPOINT,\n token=TOKEN \n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n metric_type=\"IP\"\n)\n\n# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"color_tag\": f\"{current_color}_{str(random.randint(1000, 9999))}\"\n })\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n\n# 6.1 Create partitions \nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"red\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"blue\"\n)\n\n# 6.1 Insert data into partitions\nred_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"red\", \"color_tag\": f\"red_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\nblue_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"blue\", \"color_tag\": f\"blue_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=red_data,\n partition_name=\"red\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=blue_data,\n partition_name=\"blue\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Random;\n\nimport 
com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp; \n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig); \n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .metricType(\"IP\")\n .build();\n\nclient.createCollection(quickSetupReq);\n\nGetLoadStateReq loadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(loadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\n// 3. Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n\n// 6.1. 
Create a partition\nCreatePartitionReq partitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"red\")\n .build();\n\nclient.createPartition(partitionReq);\n\npartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"blue\")\n .build();\n\nclient.createPartition(partitionReq);\n\n// 6.2 Insert data into the partition\ndata = new ArrayList<>();\n\nfor (int i=1000; i<1500; i++) {\n Random rand = new Random();\n String current_color = \"red\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n} \n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"red\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n\ndata = new ArrayList<>();\n\nfor (int i=1500; i<2000; i++) {\n Random rand = new Random();\n String current_color = \"blue\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"blue\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n metric_type: \"IP\"\n}); \n\n// 3. 
Insert randomly generated vectors\nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor (let i = 0; i < 1000; i++) {\n current_color = colors[Math.floor(Math.random() * colors.length)]\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n color_tag: `${current_color}_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nvar res = await client.insert({\n collection_name: \"quick_setup\",\n data: data\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"red\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"blue\"\n})\n\n// 6.1 Insert data into partitions\nvar red_data = []\nvar blue_data = []\n\nfor (let i = 1000; i < 1500; i++) {\n red_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"red\",\n color_tag: `red_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nfor (let i = 1500; i < 2000; i++) {\n blue_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"blue\",\n color_tag: `blue_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: red_data,\n partition_name: \"red\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: blue_data,\n partition_name: \"blue\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n","# Single vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n # Replace with your query vector\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\n# Convert the output to a formatted JSON string\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 4. Single vector search\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(3) // The number of results to return\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 4. 
Single vector search\nvar query_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 3, // The number of results to return\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {}\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {}\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {}\n },\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {}\n },\n {\n \"id\": 2,\n \"distance\": 0.5928734540939331,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [[\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\n \"vector\": [\n 0.87928146,\n 0.05324632,\n 0.6312755,\n 0.28005534,\n 0.9542448\n ],\n \"id\": 455\n }\n }\n]]}\n","[\n { score: 1.7463608980178833, id: '854' },\n { score: 1.744946002960205, id: '425' },\n { score: 1.7258622646331787, id: '718' }\n]\n","# Bulk-vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104],\n [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345]\n ], # Replace with your query vectors\n limit=2, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 5. Batch vector search\nquery_vectors = Arrays.asList(\n Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f),\n Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f)\n);\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(2)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 5. 
Batch vector search\nvar query_vectors = [\n [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104]\n]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: query_vectors,\n limit: 2,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 1,\n \"distance\": 1.3017789125442505,\n \"entity\": {}\n },\n {\n \"id\": 7,\n \"distance\": 1.2419954538345337,\n \"entity\": {}\n }\n ], # Result set 1\n [\n {\n \"id\": 3,\n \"distance\": 2.3358664512634277,\n \"entity\": {}\n },\n {\n \"id\": 8,\n \"distance\": 0.5642921924591064,\n \"entity\": {}\n }\n ] # Result set 2\n]\n","// Two sets of vectors are returned as expected\n\n{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n }\n ],\n [\n {\n \"score\": 1.8654699,\n \"fields\": {\n \"vector\": [\n 0.4671427,\n 0.8378432,\n 0.98844475,\n 0.82763994,\n 0.9729997\n ],\n \"id\": 638\n }\n },\n {\n \"score\": 1.8581753,\n \"fields\": {\n \"vector\": [\n 0.735541,\n 0.60140246,\n 0.86730254,\n 0.93152493,\n 0.98603314\n ],\n \"id\": 855\n }\n }\n ]\n]}\n","[\n [\n { score: 2.3590476512908936, id: '854' },\n { score: 2.2896690368652344, id: '59' }\n [\n { score: 2.664059638977051, id: '59' },\n { score: 2.59483003616333, id: '854' }\n ]\n]\n","# 6.2 Search within a partition\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"red\"]\n)\n\nprint(res)\n","// 6.3 Search within partitions\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"red\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 6.2 Search within partitions\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"red\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 16,\n \"distance\": 0.9200337529182434,\n \"entity\": {}\n },\n {\n \"id\": 14,\n \"distance\": 0.4505271911621094,\n \"entity\": {}\n },\n {\n \"id\": 15,\n \"distance\": 0.19924677908420563,\n \"entity\": {}\n },\n {\n \"id\": 17,\n \"distance\": 0.0075093843042850494,\n \"entity\": {}\n },\n {\n \"id\": 13,\n \"distance\": -0.14609718322753906,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1677284,\n \"fields\": {\n \"vector\": [\n 0.9986977,\n 0.17964739,\n 0.49086612,\n 0.23155272,\n 0.98438674\n ],\n \"id\": 1435\n }\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\n \"vector\": [\n 0.6952647,\n 0.13417172,\n 0.91045254,\n 0.119336545,\n 0.9338931\n ],\n \"id\": 1291\n }\n },\n {\n \"score\": 
1.0969629,\n \"fields\": {\n \"vector\": [\n 0.3363194,\n 0.028906643,\n 0.6675426,\n 0.030419827,\n 0.9735209\n ],\n \"id\": 1168\n }\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\n \"vector\": [\n 0.9980543,\n 0.36063594,\n 0.66427994,\n 0.17359233,\n 0.94954175\n ],\n \"id\": 1164\n }\n },\n {\n \"score\": 1.0584627,\n \"fields\": {\n \"vector\": [\n 0.7187005,\n 0.12674773,\n 0.987718,\n 0.3110777,\n 0.86093885\n ],\n \"id\": 1085\n }\n }\n ],\n [\n {\n \"score\": 1.8030131,\n \"fields\": {\n \"vector\": [\n 0.59726167,\n 0.7054632,\n 0.9573117,\n 0.94529945,\n 0.8664103\n ],\n \"id\": 1203\n }\n },\n {\n \"score\": 1.7728865,\n \"fields\": {\n \"vector\": [\n 0.6672442,\n 0.60448086,\n 0.9325822,\n 0.80272985,\n 0.8861626\n ],\n \"id\": 1448\n }\n },\n {\n \"score\": 1.7536311,\n \"fields\": {\n \"vector\": [\n 0.59663296,\n 0.77831805,\n 0.8578314,\n 0.88818026,\n 0.9030075\n ],\n \"id\": 1010\n }\n },\n {\n \"score\": 1.7520742,\n \"fields\": {\n \"vector\": [\n 0.854198,\n 0.72294194,\n 0.9245805,\n 0.86126596,\n 0.7969224\n ],\n \"id\": 1219\n }\n },\n {\n \"score\": 1.7452049,\n \"fields\": {\n \"vector\": [\n 0.96419,\n 0.943535,\n 0.87611496,\n 0.8268136,\n 0.79786557\n ],\n \"id\": 1149\n }\n }\n ]\n]}\n","[\n { score: 3.0258803367614746, id: '1201' },\n { score: 3.004319190979004, id: '1458' },\n { score: 2.880324363708496, id: '1187' },\n { score: 2.8246407508850098, id: '1347' },\n { score: 2.797295093536377, id: '1406' }\n]\n","res = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"blue\"]\n)\n\nprint(res)\n","searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"blue\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","res = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"blue\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 20,\n \"distance\": 2.363696813583374,\n \"entity\": {}\n },\n {\n \"id\": 26,\n \"distance\": 1.0665391683578491,\n \"entity\": {}\n },\n {\n \"id\": 23,\n \"distance\": 1.066049575805664,\n \"entity\": {}\n },\n {\n \"id\": 29,\n \"distance\": 0.8353596925735474,\n \"entity\": {}\n },\n {\n \"id\": 28,\n \"distance\": 0.7484277486801147,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1628494,\n \"fields\": {\n \"vector\": [\n 0.7442872,\n 0.046407282,\n 0.71031404,\n 0.3544345,\n 0.9819991\n ],\n \"id\": 1992\n }\n },\n {\n \"score\": 1.1470042,\n \"fields\": {\n \"vector\": [\n 0.5505825,\n 0.04367262,\n 0.9985836,\n 0.18922359,\n 0.93255126\n ],\n \"id\": 1977\n }\n },\n {\n \"score\": 1.1450152,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.1439825,\n \"fields\": {\n \"vector\": [\n 0.9253267,\n 0.15890503,\n 0.7999555,\n 0.19126713,\n 0.898583\n ],\n \"id\": 1552\n }\n },\n {\n \"score\": 1.1029172,\n \"fields\": {\n \"vector\": [\n 0.95661926,\n 0.18777144,\n 0.38115507,\n 0.14323527,\n 0.93137646\n ],\n \"id\": 1823\n }\n }\n ],\n [\n {\n \"score\": 1.8005109,\n \"fields\": {\n \"vector\": [\n 0.5953582,\n 0.7794224,\n 0.9388869,\n 0.79825854,\n 0.9197286\n ],\n \"id\": 1888\n }\n },\n {\n \"score\": 1.7714822,\n \"fields\": {\n \"vector\": [\n 0.56805456,\n 
0.89422905,\n 0.88187534,\n 0.914824,\n 0.8944365\n ],\n \"id\": 1648\n }\n },\n {\n \"score\": 1.7561421,\n \"fields\": {\n \"vector\": [\n 0.83421993,\n 0.39865613,\n 0.92319834,\n 0.42695504,\n 0.96633124\n ],\n \"id\": 1688\n }\n },\n {\n \"score\": 1.7553532,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.7543385,\n \"fields\": {\n \"vector\": [\n 0.16542226,\n 0.38248396,\n 0.9888778,\n 0.80913955,\n 0.9501492\n ],\n \"id\": 1544\n }\n }\n ]\n]}\n","[\n { score: 2.8421106338500977, id: '1745' },\n { score: 2.838560104370117, id: '1782' },\n { score: 2.8134000301361084, id: '1511' },\n { score: 2.718268871307373, id: '1679' },\n { score: 2.7014894485473633, id: '1597' }\n]\n","# Search with output fields\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"] # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 7. Search with output fields\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 7. Search with output fields\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n output_fields: [\"color\"],\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {\n \"color\": \"pink_8682\"\n }\n },\n {\n \"id\": 16,\n \"distance\": 1.0159327983856201,\n \"entity\": {\n \"color\": \"yellow_1496\"\n }\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {}\n }\n ]\n]}\n","\n[\n { score: 3.036271572113037, id: '59', color: 'orange' },\n { score: 3.0267879962921143, id: '1745', color: 'blue' },\n { score: 3.0069446563720703, id: '854', color: 'black' },\n { score: 2.984386682510376, id: '718', color: 'black' },\n { score: 2.916019916534424, id: '425', color: 'purple' }\n]\n","# Search with filter\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. 
number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"red%\"'\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color_tag like \\\"red%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"red%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n },\n {\n \"id\": 6,\n \"distance\": -0.4113418459892273,\n \"entity\": {\n \"color\": \"red_9392\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1677284,\n \"fields\": {\"color_tag\": \"red_9030\"}\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\"color_tag\": \"red_3744\"}\n },\n {\n \"score\": 1.0969629,\n \"fields\": {\"color_tag\": \"red_4168\"}\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\"color_tag\": \"red_9678\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'red_8904' },\n { score: 2.491129159927368, id: '425', color_tag: 'purple_8212' },\n { score: 2.4889798164367676, id: '1458', color_tag: 'red_6891' },\n { score: 2.42964243888855, id: '724', color_tag: 'black_9885' },\n { score: 2.4004223346710205, id: '854', color_tag: 'black_5990' }\n]\n","# Infix match on color field\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"%ll%\"' # Filter on color field, infix match on \"ll\"\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color like \\\"%ll%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. 
Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"%ll%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {\n \"color\": \"yellow_4222\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"yellow_4222\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'yellow_4222' }\n]\n","# Conduct a range search\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\n \"radius\": 0.8, # Radius of the search circle\n \"range_filter\": 1.0 # Range filter to filter out vectors that are not within the search circle\n }\n}\n\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=3, # Max. number of search results to return\n search_params=search_params, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 9. Range search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .searchParams(Map.of(\"radius\", 0.1, \"range\", 1.0))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 9. 
Range search\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n params: {\n radius: 0.1,\n range: 1.0\n },\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\"color_tag\": \"green_2052\"}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\"color_tag\": \"purple_3709\"}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {\"color_tag\": \"black_1646\"}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {\"color_tag\": \"green_4853\"}\n }\n ]\n]}\n","[\n { score: 2.3387961387634277, id: '718', color_tag: 'black_7154' },\n { score: 2.3352415561676025, id: '1745', color_tag: 'blue_8741' },\n { score: 2.290485382080078, id: '1408', color_tag: 'red_2324' },\n { score: 2.285870313644409, id: '854', color_tag: 'black_5990' },\n { score: 2.2593345642089844, id: '1309', color_tag: 'red_8458' }\n]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Group search results\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=[[0.14529211512077012, 0.9147257273453546, 0.7965055218724449, 0.7009258593102812, 0.5605206522382088]], # Query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=5, # Max. number of groups to return\n group_by_field=\"doc_id\", # Group results by document ID\n group_size=2, # returned at most 2 passages per document, the default value is 1\n group_strict_size=True, # ensure every group contains exactly 3 passages\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\npassage_ids = [result['entity']['passage_id'] for result in res[0]]\n\nprint(doc_ids)\nprint(passage_ids)\n","[\"doc_11\", \"doc_11\", \"doc_7\", \"doc_7\", \"doc_3\", \"doc_3\", \"doc_2\", \"doc_2\", \"doc_8\", \"doc_8\"]\n[5, 10, 11, 10, 9, 6, 5, 4, 9, 2]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Search without `group_by_field`\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=query_passage_vector, # Replace with your query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=5, # Max. 
number of search results to return\n    # group_by_field=\"doc_id\", # Group results by document ID\n    # group_size=2, \n    # group_strict_size=True,\n    output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\npassage_ids = [result['entity']['passage_id'] for result in res[0]]\n\nprint(doc_ids)\nprint(passage_ids)\n","[\"doc_11\", \"doc_11\", \"doc_11\", \"doc_11\", \"doc_11\"]\n[1, 10, 3, 12, 9]\n","# In normal cases, you do not need to set search parameters manually\n# Except for range searches.\nsearch_parameters = {\n    'metric_type': 'L2',\n    'params': {\n        'nprobe': 10,\n        'level': 1,\n        'radius': 1.0,\n        'range_filter': 0.8\n    }\n}\n"],"headingContent":"Single-Vector Search","anchorList":[{"label":"Ricerca per singolo vettore","href":"Single-Vector-Search","type":1,"isActive":false},{"label":"Panoramica","href":"Overview","type":2,"isActive":false},{"label":"Preparazioni","href":"Preparations","type":2,"isActive":false},{"label":"Ricerca di base","href":"Basic-search","type":2,"isActive":false},{"label":"Ricerca filtrata","href":"Filtered-search","type":2,"isActive":false},{"label":"Ricerca per intervallo","href":"Range-search","type":2,"isActive":false},{"label":"Ricerca per raggruppamento","href":"Grouping-search","type":2,"isActive":false},{"label":"Parametri di ricerca","href":"Search-parameters","type":2,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.4.x/site/it/userGuide/search-query-get/single-vector-search.md b/localization/v2.4.x/site/it/userGuide/search-query-get/single-vector-search.md
index d014980aa..2c39fb7ee 100644
--- a/localization/v2.4.x/site/it/userGuide/search-query-get/single-vector-search.md
+++ b/localization/v2.4.x/site/it/userGuide/search-query-get/single-vector-search.md
@@ -4,7 +4,7 @@
 order: 1
 summary: >-
   Questo articolo descrive come cercare vettori in una collezione Milvus
   utilizzando un singolo vettore di interrogazione.
-title: Ricerca a vettore singolo
+title: Ricerca per singolo vettore
 ---

    Single-Vector Search

    Once data is inserted, the next step is to perform similarity searches on your collection in Milvus.

    Milvus supports two types of search, depending on the number of vector fields in your collection:

    • Single-vector search: If your collection has only one vector field, use the search() method to find the most similar entities. This method compares the query vector with the existing vectors in the collection and returns the IDs of the closest matches along with their distances. Optionally, it can also return the vector values and metadata of the results.
    • Hybrid search: For collections with two or more vector fields, use the hybrid_search() method. This method performs multiple Approximate Nearest Neighbor (ANN) search requests and combines the results to return the most relevant matches after reranking.

    This guide focuses on how to perform a single-vector search in Milvus. For details on hybrid search, refer to Hybrid Search.

    @@ -440,7 +440,7 @@

    When sending a search request, you can provide one or more vector values representing your query embeddings, and a limit value indicating the number of results to return.

    Depending on your data and your query vector, you may get fewer than limit results. This happens when limit is larger than the number of possible matching vectors for your query.

    Single-vector search is the simplest form of search operation in Milvus, designed to find the vectors most similar to a given query vector.

    To perform a single-vector search, specify the target collection name, the query vector, and the desired number of results (limit). This operation returns a result set comprising the most similar vectors, their IDs, and their distances from the query vector.
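    As a minimal sketch in Python (assuming a local Milvus at http://localhost:19530 and the quick_setup collection with 5-dimensional vectors used throughout this guide), a basic single-vector search looks like this:

```python
from pymilvus import MilvusClient

# Connect to the Milvus server (local address is an assumption)
client = MilvusClient(uri="http://localhost:19530")

# Single-vector search: one query vector, top-3 most similar entities
res = client.search(
    collection_name="quick_setup",           # target collection
    data=[[0.35, -0.6, 0.18, -0.26, 0.9]],   # one 5-dimensional query vector
    limit=3,                                  # number of results to return
    search_params={"metric_type": "IP", "params": {}},
)

# Each hit exposes the matched entity's ID and its distance from the query
for hit in res[0]:
    print(hit["id"], hit["distance"])
```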

    @@ -502,7 +502,7 @@

    limit: The total number of entities to return. You can use this parameter in combination with offset in param to enable pagination. The sum of this value and offset in param must be less than 16,384.

    @@ -528,7 +528,7 @@

    topK: The number of records to return in the search result. This parameter uses the same syntax as the limit parameter, so you should set only one of the two. You can use this parameter in combination with offset in param to enable pagination. The sum of this value and offset in param must be less than 16,384.
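    To illustrate the pagination rule above, here is a hedged Python sketch that fetches a second page of results by pairing limit with an offset inside the search parameters; the collection name and values are assumptions consistent with this guide:

```python
from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")  # assumed local deployment

query_vector = [0.35, -0.6, 0.18, -0.26, 0.9]

# Page 2: skip the first 5 hits and return the next 5.
# Remember: the sum of limit and offset must stay below 16,384.
res = client.search(
    collection_name="quick_setup",
    data=[query_vector],
    limit=5,                 # page size
    search_params={
        "metric_type": "IP",
        "params": {},
        "offset": 5,         # number of leading hits to skip
    },
)
```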
    @@ -1220,7 +1220,7 @@
    The data in red differs from that in blue. Therefore, the search results are confined to the specified partition, reflecting the unique characteristics and data distribution of that subset.
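    For reference, here is a hedged Python sketch of a partition-scoped search (the collection and partition names are carried over from the examples above; this mirrors, rather than replaces, the snippets shown in this guide):

```python
from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")  # assumed local deployment

# Only entities stored in the "red" partition are candidates for this search
res = client.search(
    collection_name="quick_setup",
    data=[[0.35, -0.6, 0.18, -0.26, 0.9]],  # illustrative query vector
    limit=5,
    partition_names=["red"],
)
```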

    Search with output fields

    Searching with output fields lets you specify which attributes or fields of the matched vectors should be included in the search results.

    You can specify output_fields in a request to return results with specific fields.

    Here is an example of returning results with the color attribute values:

    @@ -1357,11 +1357,11 @@

    Filtered search applies scalar filters to vector searches, allowing you to refine the search results based on specific criteria. You can find more about filter expressions in Boolean Expression Rules, and examples in Get & Scalar Query.

    Use the like operator

    The like operator enhances string searches by evaluating patterns that include prefixes, infixes, and suffixes:

    • Prefix matching: to find values that start with a specific prefix, use the syntax 'like "prefix%"'.
    • Infix matching: to find values that contain a specific sequence of characters anywhere within the string, use the syntax 'like "%infix%"'.
    • Suffix matching: to find values that end with a specific suffix, use the syntax 'like "%suffix"'.

    For single-character matching, the underscore (_) acts as a wildcard for exactly one character, e.g., 'like "y_llow"'.
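    A hedged sketch of these patterns in Python (field values such as "red" and "yellow" are assumptions carried over from the sample data above):

```python
from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")  # assumed local deployment
query_vector = [0.35, -0.6, 0.18, -0.26, 0.9]

# One filter expression per pattern supported by the like operator
patterns = [
    'color like "red%"',    # prefix match: values starting with "red"
    'color like "%ll%"',    # infix match: values containing "ll" (e.g. "yellow")
    'color like "%ow"',     # suffix match: values ending with "ow"
    'color like "y_llow"',  # "_" matches exactly one character
]

for expr in patterns:
    res = client.search(
        collection_name="quick_setup",
        data=[query_vector],
        limit=5,
        filter=expr,
        output_fields=["color"],
    )
    print(expr, "->", [hit["entity"]["color"] for hit in res[0]])
```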

    @@ -1697,7 +1697,7 @@

    | Metric type | Behavior | Range search setting |
    | --- | --- | --- |
    | L2 | Smaller L2 distances indicate higher similarity. | To exclude the closest vectors from results, ensure that: range_filter <= distance < radius |
    | IP | Larger IP distances indicate higher similarity. | To exclude the closest vectors from results, ensure that: radius < distance <= range_filter |
    | COSINE | A larger cosine value indicates higher similarity. | To exclude the closest vectors from results, ensure that: radius < distance <= range_filter |
    | JACCARD | Smaller Jaccard distances indicate higher similarity. | To exclude the closest vectors from results, ensure that: range_filter <= distance < radius |
    | HAMMING | Smaller Hamming distances indicate higher similarity. | To exclude the closest vectors from results, ensure that: range_filter <= distance < radius |
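    Following the table above, here is a hedged Python sketch of a range search with the IP metric; the radius and range_filter values are illustrative:

```python
from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")  # assumed local deployment

# With IP, larger distances mean higher similarity, so returned hits satisfy:
# radius < distance <= range_filter
search_params = {
    "metric_type": "IP",
    "params": {
        "radius": 0.8,        # lower bound of the search circle
        "range_filter": 1.0,  # upper bound of the search circle
    },
}

res = client.search(
    collection_name="quick_setup",
    data=[[0.35, -0.6, 0.18, -0.26, 0.9]],
    limit=3,
    search_params=search_params,
)
```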

    @@ -1717,8 +1717,9 @@

    In Milvus, grouping the search by a specific field can avoid redundancy of entries with the same field value in the results, so you can obtain a varied set of results for that field.

    In Milvus, grouping search is designed to improve the comprehensiveness and accuracy of search results.

    Consider a scenario in RAG, where a corpus of documents is split into passages and each passage is represented by a vector embedding. Users want to find the most relevant passages to prompt an LLM accurately. Milvus' regular search can meet this requirement, but it may yield heavily skewed and biased results: most of the passages may come from only a few documents, so the comprehensiveness of the search results is poor. This can seriously harm the accuracy, or even the correctness, of the answers the LLM gives and degrade the experience of LLM users.

    Grouping search can solve this problem effectively. By passing a group_by_field and a group_size, Milvus users can split the search results into several groups and ensure that the number of entities from each group does not exceed the given group_size. This feature can significantly increase the comprehensiveness and fairness of search results, noticeably improving the quality of LLM output.

    Here is example code for grouping search results by field:

    # Connect to Milvus
     client = MilvusClient(uri='http://localhost:19530') # Milvus server address
    @@ -1734,21 +1735,26 @@ res = client.search(
         "metric_type": "L2",
         "params": {"nprobe": 10},
         }, # Search parameters
    -    limit=10, # Max. number of search results to return
    +    limit=5, # Max. number of groups to return
         group_by_field="doc_id", # Group results by document ID
    +    group_size=2, # return at most 2 passages per document; the default value is 1
    +    group_strict_size=True, # ensure every group contains exactly 2 passages
         output_fields=["doc_id", "passage_id"]
     )
     
     # Retrieve the values in the `doc_id` column
     doc_ids = [result['entity']['doc_id'] for result in res[0]]
    +passage_ids = [result['entity']['passage_id'] for result in res[0]]
     
     print(doc_ids)
    +print(passage_ids)
     

    The output is similar to the following:

    -[5, 10, 1, 7, 9, 6, 3, 4, 8, 2]
    +["doc_11", "doc_11", "doc_7", "doc_7", "doc_3", "doc_3", "doc_2", "doc_2", "doc_8", "doc_8"]
    +[5, 10, 11, 10, 9, 6, 5, 4, 9, 2]
     

    In the given output, you can observe that exactly two passages are retrieved for each document, and a total of 5 documents collectively make up the results.

    For comparison, let's remove the group-related parameters and conduct a regular search:

    # Connect to Milvus
     client = MilvusClient(uri='http://localhost:19530') # Milvus server address
     
    @@ -1763,27 +1769,33 @@ res = client.search(
         "metric_type": "L2",
         "params": {"nprobe": 10},
         }, # Search parameters
    -    limit=10, # Max. number of search results to return
    +    limit=5, # Max. number of search results to return
         # group_by_field="doc_id", # Group results by document ID
    +    # group_size=2, 
    +    # group_strict_size=True,
         output_fields=["doc_id", "passage_id"]
     )
     
     # Retrieve the values in the `doc_id` column
     doc_ids = [result['entity']['doc_id'] for result in res[0]]
    +passage_ids = [result['entity']['passage_id'] for result in res[0]]
     
     print(doc_ids)
    +print(passage_ids)
     

    The output is similar to the following:

    -[1, 10, 3, 10, 1, 9, 4, 4, 8, 6]
    +["doc_11", "doc_11", "doc_11", "doc_11", "doc_11"]
    +[1, 10, 3, 12, 9]
     
    In the given output, you can observe that "doc_11" completely dominated the search results, overshadowing the high-quality passages from the other documents, which can be a poor prompt for the LLM.

    One more point to note: by default, grouping_search returns results instantly when it has enough groups, which may leave the number of results in each group short of group_size. If you care about the number of results in each group, set group_strict_size=True as shown in the code above. This makes Milvus strive to obtain enough results for every group, at a slight cost to performance.

    Limitations

    • Indexing: This grouping feature works only for collections indexed with the HNSW, IVF_FLAT, or FLAT index type. For more information, refer to In-memory Index.

    • Vector: Currently, grouping search does not support vector fields of the BINARY_VECTOR type. For more information on data types, refer to Supported data types.

    • Field: Currently, grouping search allows only a single column; you cannot specify multiple field names in the group_by_field configuration. Additionally, grouping search is incompatible with fields of the JSON, FLOAT, DOUBLE, ARRAY, or vector data types.

    • Performance impact: Bear in mind that performance degrades as the number of query vectors grows. Taking a cluster with 2 CPU cores and 8 GB of memory as an example, the execution time of grouping search increases proportionally with the number of input query vectors.

    • Functionality: Currently, grouping search is not supported by range search, search iterators, or hybrid search.

    Search parameters

    Milvus provides search and query iterators for iterating through a large volume of entities. Since Milvus limits TopK to 16,384, users can use iterators to return large numbers of entities, or even all of the entities, in a collection in batch mode.

    Overview

    Iterators are an efficient tool for scanning an entire collection or iterating through a large volume of entities by specifying primary key values or a filter expression. Compared with a search or query call using offset and limit parameters, iterators are more efficient and scalable (see the sketch after the benefits list below).

    Benefits of using iterators

    • Simplicity: Eliminates complex offset and limit settings.

    • Efficiency: Provides scalable data retrieval by fetching only the data in need.

    • Consistency: Ensures a consistent dataset size with boolean filters.
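    As referenced above, here is a hedged sketch of the iterator pattern using the pymilvus ORM; the collection name, field names, and batch size are assumptions mirroring the snippet changed in the hunk below:

```python
from pymilvus import connections, Collection

# Connect and open the collection (assumed local deployment)
connections.connect(uri="http://localhost:19530")
collection = Collection("quick_setup")

iterator = collection.search_iterator(
    data=[[0.35, -0.6, 0.18, -0.26, 0.9]],   # one query vector
    anns_field="vector",                      # vector field to search
    param={"metric_type": "IP", "params": {"nprobe": 10}},
    batch_size=10,       # entities fetched per page
    limit=300,           # total entities to iterate over
    output_fields=["color_tag"],
)

results = []
while True:
    page = iterator.next()
    if not page:         # an empty page signals the end of iteration
        iterator.close()
        break
    results.extend(page)
```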

    @@ -64,8 +64,8 @@ title: Con gli iteratori

    The following preparation step connects to Milvus and inserts randomly generated entities into a collection.

    Step 1: Create a collection

    Use MilvusClient to connect to the Milvus server and create_collection() to create a collection.
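    A minimal sketch of that step (the collection name and dimension are assumptions consistent with this guide):

```python
from pymilvus import MilvusClient

# Connect to the Milvus server (assumed local deployment)
client = MilvusClient(uri="http://localhost:19530")

# Quick-setup mode: an "id" primary key and a "vector" field are created implicitly
client.create_collection(
    collection_name="quick_setup",
    dimension=5,
    metric_type="IP",
)
```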

    @@ -266,8 +266,9 @@ iterator = collection.search_iterator( batch_size=10, param=search_params, output_fields=["color_tag"], - limit=3 + limit=300 ) +# search 300 entities totally with 10 entities per page results = [] diff --git a/localization/v2.4.x/site/it/userGuide/tools/cli_commands.json b/localization/v2.4.x/site/it/userGuide/tools/cli_commands.json index 03f509e24..54061a93d 100644 --- a/localization/v2.4.x/site/it/userGuide/tools/cli_commands.json +++ b/localization/v2.4.x/site/it/userGuide/tools/cli_commands.json @@ -1 +1 @@ -{"codeList":["clear\n","connect [-uri (text)] [-t (text)]\n","milvus_cli > connect -uri http://127.0.0.1:19530 \n","create database -db (text) \n","milvus_cli > create database -db testdb\n","use database -db (text) \n","milvus_cli > use database -db testdb\n","list databases \n","milvus_cli > list databases\n","delete database -db (text) \n","milvus_cli > delete database -db testdb\n","create user -u (text) -p (text)\n","milvus_cli > create user -u zilliz -p zilliz\n","create alias -c (text) -a (text) [-A] \n","milvus_cli > create alias -c car -a carAlias1\n","milvus_cli > create alias -c car2 -A -a carAlias1\n","create collection -c (text) -f (text) -p (text) [-a] [-d (text)]\n","## For array field: --schema-field support :::(:if Varchar)\n\nmilvus_cli > create collection -c car -f id:INT64:primary_field -f vector:FLOAT_VECTOR:128 -f color:INT64:color -f brand:ARRAY:64:VARCHAR:128 -p id -A -d 'car_collection'\n","create partition -c (text) -p (text) [-d (text)]\n","milvus_cli > create partition -c car -p new_partition -d test_add_partition\n","create index\n","milvus_cli > create index\n\nCollection name (car, car2): car2\n\nThe name of the field to create an index for (vector): vector\n\nIndex name: vectorIndex\n\n# Default is ''\nIndex type FLAT, IVF_FLAT, IVF_SQ8, IVF_PQ, RNSG, HNSW, ANNOY, AUTOINDEX, DISKANN, GPU_IVF_FLAT, GPU_IVF_PQ, SPARSE_INVERTED_INDEX, SPARSE_WAND, SCANN, STL_SORT, Trie, INVERTED, ) []: IVF_FLAT \n\n# Default is ''\nIndex metric type (L2, IP, HAMMING, TANIMOTO, COSINE, ) []: \n\nTimeout []:\n","delete user -u (text)\n","milvus_cli > delete user -u zilliz\n","delete alias -a (text) \n","delete collection -c (text) \n","milvus_cli > delete collection -c car\n","delete entities -c (text) -p (text) \n","milvus_cli > delete entities -c car\n\nThe expression to specify entities to be deleted, such as \"film_id in [ 0, 1 ]\": film_id in [ 0, 1 ]\n\nYou are trying to delete the entities of collection. This action cannot be undone!\n\nDo you want to continue? [y/N]: y\n","delete partition -c (text) -p (text)\n","milvus_cli > delete partition -c car -p new_partition\n","delete index -c (text) -in (text)\n","milvus_cli > delete index -c car -in indexName\n","show collection -c (text)\n","milvus_cli > show collection -c test_collection_insert\n","show partition -c (text) -p (text)\n","milvus_cli > show partition -c test_collection_insert -p _default\n","show index -c (text) -in (text)\n","milvus_cli > show index -c test_collection -in index_name\n","exit\n","help \n","import -c (text)[-p (text)] \n","milvus_cli > import -c car 'examples/import_csv/vectors.csv'\n\nReading csv file... 
[####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","milvus_cli > import -c car 'https://raw.githubusercontent.com/milvus-\nio/milvus_cli/main/examples/import_csv/vectors.csv'\n\nReading file from remote URL.\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","list users\n","list collections\n","list indexes -c (text)\n","list partitions -c (text)\n","load -c (text) [-p (text)]\n","query\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id in [ 428960801420883491, 428960801420883492,\n428960801420883493 ]\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: color, brand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id > 428960801420883491\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: id, color,\nbrand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","release -c (text) [-p (text)]\n","search\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file\nout headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. 
If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers):\n [[0.71, 0.76, 0.17, 0.13, 0.42, 0.07, 0.15, 0.67, 0.58, 0.02, 0.39, 0.47, 0.58, 0.88, 0.73, 0.31, 0.23, 0.57, 0.33, 0.2, 0.03, 0.43, 0.78, 0.49, 0.17, 0.56, 0.76, 0.54, 0.45, 0.46, 0.05, 0.1, 0.43, 0.63, 0.29, 0.44, 0.65, 0.01, 0.35, 0.46, 0.66, 0.7, 0.88, 0.07, 0.49, 0.92, 0.57, 0.5, 0.16, 0.77, 0.98, 0.1, 0.44, 0.88, 0.82, 0.16, 0.67, 0.63, 0.57, 0.55, 0.95, 0.13, 0.64, 0.43, 0.71, 0.81, 0.43, 0.65, 0.76, 0.7, 0.05, 0.24, 0.03, 0.9, 0.46, 0.28, 0.92, 0.25, 0.97, 0.79, 0.73, 0.97, 0.49, 0.28, 0.64, 0.19, 0.23, 0.51, 0.09, 0.1, 0.53, 0.03, 0.23, 0.94, 0.87, 0.14, 0.42, 0.82, 0.91, 0.11, 0.91, 0.37, 0.26, 0.6, 0.89, 0.6, 0.32, 0.11, 0.98, 0.67, 0.12, 0.66, 0.47, 0.02, 0.15, 0.6, 0.64, 0.57, 0.14, 0.81, 0.75, 0.11, 0.49, 0.78, 0.16, 0.63, 0.57, 0.18]]\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, car2): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []:\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []:\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. 
If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","list connections \n","show index_progress -c (text) [-i (text)]\n","show loading_progress -c (text) [-p (text)]\n","version\n","$ milvus_cli --version\nMilvus_CLI v0.4.0\n"],"headingContent":"","anchorList":[{"label":"Riferimento comandi Milvus_CLI","href":"MilvusCLI-Command-Reference","type":1,"isActive":false},{"label":"Cancella","href":"clear","type":2,"isActive":false},{"label":"connetti","href":"connect","type":2,"isActive":false},{"label":"crea Database","href":"create-Database","type":2,"isActive":false},{"label":"usa database","href":"use-Database","type":2,"isActive":false},{"label":"Elenco dei database","href":"List-Databases","type":2,"isActive":false},{"label":"Cancellare un database","href":"Delete-Database","type":2,"isActive":false},{"label":"crea utente","href":"create-user","type":2,"isActive":false},{"label":"crea alias","href":"create-alias","type":2,"isActive":false},{"label":"crea collezione","href":"create-collection","type":2,"isActive":false},{"label":"crea partizione","href":"create-partition","type":2,"isActive":false},{"label":"crea indice","href":"create-index","type":2,"isActive":false},{"label":"elimina utente","href":"delete-user","type":2,"isActive":false},{"label":"elimina alias","href":"delete-alias","type":2,"isActive":false},{"label":"elimina raccolta","href":"delete-collection","type":2,"isActive":false},{"label":"elimina entità","href":"delete-entities","type":2,"isActive":false},{"label":"elimina partizione","href":"delete-partition","type":2,"isActive":false},{"label":"elimina indice","href":"delete-index","type":2,"isActive":false},{"label":"mostra collezione","href":"show-collection","type":2,"isActive":false},{"label":"mostra partizione","href":"show-partition","type":2,"isActive":false},{"label":"mostra indice","href":"show-index","type":2,"isActive":false},{"label":"Esci","href":"exit","type":2,"isActive":false},{"label":"Aiuto","href":"help","type":2,"isActive":false},{"label":"Importazione","href":"import","type":2,"isActive":false},{"label":"elenca utenti","href":"list-users","type":2,"isActive":false},{"label":"elenca collezioni","href":"list-collections","type":2,"isActive":false},{"label":"elenca indici","href":"list-indexes","type":2,"isActive":false},{"label":"elenca partizioni","href":"list-partitions","type":2,"isActive":false},{"label":"Carica","href":"load","type":2,"isActive":false},{"label":"query","href":"query","type":2,"isActive":false},{"label":"rilasciare","href":"release","type":2,"isActive":false},{"label":"ricerca","href":"search","type":2,"isActive":false},{"label":"Elenco delle connessioni","href":"List-connection","type":2,"isActive":false},{"label":"show index_progress","href":"show-indexprogress","type":2,"isActive":false},{"label":"mostra avanzamento_carico","href":"show-loadingprogress","type":2,"isActive":false},{"label":"versione","href":"version","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["clear\n","connect [-uri (text)] [-t (text)]\n","milvus_cli > connect -uri http://127.0.0.1:19530 \n","create database -db (text) \n","milvus_cli > create database -db testdb\n","use database -db (text) \n","milvus_cli > use database -db testdb\n","list databases \n","milvus_cli > list databases\n","delete database -db (text) \n","milvus_cli > delete database -db testdb\n","create user -u (text) -p (text)\n","milvus_cli > create user -u zilliz -p zilliz\n","create alias -c (text) -a (text) [-A] 
\n","milvus_cli > create alias -c car -a carAlias1\n","milvus_cli > create alias -c car2 -A -a carAlias1\n","create collection -c (text) -f (text) -p (text) [-a] [-d (text)]\n","## For array field: --schema-field support :::(:if Varchar)\n\nmilvus_cli > create collection -c car -f id:INT64:primary_field -f vector:FLOAT_VECTOR:128 -f color:INT64:color -f brand:ARRAY:64:VARCHAR:128 -p id -A -d 'car_collection'\n","create partition -c (text) -p (text) [-d (text)]\n","milvus_cli > create partition -c car -p new_partition -d test_add_partition\n","create index\n","milvus_cli > create index\n\nCollection name (car, car2): car2\n\nThe name of the field to create an index for (vector): vector\n\nIndex name: vectorIndex\n\n# Default is ''\nIndex type FLAT, IVF_FLAT, IVF_SQ8, IVF_PQ, RNSG, HNSW, ANNOY, AUTOINDEX, DISKANN, GPU_IVF_FLAT, GPU_IVF_PQ, SPARSE_INVERTED_INDEX, SPARSE_WAND, SCANN, STL_SORT, Trie, INVERTED, ) []: IVF_FLAT \n\n# Default is ''\nIndex metric type (L2, IP, HAMMING, TANIMOTO, COSINE, ) []: \n\nTimeout []:\n","delete user -u (text)\n","milvus_cli > delete user -u zilliz\n","delete alias -a (text) \n","delete collection -c (text) \n","milvus_cli > delete collection -c car\n","delete entities -c (text) -p (text) \n","milvus_cli > delete entities -c car\n\nThe expression to specify entities to be deleted, such as \"film_id in [ 0, 1 ]\": film_id in [ 0, 1 ]\n\nYou are trying to delete the entities of collection. This action cannot be undone!\n\nDo you want to continue? [y/N]: y\n","delete partition -c (text) -p (text)\n","milvus_cli > delete partition -c car -p new_partition\n","delete index -c (text) -in (text)\n","milvus_cli > delete index -c car -in indexName\n","show collection -c (text)\n","milvus_cli > show collection -c test_collection_insert\n","show partition -c (text) -p (text)\n","milvus_cli > show partition -c test_collection_insert -p _default\n","show index -c (text) -in (text)\n","milvus_cli > show index -c test_collection -in index_name\n","exit\n","help \n","import -c (text)[-p (text)] \n","milvus_cli > import -c car 'examples/import_csv/vectors.csv'\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","milvus_cli > import -c car 'https://raw.githubusercontent.com/milvus-\nio/milvus_cli/main/examples/import_csv/vectors.csv'\n\nReading file from remote URL.\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","list users\n","list collections\n","list indexes -c (text)\n","list partitions -c (text)\n","load -c (text) [-p (text)]\n","query\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id in [ 428960801420883491, 428960801420883492,\n428960801420883493 ]\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: color, brand\n\ntimeout []:\n\nGuarantee timestamp. 
This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id > 428960801420883491\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: id, color,\nbrand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","release -c (text) [-p (text)]\n","search\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file\nout headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers):\n [[0.71, 0.76, 0.17, 0.13, 0.42, 0.07, 0.15, 0.67, 0.58, 0.02, 0.39, 0.47, 0.58, 0.88, 0.73, 0.31, 0.23, 0.57, 0.33, 0.2, 0.03, 0.43, 0.78, 0.49, 0.17, 0.56, 0.76, 0.54, 0.45, 0.46, 0.05, 0.1, 0.43, 0.63, 0.29, 0.44, 0.65, 0.01, 0.35, 0.46, 0.66, 0.7, 0.88, 0.07, 0.49, 0.92, 0.57, 0.5, 0.16, 0.77, 0.98, 0.1, 0.44, 0.88, 0.82, 0.16, 0.67, 0.63, 0.57, 0.55, 0.95, 0.13, 0.64, 0.43, 0.71, 0.81, 0.43, 0.65, 0.76, 0.7, 0.05, 0.24, 0.03, 0.9, 0.46, 0.28, 0.92, 0.25, 0.97, 0.79, 0.73, 0.97, 0.49, 0.28, 0.64, 0.19, 0.23, 0.51, 0.09, 0.1, 0.53, 0.03, 0.23, 0.94, 0.87, 0.14, 0.42, 0.82, 0.91, 0.11, 0.91, 0.37, 0.26, 0.6, 0.89, 0.6, 0.32, 0.11, 0.98, 0.67, 0.12, 0.66, 0.47, 0.02, 0.15, 0.6, 0.64, 0.57, 0.14, 0.81, 0.75, 0.11, 0.49, 0.78, 0.16, 0.63, 0.57, 0.18]]\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. 
If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, car2): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []:\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []:\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","list connections \n","show index_progress -c (text) [-i (text)]\n","show loading_progress -c (text) [-p (text)]\n","version\n","$ milvus_cli --version\nMilvus_CLI v0.4.0\n"],"headingContent":"Milvus_CLI Command Reference","anchorList":[{"label":"Riferimento comandi Milvus_CLI","href":"MilvusCLI-Command-Reference","type":1,"isActive":false},{"label":"Cancella","href":"clear","type":2,"isActive":false},{"label":"connetti","href":"connect","type":2,"isActive":false},{"label":"crea Database","href":"create-Database","type":2,"isActive":false},{"label":"usa database","href":"use-Database","type":2,"isActive":false},{"label":"elenca database","href":"list-Databases","type":2,"isActive":false},{"label":"Elimina database","href":"delete-Database","type":2,"isActive":false},{"label":"crea utente","href":"create-user","type":2,"isActive":false},{"label":"crea alias","href":"create-alias","type":2,"isActive":false},{"label":"crea collezione","href":"create-collection","type":2,"isActive":false},{"label":"crea partizione","href":"create-partition","type":2,"isActive":false},{"label":"crea indice","href":"create-index","type":2,"isActive":false},{"label":"elimina utente","href":"delete-user","type":2,"isActive":false},{"label":"elimina alias","href":"delete-alias","type":2,"isActive":false},{"label":"elimina raccolta","href":"delete-collection","type":2,"isActive":false},{"label":"elimina entità","href":"delete-entities","type":2,"isActive":false},{"label":"elimina partizione","href":"delete-partition","type":2,"isActive":false},{"label":"elimina indice","href":"delete-index","type":2,"isActive":false},{"label":"mostra collezione","href":"show-collection","type":2,"isActive":false},{"label":"mostra partizione","href":"show-partition","type":2,"isActive":false},{"label":"mostra indice","href":"show-index","type":2,"isActive":false},{"label":"Esci","href":"exit","type":2,"isActive":false},{"label":"Aiuto","href":"help","type":2,"isActive":false},{"label":"Importazione","href":"import","type":2,"isActive":false},{"label":"elenca utenti","href":"list-users","type":2,"isActive":false},{"label":"elenca collezioni","href":"list-collections","type":2,"isActive":false},{"label":"elenca indici","href":"list-indexes","type":2,"isActive":false},{"label":"elenca 
partizioni","href":"list-partitions","type":2,"isActive":false},{"label":"Carica","href":"load","type":2,"isActive":false},{"label":"query","href":"query","type":2,"isActive":false},{"label":"rilasciare","href":"release","type":2,"isActive":false},{"label":"ricerca","href":"search","type":2,"isActive":false},{"label":"elenco connessioni","href":"list-connection","type":2,"isActive":false},{"label":"show index_progress","href":"show-indexprogress","type":2,"isActive":false},{"label":"mostra avanzamento_caricamento","href":"show-loadingprogress","type":2,"isActive":false},{"label":"versione","href":"version","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/userGuide/tools/cli_commands.md b/localization/v2.4.x/site/it/userGuide/tools/cli_commands.md index f962dd7ca..5c22df076 100644 --- a/localization/v2.4.x/site/it/userGuide/tools/cli_commands.md +++ b/localization/v2.4.x/site/it/userGuide/tools/cli_commands.md @@ -1,7 +1,7 @@ --- id: cli_commands.md summary: Interagire con Milvus utilizzando i comandi. -title: Riferimento dei comandi Milvus_CLI +title: Riferimento comandi Milvus_CLI ---

    Milvus_CLI Command Reference

    Example 1

    The following example uses the database testdb in Milvus.

    milvus_cli > use database -db testdb
     
    -

    List Databases

    Example 1

    The following example lists the databases in Milvus.

    milvus_cli > list databases
     
    -

    Delete Database
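    Based on the codeList entries above, the syntax and a usage example for this command are as follows (testdb is the illustrative database name used throughout this page):

    delete database -db (text)

    milvus_cli > delete database -db testdb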

    -

    To perform a search on a non-indexed collection and request the required input:

    +

    To perform a search on a non-indexed collection and be prompted for the required input:

    milvus_cli > search
     
     Collection name (car, car2): car
    @@ -1165,7 +1165,7 @@ timeout []:
     Guarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:
     
     
    -

    List connections
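    Syntax, as given in the codeList above:

    list connections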

    Shows the loading progress of entities.

    +

    Displays the loading progress of a collection.

    Syntax

    show loading_progress -c (text) [-p (text)]
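    For example, assuming a collection named test_collection has been loaded (the collection name here is illustrative):

    milvus_cli > show loading_progress -c test_collection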
     
    diff --git a/localization/v2.4.x/site/it/userGuide/tools/milvus_backup_overview.json b/localization/v2.4.x/site/it/userGuide/tools/milvus_backup_overview.json index f1079410f..fa3da1a5c 100644 --- a/localization/v2.4.x/site/it/userGuide/tools/milvus_backup_overview.json +++ b/localization/v2.4.x/site/it/userGuide/tools/milvus_backup_overview.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"","anchorList":[{"label":"Milvus Backup","href":"Milvus-Backup","type":1,"isActive":false},{"label":"Prerequisiti","href":"Prerequisites","type":2,"isActive":false},{"label":"Architettura","href":"Architecture","type":2,"isActive":false},{"label":"Ultima versione","href":"Latest-release","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Milvus Backup","anchorList":[{"label":"Milvus Backup","href":"Milvus-Backup","type":1,"isActive":false},{"label":"Prerequisiti","href":"Prerequisites","type":2,"isActive":false},{"label":"Architettura","href":"Architecture","type":2,"isActive":false},{"label":"Ultima versione","href":"Latest-release","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/userGuide/tools/milvus_backup_overview.md b/localization/v2.4.x/site/it/userGuide/tools/milvus_backup_overview.md index a29198f7f..f6c39e887 100644 --- a/localization/v2.4.x/site/it/userGuide/tools/milvus_backup_overview.md +++ b/localization/v2.4.x/site/it/userGuide/tools/milvus_backup_overview.md @@ -3,7 +3,7 @@ id: milvus_backup_overview.md summary: >- Milvus-Backup è uno strumento che consente agli utenti di eseguire il backup e il ripristino dei dati di Milvus. -title: Backup di Milvus +title: Milvus Backup ---

    Milvus Backup

    Before you start using Milvus Backup, make sure that

    +

    Before starting to use Milvus Backup, ensure that

    • The operating system is CentOS 7.5+ or Ubuntu LTS 18.04+,
    • the Go version is 1.20.2 or later.
    • @@ -79,5 +79,5 @@ title: Backup di Milvus > diff --git a/localization/v2.4.x/site/it/userGuide/use-partition-key.json b/localization/v2.4.x/site/it/userGuide/use-partition-key.json index e7630a3f7..ccf56d15d 100644 --- a/localization/v2.4.x/site/it/userGuide/use-partition-key.json +++ b/localization/v2.4.x/site/it/userGuide/use-partition-key.json @@ -1 +1 @@ -{"codeList":["import random, time\nfrom pymilvus import connections, MilvusClient, DataType\n\nSERVER_ADDR = \"http://localhost:19530\"\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=SERVER_ADDR\n)\n\n# 2. Create a collection\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n partition_key_field=\"color\",\n num_partitions=16 # Number of partitions. Defaults to 16.\n)\n\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\nschema.add_field(field_name=\"color\", datatype=DataType.VARCHAR, max_length=512)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.collection.request.AddFieldReq;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in customized setup mode\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder()\n .fieldName(\"id\")\n .dataType(DataType.Int64)\n .isPrimaryKey(true)\n .autoID(false)\n .build());\n\nschema.addField(AddFieldReq.builder()\n .fieldName(\"vector\")\n .dataType(DataType.FloatVector)\n .dimension(5)\n .build());\n \nschema.addField(AddFieldReq.builder()\n .fieldName(\"color\")\n .dataType(DataType.VarChar)\n .maxLength(512)\n .isPartitionKey(true)\n .build());\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\nasync function main() {\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address}); \n\n// 2. 
Create a collection\n// 2.1 Define fields\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n auto_id: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n {\n name: \"color\",\n data_type: DataType.VarChar,\n max_length: 512,\n is_partition_key: true\n }\n]\n","index_params = MilvusClient.prepare_index_params()\n\nindex_params.add_index(\n field_name=\"id\",\n index_type=\"STL_SORT\"\n)\n\nindex_params.add_index(\n field_name=\"color\",\n index_type=\"Trie\"\n)\n\nindex_params.add_index(\n field_name=\"vector\",\n index_type=\"IVF_FLAT\",\n metric_type=\"L2\",\n params={\"nlist\": 1024}\n)\n","// 2.3 Prepare index parameters\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.IP)\n .extraParams(Map.of(\"nlist\", 1024))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n","// 2.2 Prepare index parameters\nconst index_params = [{\n field_name: \"color\",\n index_type: \"Trie\"\n},{\n field_name: \"id\",\n index_type: \"STL_SORT\"\n},{\n field_name: \"vector\",\n index_type: \"IVF_FLAT\",\n metric_type: \"IP\",\n params: { nlist: 1024}\n}]\n","client.create_collection(\n collection_name=\"test_collection\",\n schema=schema,\n index_params=index_params\n)\n","// 2.4 Create a collection with schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n .collectionName(\"test_collection\")\n .collectionSchema(schema)\n .indexParams(indexParams) \n .build();\n\nclient.createCollection(customizedSetupReq);\n","// 2.3 Create a collection with fields and index parameters\nres = await client.createCollection({\n collection_name: \"test_collection\",\n fields: fields, \n index_params: index_params,\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n//\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n","// 3. Insert randomly generated vectors\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n int current_tag = rand.nextInt(8999) + 1000;\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"tag\", current_tag);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nSystem.out.println(JSONObject.toJSON(data.get(0))); \n","// 3. 
Insert randomly generated vectors \nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\nvar data = []\n\nfor (let i = 0; i < 1000; i++) {\n const current_color = colors[Math.floor(Math.random() * colors.length)]\n const current_tag = Math.floor(Math.random() * 8999 + 1000)\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n tag: current_tag,\n color_tag: `${current_color}_${current_tag}`\n })\n}\n\nconsole.log(data[0])\n","{\n id: 0,\n vector: [\n 0.1275656405044483,\n 0.47417858592773277,\n 0.13858264437643286,\n 0.2390904907020377,\n 0.8447862593689635\n ],\n color: 'blue',\n tag: 2064,\n color_tag: 'blue_2064'\n}\n","res = client.insert(\n collection_name=\"test_collection\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n","// 3.1 Insert data into the collection\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"test_collection\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n","res = await client.insert({\n collection_name: \"test_collection\",\n data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n","# 4. Search with partition key\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\n\nres = client.search(\n collection_name=\"test_collection\",\n data=query_vectors,\n filter=\"color == 'green'\",\n search_params={\"metric_type\": \"L2\", \"params\": {\"nprobe\": 10}},\n output_fields=[\"id\", \"color_tag\"],\n limit=3\n)\n\nprint(res)\n\n# Output\n#\n# [\n# [\n# {\n# \"id\": 970,\n# \"distance\": 0.5770174264907837,\n# \"entity\": {\n# \"id\": 970,\n# \"color_tag\": \"green_9828\"\n# }\n# },\n# {\n# \"id\": 115,\n# \"distance\": 0.6898155808448792,\n# \"entity\": {\n# \"id\": 115,\n# \"color_tag\": \"green_4073\"\n# }\n# },\n# {\n# \"id\": 899,\n# \"distance\": 0.7028976678848267,\n# \"entity\": {\n# \"id\": 899,\n# \"color_tag\": \"green_9897\"\n# }\n# }\n# ]\n# ]\n","// 4. Search with partition key\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"test_collection\")\n .data(query_vectors)\n .filter(\"color == \\\"green\\\"\")\n .topK(3)\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp)); \n\n// Output:\n// {\"searchResults\": [[\n// {\n// \"distance\": 1.0586997,\n// \"id\": 414,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.981384,\n// \"id\": 293,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.9548756,\n// \"id\": 325,\n// \"entity\": {}\n// }\n// ]]}\n","// 4. 
Search with partition key\nconst query_vectors = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"test_collection\",\n data: query_vectors,\n filter: \"color == 'green'\",\n output_fields: [\"color_tag\"],\n limit: 3\n})\n\nconsole.log(res.results)\n\n// Output\n// \n// [\n// { score: 2.402090549468994, id: '135', color_tag: 'green_2694' },\n// { score: 2.3938629627227783, id: '326', color_tag: 'green_7104' },\n// { score: 2.3235254287719727, id: '801', color_tag: 'green_3162' }\n// ]\n// \n"],"headingContent":"","anchorList":[{"label":"Use Partition Key","href":"Use-Partition-Key","type":1,"isActive":false},{"label":"Overview","href":"Overview","type":2,"isActive":false},{"label":"Enable partition key","href":"Enable-partition-key","type":2,"isActive":false},{"label":"List partitions","href":"List-partitions","type":2,"isActive":false},{"label":"Insert data","href":"Insert-data","type":2,"isActive":false},{"label":"Use partition key","href":"Use-partition-key","type":2,"isActive":false},{"label":"Typical use cases","href":"Typical-use-cases","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["import random, time\nfrom pymilvus import connections, MilvusClient, DataType\n\nSERVER_ADDR = \"http://localhost:19530\"\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=SERVER_ADDR\n)\n\n# 2. Create a collection\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n partition_key_field=\"color\",\n num_partitions=64 # Number of partitions. Defaults to 64.\n)\n\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\nschema.add_field(field_name=\"color\", datatype=DataType.VARCHAR, max_length=512)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.collection.request.AddFieldReq;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in customized setup mode\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder()\n .fieldName(\"id\")\n .dataType(DataType.Int64)\n .isPrimaryKey(true)\n .autoID(false)\n .build());\n\nschema.addField(AddFieldReq.builder()\n .fieldName(\"vector\")\n .dataType(DataType.FloatVector)\n .dimension(5)\n .build());\n \nschema.addField(AddFieldReq.builder()\n .fieldName(\"color\")\n .dataType(DataType.VarChar)\n .maxLength(512)\n .isPartitionKey(true)\n .build());\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\nasync function main() {\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address}); \n\n// 2. 
Create a collection\n// 2.1 Define fields\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n auto_id: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n {\n name: \"color\",\n data_type: DataType.VarChar,\n max_length: 512,\n is_partition_key: true\n }\n]\n","index_params = MilvusClient.prepare_index_params()\n\nindex_params.add_index(\n field_name=\"id\",\n index_type=\"STL_SORT\"\n)\n\nindex_params.add_index(\n field_name=\"color\",\n index_type=\"Trie\"\n)\n\nindex_params.add_index(\n field_name=\"vector\",\n index_type=\"IVF_FLAT\",\n metric_type=\"L2\",\n params={\"nlist\": 1024}\n)\n","// 2.3 Prepare index parameters\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.IP)\n .extraParams(Map.of(\"nlist\", 1024))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n","// 2.2 Prepare index parameters\nconst index_params = [{\n field_name: \"color\",\n index_type: \"Trie\"\n},{\n field_name: \"id\",\n index_type: \"STL_SORT\"\n},{\n field_name: \"vector\",\n index_type: \"IVF_FLAT\",\n metric_type: \"IP\",\n params: { nlist: 1024}\n}]\n","client.create_collection(\n collection_name=\"test_collection\",\n schema=schema,\n index_params=index_params\n)\n","// 2.4 Create a collection with schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n .collectionName(\"test_collection\")\n .collectionSchema(schema)\n .indexParams(indexParams) \n .build();\n\nclient.createCollection(customizedSetupReq);\n","// 2.3 Create a collection with fields and index parameters\nres = await client.createCollection({\n collection_name: \"test_collection\",\n fields: fields, \n index_params: index_params,\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n//\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n","// 3. Insert randomly generated vectors\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n int current_tag = rand.nextInt(8999) + 1000;\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"tag\", current_tag);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nSystem.out.println(JSONObject.toJSON(data.get(0))); \n","// 3. 
Insert randomly generated vectors \nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\nvar data = []\n\nfor (let i = 0; i < 1000; i++) {\n const current_color = colors[Math.floor(Math.random() * colors.length)]\n const current_tag = Math.floor(Math.random() * 8999 + 1000)\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n tag: current_tag,\n color_tag: `${current_color}_${current_tag}`\n })\n}\n\nconsole.log(data[0])\n","{\n id: 0,\n vector: [\n 0.1275656405044483,\n 0.47417858592773277,\n 0.13858264437643286,\n 0.2390904907020377,\n 0.8447862593689635\n ],\n color: 'blue',\n tag: 2064,\n color_tag: 'blue_2064'\n}\n","res = client.insert(\n collection_name=\"test_collection\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n","// 3.1 Insert data into the collection\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"test_collection\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n","res = await client.insert({\n collection_name: \"test_collection\",\n data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n","# 4. Search with partition key\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\n\nres = client.search(\n collection_name=\"test_collection\",\n data=query_vectors,\n filter=\"color == 'green'\",\n search_params={\"metric_type\": \"L2\", \"params\": {\"nprobe\": 10}},\n output_fields=[\"id\", \"color_tag\"],\n limit=3\n)\n\nprint(res)\n\n# Output\n#\n# [\n# [\n# {\n# \"id\": 970,\n# \"distance\": 0.5770174264907837,\n# \"entity\": {\n# \"id\": 970,\n# \"color_tag\": \"green_9828\"\n# }\n# },\n# {\n# \"id\": 115,\n# \"distance\": 0.6898155808448792,\n# \"entity\": {\n# \"id\": 115,\n# \"color_tag\": \"green_4073\"\n# }\n# },\n# {\n# \"id\": 899,\n# \"distance\": 0.7028976678848267,\n# \"entity\": {\n# \"id\": 899,\n# \"color_tag\": \"green_9897\"\n# }\n# }\n# ]\n# ]\n","// 4. Search with partition key\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"test_collection\")\n .data(query_vectors)\n .filter(\"color == \\\"green\\\"\")\n .topK(3)\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp)); \n\n// Output:\n// {\"searchResults\": [[\n// {\n// \"distance\": 1.0586997,\n// \"id\": 414,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.981384,\n// \"id\": 293,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.9548756,\n// \"id\": 325,\n// \"entity\": {}\n// }\n// ]]}\n","// 4. 
Search with partition key\nconst query_vectors = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"test_collection\",\n data: query_vectors,\n filter: \"color == 'green'\",\n output_fields: [\"color_tag\"],\n limit: 3\n})\n\nconsole.log(res.results)\n\n// Output\n// \n// [\n// { score: 2.402090549468994, id: '135', color_tag: 'green_2694' },\n// { score: 2.3938629627227783, id: '326', color_tag: 'green_7104' },\n// { score: 2.3235254287719727, id: '801', color_tag: 'green_3162' }\n// ]\n// \n"],"headingContent":"Use Partition Key","anchorList":[{"label":"Utilizzo della chiave di partizione","href":"Use-Partition-Key","type":1,"isActive":false},{"label":"Panoramica","href":"Overview","type":2,"isActive":false},{"label":"Abilitare la chiave di partizione","href":"Enable-partition-key","type":2,"isActive":false},{"label":"Elencare le partizioni","href":"List-partitions","type":2,"isActive":false},{"label":"Inserire i dati","href":"Insert-data","type":2,"isActive":false},{"label":"Utilizzare la chiave di partizione","href":"Use-partition-key","type":2,"isActive":false},{"label":"Casi d'uso tipici","href":"Typical-use-cases","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/it/userGuide/use-partition-key.md b/localization/v2.4.x/site/it/userGuide/use-partition-key.md index 990c59b03..86531708b 100644 --- a/localization/v2.4.x/site/it/userGuide/use-partition-key.md +++ b/localization/v2.4.x/site/it/userGuide/use-partition-key.md @@ -1,9 +1,8 @@ --- id: use-partition-key.md -title: Use Partition Key -summary: '' +title: Utilizzo della chiave di partizione --- -

      Use Partition Key

      This guide walks you through using the partition key to accelerate data retrieval from your collection.

      -

      Overview

      This guide walks you through using the partition key to accelerate data retrieval from your collection.

      +

      Overview

      You can set a particular field in a collection as the partition key so that Milvus distributes incoming entities into different partitions according to their respective partition values in this field. This allows entities with the same key value to be grouped in a partition, accelerating search performance by avoiding the need to scan irrelevant partitions when filtering by the key field. When compared to traditional filtering methods, the partition key can greatly enhance query performance.

      -

      You can use the partition key to implement multi-tenancy. For details, read Multi-tenancy.

      -

      Enable partition key

      You can set a particular field of a collection as the partition key so that Milvus distributes incoming entities into different partitions according to their respective partition values in this field. In this way, entities with the same key value are grouped into a partition, which accelerates search performance by avoiding the scanning of irrelevant partitions when filtering by the key field. Compared to traditional filtering methods, the partition key can greatly improve query performance.

      +

      You can use the partition key to implement multi-tenancy. For more details on multi-tenancy, read Multi-tenancy.

      +

      Enable partition key

      To set a field as the partition key, specify partition_key_field when creating a collection schema.

      -

      In the example code below, num_partitions determines the number of partitions that will be created. By default, it is set to 16. We recommend you retain the default value.

      +

      To set a field as the partition key, specify partition_key_field when creating a collection schema.

      +

      In the example code below, num_partitions determines the number of partitions that will be created. By default, it is set to 64. We recommend that you keep the default value.

      -

      For more information on parameters, refer to MilvusClient, create_schema(), and add_field() in the SDK reference.

      +

      For more information on parameters, refer to MilvusClient, create_schema(), and add_field() in the SDK reference.

      -

      For more information on parameters, refer to MilvusClientV2, createSchema(), and addField() in the SDK reference.

      +

      For more information on parameters, refer to MilvusClientV2, createSchema(), and addField() in the SDK reference.

      -

      For more information on parameters, refer to MilvusClient and createCollection() in the SDK reference.

      +

      For more information on parameters, refer to MilvusClient and createCollection() in the SDK reference.

      + Python Java Node.js
      import random, time
       from pymilvus import connections, MilvusClient, DataType
       
      @@ -82,7 +78,7 @@ schema = MilvusClient.create_schema(
           auto_id=False,
           enable_dynamic_field=True,
           partition_key_field="color",
      -    num_partitions=16 # Number of partitions. Defaults to 16.
      +    num_partitions=64 # Number of partitions. Defaults to 64.
       )
       
       schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
      @@ -161,12 +157,9 @@ client = new M
           }
       ]
       
      -

      After you have defined the fields, set up the index parameters.

      +

      After you have defined the fields, set up the index parameters.

      + Python Java Node.js
      index_params = MilvusClient.prepare_index_params()
       
       index_params.add_index(
      @@ -211,12 +204,9 @@ indexParams.add(indexParamForVectorFie
           params: { nlist: 1024}
       }]
       
      -

      Finally, you can create a collection.

      +

      Finally, you can create a collection.

      + Python Java Node.js
      client.create_collection(
           collection_name="test_collection",
           schema=schema,
      @@ -246,7 +236,7 @@ res = await client.// Success
       //
       
      -

      List partitions

      Once a field of a collection is used as the partition key, Milvus creates the specified number of partitions and manages them on your behalf. Therefore, you cannot manipulate the partitions in this collection anymore.

      -

      The following snippet demonstrates that there are 64 partitions in a collection once one of its fields is used as the partition key.

      -

      Insert data

      Once a field of a collection is used as the partition key, Milvus creates the specified number of partitions and manages them on your behalf. Therefore, you can no longer manipulate the partitions in this collection.

      +

      The following snippet demonstrates that there are 64 partitions in a collection once one of its fields is used as the partition key.
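      The snippet itself is elided in this fragment; a minimal Python sketch of such a check, assuming the test_collection created above and a Milvus server at localhost:19530, could look like this:

      from pymilvus import MilvusClient

      # Connect to the same server used in the examples above.
      client = MilvusClient(uri="http://localhost:19530")

      # Milvus creates and manages the partitions for the partition key itself;
      # listing them shows the automatically created partitions.
      partitions = client.list_partitions(collection_name="test_collection")
      print(len(partitions))  # 64 when num_partitions=64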

      +

      Insert data

      Once the collection is ready, start inserting data as follows:

      -

      Prepare data

      +

      Once the collection is ready, start inserting data as follows:

      +

      Prepare data

      # 3. Insert randomly generated vectors 
       colors = ["green", "blue", "yellow", "red", "black", "white", "purple", "pink", "orange", "brown", "grey"]
       data = []
      @@ -338,7 +325,7 @@ data = []
       
       console.log(data[0])
       
      -

      You can view the structure of the generated data by checking its first entry.

      +

      You can view the structure of the generated data by checking its first entry.

      {
           id: 0,
           vector: [
      @@ -353,20 +340,17 @@ data = []
           color_tag: 'blue_2064'
       }
       
      -

      Insert data

      -

      Use the insert() method to insert the data into the collection.

      +

      Insert data

      +

      Use the insert() method to insert the data into the collection.

      -

      Use the insert() method to insert the data into the collection.

      +

      Use the insert() method to insert the data into the collection.

      -

      Use the insert() method to insert the data into the collection.

      +

      Use the insert() method to insert the data into the collection.

      + Python Java Node.js
      res = client.insert(
           collection_name="test_collection",
           data=data
      @@ -418,7 +402,7 @@ data = []
       // 1000
       // 
       
      -

      Use partition key

      Once you have indexed and loaded the collection and inserted data, you can conduct a similarity search using the partition key.

      +

      After indexing and loading the collection and inserting the data, you can conduct a similarity search using the partition key.

      -

      For more information on parameters, refer to search() in the SDK reference.

      +

      For more information on parameters, refer to search() in the SDK reference.

      -

      For more information on parameters, refer to search() in the SDK reference.

      +

      For more information on parameters, refer to search() in the SDK reference.

      -

      For more information on parameters, refer to search() in the SDK reference.

      +

      For more information on parameters, refer to search() in the SDK reference.

      -

      notes

      -

      To conduct a similarity search using the partition key, you should include either of the following in the boolean expression of the search request:

      +

      notes

      +

      To conduct a similarity search using the partition key, include either of the following in the boolean expression of the search request:

      • expr='<partition_key>=="xxxx"'

      • expr='<partition_key> in ["xxx", "xxx"]'

      -

      Replace <partition_key> with the name of the field that is designated as the partition key.

      +

      Replace <partition_key> with the name of the field designated as the partition key.

      + Python Java Node.js
      # 4. Search with partition key
       query_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]
       
      @@ -557,7 +538,7 @@ res = await client.// ]
       // 
       
      -

      Typical use cases

      You can utilize the partition key feature to achieve better search performance and enable multi-tenancy. This can be done by assigning a tenant-specific value as the partition key field for each entity. When searching or querying the collection, you can filter entities by the tenant-specific value by including the partition key field in the boolean expression. This approach ensures data isolation by tenants and avoids scanning unnecessary partitions.

      +

      You can use the partition key feature to achieve better search performance and enable multi-tenancy. This can be done by assigning a tenant-specific value as the partition key field for each entity. When searching or querying the collection, you can filter entities by the tenant-specific value by including the partition key field in the boolean expression. This approach ensures data isolation by tenant and avoids scanning unnecessary partitions.
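      As an illustrative sketch only (the names multi_tenant_collection and tenant_id below are hypothetical and not part of this guide), a per-tenant search could look like this:

      from pymilvus import MilvusClient

      client = MilvusClient(uri="http://localhost:19530")

      # "tenant_id" is assumed to be the partition key field of the collection,
      # so this filter confines the search to the partitions holding that
      # tenant's entities.
      res = client.search(
          collection_name="multi_tenant_collection",  # hypothetical collection
          data=[[0.1, 0.2, 0.3, 0.4, 0.5]],
          filter="tenant_id == 'tenant_42'",
          limit=3,
      )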

      diff --git a/localization/v2.4.x/site/ja/adminGuide/clouds/aws/s3.json b/localization/v2.4.x/site/ja/adminGuide/clouds/aws/s3.json index b8a6958d3..169ecc854 100644 --- a/localization/v2.4.x/site/ja/adminGuide/clouds/aws/s3.json +++ b/localization/v2.4.x/site/ja/adminGuide/clouds/aws/s3.json @@ -1,54 +1 @@ -{ - "codeList": [ - "milvus_bucket_name=\"milvus-bucket-$(openssl rand -hex 12)\"\n\naws s3api create-bucket --bucket \"$milvus_bucket_name\" --region 'us-east-2' --acl private --object-ownership ObjectWriter --create-bucket-configuration LocationConstraint='us-east-2'\n\n\n# Output\n#\n# \"Location\": \"http://milvus-bucket-039dd013c0712f085d60e21f.s3.amazonaws.com/\"\n", - "echo '{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n}' > milvus-s3-policy.json\n\naws iam create-policy --policy-name MilvusS3ReadWrite --policy-document file://milvus-s3-policy.json\n\n\n# Get the ARN from the command output as follows:\n# {\n# \"Policy\": {\n# \"PolicyName\": \"MilvusS3ReadWrite\",\n# \"PolicyId\": \"AN5QQVVPM1BVTFlBNkdZT\",\n# \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n# \"Path\": \"/\",\n# \"DefaultVersionId\": \"v1\",\n# \"AttachmentCount\": 0,\n# \"PermissionsBoundaryUsageCount\": 0,\n# \"IsAttachable\": true,\n# \"CreateDate\": \"2023-11-16T06:00:01+00:00\",\n# \"UpdateDate\": \"2023-11-16T06:00:01+00:00\"\n# }\n# } \n", - "eksctl create iamserviceaccount --name milvus-s3-access-sa --namespace milvus --cluster milvus-eks-cluster --role-name milvus-s3-access-sa \\\n --attach-policy-arn arn:aws:iam:::policy/MilvusS3ReadWrite --approve\n", - "aws iam get-role --role-name milvus-s3-access-sa --query Role.AssumeRolePolicyDocument\n# An example output is as follows\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Federated\": \"arn:aws:iam::111122223333:oidc-provider/oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE\"\n },\n \"Action\": \"sts:AssumeRoleWithWebIdentity\",\n \"Condition\": {\n \"StringEquals\": {\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:sub\": \"system:serviceaccount:default:my-service-account\",\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:aud\": \"sts.amazonaws.com\"\n }\n }\n }\n ]\n}\n", - "aws iam list-attached-role-policies --role-name milvus-s3-access-sa --query 'AttachedPolicies[].PolicyArn' --output text\n# An example output is as follows\narn:aws:iam::12345678901:policy/MilvusS3ReadWrite\n", - "export policy_arn='arn:aws:iam::12345678901:policy/MilvusS3ReadWrite'\naws iam get-policy --policy-arn $policy_arn\n# An example output is as follows\n{\n \"Policy\": {\n \"PolicyName\": \"MilvusS3ReadWrite\",\n \"PolicyId\": \"EXAMPLEBIOWGLDEXAMPLE\",\n \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n \"Path\": \"/\",\n \"DefaultVersionId\": \"v2\",\n [...]\n }\n}\n", - "aws iam get-policy-version --policy-arn $policy_arn --version-id v2\n# An example output is as follows\n{\n \"PolicyVersion\": {\n \"Document\": {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n },\n [...]\n }\n}\n", 
- "kubectl describe serviceaccount milvus-s3-access-sa -n milvus\n# An example output is as follows\nName: milvus-s3-access-sa\nNamespace: milvus\nLabels: app.kubernetes.io/managed-by=eksctl\nAnnotations: eks.amazonaws.com/role-arn: arn:aws:iam::12345678901:role/milvus-s3-access-sa\n[...]\n", - "helm repo add milvus https://zilliztech.github.io/milvus-helm/\nhelm repo update\n", - "cluster:\n enabled: true\n\nservice:\n type: LoadBalancer\n port: 19530\n annotations: \n service.beta.kubernetes.io/aws-load-balancer-type: external\n service.beta.kubernetes.io/aws-load-balancer-name: milvus-service\n service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing\n service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip\n\nserviceAccount:\n create: false\n name: milvus-s3-access-sa\n\nminio:\n enabled: false\n\nexternalS3:\n enabled: true\n host: \"s3.us-east-2.amazonaws.com\"\n port: \"443\"\n useSSL: true\n bucketName: \"\"\n useIAM: true\n cloudProvider: \"aws\"\n iamEndpoint: \"\"\n\nrootCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: 1\n memory: 2Gi\n\nindexCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nqueryCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\ndataCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nproxy:\n replicas: 2\n resources: \n limits:\n cpu: 1\n memory: 2Gi \n", - "helm upgrade --install milvus-demo milvus/milvus -n milvus -f milvus.yaml\n" - ], - "headingContent": "", - "anchorList": [ - { - "label": "IAMロールによるS3アクセスの構成", - "href": "Configure-S3-Access-by-IAM-Role", - "type": 1, - "isActive": false - }, - { - "label": "始める前に", - "href": "Before-you-start", - "type": 2, - "isActive": false - }, - { - "label": "IAMロールをKubernetesサービスアカウントに関連付ける", - "href": "Associate-an-IAM-role-with-a-Kubernetes-service-account", - "type": 2, - "isActive": false - }, - { - "label": "ロールとサービス・アカウントのセットアップを確認する", - "href": "Verify-the-role-and-service-account-setup", - "type": 2, - "isActive": false - }, - { - "label": "Milvus配備", - "href": "Deploy-Milvus", - "type": 2, - "isActive": false - }, - { - "label": "インストールの確認", - "href": "Verify-the-installation", - "type": 2, - "isActive": false - } - ] -} +{"codeList":["milvus_bucket_name=\"milvus-bucket-$(openssl rand -hex 12)\"\n\naws s3api create-bucket --bucket \"$milvus_bucket_name\" --region 'us-east-2' --acl private --object-ownership ObjectWriter --create-bucket-configuration LocationConstraint='us-east-2'\n\n\n# Output\n#\n# \"Location\": \"http://milvus-bucket-039dd013c0712f085d60e21f.s3.amazonaws.com/\"\n","echo '{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:ListBucket\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\"\n ]\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:DeleteObject\",\n \"s3:GetObject\",\n \"s3:PutObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n}' > milvus-s3-policy.json\n\naws iam create-policy --policy-name MilvusS3ReadWrite --policy-document file://milvus-s3-policy.json\n\n\n# Get the ARN from the command output as follows:\n# {\n# \"Policy\": {\n# \"PolicyName\": \"MilvusS3ReadWrite\",\n# \"PolicyId\": \"AN5QQVVPM1BVTFlBNkdZT\",\n# \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n# \"Path\": \"/\",\n# \"DefaultVersionId\": \"v1\",\n# \"AttachmentCount\": 
0,\n# \"PermissionsBoundaryUsageCount\": 0,\n# \"IsAttachable\": true,\n# \"CreateDate\": \"2023-11-16T06:00:01+00:00\",\n# \"UpdateDate\": \"2023-11-16T06:00:01+00:00\"\n# }\n# } \n","eksctl create iamserviceaccount --name milvus-s3-access-sa --namespace milvus --cluster milvus-eks-cluster --role-name milvus-s3-access-sa \\\n --attach-policy-arn arn:aws:iam:::policy/MilvusS3ReadWrite --approve\n","aws iam get-role --role-name milvus-s3-access-sa --query Role.AssumeRolePolicyDocument\n# An example output is as follows\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Federated\": \"arn:aws:iam::111122223333:oidc-provider/oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE\"\n },\n \"Action\": \"sts:AssumeRoleWithWebIdentity\",\n \"Condition\": {\n \"StringEquals\": {\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:sub\": \"system:serviceaccount:default:my-service-account\",\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:aud\": \"sts.amazonaws.com\"\n }\n }\n }\n ]\n}\n","aws iam list-attached-role-policies --role-name milvus-s3-access-sa --query 'AttachedPolicies[].PolicyArn' --output text\n# An example output is as follows\narn:aws:iam::12345678901:policy/MilvusS3ReadWrite\n","export policy_arn='arn:aws:iam::12345678901:policy/MilvusS3ReadWrite'\naws iam get-policy --policy-arn $policy_arn\n# An example output is as follows\n{\n \"Policy\": {\n \"PolicyName\": \"MilvusS3ReadWrite\",\n \"PolicyId\": \"EXAMPLEBIOWGLDEXAMPLE\",\n \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n \"Path\": \"/\",\n \"DefaultVersionId\": \"v2\",\n [...]\n }\n}\n","aws iam get-policy-version --policy-arn $policy_arn --version-id v2\n# An example output is as follows\n{\n \"PolicyVersion\": {\n \"Document\": {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n },\n [...]\n }\n}\n","kubectl describe serviceaccount milvus-s3-access-sa -n milvus\n# An example output is as follows\nName: milvus-s3-access-sa\nNamespace: milvus\nLabels: app.kubernetes.io/managed-by=eksctl\nAnnotations: eks.amazonaws.com/role-arn: arn:aws:iam::12345678901:role/milvus-s3-access-sa\n[...]\n","helm repo add milvus https://zilliztech.github.io/milvus-helm/\nhelm repo update\n","cluster:\n enabled: true\n\nservice:\n type: LoadBalancer\n port: 19530\n annotations: \n service.beta.kubernetes.io/aws-load-balancer-type: external\n service.beta.kubernetes.io/aws-load-balancer-name: milvus-service\n service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing\n service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip\n\nserviceAccount:\n create: false\n name: milvus-s3-access-sa\n\nminio:\n enabled: false\n\nexternalS3:\n enabled: true\n host: \"s3.us-east-2.amazonaws.com\"\n port: \"443\"\n useSSL: true\n bucketName: \"\"\n useIAM: true\n cloudProvider: \"aws\"\n iamEndpoint: \"\"\n\nrootCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: 1\n memory: 2Gi\n\nindexCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nqueryCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\ndataCoordinator:\n replicas: 2\n 
activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nproxy:\n replicas: 2\n resources: \n limits:\n cpu: 1\n memory: 2Gi \n","helm upgrade --install milvus-demo milvus/milvus -n milvus -f milvus.yaml\n"],"headingContent":"Configure S3 Access by IAM Role","anchorList":[{"label":"IAMロールによるS3アクセスの設定","href":"Configure-S3-Access-by-IAM-Role","type":1,"isActive":false},{"label":"始める前に","href":"Before-you-start","type":2,"isActive":false},{"label":"KubernetesサービスアカウントにIAMロールを関連付ける","href":"Associate-an-IAM-role-with-a-Kubernetes-service-account","type":2,"isActive":false},{"label":"ロールとサービスアカウントの設定を確認する","href":"Verify-the-role-and-service-account-setup","type":2,"isActive":false},{"label":"Milvusのデプロイ","href":"Deploy-Milvus","type":2,"isActive":false},{"label":"インストールの確認","href":"Verify-the-installation","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/adminGuide/clouds/aws/s3.md b/localization/v2.4.x/site/ja/adminGuide/clouds/aws/s3.md index 5d4bcc112..d6bd23a1d 100644 --- a/localization/v2.4.x/site/ja/adminGuide/clouds/aws/s3.md +++ b/localization/v2.4.x/site/ja/adminGuide/clouds/aws/s3.md @@ -1,6 +1,6 @@ --- id: s3.md -title: IAMロールによるS3アクセスの構成 +title: IAMロールによるS3アクセスの設定 related_key: 's3, storage, iam' summary: s3をIAM Roleで設定する方法を学ぶ。 --- @@ -19,7 +19,7 @@ summary: s3をIAM Roleで設定する方法を学ぶ。 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

      This shows you how to configure s3 access with an IAM Role when you install Milvus with helm. For details, refer to IAM Role.

      +

      This topic introduces how to configure s3 access by IAM Role when you install Milvus with helm. For details, refer to IAM Role.

      Before you start

      Cert Manager is required to manage TLS certificates for Milvus Operator.

      +

      Cert Manager is required in order to manage the TLS certificates for Milvus Operator.

      1. Find the Cert Manager version appropriate for your OpenShift version: Cert Manager Releases

      2. Install Cert Manager by following the official guide: Installing Cert Manager

      3. @@ -96,30 +96,30 @@ summary: How to deploy a Milvus cluster on OpenShift
          apiVersion: cert-manager.io/v1
          kind: Certificate
          metadata:
         -name: milvus-operator-serving-cert
         -namespace: milvus-operator
         +  name: milvus-operator-serving-cert
         +  namespace: milvus-operator
          spec:
         -dnsNames:
         -- milvus-operator-webhook-service.milvus-operator.svc
         -- milvus-operator-webhook-service.milvus-operator.svc.cluster.local
         -issuerRef:
         +  dnsNames:
         +  - milvus-operator-webhook-service.milvus-operator.svc
         +  - milvus-operator-webhook-service.milvus-operator.svc.cluster.local
         +  issuerRef:
            kind: Issuer
            name: milvus-operator-selfsigned-issuer
         -secretName: milvus-operator-webhook-cert
         +  secretName: milvus-operator-webhook-cert
          ---
          apiVersion: cert-manager.io/v1
          kind: Issuer
          metadata:
         -name: milvus-operator-selfsigned-issuer
         -namespace: milvus-operator
         +  name: milvus-operator-selfsigned-issuer
         +  namespace: milvus-operator
          spec:
         -selfSigned: {}
         +  selfSigned: {}
  • Apply the file:

    kubectl apply -f milvus-operator-certificate.yaml
     
  • -

    Step 3: Install Milvus Operator

    Start installing Milvus Operator. We recommend using Helm to install Milvus Operator.

    +

    Start installing Milvus Operator. Helm is the recommended way to install Milvus Operator.

    1. Add the Milvus Operator Helm repository:

      helm repo add milvus-operator https://zilliztech.github.io/milvus-operator/
      @@ -175,9 +175,9 @@ helm repo update milvus-operator
                 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
               >
             
      -    

      If you want to learn how to deploy Milvus on other clouds, proceed to the next step:

      +

      If you want to learn how to deploy Milvus on other clouds:

      diff --git a/localization/v2.4.x/site/ja/adminGuide/configure-docker.json b/localization/v2.4.x/site/ja/adminGuide/configure-docker.json index 0bc11ed94..077f3a2c2 100644 --- a/localization/v2.4.x/site/ja/adminGuide/configure-docker.json +++ b/localization/v2.4.x/site/ja/adminGuide/configure-docker.json @@ -1,48 +1 @@ -{ - "codeList": [ - "$ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.9/configs/milvus.yaml\n", - "# For Milvus standalone\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml\n", - "...\n standalone:\n container_name: milvus-standalone\n image: milvusdb/milvus:v2.2.13\n command: [\"milvus\", \"run\", \"standalone\"]\n environment:\n ETCD_ENDPOINTS: etcd:2379\n MINIO_ADDRESS: minio:9000\n volumes:\n - /local/path/to/your/milvus.yaml:/milvus/configs/milvus.yaml # Map the local path to the container path\n - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n ports:\n - \"19530:19530\"\n - \"9091:9091\"\n depends_on:\n - \"etcd\"\n - \"minio\"\n...\n", - "$ sudo docker compose up -d\n" - ], - "headingContent": "", - "anchorList": [ - { - "label": "Docker ComposeでMilvusを設定する", - "href": "Configure-Milvus-with-Docker-Compose", - "type": 1, - "isActive": false - }, - { - "label": "設定ファイルのダウンロード", - "href": "Download-a-configuration-file", - "type": 2, - "isActive": false - }, - { - "label": "設定ファイルの修正", - "href": "Modify-the-configuration-file", - "type": 2, - "isActive": false - }, - { - "label": "インストールファイルのダウンロード", - "href": "Download-an-installation-file", - "type": 2, - "isActive": false - }, - { - "label": "インストールファイルの修正", - "href": "Modify-the-installation-file", - "type": 2, - "isActive": false - }, - { - "label": "スタート・Milvus", - "href": "Start-Milvus", - "type": 2, - "isActive": false - }, - { "label": "次の記事", "href": "Whats-next", "type": 2, "isActive": false } - ] -} +{"codeList":["$ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.13-hotfix/configs/milvus.yaml\n","# For Milvus standalone\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml\n","...\n standalone:\n container_name: milvus-standalone\n image: milvusdb/milvus:v2.2.13\n command: [\"milvus\", \"run\", \"standalone\"]\n environment:\n ETCD_ENDPOINTS: etcd:2379\n MINIO_ADDRESS: minio:9000\n volumes:\n - /local/path/to/your/milvus.yaml:/milvus/configs/milvus.yaml # Map the local path to the container path\n - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n ports:\n - \"19530:19530\"\n - \"9091:9091\"\n depends_on:\n - \"etcd\"\n - \"minio\"\n...\n","$ sudo docker compose up -d\n"],"headingContent":"Configure Milvus with Docker Compose","anchorList":[{"label":"Docker ComposeでMilvusを設定する","href":"Configure-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"設定ファイルのダウンロード","href":"Download-a-configuration-file","type":2,"isActive":false},{"label":"設定ファイルの変更","href":"Modify-the-configuration-file","type":2,"isActive":false},{"label":"インストールファイルのダウンロード","href":"Download-an-installation-file","type":2,"isActive":false},{"label":"インストールファイルの修正","href":"Modify-the-installation-file","type":2,"isActive":false},{"label":"Milvusの起動","href":"Start-Milvus","type":2,"isActive":false},{"label":"次の内容","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/adminGuide/configure-docker.md 
b/localization/v2.4.x/site/ja/adminGuide/configure-docker.md index ee89050c2..5bf5889b7 100644 --- a/localization/v2.4.x/site/ja/adminGuide/configure-docker.md +++ b/localization/v2.4.x/site/ja/adminGuide/configure-docker.md @@ -5,7 +5,7 @@ related_key: configure summary: Docker ComposeでMilvusを設定します。 title: Docker ComposeでMilvusを設定する --- -

Configure Milvus with Docker Compose

This topic describes how to configure Milvus components and their third-party dependencies with Docker Compose.

In the current release, all parameters take effect only after Milvus restarts.

Download a configuration file

Download milvus.yaml directly, or download it with the following command:

-$ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.9/configs/milvus.yaml
+$ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.13-hotfix/configs/milvus.yaml

Modify the configuration file

Download the installation file for Milvus standalone and save it as docker-compose.yml.

You can also simply run the following command:

      # For Milvus standalone
      -$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml
      +$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml
       

Modify the installation file

In docker-compose.yml, add a volumes section under each milvus-standalone service.

Map the local path to your milvus.yaml file onto the corresponding docker container path of the configuration file, /milvus/configs/milvus.yaml, under all the volumes sections.

...
  standalone:
    container_name: milvus-standalone
    image: milvusdb/milvus:v2.2.13
    command: ["milvus", "run", "standalone"]
    environment:
      ETCD_ENDPOINTS: etcd:2379
      MINIO_ADDRESS: minio:9000
    volumes:
      - /local/path/to/your/milvus.yaml:/milvus/configs/milvus.yaml # Map the local path to the container path
      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus
    ports:
      - "19530:19530"
      - "9091:9091"
    depends_on:
      - "etcd"
      - "minio"
...
After you finish modifying the configuration file and the installation file, start Milvus:

      $ sudo docker compose up -d
       
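As a quick sanity check (my suggestion, not a step from the original guide), you can list the running containers to confirm everything came up:

$ sudo docker compose ps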

What's next

This article describes how to configure Milvus QueryNode to use local disk storage.

Overview

Milvus is an AI-focused vector database that can efficiently store and retrieve massive amounts of vector data. It is ideal for tasks such as image and video analysis, natural language processing, and recommendation systems. To ensure optimal performance, it is crucial to minimize disk read latency. Using local NVMe SSDs is strongly recommended to prevent delays and maintain system stability.

The key features that benefit from local disk storage include:

• Chunk cache: preloads data into the local disk cache to speed up searches.
• MMap: maps file contents directly into memory for better memory efficiency.
• DiskANN index: requires disk storage for efficient index management.

This article focuses on deploying Milvus Distributed on cloud platforms and configuring the QueryNode to use NVMe disk storage. The following table lists the recommended machine types of various cloud providers.

@@ -181,7 +181,7 @@ state = "/mnt/nvme/containerd/state"

We recommend verifying disk performance with Fio, a popular tool for benchmarking disk performance. The following example shows how to run Fio to test disk performance:

• Deploy a test pod on a node with an NVMe disk.

        kubectl create -f ubuntu.yaml
        @@ -215,7 +215,7 @@ apt-get install fio -y
         cd /data
         
         # write 10GB
        -fio -direct=1-iodepth=128 -rw=randwrite -ioengine=libaio -bs=4K -size=10G -numjobs=10 -runtime=600 -group_reporting -filename=test -name=Rand_Write_IOPS_Test
        +fio -direct=1 -iodepth=128 -rw=randwrite -ioengine=libaio -bs=4K -size=10G -numjobs=10 -runtime=600 -group_reporting -filename=test -name=Rand_Write_IOPS_Test
         
         # verify the read speed
         # compare with the disk performance indicators provided by various cloud providers.
@@ -249,7 +249,7 @@ IO depths    : 1=0.1%
             latency   : target=0, window=0, percentile=100.00%, depth=64
         
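The snippet above leaves the read test as a comment. A matching Fio random-read benchmark (my assumption, mirroring the flags of the write command above rather than anything given in the original) would look like:

# verify random-read IOPS with the same block size and queue depth as the write test
fio -direct=1 -iodepth=128 -rw=randread -ioengine=libaio -bs=4K -size=10G -numjobs=10 -runtime=600 -group_reporting -filename=test -name=Rand_Read_IOPS_Test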

Deploy Milvus Distributed

If the verification results are satisfactory, you can deploy Milvus Distributed as follows:

Tips for deploying Milvus Distributed with Helm

QueryNode pods use NVMe disks as EmptyDir volumes by default. To ensure optimal performance, mount the NVMe disks at /var/lib/milvus/data inside the QueryNode pods.

For details on how to deploy Milvus Distributed with Helm, see Run Milvus in Kubernetes with Helm.

Tips for deploying Milvus Distributed with Milvus Operator

Milvus Operator automatically configures the QueryNode pods to use NVMe disks as EmptyDir volumes. We recommend adding the following settings to the MilvusCluster custom resource:

      ...
       spec:
      diff --git a/localization/v2.4.x/site/ja/adminGuide/limit_collection_counts.json b/localization/v2.4.x/site/ja/adminGuide/limit_collection_counts.json
      index 46a1a49ec..f9115ad25 100644
      --- a/localization/v2.4.x/site/ja/adminGuide/limit_collection_counts.json
      +++ b/localization/v2.4.x/site/ja/adminGuide/limit_collection_counts.json
      @@ -1 +1 @@
      -{"codeList":["rootCoord:\n    maxGeneralCapacity: 1024\n","60 (collections) x 2 (shards) x 4 (partitions) + 40 (collections) x 1 (shard) x 12 (partitions) = 960\n","failed checking constraint: sum_collections(parition*shard) exceeding the max general capacity:\n"],"headingContent":"","anchorList":[{"label":"限度徴収数","href":"Limit-Collection-Counts","type":1,"isActive":false},{"label":"設定オプション","href":"Configuration-options","type":2,"isActive":false},{"label":"コレクション数の計算","href":"Calculating-the-number-of-collections","type":2,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":["rootCoord:\n    maxGeneralCapacity: 65536\n","60 (collections) x 2 (shards) x 4 (partitions) + 40 (collections) x 1 (shard) x 12 (partitions) = 960\n","failed checking constraint: sum_collections(parition*shard) exceeding the max general capacity:\n"],"headingContent":"Limit Collection Counts","anchorList":[{"label":"コレクション数の制限","href":"Limit-Collection-Counts","type":1,"isActive":false},{"label":"構成オプション","href":"Configuration-options","type":2,"isActive":false},{"label":"コレクション数の計算","href":"Calculating-the-number-of-collections","type":2,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/ja/adminGuide/limit_collection_counts.md b/localization/v2.4.x/site/ja/adminGuide/limit_collection_counts.md
      index 0cf97dfd1..a330ea84c 100644
      --- a/localization/v2.4.x/site/ja/adminGuide/limit_collection_counts.md
      +++ b/localization/v2.4.x/site/ja/adminGuide/limit_collection_counts.md
      @@ -1,7 +1,6 @@
       ---
       id: limit_collection_counts.md
       title: コレクション数に制限を設ける
      -summary: ''
       ---
       

Limit Collection Counts

A Milvus instance allows up to 65,536 collections. However, too many collections may lead to performance issues, so it is recommended to limit the number of collections created in a Milvus instance.

This guide walks you through setting limits on the number of collections in a Milvus instance.

The configuration varies with the way you install the Milvus instance.

• For Milvus instances installed with Helm Charts

  Add the configuration to the values.yaml file under the config section. For details, see Configure Milvus with Helm Charts.

• For Milvus instances installed with Docker Compose

  Add the configuration to the milvus.yaml file you used to start the Milvus instance. For details, see Configure Milvus with Docker Compose.

• For Milvus instances installed with Operator

  Add the configuration to the spec.components section of the Milvus custom resource. For details, see Configure Milvus with Milvus Operator.

Configuration options

      rootCoord:
      -    maxGeneralCapacity: 1024
      +    maxGeneralCapacity: 65536
       
The maxGeneralCapacity parameter sets the maximum number of collections that the current Milvus instance can hold. The default value is 65536.
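The JSON code list for this page counts usage against maxGeneralCapacity as the sum of shards × partitions across all collections. A small Python illustration (the numbers come from that list; the helper itself is mine):

# Usage counted against maxGeneralCapacity: sum over all collections of
# shards x partitions (cf. the error message in this page's code list:
# "sum_collections(parition*shard) exceeding the max general capacity").
groups = [
    (60, 2, 4),   # 60 collections, 2 shards, 4 partitions each
    (40, 1, 12),  # 40 collections, 1 shard, 12 partitions each
]
usage = sum(count * shards * partitions for count, shards, partitions in groups)
print(usage)  # 60*2*4 + 40*1*12 = 960, well within the default 65536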

Calculating the number of collections

After enabling user authentication, connect to the Milvus instance using a token that consists of a username and password. By default, Milvus uses the root user with the password Milvus.

from pymilvus import MilvusClient

client = MilvusClient(
    uri="http://localhost:19530",  # adjust to your deployment
    token="root:Milvus"  # default root credentials mentioned above
)
      diff --git a/localization/v2.4.x/site/ja/adminGuide/resource_group.json b/localization/v2.4.x/site/ja/adminGuide/resource_group.json
      index ee93fd558..c8e678cf9 100644
      --- a/localization/v2.4.x/site/ja/adminGuide/resource_group.json
      +++ b/localization/v2.4.x/site/ja/adminGuide/resource_group.json
      @@ -1 +1 @@
      -{"codeList":["{\n    \"requests\": { \"nodeNum\": 1 },\n    \"limits\": { \"nodeNum\": 1 },\n    \"transfer_from\": [{ \"resource_group\": \"rg1\" }],\n    \"transfer_to\": [{ \"resource_group\": \"rg2\" }]\n}\n","import pymilvus\n\n# A resource group name should be a string of 1 to 255 characters, starting with a letter or an underscore (_) and containing only numbers, letters, and underscores (_).\nname = \"rg\"\nnode_num = 0\n\n# create a resource group that exactly hold no query node.\ntry:\n    utility.create_resource_group(name, config=utility.ResourceGroupConfig(\n        requests={\"node_num\": node_num},\n        limits={\"node_num\": node_num},\n    ), using='default')\n    print(f\"Succeeded in creating resource group {name}.\")\nexcept Exception:\n    print(\"Failed to create the resource group.\")\n","rgs = utility.list_resource_groups(using='default')\nprint(f\"Resource group list: {rgs}\")\n\n# Resource group list: ['__default_resource_group', 'rg']\n","info = utility.describe_resource_group(name, using=\"default\")\nprint(f\"Resource group description: {info}\")\n\n# Resource group description: \n#        ,           // string, rg name\n#        ,            // int, num_node which has been transfer to this rg\n#        ,  // int, available node_num, some node may shutdown\n#        , // map[string]int, from collection_name to loaded replica of each collecion in this rg\n#        ,  // map[string]int, from collection_name to outgoging accessed node num by replica loaded in this rg \n#        .  // map[string]int, from collection_name to incoming accessed node num by replica loaded in other rg\n","source = '__default_resource_group'\ntarget = 'rg'\nexpected_num_nodes_in_default = 0\nexpected_num_nodes_in_rg = 1\n\ntry:\n    utility.update_resource_groups({\n        source: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_default},\n            limits={\"node_num\": expected_num_nodes_in_default},\n        ),\n        target: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_rg},\n            limits={\"node_num\": expected_num_nodes_in_rg},\n        )\n    }, using=\"default\")\n    print(f\"Succeeded in move 1 node(s) from {source} to {target}.\")\nexcept Exception:\n    print(\"Something went wrong while moving nodes.\")\n\n# After a while, succeeded in moving 1 node(s) from __default_resource_group to rg.\n","from pymilvus import Collection\n\ncollection = Collection('demo')\n\n# Milvus loads the collection to the default resource group.\ncollection.load(replica_number=2)\n\n# Or, you can ask Milvus load the collection to the desired resource group.\n# make sure that query nodes num should be greater or equal to replica_number\nresource_groups = ['rg']\ncollection.load(replica_number=2, _resource_groups=resource_groups) \n","collection = Collection(\"Books\")\n\n# Use the load method of a collection to load one of its partition\ncollection.load([\"Novels\"], replica_number=2, _resource_groups=resource_groups)\n\n# Or, you can use the load method of a partition directly\npartition = Partition(collection, \"Novels\")\npartition.load(replica_number=2, _resource_groups=resource_groups)\n","source = '__default_resource_group'\ntarget = 'rg'\ncollection_name = 'c'\nnum_replicas = 1\n\ntry:\n    utility.transfer_replica(source, target, collection_name, num_replicas, using=\"default\")\n    print(f\"Succeeded in moving {num_node} replica(s) of {collection_name} from {source} to {target}.\")\nexcept Exception:\n    
print(\"Something went wrong while moving replicas.\")\n\n# Succeeded in moving 1 replica(s) of c from __default_resource_group to rg.\n","try:\n    utility.update_resource_groups({\n        \"rg\": utility.ResourceGroupConfig(\n            requests={\"node_num\": 0},\n            limits={\"node_num\": 0},\n        ),\n    }, using=\"default\")\n    utility.drop_resource_group(\"rg\", using=\"default\")\n    print(f\"Succeeded in dropping {source}.\")\nexcept Exception:\n    print(f\"Something went wrong while dropping {source}.\")\n","from pymilvus import utility\nfrom pymilvus.client.types import ResourceGroupConfig\n\n_PENDING_NODES_RESOURCE_GROUP=\"__pending_nodes\"\n\ndef init_cluster(node_num: int):\n    print(f\"Init cluster with {node_num} nodes, all nodes will be put in default resource group\")\n    # create a pending resource group, which can used to hold the pending nodes that do not hold any data.\n    utility.create_resource_group(name=_PENDING_NODES_RESOURCE_GROUP, config=ResourceGroupConfig(\n        requests={\"node_num\": 0}, # this resource group can hold 0 nodes, no data will be load on it.\n        limits={\"node_num\": 10000}, # this resource group can hold at most 10000 nodes \n    ))\n\n    # update default resource group, which can used to hold the nodes that all initial node in it.\n    utility.update_resource_groups({\n        \"__default_resource_group\": ResourceGroupConfig(\n            requests={\"node_num\": node_num},\n            limits={\"node_num\": node_num},\n            transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover missing node from pending resource group at high priority.\n            transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover redundant node to pending resource group at low priority.\n        )})\n    utility.create_resource_group(name=\"rg1\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n    utility.create_resource_group(name=\"rg2\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n\ninit_cluster(1)\n","\ndef scale_to(node_num: int):\n    # scale the querynode number in Milvus into node_num.\n    pass\n","# scale rg1 into 3 nodes, rg2 into 1 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 3},\n        limits={\"node_num\": 3},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n    \"rg2\": ResourceGroupConfig(\n        requests={\"node_num\": 1},\n        limits={\"node_num\": 1},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\nscale_to(5)\n# rg1 has 3 nodes, rg2 has 1 node, __default_resource_group has 1 node.\n","# scale rg1 from 3 nodes into 2 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 2},\n        limits={\"node_num\": 2},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    
    transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\n\n# rg1 has 2 nodes, rg2 has 1 node, __default_resource_group has 1 node, __pending_nodes has 1 node.\nscale_to(4)\n# scale the node in __pending_nodes\n"],"headingContent":"","anchorList":[{"label":"リソースグループの管理","href":"Manage-Resource-Groups","type":1,"isActive":false},{"label":"リソースグループとは","href":"What-is-a-resource-group","type":2,"isActive":false},{"label":"リソースグループの概念","href":"Concepts-of-resource-group","type":2,"isActive":false},{"label":"宣言的APIを使ってリソースグループを管理する","href":"Use-declarative-api-to-manage-resource-group","type":2,"isActive":false},{"label":"クラスタのスケーリングを管理するためのグッドプラクティス","href":"A-good-practice-to-manage-cluster-scaling","type":2,"isActive":false},{"label":"リソースグループが複数のレプリカとどのように相互作用するか","href":"How-resource-groups-interacts-with-multiple-replicas","type":2,"isActive":false},{"label":"次の記事","href":"Whats-next","type":1,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":["{\n    \"requests\": { \"nodeNum\": 1 },\n    \"limits\": { \"nodeNum\": 1 },\n    \"transfer_from\": [{ \"resource_group\": \"rg1\" }],\n    \"transfer_to\": [{ \"resource_group\": \"rg2\" }]\n}\n","import pymilvus\n\n# A resource group name should be a string of 1 to 255 characters, starting with a letter or an underscore (_) and containing only numbers, letters, and underscores (_).\nname = \"rg\"\nnode_num = 0\n\n# create a resource group that exactly hold no query node.\ntry:\n    utility.create_resource_group(name, config=utility.ResourceGroupConfig(\n        requests={\"node_num\": node_num},\n        limits={\"node_num\": node_num},\n    ), using='default')\n    print(f\"Succeeded in creating resource group {name}.\")\nexcept Exception:\n    print(\"Failed to create the resource group.\")\n","rgs = utility.list_resource_groups(using='default')\nprint(f\"Resource group list: {rgs}\")\n\n# Resource group list: ['__default_resource_group', 'rg']\n","info = utility.describe_resource_group(name, using=\"default\")\nprint(f\"Resource group description: {info}\")\n\n# Resource group description: \n#        ,           // string, rg name\n#        ,            // int, num_node which has been transfer to this rg\n#        ,  // int, available node_num, some node may shutdown\n#        , // map[string]int, from collection_name to loaded replica of each collecion in this rg\n#        ,  // map[string]int, from collection_name to outgoging accessed node num by replica loaded in this rg \n#        .  // map[string]int, from collection_name to incoming accessed node num by replica loaded in other rg\n","source = '__default_resource_group'\ntarget = 'rg'\nexpected_num_nodes_in_default = 0\nexpected_num_nodes_in_rg = 1\n\ntry:\n    utility.update_resource_groups({\n        source: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_default},\n            limits={\"node_num\": expected_num_nodes_in_default},\n        ),\n        target: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_rg},\n            limits={\"node_num\": expected_num_nodes_in_rg},\n        )\n    }, using=\"default\")\n    print(f\"Succeeded in move 1 node(s) from {source} to {target}.\")\nexcept Exception:\n    print(\"Something went wrong while moving nodes.\")\n\n# After a while, succeeded in moving 1 node(s) from __default_resource_group to rg.\n","from pymilvus import Collection\n\ncollection = Collection('demo')\n\n# Milvus loads the collection to the default resource group.\ncollection.load(replica_number=2)\n\n# Or, you can ask Milvus load the collection to the desired resource group.\n# make sure that query nodes num should be greater or equal to replica_number\nresource_groups = ['rg']\ncollection.load(replica_number=2, _resource_groups=resource_groups) \n","collection = Collection(\"Books\")\n\n# Use the load method of a collection to load one of its partition\ncollection.load([\"Novels\"], replica_number=2, _resource_groups=resource_groups)\n\n# Or, you can use the load method of a partition directly\npartition = Partition(collection, \"Novels\")\npartition.load(replica_number=2, _resource_groups=resource_groups)\n","source = '__default_resource_group'\ntarget = 'rg'\ncollection_name = 'c'\nnum_replicas = 1\n\ntry:\n    utility.transfer_replica(source, target, collection_name, num_replicas, using=\"default\")\n    print(f\"Succeeded in moving {num_node} replica(s) of {collection_name} from {source} to {target}.\")\nexcept Exception:\n    
print(\"Something went wrong while moving replicas.\")\n\n# Succeeded in moving 1 replica(s) of c from __default_resource_group to rg.\n","try:\n    utility.update_resource_groups({\n        \"rg\": utility.ResourceGroupConfig(\n            requests={\"node_num\": 0},\n            limits={\"node_num\": 0},\n        ),\n    }, using=\"default\")\n    utility.drop_resource_group(\"rg\", using=\"default\")\n    print(f\"Succeeded in dropping {source}.\")\nexcept Exception:\n    print(f\"Something went wrong while dropping {source}.\")\n","from pymilvus import utility\nfrom pymilvus.client.types import ResourceGroupConfig\n\n_PENDING_NODES_RESOURCE_GROUP=\"__pending_nodes\"\n\ndef init_cluster(node_num: int):\n    print(f\"Init cluster with {node_num} nodes, all nodes will be put in default resource group\")\n    # create a pending resource group, which can used to hold the pending nodes that do not hold any data.\n    utility.create_resource_group(name=_PENDING_NODES_RESOURCE_GROUP, config=ResourceGroupConfig(\n        requests={\"node_num\": 0}, # this resource group can hold 0 nodes, no data will be load on it.\n        limits={\"node_num\": 10000}, # this resource group can hold at most 10000 nodes \n    ))\n\n    # update default resource group, which can used to hold the nodes that all initial node in it.\n    utility.update_resource_groups({\n        \"__default_resource_group\": ResourceGroupConfig(\n            requests={\"node_num\": node_num},\n            limits={\"node_num\": node_num},\n            transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover missing node from pending resource group at high priority.\n            transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover redundant node to pending resource group at low priority.\n        )})\n    utility.create_resource_group(name=\"rg1\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n    utility.create_resource_group(name=\"rg2\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n\ninit_cluster(1)\n","\ndef scale_to(node_num: int):\n    # scale the querynode number in Milvus into node_num.\n    pass\n","# scale rg1 into 3 nodes, rg2 into 1 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 3},\n        limits={\"node_num\": 3},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n    \"rg2\": ResourceGroupConfig(\n        requests={\"node_num\": 1},\n        limits={\"node_num\": 1},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\nscale_to(5)\n# rg1 has 3 nodes, rg2 has 1 node, __default_resource_group has 1 node.\n","# scale rg1 from 3 nodes into 2 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 2},\n        limits={\"node_num\": 2},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    
    transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\n\n# rg1 has 2 nodes, rg2 has 1 node, __default_resource_group has 1 node, __pending_nodes has 1 node.\nscale_to(4)\n# scale the node in __pending_nodes\n"],"headingContent":"Manage Resource Groups","anchorList":[{"label":"リソースグループの管理","href":"Manage-Resource-Groups","type":1,"isActive":false},{"label":"リソースグループとは","href":"What-is-a-resource-group","type":2,"isActive":false},{"label":"リソースグループの概念","href":"Concepts-of-resource-group","type":2,"isActive":false},{"label":"宣言型apiを使用してリソースグループを管理する","href":"Use-declarative-api-to-manage-resource-group","type":2,"isActive":false},{"label":"クラスタスケーリング管理のグッドプラクティス","href":"A-good-practice-to-manage-cluster-scaling","type":2,"isActive":false},{"label":"リソースグループが複数のレプリカとどのように相互作用するか","href":"How-resource-groups-interacts-with-multiple-replicas","type":2,"isActive":false},{"label":"次のステップ","href":"Whats-next","type":1,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/ja/adminGuide/resource_group.md b/localization/v2.4.x/site/ja/adminGuide/resource_group.md
      index 53ed1a627..405dda59c 100644
      --- a/localization/v2.4.x/site/ja/adminGuide/resource_group.md
      +++ b/localization/v2.4.x/site/ja/adminGuide/resource_group.md
      @@ -19,7 +19,7 @@ title: リソースグループの管理
                 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
               >
             
      -    

With Milvus, you can use resource groups to physically isolate certain query nodes from others. This guide walks you through how to create and manage custom resource groups and how to transfer nodes between them.

What is a resource group

A resource group can hold some or all of the query nodes in a Milvus cluster. How you allocate query nodes among resource groups is up to what makes the most sense for you. For example, in a multi-collection scenario, you can allocate an appropriate number of query nodes to each resource group and load each collection into a different resource group, so that operations within one collection stay physically independent of those in other collections.

Note that a Milvus instance maintains a default resource group that holds all query nodes at startup, named __default_resource_group.

Starting from version 2.4.1, Milvus provides a declarative resource group API, while the older resource group API has been deprecated. The new declarative API enables users to achieve idempotency and eases secondary development in cloud-native environments.
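Under the declarative API, each resource group is described by a configuration of the following shape (reproduced from this page's code list; the nodeNum values and group names are illustrative):

{
    "requests": { "nodeNum": 1 },
    "limits": { "nodeNum": 1 },
    "transfer_from": [{ "resource_group": "rg1" }],
    "transfer_to": [{ "resource_group": "rg2" }]
}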

Concepts of resource group

All code samples on this page are in PyMilvus 2.4.8. Upgrade your PyMilvus installation before running them.
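For instance, a standard pip upgrade (the exact command is my suggestion, not from the original page) would be:

$ pip install --upgrade pymilvus==2.4.8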

1. Create a resource group.

  To create a resource group, run the following after connecting to a Milvus instance. The snippet below assumes that default is the alias of your Milvus connection.

  import pymilvus

  # A resource group name should be a string of 1 to 255 characters, starting
  # with a letter or an underscore (_) and containing only numbers, letters,
  # and underscores (_).
  name = "rg"
  node_num = 0

  # create a resource group that holds exactly zero query nodes
  try:
      utility.create_resource_group(name, config=utility.ResourceGroupConfig(
          requests={"node_num": node_num},
          limits={"node_num": node_num},
      ), using='default')
      print(f"Succeeded in creating resource group {name}.")
  except Exception:
      print("Failed to create the resource group.")
2. Transfer nodes between resource groups.

  You may notice that the resource group just described does not have any query nodes yet. Suppose there is currently one query node in your cluster's __default_resource_group, and you want to move one node into the rg you created. update_resource_groups guarantees atomicity across multiple configuration changes, so no intermediate state is visible to Milvus.

        source = '__default_resource_group'
         target = 'rg'
         expected_num_nodes_in_default = 0
        @@ -180,7 +180,7 @@ collection.load(["Novels"], replica_n
         partition = Partition(collection, "Novels")
         partition.load(replica_number=2, _resource_groups=resource_groups)
         
  Note that _resource_groups is an optional parameter; if you leave it unspecified, Milvus loads the replicas onto the query nodes in the default resource group.

  To have Milvus load each replica of a collection into a separate resource group, make sure the number of resource groups equals the number of replicas.

3. Transfer replicas between resource groups.

  Milvus uses replicas to achieve load balancing among segments distributed across multiple query nodes. To move certain replicas of a collection from one resource group to another, run the call shown below:
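  This is the transfer_replica snippet from the page's code list; note that the original prints an undefined num_node variable, corrected here to num_replicas:

  from pymilvus import utility

  source = '__default_resource_group'
  target = 'rg'
  collection_name = 'c'
  num_replicas = 1

  try:
      utility.transfer_replica(source, target, collection_name, num_replicas, using="default")
      print(f"Succeeded in moving {num_replicas} replica(s) of {collection_name} from {source} to {target}.")
  except Exception:
      print("Something went wrong while moving replicas.")

  # Succeeded in moving 1 replica(s) of c from __default_resource_group to rg.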

        @@ -230,7 +230,7 @@ num_replicas = 1

For now, Milvus cannot scale in and out independently in cloud-native environments. However, by using the declarative resource group API together with container orchestration, Milvus can easily achieve resource isolation and management for QueryNodes. Here are some good practices for managing QueryNodes in a cloud environment:

1. By default, Milvus creates a __default_resource_group. This resource group cannot be deleted; it also serves as the default loading resource group for all collections, and redundant QueryNodes are always assigned to it. You can therefore create a pending resource group to hold spare QueryNode resources and keep them from being occupied by the __default_resource_group.

  Moreover, by strictly enforcing the constraint sum(.requests.nodeNum) <= queryNodeNum, you can precisely control the assignment of QueryNodes within the cluster. Let's initialize the cluster, assuming there is currently only one QueryNode in it. Here is an example setup:

          from pymilvus import utility
           from pymilvus.client.types import ResourceGroupConfig
  @@ -268,7 +268,7 @@ _PENDING_NODES_RESOURCE_GROUP="__pending_nodes"
           
           init_cluster(1)
           
  The code example above creates a resource group named __pending_nodes to hold additional QueryNodes, plus two user-specific resource groups named rg1 and rg2. It also makes sure that the other resource groups prioritize recovering missing or redundant QueryNodes from __pending_nodes.

2. Scale out the cluster

  Assume we have the following scaling function, as sketched right after this line:
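  The page's code list sketches this as a placeholder plus a declarative update (reproduced below; scale_to is a stub you would back with your own orchestration tooling):

  from pymilvus import utility
  from pymilvus.client.types import ResourceGroupConfig

  _PENDING_NODES_RESOURCE_GROUP = "__pending_nodes"

  def scale_to(node_num: int):
      # scale the querynode number in Milvus into node_num
      pass

  # scale rg1 to 3 nodes and rg2 to 1 node
  utility.update_resource_groups({
      "rg1": ResourceGroupConfig(
          requests={"node_num": 3},
          limits={"node_num": 3},
          transfer_from=[{"resource_group": _PENDING_NODES_RESOURCE_GROUP}],
          transfer_to=[{"resource_group": _PENDING_NODES_RESOURCE_GROUP}],
      ),
      "rg2": ResourceGroupConfig(
          requests={"node_num": 1},
          limits={"node_num": 1},
          transfer_from=[{"resource_group": _PENDING_NODES_RESOURCE_GROUP}],
          transfer_to=[{"resource_group": _PENDING_NODES_RESOURCE_GROUP}],
      ),
  })
  scale_to(5)
  # rg1 has 3 nodes, rg2 has 1 node, __default_resource_group has 1 node.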

          
          diff --git a/localization/v2.4.x/site/ja/adminGuide/tls.json b/localization/v2.4.x/site/ja/adminGuide/tls.json
          index fddf6bb2e..e3675ddd3 100644
          --- a/localization/v2.4.x/site/ja/adminGuide/tls.json
          +++ b/localization/v2.4.x/site/ja/adminGuide/tls.json
          @@ -1 +1 @@
          -{"codeList":["openssl version\n","sudo apt install openssl\n","mkdir cert && cd cert\ntouch openssl.cnf gen.sh\n","#\n# OpenSSL example configuration file.\n# This is mostly being used for generation of certificate requests.\n#\n\n# This definition stops the following lines choking if HOME isn't\n# defined.\nHOME            = .\nRANDFILE        = $ENV::HOME/.rnd\n\n# Extra OBJECT IDENTIFIER info:\n#oid_file       = $ENV::HOME/.oid\noid_section     = new_oids\n\n# To use this configuration file with the \"-extfile\" option of the\n# \"openssl x509\" utility, name here the section containing the\n# X.509v3 extensions to use:\n# extensions        = \n# (Alternatively, use a configuration file that has only\n# X.509v3 extensions in its main [= default] section.)\n\n[ new_oids ]\n\n# We can add new OIDs in here for use by 'ca', 'req' and 'ts'.\n# Add a simple OID like this:\n# testoid1=1.2.3.4\n# Or use config file substitution like this:\n# testoid2=${testoid1}.5.6\n\n# Policies used by the TSA examples.\ntsa_policy1 = 1.2.3.4.1\ntsa_policy2 = 1.2.3.4.5.6\ntsa_policy3 = 1.2.3.4.5.7\n\n####################################################################\n[ ca ]\ndefault_ca  = CA_default        # The default ca section\n\n####################################################################\n[ CA_default ]\n\ndir     = ./demoCA      # Where everything is kept\ncerts       = $dir/certs        # Where the issued certs are kept\ncrl_dir     = $dir/crl      # Where the issued crl are kept\ndatabase    = $dir/index.txt    # database index file.\n#unique_subject = no            # Set to 'no' to allow creation of\n                    # several ctificates with same subject.\nnew_certs_dir   = $dir/newcerts     # default place for new certs.\n\ncertificate = $dir/cacert.pem   # The CA certificate\nserial      = $dir/serial       # The current serial number\ncrlnumber   = $dir/crlnumber    # the current crl number\n                    # must be commented out to leave a V1 CRL\ncrl     = $dir/crl.pem      # The current CRL\nprivate_key = $dir/private/cakey.pem# The private key\nRANDFILE    = $dir/private/.rand    # private random number file\n\nx509_extensions = usr_cert      # The extentions to add to the cert\n\n# Comment out the following two lines for the \"traditional\"\n# (and highly broken) format.\nname_opt    = ca_default        # Subject Name options\ncert_opt    = ca_default        # Certificate field options\n\n# Extension copying option: use with caution.\ncopy_extensions = copy\n\n# Extensions to add to a CRL. 
Note: Netscape communicator chokes on V2 CRLs\n# so this is commented out by default to leave a V1 CRL.\n# crlnumber must also be commented out to leave a V1 CRL.\n# crl_extensions    = crl_ext\n\ndefault_days    = 365           # how long to certify for\ndefault_crl_days= 30            # how long before next CRL\ndefault_md  = default       # use public key default MD\npreserve    = no            # keep passed DN ordering\n\n# A few difference way of specifying how similar the request should look\n# For type CA, the listed attributes must be the same, and the optional\n# and supplied fields are just that :-)\npolicy      = policy_match\n\n# For the CA policy\n[ policy_match ]\ncountryName     = match\nstateOrProvinceName = match\norganizationName    = match\norganizationalUnitName  = optional\ncommonName      = supplied\nemailAddress        = optional\n\n# For the 'anything' policy\n# At this point in time, you must list all acceptable 'object'\n# types.\n[ policy_anything ]\ncountryName     = optional\nstateOrProvinceName = optional\nlocalityName        = optional\norganizationName    = optional\norganizationalUnitName  = optional\ncommonName      = supplied\nemailAddress        = optional\n\n####################################################################\n[ req ]\ndefault_bits        = 2048\ndefault_keyfile     = privkey.pem\ndistinguished_name  = req_distinguished_name\nattributes      = req_attributes\nx509_extensions = v3_ca # The extentions to add to the self signed cert\n\n# Passwords for private keys if not present they will be prompted for\n# input_password = secret\n# output_password = secret\n\n# This sets a mask for permitted string types. There are several options. \n# default: PrintableString, T61String, BMPString.\n# pkix   : PrintableString, BMPString (PKIX recommendation before 2004)\n# utf8only: only UTF8Strings (PKIX recommendation after 2004).\n# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).\n# MASK:XXXX a literal mask value.\n# WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings.\nstring_mask = utf8only\n\nreq_extensions = v3_req # The extensions to add to a certificate request\n\n[ req_distinguished_name ]\ncountryName         = Country Name (2 letter code)\ncountryName_default     = AU\ncountryName_min         = 2\ncountryName_max         = 2\n\nstateOrProvinceName     = State or Province Name (full name)\nstateOrProvinceName_default = Some-State\n\nlocalityName            = Locality Name (eg, city)\n\n0.organizationName      = Organization Name (eg, company)\n0.organizationName_default  = Internet Widgits Pty Ltd\n\n# we can do this but it is not needed normally :-)\n#1.organizationName     = Second Organization Name (eg, company)\n#1.organizationName_default = World Wide Web Pty Ltd\n\norganizationalUnitName      = Organizational Unit Name (eg, section)\n#organizationalUnitName_default =\n\ncommonName          = Common Name (e.g. 
server FQDN or YOUR name)\ncommonName_max          = 64\n\nemailAddress            = Email Address\nemailAddress_max        = 64\n\n# SET-ex3           = SET extension number 3\n\n[ req_attributes ]\nchallengePassword       = A challenge password\nchallengePassword_min       = 4\nchallengePassword_max       = 20\n\nunstructuredName        = An optional company name\n\n[ usr_cert ]\n\n# These extensions are added when 'ca' signs a request.\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType            = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment           = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl      = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This is required for TSA certificates.\n# extendedKeyUsage = critical,timeStamping\n\n[ v3_req ]\n\n# Extensions to add to a certificate request\n\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n\n[ v3_ca ]\n\n\n# Extensions for a typical CA\n\n\n# PKIX recommendation.\n\nsubjectKeyIdentifier=hash\n\nauthorityKeyIdentifier=keyid:always,issuer\n\n# This is what PKIX recommends but some broken software chokes on critical\n# extensions.\n#basicConstraints = critical,CA:true\n# So we do this instead.\nbasicConstraints = CA:true\n\n# Key usage: this is typical for a CA certificate. 
However since it will\n# prevent it being used as an test self-signed certificate it is best\n# left out by default.\n# keyUsage = cRLSign, keyCertSign\n\n# Some might want this also\n# nsCertType = sslCA, emailCA\n\n# Include email address in subject alt name: another PKIX recommendation\n# subjectAltName=email:copy\n# Copy issuer details\n# issuerAltName=issuer:copy\n\n# DER hex encoding of an extension: beware experts only!\n# obj=DER:02:03\n# Where 'obj' is a standard or added object\n# You can even override a supported extension:\n# basicConstraints= critical, DER:30:03:01:01:FF\n\n[ crl_ext ]\n\n# CRL extensions.\n# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.\n\n# issuerAltName=issuer:copy\nauthorityKeyIdentifier=keyid:always\n\n[ proxy_cert_ext ]\n# These extensions should be added when creating a proxy certificate\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType            = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment           = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl      = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This really needs to be in place for it to be a proxy certificate.\nproxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo\n\n####################################################################\n[ tsa ]\n\ndefault_tsa = tsa_config1   # the default TSA section\n\n[ tsa_config1 ]\n\n# These are used by the TSA reply generation only.\ndir     = ./demoCA      # TSA root directory\nserial      = $dir/tsaserial    # The current serial number (mandatory)\ncrypto_device   = builtin       # OpenSSL engine to use for signing\nsigner_cert = $dir/tsacert.pem  # The TSA signing certificate\n                    # (optional)\ncerts       = $dir/cacert.pem   # Certificate chain to include in reply\n                    # (optional)\nsigner_key  = $dir/private/tsakey.pem # The TSA private key (optional)\n\ndefault_policy  = tsa_policy1       # Policy if request did not specify it\n                    # (optional)\nother_policies  = tsa_policy2, tsa_policy3  # acceptable policies (optional)\ndigests     = md5, sha1     # Acceptable message digests (mandatory)\naccuracy    = secs:1, millisecs:500, microsecs:100  # (optional)\nclock_precision_digits  = 0 # number of digits after dot. 
(optional)\nordering        = yes   # Is ordering defined for timestamps?\n                # (optional, default: no)\ntsa_name        = yes   # Must the TSA name be included in the reply?\n                # (optional, default: no)\ness_cert_id_chain   = no    # Must the ESS cert id chain be included?\n                # (optional, default: no)\n","#!/usr/bin/env sh\n# your variables\nCountry=\"CN\"\nState=\"Shanghai\"\nLocation=\"Shanghai\"\nOrganization=\"milvus\"\nOrganizational=\"milvus\"\nCommonName=\"localhost\"\n\necho \"generate ca.key\"\nopenssl genrsa -out ca.key 2048\n\necho \"generate ca.pem\"\nopenssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n\necho \"generate server SAN certificate\"\nopenssl genpkey -algorithm RSA -out server.key\nopenssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\necho \"generate client SAN certificate\"\nopenssl genpkey -algorithm RSA -out client.key\nopenssl req -new -nodes -key client.key -out client.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in client.csr -out client.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\n","chmod +x gen.sh\n./gen.sh\n","openssl genpkey -algorithm RSA -out ca.key\n","openssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n","openssl genpkey -algorithm RSA -out server.key\n","openssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\n","openssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n","tls:\n  serverPemPath: /milvus/tls/server.pem\n  serverKeyPath: /milvus/tls/server.key\n  caPemPath: /milvus/tls/ca.pem\n\ncommon:\n  security:\n    tlsMode: 1\n","├── docker-compose.yml\n├── milvus.yaml\n└── tls\n     ├── server.pem\n     ├── server.key\n     └── ca.pem\n","  standalone:\n    container_name: milvus-standalone\n    image: milvusdb/milvus:latest\n    command: [\"milvus\", \"run\", \"standalone\"]\n    security_opt:\n    - seccomp:unconfined\n    environment:\n      ETCD_ENDPOINTS: etcd:2379\n      MINIO_ADDRESS: minio:9000\n    volumes:\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/tls:/milvus/tls\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/milvus.yaml:/milvus/configs/milvus.yaml\n","sudo docker compose up -d\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n    uri=\"http://localhost:19530\",\n    secure=True,\n    server_pem_path=\"path_to/server.pem\",\n    server_name=\"localhost\"\n)\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n    uri=\"http://localhost:19530\",\n    secure=True,\n    client_pem_path=\"path_to/client.pem\",\n    client_key_path=\"path_to/client.key\",\n    ca_pem_path=\"path_to/ca.pem\",\n    
server_name=\"localhost\"\n)\n"],"headingContent":"","anchorList":[{"label":"輸送中の暗号化","href":"Encryption-in-Transit","type":1,"isActive":false},{"label":"独自の証明書を作成する","href":"Create-your-own-certificate","type":2,"isActive":false},{"label":"TLSでMilvusサーバーをセットアップする","href":"Set-up-a-Milvus-server-with-TLS","type":2,"isActive":false},{"label":"TLSでMilvusサーバーに接続する。","href":"Connect-to-the-Milvus-server-with-TLS","type":2,"isActive":false}]}
          \ No newline at end of file
          +{"codeList":["openssl version\n","sudo apt install openssl\n","mkdir cert && cd cert\ntouch openssl.cnf gen.sh\n","#\n# OpenSSL example configuration file.\n# This is mostly being used for generation of certificate requests.\n#\n\n# This definition stops the following lines choking if HOME isn't\n# defined.\nHOME            = .\nRANDFILE        = $ENV::HOME/.rnd\n\n# Extra OBJECT IDENTIFIER info:\n#oid_file       = $ENV::HOME/.oid\noid_section     = new_oids\n\n# To use this configuration file with the \"-extfile\" option of the\n# \"openssl x509\" utility, name here the section containing the\n# X.509v3 extensions to use:\n# extensions        = \n# (Alternatively, use a configuration file that has only\n# X.509v3 extensions in its main [= default] section.)\n\n[ new_oids ]\n\n# We can add new OIDs in here for use by 'ca', 'req' and 'ts'.\n# Add a simple OID like this:\n# testoid1=1.2.3.4\n# Or use config file substitution like this:\n# testoid2=${testoid1}.5.6\n\n# Policies used by the TSA examples.\ntsa_policy1 = 1.2.3.4.1\ntsa_policy2 = 1.2.3.4.5.6\ntsa_policy3 = 1.2.3.4.5.7\n\n####################################################################\n[ ca ]\ndefault_ca  = CA_default        # The default ca section\n\n####################################################################\n[ CA_default ]\n\ndir     = ./demoCA      # Where everything is kept\ncerts       = $dir/certs        # Where the issued certs are kept\ncrl_dir     = $dir/crl      # Where the issued crl are kept\ndatabase    = $dir/index.txt    # database index file.\n#unique_subject = no            # Set to 'no' to allow creation of\n                    # several ctificates with same subject.\nnew_certs_dir   = $dir/newcerts     # default place for new certs.\n\ncertificate = $dir/cacert.pem   # The CA certificate\nserial      = $dir/serial       # The current serial number\ncrlnumber   = $dir/crlnumber    # the current crl number\n                    # must be commented out to leave a V1 CRL\ncrl     = $dir/crl.pem      # The current CRL\nprivate_key = $dir/private/cakey.pem# The private key\nRANDFILE    = $dir/private/.rand    # private random number file\n\nx509_extensions = usr_cert      # The extentions to add to the cert\n\n# Comment out the following two lines for the \"traditional\"\n# (and highly broken) format.\nname_opt    = ca_default        # Subject Name options\ncert_opt    = ca_default        # Certificate field options\n\n# Extension copying option: use with caution.\ncopy_extensions = copy\n\n# Extensions to add to a CRL. 
Note: Netscape communicator chokes on V2 CRLs\n# so this is commented out by default to leave a V1 CRL.\n# crlnumber must also be commented out to leave a V1 CRL.\n# crl_extensions    = crl_ext\n\ndefault_days    = 365           # how long to certify for\ndefault_crl_days= 30            # how long before next CRL\ndefault_md  = default       # use public key default MD\npreserve    = no            # keep passed DN ordering\n\n# A few difference way of specifying how similar the request should look\n# For type CA, the listed attributes must be the same, and the optional\n# and supplied fields are just that :-)\npolicy      = policy_match\n\n# For the CA policy\n[ policy_match ]\ncountryName     = match\nstateOrProvinceName = match\norganizationName    = match\norganizationalUnitName  = optional\ncommonName      = supplied\nemailAddress        = optional\n\n# For the 'anything' policy\n# At this point in time, you must list all acceptable 'object'\n# types.\n[ policy_anything ]\ncountryName     = optional\nstateOrProvinceName = optional\nlocalityName        = optional\norganizationName    = optional\norganizationalUnitName  = optional\ncommonName      = supplied\nemailAddress        = optional\n\n####################################################################\n[ req ]\ndefault_bits        = 2048\ndefault_keyfile     = privkey.pem\ndistinguished_name  = req_distinguished_name\nattributes      = req_attributes\nx509_extensions = v3_ca # The extentions to add to the self signed cert\n\n# Passwords for private keys if not present they will be prompted for\n# input_password = secret\n# output_password = secret\n\n# This sets a mask for permitted string types. There are several options. \n# default: PrintableString, T61String, BMPString.\n# pkix   : PrintableString, BMPString (PKIX recommendation before 2004)\n# utf8only: only UTF8Strings (PKIX recommendation after 2004).\n# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).\n# MASK:XXXX a literal mask value.\n# WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings.\nstring_mask = utf8only\n\nreq_extensions = v3_req # The extensions to add to a certificate request\n\n[ req_distinguished_name ]\ncountryName         = Country Name (2 letter code)\ncountryName_default     = AU\ncountryName_min         = 2\ncountryName_max         = 2\n\nstateOrProvinceName     = State or Province Name (full name)\nstateOrProvinceName_default = Some-State\n\nlocalityName            = Locality Name (eg, city)\n\n0.organizationName      = Organization Name (eg, company)\n0.organizationName_default  = Internet Widgits Pty Ltd\n\n# we can do this but it is not needed normally :-)\n#1.organizationName     = Second Organization Name (eg, company)\n#1.organizationName_default = World Wide Web Pty Ltd\n\norganizationalUnitName      = Organizational Unit Name (eg, section)\n#organizationalUnitName_default =\n\ncommonName          = Common Name (e.g. 
server FQDN or YOUR name)\ncommonName_max          = 64\n\nemailAddress            = Email Address\nemailAddress_max        = 64\n\n# SET-ex3           = SET extension number 3\n\n[ req_attributes ]\nchallengePassword       = A challenge password\nchallengePassword_min       = 4\nchallengePassword_max       = 20\n\nunstructuredName        = An optional company name\n\n[ usr_cert ]\n\n# These extensions are added when 'ca' signs a request.\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType            = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment           = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl      = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This is required for TSA certificates.\n# extendedKeyUsage = critical,timeStamping\n\n[ v3_req ]\n\n# Extensions to add to a certificate request\n\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n\n[ v3_ca ]\n\n\n# Extensions for a typical CA\n\n\n# PKIX recommendation.\n\nsubjectKeyIdentifier=hash\n\nauthorityKeyIdentifier=keyid:always,issuer\n\n# This is what PKIX recommends but some broken software chokes on critical\n# extensions.\n#basicConstraints = critical,CA:true\n# So we do this instead.\nbasicConstraints = CA:true\n\n# Key usage: this is typical for a CA certificate. 
However since it will\n# prevent it being used as an test self-signed certificate it is best\n# left out by default.\n# keyUsage = cRLSign, keyCertSign\n\n# Some might want this also\n# nsCertType = sslCA, emailCA\n\n# Include email address in subject alt name: another PKIX recommendation\n# subjectAltName=email:copy\n# Copy issuer details\n# issuerAltName=issuer:copy\n\n# DER hex encoding of an extension: beware experts only!\n# obj=DER:02:03\n# Where 'obj' is a standard or added object\n# You can even override a supported extension:\n# basicConstraints= critical, DER:30:03:01:01:FF\n\n[ crl_ext ]\n\n# CRL extensions.\n# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.\n\n# issuerAltName=issuer:copy\nauthorityKeyIdentifier=keyid:always\n\n[ proxy_cert_ext ]\n# These extensions should be added when creating a proxy certificate\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType            = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment           = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl      = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This really needs to be in place for it to be a proxy certificate.\nproxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo\n\n####################################################################\n[ tsa ]\n\ndefault_tsa = tsa_config1   # the default TSA section\n\n[ tsa_config1 ]\n\n# These are used by the TSA reply generation only.\ndir     = ./demoCA      # TSA root directory\nserial      = $dir/tsaserial    # The current serial number (mandatory)\ncrypto_device   = builtin       # OpenSSL engine to use for signing\nsigner_cert = $dir/tsacert.pem  # The TSA signing certificate\n                    # (optional)\ncerts       = $dir/cacert.pem   # Certificate chain to include in reply\n                    # (optional)\nsigner_key  = $dir/private/tsakey.pem # The TSA private key (optional)\n\ndefault_policy  = tsa_policy1       # Policy if request did not specify it\n                    # (optional)\nother_policies  = tsa_policy2, tsa_policy3  # acceptable policies (optional)\ndigests     = md5, sha1     # Acceptable message digests (mandatory)\naccuracy    = secs:1, millisecs:500, microsecs:100  # (optional)\nclock_precision_digits  = 0 # number of digits after dot. 
(optional)\nordering        = yes   # Is ordering defined for timestamps?\n                # (optional, default: no)\ntsa_name        = yes   # Must the TSA name be included in the reply?\n                # (optional, default: no)\ness_cert_id_chain   = no    # Must the ESS cert id chain be included?\n                # (optional, default: no)\n","#!/usr/bin/env sh\n# your variables\nCountry=\"CN\"\nState=\"Shanghai\"\nLocation=\"Shanghai\"\nOrganization=\"milvus\"\nOrganizational=\"milvus\"\nCommonName=\"localhost\"\n\necho \"generate ca.key\"\nopenssl genrsa -out ca.key 2048\n\necho \"generate ca.pem\"\nopenssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n\necho \"generate server SAN certificate\"\nopenssl genpkey -algorithm RSA -out server.key\nopenssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\necho \"generate client SAN certificate\"\nopenssl genpkey -algorithm RSA -out client.key\nopenssl req -new -nodes -key client.key -out client.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in client.csr -out client.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\n","chmod +x gen.sh\n./gen.sh\n","openssl genpkey -algorithm RSA -out ca.key\n","openssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n","openssl genpkey -algorithm RSA -out server.key\n","openssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\n","openssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n","tls:\n  serverPemPath: /milvus/tls/server.pem\n  serverKeyPath: /milvus/tls/server.key\n  caPemPath: /milvus/tls/ca.pem\n\ncommon:\n  security:\n    tlsMode: 1\n","├── docker-compose.yml\n├── milvus.yaml\n└── tls\n     ├── server.pem\n     ├── server.key\n     └── ca.pem\n","  standalone:\n    container_name: milvus-standalone\n    image: milvusdb/milvus:latest\n    command: [\"milvus\", \"run\", \"standalone\"]\n    security_opt:\n    - seccomp:unconfined\n    environment:\n      ETCD_ENDPOINTS: etcd:2379\n      MINIO_ADDRESS: minio:9000\n    volumes:\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/tls:/milvus/tls\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/milvus.yaml:/milvus/configs/milvus.yaml\n","sudo docker compose up -d\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n    uri=\"https://localhost:19530\",\n    secure=True,\n    server_pem_path=\"path_to/server.pem\",\n    server_name=\"localhost\"\n)\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n    uri=\"https://localhost:19530\",\n    secure=True,\n    client_pem_path=\"path_to/client.pem\",\n    client_key_path=\"path_to/client.key\",\n    ca_pem_path=\"path_to/ca.pem\",\n    server_name=\"localhost\"\n)\n","curl --cacert path_to/ca.pem 
https://localhost:19530/v2/vectordb/collections/list\n","curl --cert path_to/client.pem --key path_to/client.key --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list\n"],"headingContent":"Encryption in Transit","anchorList":[{"label":"通信の暗号化","href":"Encryption-in-Transit","type":1,"isActive":false},{"label":"独自の証明書を作成する","href":"Create-your-own-certificate","type":2,"isActive":false},{"label":"TLSによるMilvusサーバーの設定","href":"Set-up-a-Milvus-server-with-TLS","type":2,"isActive":false},{"label":"TLSでMilvusサーバに接続します。","href":"Connect-to-the-Milvus-server-with-TLS","type":2,"isActive":false},{"label":"Milvus RESTfulサーバへのTLS接続","href":"Connect-to-the-Milvus-RESTful-server-with-TLS","type":2,"isActive":false}]}
          \ No newline at end of file
          diff --git a/localization/v2.4.x/site/ja/adminGuide/tls.md b/localization/v2.4.x/site/ja/adminGuide/tls.md
          index 5165a8609..303007db8 100644
          --- a/localization/v2.4.x/site/ja/adminGuide/tls.md
          +++ b/localization/v2.4.x/site/ja/adminGuide/tls.md
          @@ -1,7 +1,7 @@
           ---
           id: tls.md
-title: Encryption During Transport
-summary: Describes how to enable TLS proxy in Milvus.
+title: Encryption in Transit
+summary: Explains how to enable TLS proxy in Milvus.
           ---
           

Encryption in Transit

TLS (Transport Layer Security) is an encryption protocol that secures communication. The Milvus proxy supports both one-way and two-way TLS authentication.
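Before wiring up a client, it can help to confirm at the transport level that the server presents a certificate your CA actually signed. A minimal sketch (not part of the original guide), assuming the Milvus server from this topic listens on localhost:19530 and `ca.pem` is the CA certificate generated below:

```shell
# Handshake check only: prints the certificate chain and a verify result.
# "Verify return code: 0 (ok)" means the CA in ca.pem signed the server cert.
openssl s_client -connect localhost:19530 -CAfile ca.pem </dev/null | grep "Verify return code"
```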

-This topic describes how to enable TLS proxy in Milvus.
+This topic describes how to enable TLS on the Milvus proxy for both gRPC and RESTful traffic.

TLS and user authentication are two distinct security approaches. If you have enabled both user authentication and TLS in your Milvus system, you need to provide a username, a password, and the certificate file paths. For information on how to enable user authentication, refer to Authenticate User Access.
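As a sketch of how the two mechanisms combine on the RESTful side (an assumption-laden example: authentication must be enabled, `root:Milvus` is a placeholder username:password pair, and the Bearer form follows the Milvus RESTful convention):

```shell
# TLS (via --cacert) and user authentication (via the Authorization header)
# are supplied independently on the same request.
curl --cacert path_to/ca.pem \
     --header "Authorization: Bearer root:Milvus" \
     https://localhost:19530/v2/vectordb/collections/list
```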

          @@ -437,7 +437,7 @@ openssl x509 -req -days 3650 -in client.csr -o

-The variables in the gen.sh file are crucial to the process of creating a certificate signing request file. The first five variables are the basic signing information, including country, state, location, organization, and organizational unit. Be careful when setting the CommonName, as it is verified during client-server communication.
+The variables in the gen.sh file are crucial during the process of creating a certificate signing request file. The first five variables are the basic signing information, including country, state, location, organization, and organizational unit. Be careful when setting the CommonName, as it is verified during client-server communication.

-Run gen.sh to generate the certificates.
+Run the gen.sh file to create the certificates.

          chmod +x gen.sh
           ./gen.sh
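Since the CommonName is verified during client-server communication, it is worth inspecting the generated server certificate before using it. A quick check (not from the original guide; assumes OpenSSL 1.1.1 or later for the -ext flag):

```shell
# Print the subject (which contains the CN) and any SAN entries
# of the freshly generated server certificate.
openssl x509 -in server.pem -noout -subject -ext subjectAltName
```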
          @@ -562,7 +562,7 @@ openssl x509 -req -days 3650 -in client.csr -o
           
          from pymilvus import MilvusClient
           
           client = MilvusClient(
          -    uri="http://localhost:19530",
          +    uri="https://localhost:19530",
               secure=True,
               server_pem_path="path_to/server.pem",
               server_name="localhost"
          @@ -572,7 +572,7 @@ client = MilvusClient(
           
          from pymilvus import MilvusClient
           
           client = MilvusClient(
          -    uri="http://localhost:19530",
          +    uri="https://localhost:19530",
               secure=True,
               client_pem_path="path_to/client.pem",
               client_key_path="path_to/client.key",
          @@ -581,3 +581,23 @@ client = MilvusClient(
           )
           

For details, refer to example_tls1.py and example_tls2.py.

+Connect to the Milvus RESTful server with TLS
+
+For RESTful APIs, you can verify TLS using the curl command.
+
+One-way TLS connection
+
+curl --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list
+
+Two-way TLS connection
+
+curl --cert path_to/client.pem --key path_to/client.key --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list
          diff --git a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-docker.json b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-docker.json index 622a6f6f7..9c33a0f8a 100644 --- a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-docker.json +++ b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-docker.json @@ -1,32 +1 @@ -{ - "codeList": [ - "...\nrootcoord:\n container_name: milvus-rootcoord\n image: milvusdb/milvus:v2.4.9\n...\nproxy:\n container_name: milvus-proxy\n image: milvusdb/milvus:v2.4.9\n...\nquerycoord:\n container_name: milvus-querycoord\n image: milvusdb/milvus:v2.4.9 \n...\nquerynode:\n container_name: milvus-querynode\n image: milvusdb/milvus:v2.4.9\n...\nindexcoord:\n container_name: milvus-indexcoord\n image: milvusdb/milvus:v2.4.9\n...\nindexnode:\n container_name: milvus-indexnode\n image: milvusdb/milvus:v2.4.9 \n...\ndatacoord:\n container_name: milvus-datacoord\n image: milvusdb/milvus:v2.4.9 \n...\ndatanode:\n container_name: milvus-datanode\n image: milvusdb/milvus:v2.4.9\n", - "docker compose down\ndocker compose up -d\n", - "docker stop \n", - "# migration.yaml\ncmd:\n # Option: run/backup/rollback\n type: run\n runWithBackup: true\nconfig:\n sourceVersion: 2.1.4 # Specify your milvus version\n targetVersion: 2.4.9\n backupFilePath: /tmp/migration.bak\nmetastore:\n type: etcd\netcd:\n endpoints:\n - milvus-etcd:2379 # Use the etcd container name\n rootPath: by-dev # The root path where data is stored in etcd\n metaSubPath: meta\n kvSubPath: kv\n", - "# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvus/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n", - "Update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n" - ], - "headingContent": "", - "anchorList": [ - { - "label": "Docker ComposeでMilvusクラスタをアップグレードする", - "href": "Upgrade-Milvus-Cluster-with-Docker-Compose", - "type": 1, - "isActive": false - }, - { - "label": "イメージを変えてMilvusをアップグレード", - "href": "Upgrade-Milvus-by-changing-its-image", - "type": 2, - "isActive": false - }, - { - "label": "メタデータの移行", - "href": "Migrate-the-metadata", - "type": 2, - "isActive": false - }, - { "label": "次の記事", "href": "Whats-next", "type": 2, "isActive": false } - ] -} +{"codeList":["...\nrootcoord:\n container_name: milvus-rootcoord\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nproxy:\n container_name: milvus-proxy\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nquerycoord:\n container_name: milvus-querycoord\n image: milvusdb/milvus:v2.4.13-hotfix \n...\nquerynode:\n container_name: milvus-querynode\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nindexcoord:\n container_name: milvus-indexcoord\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nindexnode:\n container_name: milvus-indexnode\n image: milvusdb/milvus:v2.4.13-hotfix \n...\ndatacoord:\n container_name: milvus-datacoord\n image: milvusdb/milvus:v2.4.13-hotfix \n...\ndatanode:\n container_name: milvus-datanode\n image: milvusdb/milvus:v2.4.13-hotfix\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n # Option: run/backup/rollback\n type: run\n runWithBackup: true\nconfig:\n sourceVersion: 2.1.4 # Specify your milvus version\n targetVersion: 2.4.13-hotfix\n backupFilePath: /tmp/migration.bak\nmetastore:\n 
type: etcd\netcd:\n endpoints:\n - milvus-etcd:2379 # Use the etcd container name\n rootPath: by-dev # The root path where data is stored in etcd\n metaSubPath: meta\n kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvus/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","Update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"Upgrade Milvus Cluster with Docker Compose","anchorList":[{"label":"Docker Composeを使用したMilvusクラスタのアップグレード","href":"Upgrade-Milvus-Cluster-with-Docker-Compose","type":1,"isActive":false},{"label":"Milvusのイメージ変更によるアップグレード","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"メタデータの移行","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"次のステップ","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-docker.md b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-docker.md index 625ca93e4..edf8fd1d0 100644 --- a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-docker.md +++ b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-docker.md @@ -1,7 +1,7 @@ --- id: upgrade_milvus_cluster-docker.md summary: Docker ComposeでMilvusクラスタをアップグレードする方法をご紹介します。 -title: Docker ComposeでMilvusクラスタをアップグレードする +title: Docker Composeを使用したMilvusクラスタのアップグレード ---

Upgrade Milvus Cluster with Docker Compose

This topic describes how to upgrade your Milvus using Docker Compose.

-In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before upgrading from v2.1.x to v2.4.9.
+In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before upgrading from v2.1.x to v2.4.13-hotfix.

Upgrade Milvus by changing its image

-In normal cases, you can upgrade Milvus as follows:
+In normal cases, Milvus can be upgraded as follows:

-1. Change the Milvus image tag in docker-compose.yaml.

-   Note that you need to change the image tags of the proxy, all the coordinators, and all the worker nodes.

+1. Change the image tag of Milvus in docker-compose.yaml.

+   Note that you need to change the image tags of the proxy, all the coordinators, and all the worker nodes.

            ...
             rootcoord:
               container_name: milvus-rootcoord
            -  image: milvusdb/milvus:v2.4.9
            +  image: milvusdb/milvus:v2.4.13-hotfix
             ...
             proxy:
               container_name: milvus-proxy
            -  image: milvusdb/milvus:v2.4.9
            +  image: milvusdb/milvus:v2.4.13-hotfix
             ...
             querycoord:
               container_name: milvus-querycoord
            -  image: milvusdb/milvus:v2.4.9  
            +  image: milvusdb/milvus:v2.4.13-hotfix  
             ...
             querynode:
               container_name: milvus-querynode
            -  image: milvusdb/milvus:v2.4.9
            +  image: milvusdb/milvus:v2.4.13-hotfix
             ...
             indexcoord:
               container_name: milvus-indexcoord
            -  image: milvusdb/milvus:v2.4.9
            +  image: milvusdb/milvus:v2.4.13-hotfix
             ...
             indexnode:
               container_name: milvus-indexnode
            -  image: milvusdb/milvus:v2.4.9 
            +  image: milvusdb/milvus:v2.4.13-hotfix 
             ...
             datacoord:
               container_name: milvus-datacoord
            -  image: milvusdb/milvus:v2.4.9   
            +  image: milvusdb/milvus:v2.4.13-hotfix   
             ...
             datanode:
               container_name: milvus-datanode
            -  image: milvusdb/milvus:v2.4.9
            +  image: milvusdb/milvus:v2.4.13-hotfix
             
2. Run the following commands to perform the upgrade.

            docker compose down
            @@ -105,7 +105,7 @@ cmd:
               runWithBackup: true
             config:
               sourceVersion: 2.1.4   # Specify your milvus version
            -  targetVersion: 2.4.9
            +  targetVersion: 2.4.13-hotfix
               backupFilePath: /tmp/migration.bak
             metastore:
               type: etcd
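After editing migration.yaml, the migration itself is run in a container on the same compose network, as listed in the codeList above. A sketch, assuming the default `milvus` network and migration.yaml in the current directory (the exact image tag shown in the codeList may differ by release):

```shell
# Mount migration.yaml into the container and run the meta-migration binary.
docker run --rm -it --network milvus \
  -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml \
  milvusdb/meta-migration:v2.2.0 \
  /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml
```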
            @@ -147,8 +147,8 @@ docker compose up -d
             
- Scale a Milvus cluster
- If you are ready to deploy your cluster on clouds, refer to:
          6. diff --git a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-helm.json b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-helm.json index d8a4d68ea..f9c9aae5d 100644 --- a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-helm.json +++ b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-helm.json @@ -1 +1 @@ -{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME CHART VERSION APP VERSION DESCRIPTION \nzilliztech/milvus 4.1.34 2.4.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.33 2.4.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.32 2.4.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.31 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.30 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.29 2.4.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.24 2.3.11 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.23 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.22 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.21 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.20 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.18 2.3.10 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.18 2.3.9 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.17 2.3.8 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.16 2.3.7 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.15 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.14 2.3.6 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.13 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.12 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.11 2.3.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.10 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.9 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.8 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.7 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.6 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.5 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.4 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.3 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.2 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.1 2.3.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.0 2.3.0 Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'\n","helm repo update\nhelm upgrade my-release zilliztech/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION \nnew-release default 1 
2022-11-21 15:41:25.51539 +0800 CST deployed milvus-3.2.18 2.1.4 \n","NAME READY STATUS RESTARTS AGE\nmy-release-etcd-0 1/1 Running 0 21m\nmy-release-etcd-1 1/1 Running 0 21m\nmy-release-etcd-2 1/1 Running 0 21m\nmy-release-milvus-datacoord-664c58798d-fl75s 1/1 Running 0 21m\nmy-release-milvus-datanode-5f75686c55-xfg2r 1/1 Running 0 21m\nmy-release-milvus-indexcoord-5f98b97589-2l48r 1/1 Running 0 21m\nmy-release-milvus-indexnode-857b4ddf98-vmd75 1/1 Running 0 21m\nmy-release-milvus-proxy-6c548f787f-scspp 1/1 Running 0 21m\nmy-release-milvus-querycoord-c454f44cd-dwmwq 1/1 Running 0 21m\nmy-release-milvus-querynode-76bb4946d-lbrz6 1/1 Running 0 21m\nmy-release-milvus-rootcoord-7764c5b686-62msm 1/1 Running 0 21m\nmy-release-minio-0 1/1 Running 0 21m\nmy-release-minio-1 1/1 Running 0 21m\nmy-release-minio-2 1/1 Running 0 21m\nmy-release-minio-3 1/1 Running 0 21m\nmy-release-pulsar-bookie-0 1/1 Running 0 21m\nmy-release-pulsar-bookie-1 1/1 Running 0 21m\nmy-release-pulsar-bookie-2 1/1 Running 0 21m\nmy-release-pulsar-bookie-init-tjxpj 0/1 Completed 0 21m\nmy-release-pulsar-broker-0 1/1 Running 0 21m\nmy-release-pulsar-proxy-0 1/1 Running 0 21m\nmy-release-pulsar-pulsar-init-c8vvc 0/1 Completed 0 21m\nmy-release-pulsar-recovery-0 1/1 Running 0 21m\nmy-release-pulsar-zookeeper-0 1/1 Running 0 21m\nmy-release-pulsar-zookeeper-1 1/1 Running 0 20m\nmy-release-pulsar-zookeeper-2 1/1 Running 0 20m\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9\n"],"headingContent":"","anchorList":[{"label":"MilvusクラスターをHelmチャートでアップグレード","href":"Upgrade-Milvus-Cluster-with-Helm-Chart","type":1,"isActive":false},{"label":"ミルヴァスのヘルムチャートをチェック","href":"Check-Milvus-Helm-Chart","type":2,"isActive":false},{"label":"ローリングアップグレードの実施","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Helmを使ってMilvusをアップグレード","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"メタデータの移行","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME CHART VERSION APP VERSION DESCRIPTION \nzilliztech/milvus 4.1.34 2.4.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.33 2.4.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.32 2.4.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.31 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.30 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.29 2.4.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.24 2.3.11 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.23 
2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.22 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.21 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.20 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.18 2.3.10 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.18 2.3.9 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.17 2.3.8 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.16 2.3.7 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.15 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.14 2.3.6 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.13 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.12 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.11 2.3.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.10 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.9 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.8 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.7 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.6 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.5 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.4 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.3 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.2 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.1 2.3.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.0 2.3.0 Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'\n","helm repo update\nhelm upgrade my-release zilliztech/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION \nnew-release default 1 2022-11-21 15:41:25.51539 +0800 CST deployed milvus-3.2.18 2.1.4 \n","NAME READY STATUS RESTARTS AGE\nmy-release-etcd-0 1/1 Running 0 21m\nmy-release-etcd-1 1/1 Running 0 21m\nmy-release-etcd-2 1/1 Running 0 21m\nmy-release-milvus-datacoord-664c58798d-fl75s 1/1 Running 0 21m\nmy-release-milvus-datanode-5f75686c55-xfg2r 1/1 Running 0 21m\nmy-release-milvus-indexcoord-5f98b97589-2l48r 1/1 Running 0 21m\nmy-release-milvus-indexnode-857b4ddf98-vmd75 1/1 Running 0 21m\nmy-release-milvus-proxy-6c548f787f-scspp 1/1 Running 0 21m\nmy-release-milvus-querycoord-c454f44cd-dwmwq 1/1 Running 0 21m\nmy-release-milvus-querynode-76bb4946d-lbrz6 1/1 Running 0 21m\nmy-release-milvus-rootcoord-7764c5b686-62msm 1/1 Running 0 21m\nmy-release-minio-0 1/1 Running 0 21m\nmy-release-minio-1 1/1 Running 0 21m\nmy-release-minio-2 1/1 Running 0 21m\nmy-release-minio-3 1/1 Running 0 21m\nmy-release-pulsar-bookie-0 1/1 Running 0 21m\nmy-release-pulsar-bookie-1 1/1 Running 0 21m\nmy-release-pulsar-bookie-2 1/1 Running 0 21m\nmy-release-pulsar-bookie-init-tjxpj 0/1 Completed 0 21m\nmy-release-pulsar-broker-0 1/1 Running 0 21m\nmy-release-pulsar-proxy-0 1/1 Running 0 21m\nmy-release-pulsar-pulsar-init-c8vvc 0/1 Completed 0 21m\nmy-release-pulsar-recovery-0 1/1 Running 0 
21m\nmy-release-pulsar-zookeeper-0 1/1 Running 0 21m\nmy-release-pulsar-zookeeper-1 1/1 Running 0 20m\nmy-release-pulsar-zookeeper-2 1/1 Running 0 20m\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix\n"],"headingContent":"Upgrade Milvus Cluster with Helm Chart","anchorList":[{"label":"HelmチャートによるMilvusクラスタのアップグレード","href":"Upgrade-Milvus-Cluster-with-Helm-Chart","type":1,"isActive":false},{"label":"Milvus Helmチャートの確認","href":"Check-Milvus-Helm-Chart","type":2,"isActive":false},{"label":"ローリングアップグレードの実施","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Helmを使用したMilvusのアップグレード","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"メタデータの移行","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-helm.md b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-helm.md index 9c53537cd..d043cb3e2 100644 --- a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-helm.md +++ b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-helm.md @@ -4,8 +4,8 @@ label: Helm order: 1 group: upgrade_milvus_cluster-operator.md related_key: upgrade Milvus Cluster -summary: Helm Chartを使用してMilvusクラスタをアップグレードする方法をご紹介します。 -title: MilvusクラスターをHelmチャートでアップグレード +summary: MilvusクラスタをHelm Chartでアップグレードする方法をご紹介します。 +title: HelmチャートによるMilvusクラスタのアップグレード ---

Upgrade Milvus Cluster with Helm Chart

-Run the following commands to check new Milvus versions.
+Run the following commands to check the new versions of Milvus.

            $ helm repo update
             $ helm search repo zilliztech/milvus --versions
             
            @@ -85,11 +85,11 @@ zilliztech/milvus 4.1.2 2.3.1 Milvus is an ope zilliztech/milvus 4.1.1 2.3.0 Milvus is an open-source vector database built ... zilliztech/milvus 4.1.0 2.3.0 Milvus is an open-source vector database built ...
-You can choose your Milvus upgrade path as follows:
-
-- [Conduct a rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.9.
+You can choose the upgrade path for your Milvus as follows:
+
+- [Conduct a rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.13-hotfix.

Conduct a rolling upgrade

-Since Milvus 2.2.3, you can configure Milvus coordinators to work in active-standby mode and enable the rolling upgrade feature for them, so that Milvus can respond to incoming requests during the coordinator upgrades. In previous releases, coordinators were removed and then created during an upgrade, which could cause service downtime.
+Since Milvus 2.2.3, you can configure Milvus coordinators to work in active-standby mode and enable the rolling upgrade feature, so that Milvus can respond to incoming requests during the coordinator upgrades. In previous releases, coordinators were removed and then created during an upgrade, which could cause service downtime.

A rolling upgrade requires coordinators to work in active-standby mode. You can use the script we provide to configure the coordinators to work in active-standby mode and start the rolling upgrade.
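If you prefer not to use the script, active-standby can also be switched on directly through Helm values. This is a sketch only; the value names follow the milvus-helm chart and should be checked against your chart version:

```shell
# Run two replicas of each coordinator and enable active-standby for them.
helm upgrade my-release zilliztech/milvus --reuse-values \
  --set rootCoordinator.replicas=2 --set rootCoordinator.activeStandby.enabled=true \
  --set queryCoordinator.replicas=2 --set queryCoordinator.activeStandby.enabled=true \
  --set indexCoordinator.replicas=2 --set indexCoordinator.activeStandby.enabled=true \
  --set dataCoordinator.replicas=2 --set dataCoordinator.activeStandby.enabled=true
```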

-Based on the rolling update capabilities provided by Kubernetes, the script above enforces an ordered update of the deployments according to their dependencies. In addition, Milvus implements a mechanism to keep its components compatible during the upgrade, which considerably reduces service downtime.
+Based on the rolling update capabilities provided by Kubernetes, the script above enforces an ordered update of the deployments according to their dependencies. In addition, Milvus implements a mechanism to ensure that its components remain compatible with those depending on them during the upgrade, which considerably reduces potential service downtime.

This script applies only to upgrading Milvus installed with Helm. The following table lists the command flags available in the script.

      @@ -123,14 +123,14 @@ zilliztech/milvus 4.1.0 2.3.0 Milvus is an ope
o | Operation | update
-After ensuring that all deployments in your Milvus instance are in a healthy state, run the following command to upgrade the Milvus instance to 2.4.9.
-
-sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'
+After ensuring that all deployments in your Milvus instance are in a healthy state, run the following command to upgrade the Milvus instance to 2.4.13-hotfix.
+
+sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'
       
-- The script hardcodes the upgrade order of the deployments and cannot change it.
-- The script uses kubectl patch to update the deployments and kubectl rollout status to watch their status.
-- The script uses kubectl patch to update the app.kubernetes.io/version label of the deployments to the one specified after the -t flag in the command.
+- The script hardcodes the upgrade order of the deployments and cannot change it.
+- The script uses kubectl patch to update the deployments and kubectl rollout status to monitor their status.
+- The script uses kubectl patch to update the app.kubernetes.io/version label of the deployments to the one specified after the -t flag in the command.
      @@ -153,7 +153,7 @@ zilliztech/milvus 4.1.0 2.3.0 Milvus is an ope
      helm repo update
       helm upgrade my-release zilliztech/milvus --reuse-values --version=4.1.24 # use the helm chart version here
       
-Use the Helm chart version in the preceding command. For details on how to obtain the Helm chart version, refer to "Check the Milvus version".
+Use the Helm chart version in the preceding command. For details on how to obtain the Helm chart version, refer to "Checking the Milvus version".

Migrate the metadata

1. Check the Milvus version

-Run $ helm list to check your Milvus app version. The APP VERSION is 2.1.4.
+Run $ helm list to check your Milvus app version. You will find that the APP VERSION is 2.1.4.

      NAME                NAMESPACE   REVISION    UPDATED                                 STATUS      CHART           APP VERSION    
       new-release         default     1           2022-11-21 15:41:25.51539 +0800 CST     deployed    milvus-3.2.18   2.1.4 
       
      @@ -207,7 +207,7 @@ my-release-pulsar-zookeeper-2
      $ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'
       # milvusdb/milvus:v2.1.4
       
-4. Migrate the metadata
-
-A major change in Milvus 2.2 is the metadata structure of segment indexes. Therefore, you need to use Helm to migrate the metadata while upgrading Milvus from v2.1.x to v2.2.0. Here is a script for you to safely migrate your metadata.
+4. Migrate the metadata
+
+The major change in Milvus 2.2 is the metadata structure of segment indexes. Therefore, you need to use Helm to migrate the metadata while upgrading Milvus from v2.1.x to v2.2.0. Here is a script for you to safely migrate your metadata.

This script applies only to Milvus installed on a K8s cluster. If an error occurs during the process, first roll back to the previous version with the rollback operation.

The following table lists the operations you can perform for meta migration.

@@ -217,11 +217,11 @@ my-release-pulsar-zookeeper-2
@@ -230,30 +230,30 @@ my-release-pulsar-zookeeper-2

| Flag | Description | Default | Required |
|------|-------------|---------|----------|
| i | Milvus instance name. | None | True |
| n | Namespace that Milvus is installed in. | default | False |
| s | Source Milvus version. | None | True |
| t | Target Milvus version. | None | True |
| r | Root path of Milvus meta. | by-dev | False |
| w | New Milvus image tag. | milvusdb/milvus:v2.2.0 | False |
| m | Meta migration image tag. | milvusdb/meta-migration:v2.2.0 | False |
| o | Meta migration operation. | migrate | False |
| d | Whether to delete the migration pod after the migration completes. | false | False |
| c | Storage class for the meta migration PVC. | default storage class | False |

1. Migrate the metadata

1. Download the migration script.
2. Stop the Milvus components. Any live session in the Milvus etcd can cause the migration to fail.
3. Create a backup of the Milvus metadata.
4. Migrate the Milvus metadata.
5. Start the Milvus components with the new image.
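The steps below assume migrate.sh is already present and executable. A sketch of fetching it, assuming it lives in the milvus-io/milvus repository (the exact path may vary by release):

```shell
# Download the meta-migration helper script and make it executable.
wget https://raw.githubusercontent.com/milvus-io/milvus/master/deployments/migrate-meta/migrate.sh
chmod +x migrate.sh
```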
-2. Upgrade Milvus from v2.1.x to 2.4.9

-The following commands assume that you upgrade Milvus from v2.1.4 to 2.4.9. Change them to the versions that fit your needs.
+2. Upgrade Milvus from v2.1.x to 2.4.13-hotfix

+The following commands assume that you upgrade Milvus from v2.1.4 to 2.4.13-hotfix. Change them to the versions that fit your needs.

1. Specify your Milvus instance name, the source Milvus version, and the target Milvus version.

   -./migrate.sh -i my-release -s 2.1.4 -t 2.4.9
   +./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix
         
2. If your Milvus is not installed in the default K8s namespace, specify the namespace with -n.

   -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9
   +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix
3. If your Milvus is installed with a custom rootpath, specify the root path with -r.

   -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev
   +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev
         
4. If your Milvus is installed with a custom image, specify the image tag with -w.

   -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9
   +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix
         
5. Set -d true if you want to automatically remove the migration pod after the migration is completed.

   -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true
   +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true
         
6. Roll back and migrate again if the migration fails.

   -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
   -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9
   +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
   +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix
         
      diff --git a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-operator.json b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-operator.json index 498a7bc26..dab499888 100644 --- a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-operator.json +++ b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-operator.json @@ -1,47 +1 @@ -{ - "codeList": [ - "helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n", - "apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.9\n", - "apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.9\n", - "apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n", - "kubectl apply -f milvusupgrade.yml\n", - "apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.9\n", - "kubectl apply -f milvusupgrade.yaml\n", - "apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.9\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.9\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n", - "$ kubectl apply -f https://github.com/zilliztech/milvus-operator/blob/main/config/samples/beta/milvusupgrade.yaml\n", - "kubectl describe milvus release-name\n" - ], - "headingContent": "", - "anchorList": [ - { - "label": "Milvus OperatorでMilvusクラスタをアップグレードする", - "href": "Upgrade-Milvus-Cluster-with-Milvus-Operator", - "type": 1, - "isActive": false - }, - { - "label": "Milvusオペレーターのアップグレード", - "href": "Upgrade-your-Milvus-operator", - "type": 2, - "isActive": false - }, - { - "label": "ローリングアップグレードの実施", - "href": "Conduct-a-rolling-upgrade", - "type": 2, - "isActive": false - }, - { - "label": "イメージを変えてMilvusをアップグレード", - "href": "Upgrade-Milvus-by-changing-its-image", - "type": 2, - "isActive": false - }, - { - "label": "メタデータの移行", - "href": "Migrate-the-metadata", - "type": 2, - "isActive": false - } - ] -} +{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: 
rollingDowngrade\n image: milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.13-hotfix\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.13-hotfix\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.13-hotfix\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://github.com/zilliztech/milvus-operator/blob/main/config/samples/beta/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"Upgrade Milvus Cluster with Milvus Operator","anchorList":[{"label":"Milvus OperatorでMilvusクラスタをアップグレードする","href":"Upgrade-Milvus-Cluster-with-Milvus-Operator","type":1,"isActive":false},{"label":"Milvus operatorのアップグレード","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"ローリングアップグレードの実施","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"イメージを変更してMilvusをアップグレードする","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"メタデータの移行","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-operator.md b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-operator.md index 56184db55..732dcad41 100644 --- a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-operator.md +++ b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_cluster-operator.md @@ -24,7 +24,7 @@ title: Milvus OperatorでMilvusクラスタをアップグレードする >

This guide describes how to upgrade your Milvus cluster with Milvus Operator.

Upgrade your Milvus operator

Once you have upgraded your Milvus operator to the latest version, you have the following choices:
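For reference, the upgrade commands from the codeList above are:

```shell
# Upgrade the Milvus Operator Helm release to the latest chart.
helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/
helm repo update zilliztech-milvus-operator
helm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator
```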

Conduct a rolling upgrade

-In the configuration file above, set spec.components.enableRollingUpdate to true and set spec.components.image to the desired Milvus version.
-
-By default, Milvus performs a rolling upgrade for coordinators in an ordered way, replacing the coordinator pod images one after another. To reduce the upgrade time, set spec.components.imageUpdateMode to all so that Milvus replaces all pod images at the same time.
+In the configuration file above, set spec.components.enableRollingUpdate to true and set spec.components.image to your desired Milvus version.
+
+By default, Milvus performs a rolling upgrade for coordinators in an ordered way, in which it replaces the coordinator pod images one after another. To reduce the upgrade time, consider setting spec.components.imageUpdateMode to all so that Milvus replaces all pod images at the same time.

      apiVersion: milvus.io/v1beta1
       kind: Milvus
       metadata:
      @@ -88,9 +88,9 @@ spec:
         components:
           enableRollingUpdate: true
           imageUpdateMode: all
      -    image: milvusdb/milvus:v2.4.9
      +    image: milvusdb/milvus:v2.4.13-hotfix
       
-Alternatively, setting spec.components.imageUpdateMode to rollingDowngrade makes Milvus replace its coordinator pod images with a lower version.
+To have Milvus replace its coordinator pod images with a lower version, set spec.components.imageUpdateMode to rollingDowngrade.

      apiVersion: milvus.io/v1beta1
       kind: Milvus
       metadata:
      @@ -101,7 +101,7 @@ spec:
           imageUpdateMode: rollingDowngrade
           image: milvusdb/milvus:<some-old-version>
       
-Then save your configuration as a YAML file (for example, milvusupgrade.yml) and apply this configuration file to your Milvus instance as follows:
+Then, save your configuration as a YAML file (for example, milvusupgrade.yml) and apply this configuration file to your Milvus instance as follows:

      kubectl apply -f milvusupgrade.yml
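To watch the rolling upgrade proceed, a generic check such as the following can be used (a sketch; the namespace and the app.kubernetes.io/instance label value are placeholders for your own release):

```shell
# Pods are replaced one after another in rollingUpgrade mode.
kubectl -n default get pods -l app.kubernetes.io/instance=my-release -w
```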
       

Upgrade Milvus by changing its image

-In normal cases, you can simply update your Milvus to the latest by changing its image. However, note that there will be a certain downtime when upgrading Milvus in this way.
+In normal cases, you can simply update your Milvus to the latest by changing its image. Note, however, that there will be a certain downtime when upgrading Milvus in this way.

Create a configuration file as follows and save it as milvusupgrade.yaml:

      apiVersion: milvus.io/v1beta1
       kind: Milvus
      @@ -128,7 +128,7 @@ metadata:
       spec:
         # Omit other fields ...
         components:
      -   image: milvusdb/milvus:v2.4.9
      +   image: milvusdb/milvus:v2.4.13-hotfix
       

Then run the following to perform the upgrade:

      kubectl apply -f milvusupgrade.yaml
      @@ -148,8 +148,8 @@ spec:
                 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
               >
             
-Since Milvus 2.2.0, the metadata is incompatible with that in previous releases. The following example assumes an upgrade from Milvus 2.1.4 to Milvus 2.4.9.
-
-1. Create a .yaml file for metadata migration
-
-Create a metadata migration file. The following is an example. You need to specify name, sourceVersion, and targetVersion in the configuration file. The following example sets name to my-release-upgrade, sourceVersion to v2.1.4, and targetVersion to v2.4.9. This means that your Milvus cluster will be upgraded from v2.1.4 to v2.4.9.
+Since Milvus 2.2.0, the metadata is incompatible with that in previous releases. The following example assumes an upgrade from Milvus 2.1.4 to Milvus 2.4.13-hotfix.
+
+1. Create a .yaml file for metadata migration
+
+Create a metadata migration file. The following is an example. You need to specify name, sourceVersion, and targetVersion in the configuration file. The following example sets name to my-release-upgrade, sourceVersion to v2.1.4, and targetVersion to v2.4.13-hotfix. This means that your Milvus cluster will be upgraded from v2.1.4 to v2.4.13-hotfix.

      apiVersion: milvus.io/v1beta1
       kind: MilvusUpgrade
       metadata:
      @@ -159,9 +159,9 @@ spec:
           namespace: default
           name: my-release
         sourceVersion: "v2.1.4"
      -  targetVersion: "v2.4.9"
      +  targetVersion: "v2.4.13-hotfix"
         # below are some omit default values:
      -  # targetImage: "milvusdb/milvus:v2.4.9"
      +  # targetImage: "milvusdb/milvus:v2.4.13-hotfix"
         # toolImage: "milvusdb/meta-migration:v2.2.0"
         # operation: upgrade
         # rollbackIfFailed: true
      @@ -171,7 +171,7 @@ spec:
       

2. Apply the new configuration

Run the following command to apply the new configuration.

      $ kubectl apply -f https://github.com/zilliztech/milvus-operator/blob/main/config/samples/beta/milvusupgrade.yaml
       
-3. Check the status of the metadata migration
-
-Run the following command to check the status of your metadata migration.
+3. Check the metadata migration status
+
+Run the following command to check the status of your metadata migration.

      kubectl describe milvus release-name
       

If the status in the output is ready, it means the metadata migration was successful.
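If you prefer a one-liner over reading the describe output, a sketch (assuming the Milvus custom resource exposes the status field shown above):

```shell
# Print only the overall status of the Milvus custom resource.
kubectl get milvus release-name -o jsonpath='{.status.status}'
```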

      diff --git a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-docker.json b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-docker.json index b64f02e02..74481b355 100644 --- a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-docker.json +++ b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-docker.json @@ -1,32 +1 @@ -{ - "codeList": [ - "...\nstandalone:\n container_name: milvus-standalone\n image: milvusdb/milvus:v2.4.9\n", - "docker compose down\ndocker compose up -d\n", - "docker stop \n", - "# migration.yaml\ncmd:\n # Option: run/backup/rollback\n type: run\n runWithBackup: true\nconfig:\n sourceVersion: 2.1.4 # Specify your milvus version\n targetVersion: 2.4.9\n backupFilePath: /tmp/migration.bak\nmetastore:\n type: etcd\netcd:\n endpoints:\n - milvus-etcd:2379 # Use the etcd container name\n rootPath: by-dev # The root path where data is stored in etcd\n metaSubPath: meta\n kvSubPath: kv\n", - "# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvusdb/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n", - "// Run the following only after update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n" - ], - "headingContent": "", - "anchorList": [ - { - "label": "Docker ComposeでMilvus Standaloneをアップグレードする", - "href": "Upgrade-Milvus-Standalone-with-Docker-Compose", - "type": 1, - "isActive": false - }, - { - "label": "イメージを変えてMilvusをアップグレード", - "href": "Upgrade-Milvus-by-changing-its-image", - "type": 2, - "isActive": false - }, - { - "label": "メタデータの移行", - "href": "Migrate-the-metadata", - "type": 2, - "isActive": false - }, - { "label": "次の記事", "href": "Whats-next", "type": 2, "isActive": false } - ] -} +{"codeList":["...\nstandalone:\n container_name: milvus-standalone\n image: milvusdb/milvus:v2.4.13-hotfix\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n # Option: run/backup/rollback\n type: run\n runWithBackup: true\nconfig:\n sourceVersion: 2.1.4 # Specify your milvus version\n targetVersion: 2.4.13-hotfix\n backupFilePath: /tmp/migration.bak\nmetastore:\n type: etcd\netcd:\n endpoints:\n - milvus-etcd:2379 # Use the etcd container name\n rootPath: by-dev # The root path where data is stored in etcd\n metaSubPath: meta\n kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvusdb/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","// Run the following only after update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"Upgrade Milvus Standalone with Docker Compose","anchorList":[{"label":"Docker Composeを使用したMilvusスタンドアロンのアップグレード","href":"Upgrade-Milvus-Standalone-with-Docker-Compose","type":1,"isActive":false},{"label":"イメージを変更してMilvusをアップグレードする","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"メタデータの移行","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"次のステップ","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git 
a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-docker.md b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-docker.md index 12523d4b5..1878bfc68 100644 --- a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-docker.md +++ b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-docker.md @@ -5,9 +5,9 @@ order: 1 group: upgrade_milvus_standalone-operator.md related_key: upgrade Milvus Standalone summary: Docker ComposeでMilvusをスタンドアロンでアップグレードする方法をご紹介します。 -title: Docker ComposeでMilvus Standaloneをアップグレードする +title: Docker Composeを使用したMilvusスタンドアロンのアップグレード --- - +

Upgrade Milvus Standalone with Docker Compose

This topic describes how to upgrade your Milvus using Docker Compose.

-In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before upgrading from v2.1.x to v2.4.9.
+In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before upgrading from v2.1.x to v2.4.13-hotfix.

-Due to security concerns, Milvus upgrades its MinIO to RELEASE.2023-03-20T20-16-18Z with the release of v2.2.5. Before any upgrades from previous Milvus Standalone releases installed using Docker Compose, you should create a Single-Node Single-Drive MinIO deployment and migrate existing MinIO settings and content to the new deployment. For details, refer to this guide.
+Due to security concerns, Milvus upgrades its MinIO to RELEASE.2023-03-20T20-16-18Z with the release of v2.2.5. Before upgrading from previous Milvus Standalone releases installed using Docker Compose, you should create a Single-Node Single-Drive MinIO deployment and migrate the existing MinIO settings and content to the new deployment. For details, refer to this guide.

Upgrade Milvus by changing its image

2. Run the following commands to perform the upgrade.

      docker compose down
      @@ -83,7 +83,7 @@ cmd:
         runWithBackup: true
       config:
         sourceVersion: 2.1.4   # Specify your milvus version
      -  targetVersion: 2.4.9
      +  targetVersion: 2.4.13-hotfix
         backupFilePath: /tmp/migration.bak
       metastore:
         type: etcd
      @@ -125,8 +125,8 @@ docker compose up -d
       
- Scale a Milvus cluster
- If you are ready to deploy your cluster on clouds, refer to:
    5. diff --git a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-helm.json b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-helm.json index a0cf89db4..5af310943 100644 --- a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-helm.json +++ b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-helm.json @@ -1,51 +1 @@ -{ - "codeList": [ - "$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n", - "helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n", - "NAME CHART VERSION APP VERSION DESCRIPTION \nzilliztech/milvus 4.1.34 2.4.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.33 2.4.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.32 2.4.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.31 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.30 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.29 2.4.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.24 2.3.11 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.23 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.22 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.21 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.20 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.18 2.3.10 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.18 2.3.9 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.17 2.3.8 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.16 2.3.7 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.15 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.14 2.3.6 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.13 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.12 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.11 2.3.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.10 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.9 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.8 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.7 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.6 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.5 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.4 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.3 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.2 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.1 2.3.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.0 2.3.0 Milvus is an open-source vector database built ...\n", - "sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'\n", - "helm repo update\nhelm upgrade my-release milvus/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n", - "NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION 
\nmy-release default 1 2022-11-21 15:41:25.51539 +0800 CST deployed milvus-3.2.18 2.1.4\n", - "NAME READY STATUS RESTARTS AGE\nmy-release-etcd-0 1/1 Running 0 84s\nmy-release-milvus-standalone-75c599fffc-6rwlj 1/1 Running 0 84s\nmy-release-minio-744dd9586f-qngzv 1/1 Running 0 84s\n", - "$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n", - "./migrate.sh -i my-release -s 2.1.4 -t 2.4.9\n", - "./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9\n", - "./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev\n", - "./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9\n", - "./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true\n", - "./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9\n" - ], - "headingContent": "", - "anchorList": [ - { - "label": "ヘルムチャートでMilvus単体をアップグレード", - "href": "Upgrade-Milvus-Standalone-with-Helm-Chart", - "type": 1, - "isActive": false - }, - { - "label": "Milvusバージョンをチェック", - "href": "Check-the-Milvus-version", - "type": 2, - "isActive": false - }, - { - "label": "ローリングアップグレードの実施", - "href": "Conduct-a-rolling-upgrade", - "type": 2, - "isActive": false - }, - { - "label": "Helmを使ってMilvusをアップグレード", - "href": "Upgrade-Milvus-using-Helm", - "type": 2, - "isActive": false - }, - { - "label": "メタデータの移行", - "href": "Migrate-the-metadata", - "type": 2, - "isActive": false - } - ] -} +{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME CHART VERSION APP VERSION DESCRIPTION \nzilliztech/milvus 4.1.34 2.4.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.33 2.4.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.32 2.4.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.31 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.30 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.29 2.4.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.24 2.3.11 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.23 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.22 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.21 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.20 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.18 2.3.10 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.18 2.3.9 Milvus is an open-source vector database built ... 
\nzilliztech/milvus 4.1.17 2.3.8 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.16 2.3.7 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.15 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.14 2.3.6 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.13 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.12 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.11 2.3.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.10 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.9 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.8 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.7 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.6 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.5 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.4 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.3 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.2 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.1 2.3.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.0 2.3.0 Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'\n","helm repo update\nhelm upgrade my-release milvus/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION \nmy-release default 1 2022-11-21 15:41:25.51539 +0800 CST deployed milvus-3.2.18 2.1.4\n","NAME READY STATUS RESTARTS AGE\nmy-release-etcd-0 1/1 Running 0 84s\nmy-release-milvus-standalone-75c599fffc-6rwlj 1/1 Running 0 84s\nmy-release-minio-744dd9586f-qngzv 1/1 Running 0 84s\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix\n"],"headingContent":"Upgrade Milvus Standalone with Helm Chart","anchorList":[{"label":"MilvusスタンドアロンとHelmチャートのアップグレード","href":"Upgrade-Milvus-Standalone-with-Helm-Chart","type":1,"isActive":false},{"label":"Milvusバージョンの確認","href":"Check-the-Milvus-version","type":2,"isActive":false},{"label":"ローリングアップグレードの実施","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Helmを使用したMilvusのアップグレード","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"メタデータの移行","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-helm.md 
b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-helm.md index d02b8bea5..eaeaa53c3 100644 --- a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-helm.md +++ b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-helm.md @@ -4,12 +4,11 @@ label: Helm order: 1 group: upgrade_milvus_standalone-operator.md related_key: upgrade Milvus Standalone -summary: Helm ChartでMilvusをスタンドアロンでアップグレードする方法をご紹介します。 -title: ヘルムチャートでMilvus単体をアップグレード +summary: MilvusスタンドアロンをHelm Chartでアップグレードする方法をご紹介します。 +title: MilvusスタンドアロンとHelmチャートのアップグレード --- - - -

Upgrade Milvus Standalone with Helm Chart

    -

You can choose the upgrade path for your Milvus as follows:

    -
- [Conduct a rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.9.
    +

You can choose the upgrade path for your Milvus as follows:

    +
- Upgrade Milvus from v2.2.3 and later releases to v2.4.13-hotfix.

Conduct a rolling upgrade

Since Milvus 2.2.3, you can configure Milvus coordinators to work in active-standby mode and enable the rolling upgrade feature for coordinators, so that Milvus can respond to incoming requests while the coordinators are being upgraded. In previous releases, coordinators were removed and then re-created during an upgrade, which could cause service downtime.

    +

Since Milvus 2.2.3, you can configure Milvus coordinators to work in active-standby mode and enable the rolling upgrade feature, so that Milvus can respond to incoming requests while the coordinators are being upgraded. In previous releases, coordinators were removed and then re-created during an upgrade, which could cause service downtime.

A rolling upgrade requires the coordinators to work in active-standby mode. You can use the script we provide to configure the coordinators to work in active-standby mode and then start the rolling upgrade.

    -

Based on the rolling update capabilities provided by Kubernetes, the above script enforces an ordered update of the deployments according to their dependencies. In addition, Milvus implements a mechanism to keep its components compatible during the upgrade, significantly reducing service downtime.

    +

Based on the rolling update capabilities provided by Kubernetes, the above script enforces an ordered update of the deployments according to their dependencies. In addition, Milvus implements a mechanism to keep its components compatible with those that depend on them during the upgrade, significantly reducing potential service downtime.

The script applies only to the upgrade of Milvus installed with Helm. The following table lists the command flags available in the script.

    @@ -124,14 +123,14 @@ zilliztech/milvus 4.1.0 2.3.0 Milvus is an ope
o | The operation | update
    -

After confirming that all deployments in your Milvus instance are in their normal status, run the following command to upgrade the Milvus instance to 2.4.9.

    -
    sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'
    +

After confirming that all deployments in your Milvus instance are in their normal status, run the following command to upgrade the Milvus instance to 2.4.13-hotfix.

    +
    sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'
     
1. The script does not apply to Milvus instances installed with RocksMQ.
2. The script hardcodes the upgrade order of the deployments, which cannot be changed.
3. The script uses `kubectl patch` to update the deployments, and `kubectl rollout status` to watch their status.
4. The script uses `kubectl patch` to update the `app.kubernetes.io/version` label of the deployments to the one specified after the `-t` flag in the command.
    @@ -155,7 +154,7 @@ zilliztech/milvus 4.1.0 2.3.0 Milvus is an ope
    helm repo update
     helm upgrade my-release milvus/milvus --reuse-values --version=4.1.24 # use the helm chart version here
     
    -

Use the Helm chart version in the preceding command. For details on how to obtain the Helm chart version, refer to Check the Milvus version.

    +

Use the Helm chart version in the preceding command. For details on how to obtain the Helm chart version, please refer to Check the Milvus version.

Migrate the metadata

Run `$ helm list` to check the version of your Milvus app. You can see the `APP VERSION` is 2.1.4.

    +

1. Check the Milvus version

Run `$ helm list` to check the version of your Milvus app. You can see the `APP VERSION` is 2.1.4.

    NAME                NAMESPACE   REVISION    UPDATED                                 STATUS      CHART           APP VERSION     
     my-release          default     1           2022-11-21 15:41:25.51539 +0800 CST     deployed    milvus-3.2.18   2.1.4
     
    @@ -186,7 +185,7 @@ my-release-minio-744dd9586f-qngzv 1/1 Running 0 84s
    $ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'
     # milvusdb/milvus:v2.1.4
     
    -

4. Migrate the metadata

A major change in Milvus 2.2 is the metadata structure of segment indexes. Therefore, you need to use Helm to migrate the metadata when upgrading Milvus from v2.1.x to v2.2.0. Below is a script for safely migrating your metadata.

    +

4. Migrate the metadata

A major change in Milvus 2.2 is the metadata structure of segment indexes. Therefore, you need to use Helm to migrate the metadata when upgrading Milvus from v2.1.x to v2.2.0. Below is a script for safely migrating your metadata.

This script applies only to Milvus installed on a K8s cluster. If an error occurs during the process, first roll back to the previous version with the rollback operation.

The following table lists the operations you can do for meta migration.

@@ -196,10 +195,10 @@ my-release-minio-744dd9586f-qngzv 1/1 Running 0 84s

| Flag | Description | Default | Required |
| --- | --- | --- | --- |
| i | The Milvus instance name. | None | True |
| n | The namespace that Milvus is installed in. | default | False |
| s | The source Milvus version. | None | True |
| t | The target Milvus version. | None | True |
| r | The root path of Milvus meta. | by-dev | False |
| w | The new Milvus image tag. | milvusdb/milvus:v2.2.0 | False |
| m | The meta migration image tag. | milvusdb/meta-migration:v2.2.0 | False |
| o | The meta migration operation. | migrate | False |
| d | Whether to remove the migration pod after the migration is completed. | false | False |

1. Migrate the metadata

1. Download the migration script.
2. Stop the Milvus components. Any live session in the Milvus etcd can cause the migration to fail.
3. Create a backup of the Milvus metadata.
4. Migrate the Milvus metadata.
5. Start the Milvus components with a new image.
    -

2. Upgrade Milvus from v2.1.x to 2.4.9

The following commands assume that you upgrade Milvus from v2.1.4 to 2.4.9. Change them to the versions that fit your needs.

    +

2. Upgrade Milvus from v2.1.x to 2.4.13-hotfix

The following commands assume that you upgrade Milvus from v2.1.4 to 2.4.13-hotfix. Change them to the versions that fit your needs.

1. Specify the Milvus instance name, the source Milvus version, and the target Milvus version.

      -
      ./migrate.sh -i my-release -s 2.1.4 -t 2.4.9
      +
      ./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix
       
2. If your Milvus is not installed in the default K8s namespace, specify the namespace with `-n`.

   -
   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9
   +
   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix
       
3. If your Milvus is installed with a custom `rootpath`, specify the root path with `-r`.

      -
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev
      +
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev
       
4. If your Milvus is installed with a custom `image`, specify the image tag with `-w`.

      -
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9
      +
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix
       
5. Set `-d true` if you want to automatically remove the migration pod after the migration is completed.

      -
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true
      +
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true
       
6. Roll back and migrate again if the migration fails.

      -
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
      -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9
      +
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
      +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix
       
    diff --git a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-operator.json b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-operator.json index 56f05417f..651f9815d 100644 --- a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-operator.json +++ b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-operator.json @@ -1,47 +1 @@ -{ - "codeList": [ - "helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n", - "apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.9\n", - "apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.9\n", - "apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n", - "kubectl apply -f milvusupgrade.yml\n", - "apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nlabels:\n app: milvus\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.9\n", - "kubectl apply -f milvusupgrade.yaml\n", - "apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.9\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.9\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n", - "$ kubectl apply -f https://raw.githubusercontent.com/zilliztech/milvus-operator/main/config/samples/milvusupgrade.yaml\n", - "kubectl describe milvus release-name\n" - ], - "headingContent": "", - "anchorList": [ - { - "label": "Milvus OperatorでMilvus Standaloneをアップグレードする", - "href": "Upgrade-Milvus-Standalone-with-Milvus-Operator", - "type": 1, - "isActive": false - }, - { - "label": "Milvusオペレーターのアップグレード", - "href": "Upgrade-your-Milvus-operator", - "type": 2, - "isActive": false - }, - { - "label": "ローリングアップグレードの実施", - "href": "Conduct-a-rolling-upgrade", - "type": 2, - "isActive": false - }, - { - "label": "イメージを変えてMilvusをアップグレード", - "href": "Upgrade-Milvus-by-changing-its-image", - "type": 2, - "isActive": false - }, - { - "label": "メタデータの移行", - "href": "Migrate-the-metadata", - "type": 2, - "isActive": false - } - ] -} +{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n 
enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nlabels:\n app: milvus\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.13-hotfix\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.13-hotfix\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.13-hotfix\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://raw.githubusercontent.com/zilliztech/milvus-operator/main/config/samples/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"Upgrade Milvus Standalone with Milvus Operator","anchorList":[{"label":"Milvus OperatorによるMilvusスタンドアロンのアップグレード","href":"Upgrade-Milvus-Standalone-with-Milvus-Operator","type":1,"isActive":false},{"label":"Milvusオペレータのアップグレード","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"ローリングアップグレードの実施","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"イメージを変更してMilvusをアップグレードする","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"メタデータの移行","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-operator.md b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-operator.md index db25c0bc1..5294acf7f 100644 --- a/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-operator.md +++ b/localization/v2.4.x/site/ja/adminGuide/upgrade_milvus_standalone-operator.md @@ -4,8 +4,8 @@ label: Milvus Operator order: 0 group: upgrade_milvus_standalone-operator.md related_key: upgrade Milvus Standalone -summary: Milvus Operatorを使用したMilvus Standaloneのアップグレード方法をご紹介します。 -title: Milvus OperatorでMilvus Standaloneをアップグレードする +summary: Milvus Operatorを使用したMilvusスタンドアロンのアップグレード方法をご紹介します。 +title: Milvus OperatorによるMilvusスタンドアロンのアップグレード ---

Upgrade Milvus Standalone with Milvus Operator

This guide describes how to upgrade your Milvus Standalone with Milvus Operator.

    +

This guide describes how to upgrade your Milvus Standalone with Milvus Operator.

Upgrade your Milvus operator

    -

After upgrading your Milvus operator to the latest version, you have the following choices:

    +

Once you have upgraded your Milvus operator to the latest version, you have the following choices:

Conduct a rolling upgrade

    -

In the above configuration file, set `spec.components.enableRollingUpdate` to `true` and set `spec.components.image` to the desired Milvus version.

    -

By default, Milvus conducts a rolling upgrade for coordinators in an ordered way, replacing the coordinator pod images one after another. To reduce the upgrade time, consider setting `spec.components.imageUpdateMode` to `all` so that Milvus replaces all pod images at the same time.

    +

In the above configuration file, set `spec.components.enableRollingUpdate` to `true` and set `spec.components.image` to your desired Milvus version.

    +

By default, Milvus conducts a rolling upgrade for coordinators in an ordered way, replacing the coordinator pod images one after another. To reduce the upgrade time, consider setting `spec.components.imageUpdateMode` to `all` so that Milvus replaces all pod images at the same time.

    apiVersion: milvus.io/v1beta1
     kind: Milvus
     metadata:
    @@ -88,9 +88,9 @@ spec:
       components:
         enableRollingUpdate: true
         imageUpdateMode: all
    -    image: milvusdb/milvus:v2.4.9
    +    image: milvusdb/milvus:v2.4.13-hotfix
     
    -

Alternatively, you can set `spec.components.imageUpdateMode` to `rollingDowngrade` to have Milvus replace the coordinator pod images with a lower version.

    +

To have Milvus replace the coordinator pod images with a lower version, set `spec.components.imageUpdateMode` to `rollingDowngrade`.

    apiVersion: milvus.io/v1beta1
     kind: Milvus
     metadata:
    @@ -101,7 +101,7 @@ spec:
         imageUpdateMode: rollingDowngrade
         image: milvusdb/milvus:<some-older-version>
     
    -

Then save your configuration as a YAML file (for example, `milvusupgrade.yml`) and apply this configuration file to your Milvus instance as follows:

    +

Then save your configuration as a YAML file (for example, `milvusupgrade.yml`) and apply this configuration file to your Milvus instance as follows:

    kubectl apply -f milvusupgrade.yml
     

Upgrade Milvus by changing its image

In normal cases, you can update Milvus to the latest by changing its image. However, note that there will be a certain amount of downtime when upgrading Milvus in this way.

    +

In normal cases, you can update Milvus to the latest by changing its image. However, note that there will be a certain amount of downtime when upgrading Milvus in this way.

Compose a configuration file as follows and save it as `milvusupgrade.yaml`:

    apiVersion: milvus.io/v1beta1
     kind: Milvus
    @@ -130,7 +130,7 @@ labels:
     spec:
       # Omit other fields ...
       components:
    -   image: milvusdb/milvus:v2.4.9
    +   image: milvusdb/milvus:v2.4.13-hotfix
     

Then run the following to perform the upgrade:

    kubectl apply -f milvusupgrade.yaml
    @@ -150,8 +150,8 @@ spec:
           
    -    

Since Milvus 2.2.0, the metadata is incompatible with that in previous releases. The following example assumes an upgrade from Milvus 2.1.4 to Milvus v2.4.9.

    -

1. Create a `.yaml` file for metadata migration

Create a metadata migration file. The following is an example. You need to specify `name`, `sourceVersion`, and `targetVersion` in the configuration file. The following example sets `name` to `my-release-upgrade`, `sourceVersion` to `v2.1.4`, and `targetVersion` to `v2.4.9`. This means that your Milvus instance will be upgraded from v2.1.4 to v2.4.9.

    +

Since Milvus 2.2.0, the metadata is incompatible with that in previous releases. The following example assumes an upgrade from Milvus 2.1.4 to Milvus v2.4.13-hotfix.

    +

1. Create a `.yaml` file for metadata migration

Create a metadata migration file. The following is an example. You need to specify `name`, `sourceVersion`, and `targetVersion` in the configuration file. The following example sets `name` to `my-release-upgrade`, `sourceVersion` to `v2.1.4`, and `targetVersion` to `v2.4.13-hotfix`. This means that your Milvus instance will be upgraded from v2.1.4 to v2.4.13-hotfix.

    apiVersion: milvus.io/v1beta1
     kind: MilvusUpgrade
     metadata:
    @@ -161,9 +161,9 @@ spec:
         namespace: default
         name: my-release
       sourceVersion: "v2.1.4"
    -  targetVersion: "v2.4.9"
    +  targetVersion: "v2.4.13-hotfix"
       # below are some omit default values:
    -  # targetImage: "milvusdb/milvus:v2.4.9"
    +  # targetImage: "milvusdb/milvus:v2.4.13-hotfix"
       # toolImage: "milvusdb/meta-migration:v2.2.0"
       # operation: upgrade
       # rollbackIfFailed: true
    @@ -173,7 +173,7 @@ spec:
     

2. Apply the new configuration

Run the following command to apply the new configuration.

    $ kubectl apply -f https://raw.githubusercontent.com/zilliztech/milvus-operator/main/config/samples/milvusupgrade.yaml
     
    -

3. Check the status of the metadata migration

Run the following command to check the status of the metadata migration.

    +

3. Check the status of the metadata migration

Run the following command to check the status of the metadata migration.

    kubectl describe milvus release-name
     

If the status in the output is `ready`, the metadata migration has succeeded.

    diff --git a/localization/v2.4.x/site/ja/embeddings/embed-with-cohere.json b/localization/v2.4.x/site/ja/embeddings/embed-with-cohere.json index 6131a6e15..3ce35a476 100644 --- a/localization/v2.4.x/site/ja/embeddings/embed-with-cohere.json +++ b/localization/v2.4.x/site/ja/embeddings/embed-with-cohere.json @@ -1 +1 @@ -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","cohere_ef = CohereEmbeddingFunction(\n model_name=\"embed-english-light-v3.0\",\n api_key=\"YOUR_COHERE_API_KEY\",\n input_type=\"search_document\",\n embedding_types=[\"float\"]\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = cohere_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", cohere_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 3.43322754e-02, 1.16252899e-03, -5.25207520e-02, 1.32846832e-03,\n -6.80541992e-02, 6.10961914e-02, -7.06176758e-02, 1.48925781e-01,\n 1.54174805e-01, 1.98516846e-02, 2.43835449e-02, 3.55224609e-02,\n 1.82952881e-02, 7.57446289e-02, -2.40783691e-02, 4.40063477e-02,\n...\n 0.06359863, -0.01971436, -0.02253723, 0.00354195, 0.00222015,\n 0.00184727, 0.03408813, -0.00777817, 0.04919434, 0.01519775,\n -0.02862549, 0.04760742, -0.07891846, 0.0124054 ], dtype=float32)]\nDim: 384 (384,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = cohere_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", cohere_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-1.33361816e-02, 9.79423523e-04, -7.28759766e-02, -1.93786621e-02,\n -9.71679688e-02, 4.34875488e-02, -9.81445312e-02, 1.16882324e-01,\n 5.89904785e-02, -4.19921875e-02, 4.95910645e-02, 5.83496094e-02,\n 3.47595215e-02, -5.87463379e-03, -7.30514526e-03, 2.92816162e-02,\n...\n 0.00749969, -0.01192474, 0.02719116, 0.03347778, 0.07696533,\n 0.01409149, 0.00964355, -0.01681519, -0.0073204 , 0.00043154,\n -0.04577637, 0.03591919, -0.02807617, -0.04812622], dtype=float32)]\nDim 384 (384,)\n"],"headingContent":"","anchorList":[{"label":"コヒーレ","href":"Cohere","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import CohereEmbeddingFunction\n\ncohere_ef = CohereEmbeddingFunction(\n model_name=\"embed-english-light-v3.0\",\n api_key=\"YOUR_COHERE_API_KEY\",\n input_type=\"search_document\",\n embedding_types=[\"float\"]\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = cohere_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", cohere_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 3.43322754e-02, 1.16252899e-03, -5.25207520e-02, 1.32846832e-03,\n -6.80541992e-02, 6.10961914e-02, -7.06176758e-02, 1.48925781e-01,\n 1.54174805e-01, 1.98516846e-02, 2.43835449e-02, 3.55224609e-02,\n 1.82952881e-02, 7.57446289e-02, -2.40783691e-02, 4.40063477e-02,\n...\n 
0.06359863, -0.01971436, -0.02253723, 0.00354195, 0.00222015,\n 0.00184727, 0.03408813, -0.00777817, 0.04919434, 0.01519775,\n -0.02862549, 0.04760742, -0.07891846, 0.0124054 ], dtype=float32)]\nDim: 384 (384,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = cohere_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", cohere_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-1.33361816e-02, 9.79423523e-04, -7.28759766e-02, -1.93786621e-02,\n -9.71679688e-02, 4.34875488e-02, -9.81445312e-02, 1.16882324e-01,\n 5.89904785e-02, -4.19921875e-02, 4.95910645e-02, 5.83496094e-02,\n 3.47595215e-02, -5.87463379e-03, -7.30514526e-03, 2.92816162e-02,\n...\n 0.00749969, -0.01192474, 0.02719116, 0.03347778, 0.07696533,\n 0.01409149, 0.00964355, -0.01681519, -0.0073204 , 0.00043154,\n -0.04577637, 0.03591919, -0.02807617, -0.04812622], dtype=float32)]\nDim 384 (384,)\n"],"headingContent":"Cohere","anchorList":[{"label":"Cohere","href":"Cohere","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/embeddings/embed-with-cohere.md b/localization/v2.4.x/site/ja/embeddings/embed-with-cohere.md index 954f8130e..f5151b950 100644 --- a/localization/v2.4.x/site/ja/embeddings/embed-with-cohere.md +++ b/localization/v2.4.x/site/ja/embeddings/embed-with-cohere.md @@ -28,7 +28,9 @@ title: コヒーレを埋め込む pip install "pymilvus[model]"

Then, instantiate the `CohereEmbeddingFunction`:

    -
    cohere_ef = CohereEmbeddingFunction(
    +
    from pymilvus.model.dense import CohereEmbeddingFunction
    +
    +cohere_ef = CohereEmbeddingFunction(
         model_name="embed-english-light-v3.0",
         api_key="YOUR_COHERE_API_KEY",
         input_type="search_document",
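The instantiated `cohere_ef` is then used to embed documents; the minimal sketch below mirrors the example already present in this page's codeList:

```python
docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Alan Turing was the first person to conduct substantial research in AI.",
    "Born in Maida Vale, London, Turing was raised in southern England.",
]

# Encode the documents as dense vectors with the Cohere model.
docs_embeddings = cohere_ef.encode_documents(docs)

# Print embeddings, plus their dimension and shape.
print("Embeddings:", docs_embeddings)
print("Dim:", cohere_ef.dim, docs_embeddings[0].shape)
```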
    diff --git a/localization/v2.4.x/site/ja/embeddings/embed-with-jina.json b/localization/v2.4.x/site/ja/embeddings/embed-with-jina.json
    index 8de8bb172..4cee744ff 100644
    --- a/localization/v2.4.x/site/ja/embeddings/embed-with-jina.json
    +++ b/localization/v2.4.x/site/ja/embeddings/embed-with-jina.json
    @@ -1 +1 @@
    -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n    model_name=\"jina-embeddings-v2-base-en\", # Defaults to `jina-embeddings-v2-base-en`\n    api_key=JINAAI_API_KEY # Provide your Jina AI API key\n)\n","docs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = jina_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", jina_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([-4.88487840e-01, -4.28095880e-01,  4.90086500e-01, -1.63274320e-01,\n        3.43437800e-01,  3.21476880e-01,  2.83173790e-02, -3.10403670e-01,\n        4.76985040e-01, -1.77410420e-01, -3.84803180e-01, -2.19224200e-01,\n       -2.52898000e-01,  6.62411900e-02, -8.58173100e-01,  1.05221800e+00,\n...\n       -2.04462400e-01,  7.14229800e-01, -1.66823000e-01,  8.72551440e-01,\n        5.53560140e-01,  8.92506300e-01, -2.39408610e-01, -4.22413560e-01,\n       -3.19551350e-01,  5.59153850e-01,  2.44338100e-01, -8.60452100e-01])]\nDim: 768 (768,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = jina_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", jina_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-5.99164660e-01, -3.49827350e-01,  8.22405160e-01, -1.18632730e-01,\n        5.78107540e-01,  1.09789170e-01,  2.91604200e-01, -3.29306450e-01,\n        2.93779640e-01, -2.17880800e-01, -6.84535440e-01, -3.79752000e-01,\n       -3.47541800e-01,  9.20846100e-02, -6.13804400e-01,  6.31312800e-01,\n...\n       -1.84993740e-02,  9.38629150e-01,  2.74858470e-02,  1.09396360e+00,\n        3.96270750e-01,  7.44445800e-01, -1.95404050e-01, -6.08383200e-01,\n       -3.75076300e-01,  3.87512200e-01,  8.11889650e-01, -3.76407620e-01])]\nDim 768 (768,)\n"],"headingContent":"","anchorList":[{"label":"ジーナAI","href":"Jina-AI","type":1,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n    model_name=\"jina-embeddings-v3\", # Defaults to `jina-embeddings-v3`\n    api_key=JINAAI_API_KEY, # Provide your Jina AI API key\n    task=\"retrieval.passage\", # Specify the task\n    dimensions=1024, # Defaults to 1024\n)\n","\n```python\ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = jina_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", jina_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([9.80641991e-02, -8.51697400e-02,  7.36531913e-02,  1.42558888e-02,\n       -2.23589484e-02,  1.68494112e-03, -3.50753777e-02, -3.11530549e-02,\n       -3.26012149e-02,  5.04568312e-03,  3.69836427e-02,  3.48948985e-02,\n        8.19722563e-03,  5.88679723e-02, -6.71099266e-03, -1.82369724e-02,\n...\n        2.48654783e-02,  3.43279652e-02, -1.66154150e-02, -9.90478322e-03,\n       -2.96043139e-03, -8.57473817e-03, -7.39028037e-04,  6.25024503e-03,\n       -1.08831357e-02, -4.00776342e-02,  3.25369164e-02, -1.42691191e-03])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = jina_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", jina_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([8.79201014e-03,  1.47551354e-02,  4.02722731e-02, -2.52991207e-02,\n        1.12719582e-02,  3.75947170e-02,  3.97946090e-02, -7.36681819e-02,\n       -2.17952449e-02, -1.16298944e-02, -6.83426252e-03, -5.12507409e-02,\n        5.26071340e-02,  6.75181448e-02,  3.92445624e-02, -1.40817231e-02,\n...\n        8.81703943e-03,  4.24629413e-02, -2.32944116e-02, -2.05193572e-02,\n       -3.22035812e-02,  2.81896023e-03,  3.85326855e-02,  3.64372656e-02,\n       -1.65050142e-02, -4.26847413e-02,  2.02664156e-02, -1.72684863e-02])]\nDim 1024 (1024,)\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n    model_name=\"jina-embeddings-v3\", # Defaults to `jina-embeddings-v3`\n    api_key=JINA_API_KEY, # Provide your Jina AI API key\n    task=\"text-matching\",\n    dimensions=1024, # Defaults to 1024\n)\n\ntexts = [\n    \"Follow the white rabbit.\",  # English\n    \"Sigue al conejo blanco.\",  # Spanish\n    \"Suis le lapin blanc.\",  # French\n    \"跟着白兔走。\",  # Chinese\n    \"اتبع الأرنب الأبيض.\",  # Arabic\n    \"Folge dem weißen Kaninchen.\",  # German\n]\n\nembeddings = jina_ef(texts)\n\n# Compute similarities\nprint(embeddings[0] @ embeddings[1].T)\n"],"headingContent":"Jina AI","anchorList":[{"label":"Jina AI","href":"Jina-AI","type":1,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/ja/embeddings/embed-with-jina.md b/localization/v2.4.x/site/ja/embeddings/embed-with-jina.md
    index 5072a4713..43e163ca4 100644
    --- a/localization/v2.4.x/site/ja/embeddings/embed-with-jina.md
    +++ b/localization/v2.4.x/site/ja/embeddings/embed-with-jina.md
@@ -20,28 +20,45 @@ title: Jina AI - Embed
             >
           
         

Jina AI's embedding models are high-performance text embedding models that can translate textual inputs into numerical representations, capturing the semantics of the text. These models excel in applications such as dense retrieval, semantic textual similarity, and multilingual understanding.

    -

Milvus integrates with Jina AI's embedding models via the `JinaEmbeddingFunction` class. This class provides methods for encoding documents and queries using the Jina AI embedding models and returning the embeddings as dense vectors compatible with Milvus indexing. To use this functionality, obtain an API key from Jina AI.

    +

Milvus is integrated with Jina AI's embedding models via the `JinaEmbeddingFunction` class. This class provides methods for encoding documents and queries using the Jina AI embedding models and returning the embeddings as dense vectors compatible with Milvus indexing. To use this functionality, obtain an API key from Jina AI.

To use this feature, install the necessary dependencies:

    pip install --upgrade pymilvus
     pip install "pymilvus[model]"
     
    -

Then, instantiate the `JinaEmbeddingFunction`:

    +

Next, instantiate the `JinaEmbeddingFunction`:

    from pymilvus.model.dense import JinaEmbeddingFunction
     
     jina_ef = JinaEmbeddingFunction(
    -    model_name="jina-embeddings-v2-base-en", # Defaults to `jina-embeddings-v2-base-en`
    -    api_key=JINAAI_API_KEY # Provide your Jina AI API key
    +    model_name="jina-embeddings-v3", # Defaults to `jina-embeddings-v3`
    +    api_key=JINAAI_API_KEY, # Provide your Jina AI API key
    +    task="retrieval.passage", # Specify the task
    +    dimensions=1024, # Defaults to 1024
     )
     

Parameters

- `model_name` (string)

  -
  The name of the Jina AI embedding model to use for encoding. You can specify any of the available Jina AI embedding model names, for example, `jina-embeddings-v2-base-en`, `jina-embeddings-v2-small-en`, etc. If you leave this parameter unspecified, `jina-embeddings-v2-base-en` will be used. For a list of available models, refer to Jina Embeddings.

  +
  The name of the Jina AI embedding model to use for encoding. You can specify any of the available Jina AI embedding model names, for example, `jina-embeddings-v3`, `jina-embeddings-v2-base-en`, etc. If you leave this parameter unspecified, `jina-embeddings-v3` will be used. For a list of available models, refer to Jina Embeddings.

- `api_key` (string)

  The API key for accessing the Jina AI API.

- `task` (string)

  The type of input passed to the model. Required for embedding models v3 and higher.

  - `"retrieval.passage"`: Used to encode large documents in retrieval tasks at indexing time.
  - `"retrieval.query"`: Used to encode user queries or questions in retrieval tasks.
  - `"classification"`: Used to encode text for text classification tasks.
  - `"text-matching"`: Used to encode text for similarity matching, such as measuring the similarity between two sentences.
  - `"clustering"`: Used for clustering or reranking tasks.

- `dimensions` (int)

  The number of dimensions of the resulting output embeddings. Defaults to 1024. Only supported for embedding models v3 and higher.

- `late_chunking` (bool)

  This parameter controls whether to use the new chunking method Jina AI recently introduced for encoding a batch of sentences. Defaults to `False`. When set to `True`, the Jina AI API concatenates all sentences in the input field and feeds them to the model as a single string. Internally, the model embeds this long concatenated string and then performs late chunking, returning a list of embeddings that matches the size of the input list.
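As an illustrative sketch of how these parameters combine (the chunk texts below are made up, and `JINAAI_API_KEY` is assumed to be defined as in the snippets above):

```python
from pymilvus.model.dense import JinaEmbeddingFunction

jina_ef_lc = JinaEmbeddingFunction(
    model_name="jina-embeddings-v3",
    api_key=JINAAI_API_KEY,       # your Jina AI API key
    task="retrieval.passage",     # document-side task for asymmetric retrieval
    dimensions=1024,
    late_chunking=True,           # concatenate inputs, embed once, then chunk
)

# Chunks from the same document; late chunking lets each chunk's
# embedding see the context of its neighbors.
chunks = [
    "Alan Turing was an English mathematician and computer scientist.",
    "He is widely considered to be the father of theoretical computer science.",
]
chunk_embeddings = jina_ef_lc(chunks)  # one embedding per input chunk
```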

    -

To create embeddings for documents, use the `encode_documents()` method:

    -
    docs = [
    +

To create embeddings for documents, use the `encode_documents()` method. This method is designed for document embeddings in asymmetric retrieval tasks, such as indexing documents for search or recommendation tasks. It uses `retrieval.passage` as the task.

    +
    
    +```python
    +docs = [
         "Artificial intelligence was founded as an academic discipline in 1956.",
         "Alan Turing was the first person to conduct substantial research in AI.",
         "Born in Maida Vale, London, Turing was raised in southern England.",
    @@ -55,17 +72,17 @@ docs_embeddings = jina_ef.encode_documents(docs)
     print("Dim:", jina_ef.dim, docs_embeddings[0].shape)
     

The expected output is similar to the following:

    -
    Embeddings: [array([-4.88487840e-01, -4.28095880e-01,  4.90086500e-01, -1.63274320e-01,
    -        3.43437800e-01,  3.21476880e-01,  2.83173790e-02, -3.10403670e-01,
    -        4.76985040e-01, -1.77410420e-01, -3.84803180e-01, -2.19224200e-01,
    -       -2.52898000e-01,  6.62411900e-02, -8.58173100e-01,  1.05221800e+00,
    +
    Embeddings: [array([9.80641991e-02, -8.51697400e-02,  7.36531913e-02,  1.42558888e-02,
    +       -2.23589484e-02,  1.68494112e-03, -3.50753777e-02, -3.11530549e-02,
    +       -3.26012149e-02,  5.04568312e-03,  3.69836427e-02,  3.48948985e-02,
    +        8.19722563e-03,  5.88679723e-02, -6.71099266e-03, -1.82369724e-02,
     ...
    -       -2.04462400e-01,  7.14229800e-01, -1.66823000e-01,  8.72551440e-01,
    -        5.53560140e-01,  8.92506300e-01, -2.39408610e-01, -4.22413560e-01,
    -       -3.19551350e-01,  5.59153850e-01,  2.44338100e-01, -8.60452100e-01])]
    -Dim: 768 (768,)
    +        2.48654783e-02,  3.43279652e-02, -1.66154150e-02, -9.90478322e-03,
    +       -2.96043139e-03, -8.57473817e-03, -7.39028037e-04,  6.25024503e-03,
    +       -1.08831357e-02, -4.00776342e-02,  3.25369164e-02, -1.42691191e-03])]
    +Dim: 1024 (1024,)
     
    -

To create embeddings for queries, use the `encode_queries()` method:

    +

To create embeddings for queries, use the `encode_queries()` method. This method is designed for query embeddings in asymmetric retrieval tasks, such as search queries or questions. It uses `retrieval.query` as the task.

    queries = ["When was artificial intelligence founded", 
                "Where was Alan Turing born?"]
     
    @@ -75,13 +92,37 @@ query_embeddings = jina_ef.encode_queries(queries)
     print("Dim", jina_ef.dim, query_embeddings[0].shape)
     

The expected output is similar to the following:

    -
    Embeddings: [array([-5.99164660e-01, -3.49827350e-01,  8.22405160e-01, -1.18632730e-01,
    -        5.78107540e-01,  1.09789170e-01,  2.91604200e-01, -3.29306450e-01,
    -        2.93779640e-01, -2.17880800e-01, -6.84535440e-01, -3.79752000e-01,
    -       -3.47541800e-01,  9.20846100e-02, -6.13804400e-01,  6.31312800e-01,
    +
    Embeddings: [array([8.79201014e-03,  1.47551354e-02,  4.02722731e-02, -2.52991207e-02,
    +        1.12719582e-02,  3.75947170e-02,  3.97946090e-02, -7.36681819e-02,
    +       -2.17952449e-02, -1.16298944e-02, -6.83426252e-03, -5.12507409e-02,
    +        5.26071340e-02,  6.75181448e-02,  3.92445624e-02, -1.40817231e-02,
     ...
    -       -1.84993740e-02,  9.38629150e-01,  2.74858470e-02,  1.09396360e+00,
    -        3.96270750e-01,  7.44445800e-01, -1.95404050e-01, -6.08383200e-01,
    -       -3.75076300e-01,  3.87512200e-01,  8.11889650e-01, -3.76407620e-01])]
    -Dim 768 (768,)
    +        8.81703943e-03,  4.24629413e-02, -2.32944116e-02, -2.05193572e-02,
    +       -3.22035812e-02,  2.81896023e-03,  3.85326855e-02,  3.64372656e-02,
    +       -1.65050142e-02, -4.26847413e-02,  2.02664156e-02, -1.72684863e-02])]
    +Dim 1024 (1024,)
    +
    +

To create embeddings of inputs for similarity matching (such as STS or symmetric retrieval tasks), text classification, clustering, or reranking tasks, use the appropriate `task` parameter value when instantiating the `JinaEmbeddingFunction` class.

    +
    from pymilvus.model.dense import JinaEmbeddingFunction
    +
    +jina_ef = JinaEmbeddingFunction(
    +    model_name="jina-embeddings-v3", # Defaults to `jina-embeddings-v3`
    +    api_key=JINA_API_KEY, # Provide your Jina AI API key
    +    task="text-matching",
    +    dimensions=1024, # Defaults to 1024
    +)
    +
    +texts = [
    +    "Follow the white rabbit.",  # English
    +    "Sigue al conejo blanco.",  # Spanish
    +    "Suis le lapin blanc.",  # French
    +    "跟着白兔走。",  # Chinese
    +    "اتبع الأرنب الأبيض.",  # Arabic
    +    "Folge dem weißen Kaninchen.",  # German
    +]
    +
    +embeddings = jina_ef(texts)
    +
    +# Compute similarities
    +print(embeddings[0] @ embeddings[1].T)
     
    diff --git a/localization/v2.4.x/site/ja/embeddings/embed-with-voyage.json b/localization/v2.4.x/site/ja/embeddings/embed-with-voyage.json index 724dfba41..ba7547304 100644 --- a/localization/v2.4.x/site/ja/embeddings/embed-with-voyage.json +++ b/localization/v2.4.x/site/ja/embeddings/embed-with-voyage.json @@ -1 +1 @@ -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import VoyageEmbeddingFunction\n\nvoyage_ef = VoyageEmbeddingFunction(\n model_name=\"voyage-lite-02-instruct\", # Defaults to `voyage-2`\n api_key=VOYAGE_API_KEY # Provide your Voyage API key\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = voyage_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", voyage_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 0.02582654, -0.00907086, -0.04604037, ..., -0.01227521,\n 0.04420955, -0.00038829]), array([ 0.03844212, -0.01597065, -0.03728884, ..., -0.02118733,\n 0.03349845, 0.0065346 ]), array([ 0.05143557, -0.01096631, -0.02690451, ..., -0.02416254,\n 0.07658645, 0.03064499])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = voyage_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", voyage_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([ 0.01733501, -0.0230672 , -0.05208827, ..., -0.00957995,\n 0.04493361, 0.01485138]), array([ 0.05937521, -0.00729363, -0.02184347, ..., -0.02107683,\n 0.05706626, 0.0263358 ])]\nDim 1024 (1024,)\n"],"headingContent":"","anchorList":[{"label":"航海","href":"Voyage","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import VoyageEmbeddingFunction\n\nvoyage_ef = VoyageEmbeddingFunction(\n model_name=\"voyage-3\", # Defaults to `voyage-3`\n api_key=VOYAGE_API_KEY # Provide your Voyage API key\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = voyage_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", voyage_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 0.02582654, -0.00907086, -0.04604037, ..., -0.01227521,\n 0.04420955, -0.00038829]), array([ 0.03844212, -0.01597065, -0.03728884, ..., -0.02118733,\n 0.03349845, 0.0065346 ]), array([ 0.05143557, -0.01096631, -0.02690451, ..., -0.02416254,\n 0.07658645, 0.03064499])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = voyage_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", voyage_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([ 0.01733501, -0.0230672 , -0.05208827, ..., -0.00957995,\n 0.04493361, 0.01485138]), array([ 0.05937521, -0.00729363, -0.02184347, ..., -0.02107683,\n 0.05706626, 0.0263358 ])]\nDim 1024 
(1024,)\n"],"headingContent":"Voyage","anchorList":[{"label":"Voyage","href":"Voyage","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/embeddings/embed-with-voyage.md b/localization/v2.4.x/site/ja/embeddings/embed-with-voyage.md index 413589245..5737ebdcc 100644 --- a/localization/v2.4.x/site/ja/embeddings/embed-with-voyage.md +++ b/localization/v2.4.x/site/ja/embeddings/embed-with-voyage.md @@ -20,7 +20,7 @@ title: エンベデッド・ボヤージュ d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

Milvus integrates with Voyage's models via the `VoyageEmbeddingFunction` class. This class provides methods for encoding documents and queries using the Voyage models and returning the embeddings as dense vectors compatible with Milvus indexing. To use this functionality, create an account on the Voyage platform and obtain an API key.

    +

Milvus is integrated with Voyage's models via the `VoyageEmbeddingFunction` class. This class provides methods for encoding documents and queries using the Voyage models and returning the embeddings as dense vectors compatible with Milvus indexing. To use this functionality, create an account on the Voyage platform and obtain an API key.

To use this feature, install the necessary dependencies:

    pip install --upgrade pymilvus
     pip install "pymilvus[model]"
    @@ -29,13 +29,13 @@ pip install "pymilvus[model]"
     
    from pymilvus.model.dense import VoyageEmbeddingFunction
     
     voyage_ef = VoyageEmbeddingFunction(
    -    model_name="voyage-lite-02-instruct", # Defaults to `voyage-2`
    +    model_name="voyage-3", # Defaults to `voyage-3`
         api_key=VOYAGE_API_KEY # Provide your Voyage API key
     )
     

Parameters

      -
- `model_name` (string) The name of the Voyage model to use for encoding, for example, `voyage-law-2`, `voyage-code-2`, etc. If you leave this parameter unspecified, `voyage-2` will be used. For a list of available models, refer to the official Voyage documentation.
+
- `model_name` (string) The name of the Voyage model to use for encoding, for example, `voyage-3-lite`, `voyage-finance-2`, etc. If you leave this parameter unspecified, `voyage-3` will be used. For a list of available models, refer to the official Voyage documentation.
- `api_key` (string) The API key for accessing the Voyage API. For information on how to create an API key, refer to API Key and Python Client.

To create embeddings for documents, use the `encode_documents()` method:
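The example below is reproduced from this page's codeList and uses the `voyage_ef` instance created above:

```python
docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Alan Turing was the first person to conduct substantial research in AI.",
    "Born in Maida Vale, London, Turing was raised in southern England.",
]

# Encode the documents as dense vectors with the Voyage model.
docs_embeddings = voyage_ef.encode_documents(docs)

# Print embeddings, plus their dimension and shape.
print("Embeddings:", docs_embeddings)
print("Dim:", voyage_ef.dim, docs_embeddings[0].shape)
```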

    diff --git a/localization/v2.4.x/site/ja/embeddings/embeddings.json b/localization/v2.4.x/site/ja/embeddings/embeddings.json index 2d78124f3..8e7a01630 100644 --- a/localization/v2.4.x/site/ja/embeddings/embeddings.json +++ b/localization/v2.4.x/site/ja/embeddings/embeddings.json @@ -1 +1 @@ -{"codeList":["pip install \"pymilvus[model]\"\n","from pymilvus import model\n\n# This will download \"all-MiniLM-L6-v2\", a light weight model.\nef = model.DefaultEmbeddingFunction()\n\n# Data from which embeddings are to be generated \ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nembeddings = ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", ef.dim, embeddings[0].shape)\n","Embeddings: [array([-3.09392996e-02, -1.80662833e-02, 1.34775648e-02, 2.77156215e-02,\n -4.86349640e-03, -3.12581174e-02, -3.55921760e-02, 5.76934684e-03,\n 2.80773244e-03, 1.35783911e-01, 3.59678417e-02, 6.17732145e-02,\n...\n -4.61330153e-02, -4.85207550e-02, 3.13997865e-02, 7.82178566e-02,\n -4.75336798e-02, 5.21207601e-02, 9.04406682e-02, -5.36676683e-02],\n dtype=float32)]\nDim: 384 (384,)\n","from pymilvus.model.hybrid import BGEM3EmbeddingFunction\nfrom pymilvus import (\n utility,\n FieldSchema, CollectionSchema, DataType,\n Collection, AnnSearchRequest, RRFRanker, connections,\n)\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Who started AI research?\"\n\n# BGE-M3 model can embed texts as dense and sparse vectors.\n# It is included in the optional `model` module in pymilvus, to install it,\n# simply run \"pip install pymilvus[model]\".\n\nbge_m3_ef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\n\ndocs_embeddings = bge_m3_ef(docs)\nquery_embeddings = bge_m3_ef([query])\n","from pymilvus.model.sparse import BM25EmbeddingFunction\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Where was Turing born?\"\nbm25_ef = BM25EmbeddingFunction()\n\n# 2. fit the corpus to get BM25 model parameters on your documents.\nbm25_ef.fit(docs)\n\n# 3. store the fitted parameters to disk to expedite future processing.\nbm25_ef.save(\"bm25_params.json\")\n\n# 4. 
load the saved params\nnew_bm25_ef = BM25EmbeddingFunction()\nnew_bm25_ef.load(\"bm25_params.json\")\n\ndocs_embeddings = new_bm25_ef.encode_documents(docs)\nquery_embeddings = new_bm25_ef.encode_queries([query])\nprint(\"Dim:\", new_bm25_ef.dim, list(docs_embeddings)[0].shape)\n","Dim: 21 (1, 21)\n"],"headingContent":"","anchorList":[{"label":"エンベッディングの概要","href":"Embedding-Overview","type":1,"isActive":false},{"label":"例1:デフォルトの埋め込み関数を使って密なベクトルを生成する","href":"Example-1-Use-default-embedding-function-to-generate-dense-vectors","type":2,"isActive":false},{"label":"例2: BGE M3モデルを使用して、1回の呼び出しで密なベクトルと疎なベクトルを生成する","href":"Example-2-Generate-dense-and-sparse-vectors-in-one-call-with-BGE-M3-model","type":2,"isActive":false},{"label":"例3:BM25モデルを使用した疎ベクトルの生成","href":"Example-3-Generate--sparse-vectors-using-BM25-model","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["pip install \"pymilvus[model]\"\n","from pymilvus import model\n\n# This will download \"all-MiniLM-L6-v2\", a light weight model.\nef = model.DefaultEmbeddingFunction()\n\n# Data from which embeddings are to be generated \ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nembeddings = ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", ef.dim, embeddings[0].shape)\n","Embeddings: [array([-3.09392996e-02, -1.80662833e-02, 1.34775648e-02, 2.77156215e-02,\n -4.86349640e-03, -3.12581174e-02, -3.55921760e-02, 5.76934684e-03,\n 2.80773244e-03, 1.35783911e-01, 3.59678417e-02, 6.17732145e-02,\n...\n -4.61330153e-02, -4.85207550e-02, 3.13997865e-02, 7.82178566e-02,\n -4.75336798e-02, 5.21207601e-02, 9.04406682e-02, -5.36676683e-02],\n dtype=float32)]\nDim: 384 (384,)\n","from pymilvus.model.hybrid import BGEM3EmbeddingFunction\nfrom pymilvus import (\n utility,\n FieldSchema, CollectionSchema, DataType,\n Collection, AnnSearchRequest, RRFRanker, connections,\n)\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Who started AI research?\"\n\n# BGE-M3 model can embed texts as dense and sparse vectors.\n# It is included in the optional `model` module in pymilvus, to install it,\n# simply run \"pip install pymilvus[model]\".\n\nbge_m3_ef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\n\ndocs_embeddings = bge_m3_ef(docs)\nquery_embeddings = bge_m3_ef([query])\n","from pymilvus.model.sparse import BM25EmbeddingFunction\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Where was Turing born?\"\nbm25_ef = BM25EmbeddingFunction()\n\n# 2. fit the corpus to get BM25 model parameters on your documents.\nbm25_ef.fit(docs)\n\n# 3. store the fitted parameters to disk to expedite future processing.\nbm25_ef.save(\"bm25_params.json\")\n\n# 4. 
load the saved params\nnew_bm25_ef = BM25EmbeddingFunction()\nnew_bm25_ef.load(\"bm25_params.json\")\n\ndocs_embeddings = new_bm25_ef.encode_documents(docs)\nquery_embeddings = new_bm25_ef.encode_queries([query])\nprint(\"Dim:\", new_bm25_ef.dim, list(docs_embeddings)[0].shape)\n","Dim: 21 (1, 21)\n"],"headingContent":"Embedding Overview","anchorList":[{"label":"エンベッディングの概要","href":"Embedding-Overview","type":1,"isActive":false},{"label":"例1: 密なベクトルを生成するためにデフォルトの埋め込み関数を使う","href":"Example-1-Use-default-embedding-function-to-generate-dense-vectors","type":2,"isActive":false},{"label":"例2: BGE M3モデルを使用して、1回の呼び出しで密なベクトルと疎なベクトルを生成する","href":"Example-2-Generate-dense-and-sparse-vectors-in-one-call-with-BGE-M3-model","type":2,"isActive":false},{"label":"例3: BM25モデルを使ったスパースベクトルの生成","href":"Example-3-Generate--sparse-vectors-using-BM25-model","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/embeddings/embeddings.md b/localization/v2.4.x/site/ja/embeddings/embeddings.md index 629cd396d..614881239 100644 --- a/localization/v2.4.x/site/ja/embeddings/embeddings.md +++ b/localization/v2.4.x/site/ja/embeddings/embeddings.md @@ -20,7 +20,7 @@ title: エンベッディングの概要 >

Embedding is a machine learning concept for mapping data into a high-dimensional space, where data with similar semantics are placed close together. Typically being a deep neural network from the BERT or other Transformer families, an embedding model can effectively represent the semantics of text, images, and other data types as a series of numbers known as a vector. A key feature of these models is that the mathematical distance between vectors in the high-dimensional space can indicate the similarity of the semantics of the original texts or images. This property unlocks many information retrieval applications, such as web search engines like Google and Bing, product search and recommendation on e-commerce sites, and the recently popular Retrieval Augmented Generation (RAG) paradigm in generative AI.

    -

There are two main categories of embeddings, each producing a different type of vector:

    +

There are two main categories of embedding, each producing a different type of vector:

- Dense embedding: Most embedding models represent information as a floating-point vector of hundreds to thousands of dimensions. The output is called a "dense" vector because most of its dimensions are non-zero. For instance, the popular open-source embedding model BAAI/bge-base-en-v1.5 outputs vectors of 768 floating-point numbers (768-dimensional float vectors).

- Sparse embedding: In contrast, sparse embeddings output vectors with most dimensions being zero (sparse vectors). These vectors often have a much higher dimensionality (tens of thousands or more), determined by the size of the token vocabulary. Sparse vectors can be generated by deep neural networks or by statistical analysis of text corpora. Due to their interpretability and better out-of-domain generalization capabilities, sparse embeddings are increasingly adopted by developers as a complement to dense embeddings, as the schematic sketch below illustrates.
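A schematic illustration of the difference (the values below are made up; sparse vectors are commonly stored as index-to-weight maps):

```python
# Dense: every dimension carries a float value, typically hundreds to
# thousands of dimensions in total.
dense_vector = [0.12, -0.03, 0.88, 0.41]

# Sparse: only the few non-zero dimensions are stored, e.g. as
# {dimension_index: weight}; the nominal dimensionality can be
# vocabulary-sized (tens of thousands or more).
sparse_vector = {3: 0.7, 1024: 0.2, 59017: 0.5}
```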

@@ -40,9 +40,13 @@ title: Embedding Overview
 Voyage | Dense | API
 Jina | Dense | API
 Cohere | Dense | API
+Instructor | Dense | Open-sourced
+Mistral AI | Dense | API
+Nomic | Dense | API
+mGTE | Hybrid | Open-sourced

Example 1: Use default embedding function to generate dense vectors

To use embedding functions with Milvus, first install the PyMilvus client library with the `model` subpackage that wraps all the utilities for embedding generation.

      +

To use embedding functions with Milvus, first install the PyMilvus client library with the `model` subpackage that wraps all the utilities for embedding generation.

      pip install "pymilvus[model]"
       

The `model` subpackage supports various embedding models, from OpenAI, Sentence Transformers, BGE M3, and BM25, to SPLADE pretrained models. For simplicity, this example uses the `DefaultEmbeddingFunction`, which is an all-MiniLM-L6-v2 sentence transformer model. The model is about 70 MB and will be downloaded on first use:
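The snippet below is reproduced from this page's codeList:

```python
from pymilvus import model

# This will download "all-MiniLM-L6-v2", a light weight model.
ef = model.DefaultEmbeddingFunction()

# Data from which embeddings are to be generated
docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Alan Turing was the first person to conduct substantial research in AI.",
    "Born in Maida Vale, London, Turing was raised in southern England.",
]

embeddings = ef.encode_documents(docs)

# Print embeddings, plus their dimension and shape.
print("Embeddings:", embeddings)
print("Dim:", ef.dim, embeddings[0].shape)
```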

      @@ -108,8 +112,8 @@ Dim: 384 (384,

In this example, we use the BGE M3 hybrid model to embed text into both dense and sparse vectors and use them to retrieve relevant documents. The overall steps are as follows:

1. Embed the text as dense and sparse vectors with the BGE-M3 model;
2. Set up a Milvus collection to store the dense and sparse vectors;
3. Insert the data into Milvus;
4. Search and inspect the results.

First, we need to install the necessary dependencies.
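With the dependencies in place, the embedding step looks like this (reproduced from this page's codeList):

```python
from pymilvus.model.hybrid import BGEM3EmbeddingFunction

# 1. prepare a small corpus to search
docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Alan Turing was the first person to conduct substantial research in AI.",
    "Born in Maida Vale, London, Turing was raised in southern England.",
]
query = "Who started AI research?"

# BGE-M3 can embed texts as both dense and sparse vectors.
bge_m3_ef = BGEM3EmbeddingFunction(use_fp16=False, device="cpu")

docs_embeddings = bge_m3_ef(docs)
query_embeddings = bge_m3_ef([query])
```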

@@ -153,11 +157,11 @@ query_embeddings = bge_m3_ef([query])

BM25 is a well-known method that uses word occurrence frequencies to determine the relevance between queries and documents. In this example, we show how to use `BM25EmbeddingFunction` to generate sparse embeddings for both queries and documents.

      +

BM25 is a well-known method that uses word occurrence frequencies to determine the relevance between queries and documents. In this example, we show how to use `BM25EmbeddingFunction` to generate sparse embeddings for both queries and documents.

First, import the `BM25EmbeddingFunction` class.

      from pymilvus.model.sparse import BM25EmbeddingFunction
       
      -

In BM25, it is important to compute statistics over your documents to obtain the IDF (inverse document frequency), which can represent the patterns in your documents. The IDF is a measure of how much information a word provides, that is, whether it is common or rare across all documents.

      +

In BM25, it is important to compute statistics over your documents to obtain the IDF (inverse document frequency), which can represent the patterns in your documents. The IDF is a measure of how much information a word provides, that is, whether it is common or rare across all documents.
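For reference, a common form of the IDF term in classic BM25 (the exact weighting can differ between implementations) is:

$$
\mathrm{IDF}(q_i) = \ln\left( \frac{N - n(q_i) + 0.5}{n(q_i) + 0.5} + 1 \right)
$$

where N is the total number of documents in the corpus and n(q_i) is the number of documents containing the term q_i: rare terms receive large IDF weights, while common terms receive small ones.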

      # 1. prepare a small corpus to search
       docs = [
           "Artificial intelligence was founded as an academic discipline in 1956.",
      diff --git a/localization/v2.4.x/site/ja/faq/operational_faq.json b/localization/v2.4.x/site/ja/faq/operational_faq.json
      index 35cd5d401..a1cd689e8 100644
      --- a/localization/v2.4.x/site/ja/faq/operational_faq.json
      +++ b/localization/v2.4.x/site/ja/faq/operational_faq.json
      @@ -1 +1 @@
      -{"codeList":["{\n  \"registry-mirrors\": [\"https://registry.docker-cn.com\"]\n}\n","$ lscpu | grep -e sse4_2 -e avx -e avx2 -e avx512\n","pip install pymilvus>=2.4.2\n"],"headingContent":"","anchorList":[{"label":"運用に関するFAQ","href":"Operational-FAQ","type":1,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":["{\n  \"registry-mirrors\": [\"https://registry.docker-cn.com\"]\n}\n","$ lscpu | grep -e sse4_2 -e avx -e avx2 -e avx512\n","pip install pymilvus>=2.4.2\n","# Python Example: result of len() str cannot be used as \"max-length\" in Milvus \n>>> s = \"你好,世界!\"\n>>> len(s) # Number of characters of s.\n6\n>>> len(bytes(s, \"utf-8\")) # Size in bytes of s, max-length in Milvus.\n18\n"],"headingContent":"Operational FAQ","anchorList":[{"label":"運用に関するFAQ","href":"Operational-FAQ","type":1,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/ja/faq/operational_faq.md b/localization/v2.4.x/site/ja/faq/operational_faq.md
      index 31a9b1a4a..21b8014b9 100644
      --- a/localization/v2.4.x/site/ja/faq/operational_faq.md
      +++ b/localization/v2.4.x/site/ja/faq/operational_faq.md
      @@ -1,9 +1,8 @@
       ---
       id: operational_faq.md
 summary: Find answers to commonly asked questions about operations in Milvus.
 title: Operational FAQ
       ---
       

Operational FAQ

What if I failed to pull the Milvus Docker image from Docker Hub?

If you failed to pull the Milvus Docker image from Docker Hub, try adding other registry mirrors.

Users from mainland China can add the URL "https://registry.docker-cn.com" to the registry-mirrors array in /etc/docker/daemon.json.

      {
         "registry-mirrors": ["https://registry.docker-cn.com"]
       }
       
Is Docker the only way to install and run Milvus?

Docker is an efficient way to deploy Milvus, but not the only one. You can also deploy Milvus from source code, which requires Ubuntu (18.04 or higher) or CentOS (7 or higher). See Build Milvus from Source Code for more information.

What are the key factors affecting recall?

Recall is affected mainly by the index type and the search parameters.

For a FLAT index, Milvus takes an exhaustive scan within a collection, with a 100% return.

For IVF indexes, the nprobe parameter determines the scope of a search within the collection. Increasing nprobe increases the proportion of vectors searched and recall, but degrades query performance.

For the HNSW index, the ef parameter determines the breadth of the graph search. Increasing ef increases the number of points searched on the graph and recall, but degrades query performance.

For more information, see Vector Indexing.
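For instance, a hedged PyMilvus sketch of tuning these knobs at query time (the client setup, query vector, and parameter values are placeholder assumptions, not recommendations):

from pymilvus import MilvusClient

client = MilvusClient("milvus_demo.db")  # assumes the quickstart setup
query_vectors = [[0.1] * 768]            # placeholder query vector

# IVF-family index: widen the probe scope to trade speed for recall.
res = client.search(
    collection_name="demo_collection",
    data=query_vectors,
    limit=10,
    search_params={"metric_type": "L2", "params": {"nprobe": 64}},
)

# HNSW index: widen the graph search instead, e.g.
# search_params={"metric_type": "L2", "params": {"ef": 128}}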

Why do changes to the configuration file not take effect?

Milvus does not support modifying the configuration file while it is running. You must restart Milvus Docker for configuration-file changes to take effect.

How do I know if Milvus has started successfully?

If you started Milvus using Docker Compose, run docker ps to check how many Docker containers are running and whether the Milvus services started correctly.

For Milvus standalone, you should be able to see at least three running Docker containers, one of which is the Milvus service and the other two the etcd management and storage services. For more information, see Installing Milvus Standalone.

Why does the time in the log files differ from the system time?

The time difference is usually caused by the host machine not using Coordinated Universal Time (UTC).

The log files inside the Docker image use UTC by default. If your host machine does not use UTC, this issue may occur.

How do I know if my CPU supports Milvus?

Milvus' computing operations depend on the CPU's support for SIMD (Single Instruction, Multiple Data) extension instruction sets. Whether your CPU supports a SIMD extension instruction set is crucial to index building and vector similarity search in Milvus. Ensure that your CPU supports at least one of the following SIMD instruction sets:

• SSE4.2
• AVX
• AVX2
• AVX512

Run the lscpu command to check if your CPU supports the SIMD instruction sets mentioned above:

        $ lscpu | grep -e sse4_2 -e avx -e avx2 -e avx512
         
Why does Milvus return illegal instruction during startup?

Milvus requires your CPU to support a SIMD instruction set: SSE4.2, AVX, AVX2, or AVX512. The CPU must support at least one of these for Milvus to operate normally. An illegal instruction error returned during startup suggests that your CPU does not support any of the above four instruction sets.

See CPU support for SIMD instruction sets.

Can I install Milvus on Windows?

You can install Milvus on Windows either by compiling from source code or from a binary package.

See Run Milvus on Windows to learn how to install Milvus on Windows.

I got an error when installing pymilvus on Windows. What should I do?

It is not recommended to install PyMilvus on Windows. But if you have to install PyMilvus on Windows and get an error, try installing it in a Conda environment. See Install Milvus SDK for more information about how to install PyMilvus in a Conda environment.

Can Milvus be deployed without an internet connection?

Yes, you can install Milvus in an offline environment. See Install Milvus Offline for more information.

Where can I find the logs generated by Milvus?

The Milvus logs are printed to stdout (standard output) and stderr (standard error) by default; however, we highly recommend redirecting your logs to a persistent volume in production. To do so, update log.file.rootPath in milvus.yaml. And if you deploy Milvus with the milvus-helm chart, you also need to enable log persistence via --set log.persistence.enabled=true.

If you did not change the configuration, you can also find the logs using kubectl logs <pod-name> or docker logs CONTAINER.

Can I create an index for a segment before inserting data into it?

Yes, you can. But we recommend inserting data in batches, each of which should not exceed 256 MB, before indexing each segment.

Can I share an etcd instance among multiple Milvus instances?

Yes, you can share an etcd instance among multiple Milvus instances. To do so, change etcd.rootPath to a separate value in the configuration file of each Milvus instance before starting it.

Can I share a Pulsar instance among multiple Milvus instances?

Yes, you can share a Pulsar instance among multiple Milvus instances. To do so, you can:

• If multi-tenancy is enabled on your Pulsar instance, consider allocating a separate tenant or namespace for each Milvus instance. To do so, change pulsar.tenant or pulsar.namespace in the configuration file of each Milvus instance to a unique value before starting it.
• If you do not plan to enable multi-tenancy on your Pulsar instance, consider changing msgChannel.chanNamePrefix.cluster in the configuration file of each Milvus instance to a unique value before starting it.
Can I share a MinIO instance among multiple Milvus instances?

Yes, you can share a MinIO instance among multiple Milvus instances. To do so, change minio.rootPath to a unique value in the configuration file of each Milvus instance before starting it.

How do I handle the error message pymilvus.exceptions.ConnectionConfigException: <ConnectionConfigException: (code=1, message=Illegal uri: [example.db], expected form 'https://user:pwd@example.com:12345')>?

The error message Illegal uri [example.db] indicates that you are trying to connect to Milvus Lite with an earlier PyMilvus version that does not support this connection type. To fix it, upgrade your PyMilvus installation to at least version 2.4.2, which includes support for connecting to Milvus Lite.

You can upgrade PyMilvus with the following command:

        pip install pymilvus>=2.4.2
         

Why do I get fewer results than the limit I set in my search/query?

There are several reasons why you may receive fewer results than the limit you specified:

• Limited data: The collection may not have enough entities to fulfill the limit you requested. If the total number of entities in the collection is less than the limit, you will naturally receive fewer results.

• Duplicated primary keys: Milvus prioritizes specific entities when it encounters duplicated primary keys during a search. This behavior varies by search type:

• Query (exact match) and ANN search: Milvus selects the entity with the highest similarity score, even among entities that share the same PK. This prioritization can result in fewer unique results than the limit if your collection contains many duplicated primary keys.

• Insufficient matches: Your search filtering expressions may be too strict, so fewer entities meet the similarity threshold. If the conditions set for the search are too restrictive, fewer entities match, leading to fewer results than expected.

MilvusClient("milvus_demo.db") gives an error: ModuleNotFoundError: No module named 'milvus_lite'. What causes this and how can it be solved?

This error occurs when you try to use Milvus Lite on a Windows platform. Milvus Lite is primarily designed for Linux environments and may not have native support for Windows.

The solution is to utilize a Linux environment:

• Use a Linux-based operating system or virtual machine to run Milvus Lite.
• This approach ensures compatibility with the library's dependencies and functionality.

        Milvusの "length exceeds max length "エラーとは何ですか?

        Milvusの "Length exceeds max length "エラーは、データ要素のサイズがコレクションまたはフィールドの最大許容サイズを超えた場合に発生します。以下はその例と説明です:

        +
          +
        • JSONフィールドエラー<MilvusException: (code=1100, message=the length (398324) of json field (metadata) exceeds max length (65536): expected=valid length json string, actual=length exceeds max length: invalid parameter)>

        • +
        • 文字列長エラー:<ParamError: (code=1, message=invalid input, length of string exceeds max length. length: 74238, max length: 60535)>

        • +
        • VarChar フィールドエラー:<MilvusException: (code=1100, message=the length (60540) of 0th VarChar paragraph exceeds max length (0)%!(EXTRA int64=60535): invalid parameter)>

        • +
        +

        これらのエラーを理解し、対処する:

        +
          +
        • Pythonのlen(str) はバイト数ではなく文字数を表すことを理解してください。
        • +
        • VARCHARやJSONのような文字列ベースのデータ型では、len(bytes(str, encoding='utf-8')) 、Milvusが "max-length "に使用している実際のサイズをバイト単位で判断してください。
        • +
        +

Example in Python:

# Python Example: the result of len(str) cannot be used as "max-length" in Milvus
>>> s = "你好,世界!"
>>> len(s)  # Number of characters of s.
6
>>> len(bytes(s, "utf-8"))  # Size in bytes of s, max-length in Milvus.
18

Still have questions?

You can:

• Check out Milvus on GitHub. Feel free to ask questions, share ideas, and help others.
• Join our Milvus Forum or Slack channel to engage with the open-source community.
diff --git a/localization/v2.4.x/site/ja/faq/performance_faq.json b/localization/v2.4.x/site/ja/faq/performance_faq.json
index bc383670f..710f039c1 100644
--- a/localization/v2.4.x/site/ja/faq/performance_faq.json
+++ b/localization/v2.4.x/site/ja/faq/performance_faq.json
@@ -1 +1 @@
-{"codeList":[],"headingContent":"","anchorList":[{"label":"パフォーマンスFAQ","href":"Performance-FAQ","type":1,"isActive":false}]}
\ No newline at end of file
+{"codeList":[],"headingContent":"Performance FAQ","anchorList":[{"label":"パフォーマンスFAQ","href":"Performance-FAQ","type":1,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.4.x/site/ja/faq/performance_faq.md b/localization/v2.4.x/site/ja/faq/performance_faq.md
index 580948a47..062e15b3b 100644
--- a/localization/v2.4.x/site/ja/faq/performance_faq.md
+++ b/localization/v2.4.x/site/ja/faq/performance_faq.md
@@ -21,25 +21,25 @@ title: Performance FAQ

How do I set nlist and nprobe for IVF indexes?

Setting nlist is scenario-specific. As a rule of thumb, the recommended value of nlist is 4 × sqrt(n), where n is the total number of entities in a segment.

The size of each segment is determined by the datacoord.segment.maxSize parameter, which is set to 512 MB by default. The total number of entities in a segment, n, can be estimated by dividing datacoord.segment.maxSize by the size of each entity.
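A quick worked sketch of that estimate (the 128-dimensional float32 entity size is an assumption for illustration; substitute the actual entity size for your schema):

import math

segment_max_size = 512 * 1024 * 1024      # datacoord.segment.maxSize default (512 MB)
bytes_per_entity = 128 * 4                # assumed: one 128-dim float32 vector per entity

n = segment_max_size // bytes_per_entity  # estimated entities per segment
nlist = 4 * int(math.sqrt(n))
print(n, nlist)                           # 1048576 4096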

Setting nprobe is specific to the dataset and scenario, and involves a trade-off between accuracy and query performance. We recommend finding the ideal value through repeated experimentation.

The following charts show results from a test run on the sift50m dataset with the IVF_SQ8 index, comparing recall and query performance for different nlist/nprobe pairs.

[Charts: Accuracy test; Performance test]

Why do queries sometimes take longer on smaller datasets?

Query operations are conducted on segments. Indexes reduce the amount of time it takes to query a segment. If a segment is not indexed, Milvus resorts to brute-force search on the raw data, drastically increasing query time.

Therefore, it usually takes longer to query a small dataset (collection) because no index has been built: the sizes of its segments have not reached the index-building threshold set by rootCoord.minSegmentSizeToEnableindex. Call create_index() to force Milvus to index segments that have reached the threshold but have not yet been indexed automatically, significantly improving query performance (a hedged sketch follows below).
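A hedged PyMilvus sketch of triggering an index build (the client setup, index type, and parameters are placeholder assumptions; pick values that match your workload):

from pymilvus import MilvusClient

client = MilvusClient("milvus_demo.db")      # assumes the quickstart setup

index_params = client.prepare_index_params()
index_params.add_index(
    field_name="vector",                     # the vector field from your schema
    index_type="IVF_FLAT",                   # placeholder index choice
    metric_type="L2",
    params={"nlist": 1024},
)
client.create_index(collection_name="demo_collection", index_params=index_params)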

What factors affect CPU usage?

CPU usage increases when Milvus is building indexes or running queries. In general, index building is CPU-intensive except when using Annoy, which runs on a single thread.

When running queries, CPU usage is affected by nq and nprobe. When nq and nprobe are small, concurrency is low and CPU usage stays low.

Does simultaneously inserting data and searching affect query performance?

Insert operations are not CPU-intensive. However, because new segments may not have reached the threshold for index building, Milvus resorts to brute-force search, significantly impacting query performance.

The rootcoord.minSegmentSizeToEnableIndex parameter determines the index-building threshold for a segment, and is set to 1024 rows by default. See System Configuration for more information.

Is storage space released right after data is deleted in Milvus?

No, storage space is not released right after data is deleted in Milvus. Deleting data marks entities as "logically deleted", but the actual space may not be freed immediately. Here's why:

• Compaction: Milvus automatically compacts data in the background. This process merges smaller data segments into larger ones and removes logically deleted data (entities marked for deletion) or data that has exceeded its Time-To-Live (TTL). However, compaction creates new segments while marking old ones as "Dropped".
• Garbage collection: A separate process called garbage collection (GC) periodically removes these "Dropped" segments. This ensures efficient use of storage, but it can introduce a slight delay between deletion and space reclamation.
Can I see inserted, deleted, or upserted data immediately after the operation without waiting for a flush?

Yes. In Milvus, data visibility is not directly tied to flush operations because of the storage-compute separation architecture. You can manage data readability using consistency levels.

When selecting a consistency level, consider the trade-off between consistency and performance. For operations that require immediate visibility, use the "Strong" consistency level. For faster writes, prefer weaker consistency (data may not be immediately visible). See Consistency for more information.
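For instance, a hedged sketch of requesting strong consistency when creating a collection with MilvusClient (the consistency_level keyword is the documented way to set this at collection creation; whether it can also be overridden per search/query call depends on your PyMilvus version, so treat that as an assumption to verify):

from pymilvus import MilvusClient

client = MilvusClient("milvus_demo.db")

client.create_collection(
    collection_name="strong_demo",   # hypothetical collection name
    dimension=768,
    consistency_level="Strong",      # immediate read-your-writes visibility
)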

Does indexing a VARCHAR field improve deletion speed?

Indexing a VARCHAR field can speed up "Delete By Expression" operations, but only under certain conditions:

• INVERTED index: This index helps with IN or == expressions on non-primary-key VARCHAR fields.
• Trie index: This index helps with prefix queries (for example, LIKE prefix%) on non-primary-key VARCHAR fields. A hedged sketch of building such an index follows below.
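For example, a hedged MilvusClient sketch of adding an INVERTED index on a scalar field (assumes a connected client named client, and that "subject" is declared as a non-primary-key VARCHAR field in the collection schema, not a dynamic field):

index_params = client.prepare_index_params()
index_params.add_index(field_name="subject", index_type="INVERTED")
client.create_index(collection_name="demo_collection", index_params=index_params)

# Delete-by-expression filters on that field can now use the index:
client.delete(collection_name="demo_collection", filter="subject == 'biology'")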

Still have questions?

You can:

• Check out Milvus on GitHub. Feel free to ask questions, share ideas, and help others.
• Join our Slack channel to engage with the open-source community.
diff --git a/localization/v2.4.x/site/ja/faq/product_faq.json b/localization/v2.4.x/site/ja/faq/product_faq.json
index c7e9dd83b..0fd07b1ef 100644
--- a/localization/v2.4.x/site/ja/faq/product_faq.json
+++ b/localization/v2.4.x/site/ja/faq/product_faq.json
@@ -1 +1 @@
-{"codeList":["60 * 2 * 4 + 40 * 1 * 12 = 960\n"],"headingContent":"","anchorList":[{"label":"製品FAQ","href":"Product-FAQ","type":1,"isActive":false}]}
\ No newline at end of file
+{"codeList":["60 * 2 * 4 + 40 * 1 * 12 = 960\n"],"headingContent":"Product FAQ","anchorList":[{"label":"製品に関するFAQ","href":"Product-FAQ","type":1,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.4.x/site/ja/faq/product_faq.md b/localization/v2.4.x/site/ja/faq/product_faq.md
index 08656e2d7..cf50afb37 100644
--- a/localization/v2.4.x/site/ja/faq/product_faq.md
+++ b/localization/v2.4.x/site/ja/faq/product_faq.md
@@ -1,7 +1,7 @@
 ---
 id: product_faq.md
 summary: Find answers to frequently asked questions about the world's most advanced vector database.
 title: Product FAQ
 ---

Product FAQ

How much does Milvus cost?

Milvus is a 100% free open-source project.

Please adhere to the Apache License 2.0 when using Milvus.

Zilliz, the company behind Milvus, also offers a fully managed cloud platform for those who don't want to build and maintain their own distributed instance. Zilliz Cloud automatically maintains data reliability and lets users pay only for what they use.

Does Milvus support non-x86 architectures?

Milvus cannot be installed or run on non-x86 platforms.

Your CPU must support one of the following instruction sets to run Milvus: SSE4.2, AVX, AVX2, AVX512. These are all x86-dedicated SIMD instruction sets.

What is the maximum dataset size Milvus can handle?

Theoretically, the maximum dataset size Milvus can handle is determined by the hardware it runs on, specifically system memory and storage:

• Milvus loads all specified collections and partitions into memory before running queries. Therefore, memory size determines the maximum amount of data Milvus can query.
• When new entities and collection-related schema (currently only MinIO is supported for data persistence) are added to Milvus, system storage determines the maximum allowable size of inserted data.
Where does Milvus store data?

Milvus deals with two types of data: inserted data and metadata.

Inserted data, including vector data, scalar data, and collection-specific schema, are stored in persistent storage as incremental logs. Milvus supports multiple object storage backends, including MinIO, AWS S3, Google Cloud Storage (GCS), Azure Blob Storage, Alibaba Cloud OSS, and Tencent Cloud Object Storage (COS).

Metadata are generated within Milvus. Each Milvus module has its own metadata, which are stored in etcd.

Why is there no vector data in etcd?

etcd stores Milvus module metadata; MinIO stores entities.

Does Milvus support inserting and searching data simultaneously?

Yes. Insert operations and query operations are handled by two separate, mutually independent modules. From the client's perspective, an insert operation is complete once the inserted data enter the message queue. However, inserted data are unsearchable until they are loaded to the query node. If the segment size does not reach the index-building threshold (512 MB by default), Milvus resorts to brute-force search, and query performance may be diminished.

Can vectors with duplicate primary keys be inserted into Milvus?

Yes. Milvus does not check whether vector primary keys are duplicated.

When vectors with duplicate primary keys are inserted, does Milvus treat it as an update operation?

No. Milvus does not currently support update operations and does not check whether entity primary keys are duplicates. You are responsible for ensuring that entity primary keys are unique; if they aren't, Milvus may contain multiple entities with duplicate primary keys.

If this occurs, which data copy will be returned when queried remains unknown behavior. This limitation will be fixed in future releases.
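A hedged sketch of the behavior described above (collection and field names follow the quickstart examples; both inserts succeed, and which copy a later query returns is undefined):

from pymilvus import MilvusClient

client = MilvusClient("milvus_demo.db")  # assumes the quickstart collection exists

# Milvus does not deduplicate primary keys, so both inserts succeed.
client.insert(collection_name="demo_collection",
              data=[{"id": 7, "vector": [0.1] * 768, "text": "first copy", "subject": "history"}])
client.insert(collection_name="demo_collection",
              data=[{"id": 7, "vector": [0.2] * 768, "text": "second copy", "subject": "history"}])

# Which of the two copies this returns is undefined:
print(client.query(collection_name="demo_collection", ids=[7], output_fields=["text"]))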

What is the maximum length of a self-defined entity primary key?

Entity primary keys must be non-negative 64-bit integers.

What is the maximum amount of data that can be added in a single insert operation?

An insert operation must not exceed 1,024 MB in size. This is a limit imposed by gRPC.
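A hedged batching sketch to stay under that cap (the batch size of 10,000 is an arbitrary assumption; size it so each request stays well below 1,024 MB for your schema):

BATCH = 10_000  # assumed batch size; tune for your entity size

# `entities` is a list of dicts shaped like the quickstart examples;
# `client` is a connected MilvusClient.
for start in range(0, len(entities), BATCH):
    client.insert(collection_name="demo_collection",
                  data=entities[start:start + BATCH])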

• If you know which partition your data are in, call load_partition() to load the intended partition, then specify the partition in your search() method call.
• If you are unsure of the exact partition, call load_collection() before calling search().
• If you fail to load collections or partitions before searching, Milvus returns an error.
Can indexes be created after inserting vectors?

Yes. If an index has previously been built for a collection by create_index(), Milvus automatically builds an index for subsequently inserted vectors. However, Milvus does not build the index until the newly inserted vectors fill an entire segment and the newly created index file is separate from the previous one.

What is the difference between the FLAT index and the IVF_FLAT index?

The IVF_FLAT index divides the vector space into list clusters. With the default list value of 16,384, Milvus compares the distances between the target vector and the centroids of all 16,384 clusters to return the nearest clusters. Milvus then compares the distances between the target vector and the vectors in the selected clusters to get the nearest vectors. Unlike IVF_FLAT, FLAT directly compares the distances between the target vector and every other vector.

When the total number of vectors approximately equals nlist, there is little difference between IVF_FLAT and FLAT in terms of computational requirements and search performance. However, as the number of vectors exceeds nlist by a factor of two or more, IVF_FLAT begins to show performance advantages.

See Vector Index for more information.

How does Milvus flush data?

Milvus returns success when inserted data are loaded to the message queue. However, the data are not yet flushed to disk. Then Milvus' data node writes the data in the message queue to persistent storage as incremental logs. If flush() is called, the data node is forced to write all data in the message queue to persistent storage immediately.

What is normalization? Why is normalization needed?

Normalization refers to the process of converting a vector so that its norm equals 1. If inner product is used to calculate vector similarity, vectors must be normalized. After normalization, inner product equals cosine similarity.

See Wikipedia for more information.

Why do Euclidean distance (L2) and inner product (IP) return different results?

For normalized vectors, Euclidean distance (L2) is mathematically equivalent to inner product (IP). If these similarity metrics return different results, check whether your vectors are normalized.
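A minimal numpy sketch of that equivalence (numpy is an assumed dependency): after L2-normalization, inner product equals cosine similarity, and squared L2 distance is a monotonic function of it, since ||a - b||^2 = 2 - 2<a, b> for unit vectors:

import numpy as np

a, b = np.random.rand(768), np.random.rand(768)
a, b = a / np.linalg.norm(a), b / np.linalg.norm(b)  # normalize to unit norm

ip = float(a @ b)                        # inner product == cosine similarity here
l2_sq = float(np.sum((a - b) ** 2))      # squared Euclidean distance
print(abs(l2_sq - (2 - 2 * ip)) < 1e-9)  # True: ||a-b||^2 = 2 - 2*<a,b>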

Is there a limit to the total number of collections and partitions in Milvus?

You can create up to 65,535 collections in a Milvus instance. When calculating the number of existing collections, Milvus counts all collections including their shards and partitions.

For example, suppose you have already created 100 collections, with 2 shards and 4 partitions in 60 of them and 1 shard and 12 partitions in the remaining 40. The current number of collections is calculated as:

        60 * 2 * 4 + 40 * 1 * 12 = 960
         
Why do I get fewer than k vectors when searching for topk vectors?

Among the indexes that Milvus supports, IVF_FLAT and IVF_SQ8 implement the k-means clustering method. The data space is divided into nlist clusters, and the inserted vectors are distributed among these clusters. Milvus then selects the nprobe nearest clusters and compares the distances between the target vector and all vectors in the selected clusters to return the final results.

        nlisttopk が大きく、nprobe が小さい場合、nprobe クラスタ内のベクトル数がk より少なくなることがあります。そのため、topk に最も近いベクトルを検索すると、返されるベクトル数がk より少なくなります。

        これを避けるには、nprobe を大きく、nlistk を小さく設定してみてください。

        詳しくはベクトル・インデックスをご覧ください。

What is the maximum vector dimension supported in Milvus?

Milvus can manage vectors with up to 32,768 dimensions by default. You can increase the value of Proxy.maxDimension to allow for a larger-dimensional vector.

Does Milvus support Apple M1 CPUs?

The current Milvus release does not directly support Apple M1 CPUs. After Milvus 2.3, Docker images are provided for the ARM64 architecture.

What data types does Milvus support on the primary key field?

In the current release, Milvus supports both INT64 and strings.

Is Milvus scalable?

Yes. You can deploy a Milvus cluster with multiple nodes via Helm Chart on Kubernetes. See the Scale Guide for more instructions.

Do queries run in memory? What are incremental data and historical data?

Yes. When a query request comes, Milvus searches both incremental data and historical data by loading them into memory. Incremental data are in growing segments, which are buffered in memory before they reach the threshold to be persisted in the storage engine, while historical data are from sealed segments that are stored in object storage. Incremental data and historical data together constitute the whole dataset to search.

Yes. For queries on the same collection, Milvus concurrently searches the incremental and historical data. However, queries on different collections are conducted in series. Whereas the historical data can be an extremely huge dataset, searches on the historical data are relatively more time-consuming and essentially performed in series.

Why do data in MinIO remain after the corresponding collection is dropped?

Data in MinIO are designed to remain for a certain period of time for the convenience of data rollback.

Does Milvus support message engines other than Pulsar?

Yes. Kafka is supported in Milvus 2.1.0.

What's the difference between a search and a query?

In Milvus, a vector similarity search retrieves vectors based on similarity calculation and vector-index acceleration. Unlike a vector similarity search, a vector query retrieves vectors via scalar filtering based on a boolean expression. The boolean expression filters on scalar fields or the primary key field, and it retrieves all results that match the filter. In a query, neither similarity metrics nor vector indexes are involved.

Why does a float vector value have a precision of 7 decimal digits in Milvus?

Milvus supports storing vectors as Float32 arrays. A Float32 value has a precision of 7 decimal digits. Even with a Float64 value, such as 1.3476964684980388, Milvus stores it as 1.347696. Therefore, when you retrieve such a vector from Milvus, the precision of the Float64 value is lost.
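A minimal sketch of this precision loss (numpy is an assumed dependency):

import numpy as np

x = 1.3476964684980388   # a Python float is 64-bit
print(np.float32(x))     # 1.3476964 -> only about 7 decimal digits survive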

How does Milvus handle vector data types and precision?

Milvus supports Binary, Float32, Float16, and BFloat16 vector types.

• Binary vectors: Store binary data as sequences of 0s and 1s, used in image processing and information retrieval.
• Float32 vectors: Stored with a precision of about 7 decimal digits. Even Float64 values are stored with Float32 precision, leading to potential precision loss upon retrieval.
• Float16 and BFloat16 vectors: Offer reduced precision and memory usage. Float16 is suitable for applications with limited bandwidth and storage, while BFloat16 balances range and efficiency, and is commonly used in deep learning to reduce computational requirements without significantly impacting accuracy.
Does Milvus support specifying default values for scalar or vector fields?

Currently, Milvus 2.4.x does not support specifying default values for scalar or vector fields. This feature is planned for future releases.

Still have questions?

You can:

• Check out Milvus on GitHub. Feel free to ask questions, share ideas, and help others.
• Join our Slack community to engage with the open-source community.
diff --git a/localization/v2.4.x/site/ja/getstarted/install_SDKs/install-java.json b/localization/v2.4.x/site/ja/getstarted/install_SDKs/install-java.json
index fd60cd9c2..c3262de39 100644
--- a/localization/v2.4.x/site/ja/getstarted/install_SDKs/install-java.json
+++ b/localization/v2.4.x/site/ja/getstarted/install_SDKs/install-java.json
@@ -1 +1 @@
-{"codeList":["\n io.milvus\n milvus-sdk-java\n 2.4.3\n\n","implementation 'io.milvus:milvus-sdk-java:2.4.3'\n"],"headingContent":"","anchorList":[{"label":"Install Milvus Java SDK","href":"Install-Milvus-Java-SDK","type":1,"isActive":false},{"label":"Requirement","href":"Requirement","type":2,"isActive":false},{"label":"Install Milvus Java SDK","href":"Install-Milvus-Java-SDK","type":2,"isActive":false},{"label":"What's next","href":"Whats-next","type":2,"isActive":false}]}
\ No newline at end of file
+{"codeList":["\n io.milvus\n milvus-sdk-java\n 2.4.5\n\n","implementation 'io.milvus:milvus-sdk-java:2.4.5'\n"],"headingContent":"Install Milvus Java SDK","anchorList":[{"label":"Milvus Java SDKのインストール","href":"Install-Milvus-Java-SDK","type":1,"isActive":false},{"label":"必要環境","href":"Requirement","type":2,"isActive":false},{"label":"Milvus Java SDKのインストール","href":"Install-Milvus-Java-SDK","type":2,"isActive":false},{"label":"次のステップ","href":"Whats-next","type":2,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.4.x/site/ja/getstarted/install_SDKs/install-java.md b/localization/v2.4.x/site/ja/getstarted/install_SDKs/install-java.md
index 1baa4a747..2056d441e 100644
--- a/localization/v2.4.x/site/ja/getstarted/install_SDKs/install-java.md
+++ b/localization/v2.4.x/site/ja/getstarted/install_SDKs/install-java.md
@@ -20,7 +20,7 @@ title: Install Milvus Java SDK

This topic describes how to install the Milvus Java SDK.

The current version of Milvus supports SDKs for Python, Node.js, GO, and Java.

Requirements

• Gradle/Grails

implementation 'io.milvus:milvus-sdk-java:2.4.5'
       

What's next

Having installed the Milvus Java SDK, you can:

• Learn the basic operations of Milvus:

• Manage Collections
• Manage Partitions
diff --git a/localization/v2.4.x/site/ja/getstarted/install_SDKs/install-pymilvus.json b/localization/v2.4.x/site/ja/getstarted/install_SDKs/install-pymilvus.json
index 6764685aa..d7ce7d69e 100644
--- a/localization/v2.4.x/site/ja/getstarted/install_SDKs/install-pymilvus.json
+++ b/localization/v2.4.x/site/ja/getstarted/install_SDKs/install-pymilvus.json
@@ -1 +1 @@
-{"codeList":["$ python3 -m pip install pymilvus==2.4.5\n","$ python3 -c \"from pymilvus import Collection\"\n"],"headingContent":"","anchorList":[{"label":"Install Milvus Python SDK","href":"Install-Milvus-Python-SDK","type":1,"isActive":false},{"label":"Requirements","href":"Requirements","type":2,"isActive":false},{"label":"Install PyMilvus via pip","href":"Install-PyMilvus-via-pip","type":2,"isActive":false},{"label":"Verify installation","href":"Verify-installation","type":2,"isActive":false},{"label":"What's next","href":"Whats-next","type":2,"isActive":false}]}
\ No newline at end of file
+{"codeList":["$ python3 -m pip install pymilvus==2.4.8\n","$ python3 -c \"from pymilvus import Collection\"\n"],"headingContent":"Install Milvus Python SDK","anchorList":[{"label":"Milvus Python SDKのインストール","href":"Install-Milvus-Python-SDK","type":1,"isActive":false},{"label":"必要条件","href":"Requirements","type":2,"isActive":false},{"label":"PyMilvusをpipでインストールする","href":"Install-PyMilvus-via-pip","type":2,"isActive":false},{"label":"インストールの確認","href":"Verify-installation","type":2,"isActive":false},{"label":"次に行うこと","href":"Whats-next","type":2,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.4.x/site/ja/getstarted/install_SDKs/install-pymilvus.md b/localization/v2.4.x/site/ja/getstarted/install_SDKs/install-pymilvus.md
index 1862dac5f..71a3479e2 100644
--- a/localization/v2.4.x/site/ja/getstarted/install_SDKs/install-pymilvus.md
+++ b/localization/v2.4.x/site/ja/getstarted/install_SDKs/install-pymilvus.md
@@ -20,8 +20,8 @@ title: Install Milvus Python SDK

This topic describes how to install pymilvus, the Milvus Python SDK.

The current version of Milvus supports SDKs for Python, Node.js, GO, and Java.

Requirements

• Python 3.7 or later is required.
• Google protobuf is installed. You can install it with the command pip3 install protobuf==3.20.0.
• grpcio-tools is installed. You can install it with the command pip3 install grpcio-tools.
Install PyMilvus via pip

PyMilvus is available in the Python Package Index.

It is recommended to install a PyMilvus version that matches the version of the Milvus server you installed. For more information, see the Release Notes.

$ python3 -m pip install pymilvus==2.4.8
           

Verify installation

Run the following command to verify the installation. If PyMilvus is installed correctly, it raises no exception:

$ python3 -c "from pymilvus import Collection"

What's next

Having installed PyMilvus, you can:

• Learn the basic operations of Milvus:

• Manage Collections
• Manage Partitions
            • diff --git a/localization/v2.4.x/site/ja/getstarted/quickstart.json b/localization/v2.4.x/site/ja/getstarted/quickstart.json index a78880a85..90b90c043 100644 --- a/localization/v2.4.x/site/ja/getstarted/quickstart.json +++ b/localization/v2.4.x/site/ja/getstarted/quickstart.json @@ -1 +1 @@ -{"codeList":["$ pip install -U pymilvus\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\"milvus_demo.db\")\n","if client.has_collection(collection_name=\"demo_collection\"):\n client.drop_collection(collection_name=\"demo_collection\")\nclient.create_collection(\n collection_name=\"demo_collection\",\n dimension=768, # The vectors we will use in this demo has 768 dimensions\n)\n","$ pip install \"pymilvus[model]\"\n","from pymilvus import model\n\n# If connection to https://huggingface.co/ failed, uncomment the following path\n# import os\n# os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'\n\n# This will download a small embedding model \"paraphrase-albert-small-v2\" (~50MB).\nembedding_fn = model.DefaultEmbeddingFunction()\n\n# Text strings to search from.\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = embedding_fn.encode_documents(docs)\n# The output vector has 768 dimensions, matching the collection that we just created.\nprint(\"Dim:\", embedding_fn.dim, vectors[0].shape) # Dim: 768 (768,)\n\n# Each entity has id, vector representation, raw text, and a subject label that we use\n# to demo metadata filtering later.\ndata = [\n {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n]\n\nprint(\"Data has\", len(data), \"entities, each with fields: \", data[0].keys())\nprint(\"Vector dim:\", len(data[0][\"vector\"]))\n","Dim: 768 (768,)\nData has 3 entities, each with fields: dict_keys(['id', 'vector', 'text', 'subject'])\nVector dim: 768\n","import random\n\n# Text strings to search from.\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n# Use fake representation with random vectors (768 dimension).\nvectors = [[random.uniform(-1, 1) for _ in range(768)] for _ in docs]\ndata = [\n {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n for i in range(len(vectors))\n]\n\nprint(\"Data has\", len(data), \"entities, each with fields: \", data[0].keys())\nprint(\"Vector dim:\", len(data[0][\"vector\"]))\n","Data has 3 entities, each with fields: dict_keys(['id', 'vector', 'text', 'subject'])\nVector dim: 768\n","res = client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res)\n","{'insert_count': 3, 'ids': [0, 1, 2], 'cost': 0}\n","query_vectors = embedding_fn.encode_queries([\"Who is Alan Turing?\"])\n# If you don't have the embedding function you can use a fake vector to finish the demo:\n# query_vectors = [ [ random.uniform(-1, 1) for _ in range(768) ] ]\n\nres = client.search(\n collection_name=\"demo_collection\", # target collection\n data=query_vectors, # query vectors\n limit=2, # number of returned entities\n output_fields=[\"text\", \"subject\"], # specifies fields to be returned\n)\n\nprint(res)\n","data: [\"[{'id': 2, 'distance': 0.5859944820404053, 'entity': {'text': 'Born in Maida Vale, 
London, Turing was raised in southern England.', 'subject': 'history'}}, {'id': 1, 'distance': 0.5118255615234375, 'entity': {'text': 'Alan Turing was the first person to conduct substantial research in AI.', 'subject': 'history'}}]\"] , extra_info: {'cost': 0}\n","# Insert more docs in another subject.\ndocs = [\n \"Machine learning has been used for drug design.\",\n \"Computational synthesis with AI algorithms predicts molecular properties.\",\n \"DDR1 is involved in cancers and fibrosis.\",\n]\nvectors = embedding_fn.encode_documents(docs)\ndata = [\n {\"id\": 3 + i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"biology\"}\n for i in range(len(vectors))\n]\n\nclient.insert(collection_name=\"demo_collection\", data=data)\n\n# This will exclude any text in \"history\" subject despite close to the query vector.\nres = client.search(\n collection_name=\"demo_collection\",\n data=embedding_fn.encode_queries([\"tell me AI related information\"]),\n filter=\"subject == 'biology'\",\n limit=2,\n output_fields=[\"text\", \"subject\"],\n)\n\nprint(res)\n","data: [\"[{'id': 4, 'distance': 0.27030569314956665, 'entity': {'text': 'Computational synthesis with AI algorithms predicts molecular properties.', 'subject': 'biology'}}, {'id': 3, 'distance': 0.16425910592079163, 'entity': {'text': 'Machine learning has been used for drug design.', 'subject': 'biology'}}]\"] , extra_info: {'cost': 0}\n","res = client.query(\n collection_name=\"demo_collection\",\n filter=\"subject == 'history'\",\n output_fields=[\"text\", \"subject\"],\n)\n","res = client.query(\n collection_name=\"demo_collection\",\n ids=[0, 2],\n output_fields=[\"vector\", \"text\", \"subject\"],\n)\n","# Delete entities by primary key\nres = client.delete(collection_name=\"demo_collection\", ids=[0, 2])\n\nprint(res)\n\n# Delete entities by a filter expression\nres = client.delete(\n collection_name=\"demo_collection\",\n filter=\"subject == 'biology'\",\n)\n\nprint(res)\n","[0, 2]\n[3, 4, 5]\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\"milvus_demo.db\")\n","# Drop collection\nclient.drop_collection(collection_name=\"demo_collection\")\n","client = MilvusClient(uri=\"http://localhost:19530\", token=\"root:Milvus\")\n"],"headingContent":"","anchorList":[{"label":"Quickstart with Milvus Lite","href":"Quickstart-with-Milvus-Lite","type":1,"isActive":false},{"label":"Install Milvus","href":"Install-Milvus","type":2,"isActive":false},{"label":"Set Up Vector Database","href":"Set-Up-Vector-Database","type":2,"isActive":false},{"label":"Create a Collection","href":"Create-a-Collection","type":2,"isActive":false},{"label":"Prepare Data","href":"Prepare-Data","type":2,"isActive":false},{"label":"Represent text with vectors","href":"Represent-text-with-vectors","type":2,"isActive":false},{"label":"[Alternatively] Use fake representation with random vectors","href":"Alternatively-Use-fake-representation-with-random-vectors","type":2,"isActive":false},{"label":"Insert Data","href":"Insert-Data","type":2,"isActive":false},{"label":"Semantic Search","href":"Semantic-Search","type":2,"isActive":false},{"label":"Vector Search with Metadata Filtering","href":"Vector-Search-with-Metadata-Filtering","type":2,"isActive":false},{"label":"Delete Entities","href":"Delete-Entities","type":2,"isActive":false},{"label":"Load Existing Data","href":"Load-Existing-Data","type":2,"isActive":false},{"label":"Drop the collection","href":"Drop-the-collection","type":2,"isActive":false},{"label":"Learn 
More","href":"Learn-More","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install -U pymilvus\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\"milvus_demo.db\")\n","if client.has_collection(collection_name=\"demo_collection\"):\n client.drop_collection(collection_name=\"demo_collection\")\nclient.create_collection(\n collection_name=\"demo_collection\",\n dimension=768, # The vectors we will use in this demo has 768 dimensions\n)\n","$ pip install \"pymilvus[model]\"\n","from pymilvus import model\n\n# If connection to https://huggingface.co/ failed, uncomment the following path\n# import os\n# os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'\n\n# This will download a small embedding model \"paraphrase-albert-small-v2\" (~50MB).\nembedding_fn = model.DefaultEmbeddingFunction()\n\n# Text strings to search from.\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = embedding_fn.encode_documents(docs)\n# The output vector has 768 dimensions, matching the collection that we just created.\nprint(\"Dim:\", embedding_fn.dim, vectors[0].shape) # Dim: 768 (768,)\n\n# Each entity has id, vector representation, raw text, and a subject label that we use\n# to demo metadata filtering later.\ndata = [\n {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n for i in range(len(vectors))\n]\n\nprint(\"Data has\", len(data), \"entities, each with fields: \", data[0].keys())\nprint(\"Vector dim:\", len(data[0][\"vector\"]))\n","Dim: 768 (768,)\nData has 3 entities, each with fields: dict_keys(['id', 'vector', 'text', 'subject'])\nVector dim: 768\n","import random\n\n# Text strings to search from.\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n# Use fake representation with random vectors (768 dimension).\nvectors = [[random.uniform(-1, 1) for _ in range(768)] for _ in docs]\ndata = [\n {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n for i in range(len(vectors))\n]\n\nprint(\"Data has\", len(data), \"entities, each with fields: \", data[0].keys())\nprint(\"Vector dim:\", len(data[0][\"vector\"]))\n","Data has 3 entities, each with fields: dict_keys(['id', 'vector', 'text', 'subject'])\nVector dim: 768\n","res = client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res)\n","{'insert_count': 3, 'ids': [0, 1, 2], 'cost': 0}\n","query_vectors = embedding_fn.encode_queries([\"Who is Alan Turing?\"])\n# If you don't have the embedding function you can use a fake vector to finish the demo:\n# query_vectors = [ [ random.uniform(-1, 1) for _ in range(768) ] ]\n\nres = client.search(\n collection_name=\"demo_collection\", # target collection\n data=query_vectors, # query vectors\n limit=2, # number of returned entities\n output_fields=[\"text\", \"subject\"], # specifies fields to be returned\n)\n\nprint(res)\n","data: [\"[{'id': 2, 'distance': 0.5859944820404053, 'entity': {'text': 'Born in Maida Vale, London, Turing was raised in southern England.', 'subject': 'history'}}, {'id': 1, 'distance': 0.5118255615234375, 'entity': {'text': 'Alan Turing was the first person to conduct substantial 
research in AI.', 'subject': 'history'}}]\"] , extra_info: {'cost': 0}\n","# Insert more docs in another subject.\ndocs = [\n \"Machine learning has been used for drug design.\",\n \"Computational synthesis with AI algorithms predicts molecular properties.\",\n \"DDR1 is involved in cancers and fibrosis.\",\n]\nvectors = embedding_fn.encode_documents(docs)\ndata = [\n {\"id\": 3 + i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"biology\"}\n for i in range(len(vectors))\n]\n\nclient.insert(collection_name=\"demo_collection\", data=data)\n\n# This will exclude any text in \"history\" subject despite close to the query vector.\nres = client.search(\n collection_name=\"demo_collection\",\n data=embedding_fn.encode_queries([\"tell me AI related information\"]),\n filter=\"subject == 'biology'\",\n limit=2,\n output_fields=[\"text\", \"subject\"],\n)\n\nprint(res)\n","data: [\"[{'id': 4, 'distance': 0.27030569314956665, 'entity': {'text': 'Computational synthesis with AI algorithms predicts molecular properties.', 'subject': 'biology'}}, {'id': 3, 'distance': 0.16425910592079163, 'entity': {'text': 'Machine learning has been used for drug design.', 'subject': 'biology'}}]\"] , extra_info: {'cost': 0}\n","res = client.query(\n collection_name=\"demo_collection\",\n filter=\"subject == 'history'\",\n output_fields=[\"text\", \"subject\"],\n)\n","res = client.query(\n collection_name=\"demo_collection\",\n ids=[0, 2],\n output_fields=[\"vector\", \"text\", \"subject\"],\n)\n","# Delete entities by primary key\nres = client.delete(collection_name=\"demo_collection\", ids=[0, 2])\n\nprint(res)\n\n# Delete entities by a filter expression\nres = client.delete(\n collection_name=\"demo_collection\",\n filter=\"subject == 'biology'\",\n)\n\nprint(res)\n","[0, 2]\n[3, 4, 5]\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\"milvus_demo.db\")\n","# Drop collection\nclient.drop_collection(collection_name=\"demo_collection\")\n","client = MilvusClient(uri=\"http://localhost:19530\", token=\"root:Milvus\")\n"],"headingContent":"Quickstart with Milvus Lite","anchorList":[{"label":"Milvus Liteのクイックスタート","href":"Quickstart-with-Milvus-Lite","type":1,"isActive":false},{"label":"Milvusのインストール","href":"Install-Milvus","type":2,"isActive":false},{"label":"ベクターデータベースのセットアップ","href":"Set-Up-Vector-Database","type":2,"isActive":false},{"label":"コレクションの作成","href":"Create-a-Collection","type":2,"isActive":false},{"label":"データの準備","href":"Prepare-Data","type":2,"isActive":false},{"label":"ベクトルでテキストを表現する","href":"Represent-text-with-vectors","type":2,"isActive":false},{"label":"[代替] ランダムなベクトルによる偽の表現を使用する。","href":"Alternatively-Use-fake-representation-with-random-vectors","type":2,"isActive":false},{"label":"データの挿入","href":"Insert-Data","type":2,"isActive":false},{"label":"セマンティック検索","href":"Semantic-Search","type":2,"isActive":false},{"label":"メタデータフィルタリングによるベクトル検索","href":"Vector-Search-with-Metadata-Filtering","type":2,"isActive":false},{"label":"エンティティの削除","href":"Delete-Entities","type":2,"isActive":false},{"label":"既存データの読み込み","href":"Load-Existing-Data","type":2,"isActive":false},{"label":"コレクションの削除","href":"Drop-the-collection","type":2,"isActive":false},{"label":"詳細","href":"Learn-More","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/getstarted/quickstart.md b/localization/v2.4.x/site/ja/getstarted/quickstart.md index e0d2aae60..631d25d04 100644 --- a/localization/v2.4.x/site/ja/getstarted/quickstart.md +++ 
b/localization/v2.4.x/site/ja/getstarted/quickstart.md @@ -1,9 +1,8 @@ --- id: quickstart.md -summary: Milvusで始めよう。 +summary: Milvusを始めよう。 title: クイックスタート --- -

Quickstart with Milvus Lite

Open In Colab | GitHub Repository

Vectors, the output data format of neural network models, can effectively encode information and play a pivotal role in AI applications such as knowledge bases, semantic search, and Retrieval Augmented Generation (RAG).

Milvus is an open-source vector database that suits AI applications of every size, from running a demo chatbot in a Jupyter notebook to building web-scale search that serves billions of users. In this guide, we walk you through how to set up Milvus locally within minutes and use the Python client library to generate, store, and search vectors.

Install Milvus

In this guide we use Milvus Lite, a Python library included in pymilvus that can be embedded into the client application. Milvus also supports deployment on Docker and Kubernetes for production use cases.

Before starting, make sure you have Python 3.8+ available in the local environment. Install pymilvus, which contains both the Python client library and Milvus Lite:

              $ pip install -U pymilvus
               
Set Up Vector Database

To create a local Milvus vector database, simply instantiate a MilvusClient, specifying a file name to store all data, such as "milvus_demo.db".

from pymilvus import MilvusClient

client = MilvusClient("milvus_demo.db")
Create a Collection

First, install the model library. This package includes essential ML tools such as PyTorch. The download may take time if PyTorch is not yet installed in your environment.

              $ pip install "pymilvus[model]"
               
Generate vector embeddings with the default model. Milvus expects data to be inserted as a list of dictionaries, where each dictionary represents a data record, termed as an entity.

from pymilvus import model

# If connection to https://huggingface.co/ failed, uncomment the following path
# import os
# os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

# This will download a small embedding model "paraphrase-albert-small-v2" (~50MB).
embedding_fn = model.DefaultEmbeddingFunction()

# Text strings to search from.
docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Alan Turing was the first person to conduct substantial research in AI.",
    "Born in Maida Vale, London, Turing was raised in southern England.",
]

vectors = embedding_fn.encode_documents(docs)
# The output vector has 768 dimensions, matching the collection that we just created.
print("Dim:", embedding_fn.dim, vectors[0].shape)  # Dim: 768 (768,)

# Each entity has id, vector representation, raw text, and a subject label that we use
# to demo metadata filtering later.
data = [
    {"id": i, "vector": vectors[i], "text": docs[i], "subject": "history"}
    for i in range(len(vectors))
]

print("Data has", len(data), "entities, each with fields: ", data[0].keys())
print("Vector dim:", len(data[0]["vector"]))
              Dim: 768 (768,)
               Data has 3 entities, each with fields:  dict_keys(['id', 'vector', 'text', 'subject'])
               Vector dim: 768
[Alternatively] Use fake representation with random vectors

If you could not download the model due to network issues, as a workaround you can use random vectors to represent the text and still finish the example. Just note that the search results will not reflect semantic similarity, as the vectors are fake ones.

import random

# Text strings to search from.
docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Alan Turing was the first person to conduct substantial research in AI.",
    "Born in Maida Vale, London, Turing was raised in southern England.",
]
# Use fake representation with random vectors (768 dimension).
vectors = [[random.uniform(-1, 1) for _ in range(768)] for _ in docs]
data = [
    {"id": i, "vector": vectors[i], "text": docs[i], "subject": "history"}
    for i in range(len(vectors))
]

print("Data has", len(data), "entities, each with fields: ", data[0].keys())
print("Vector dim:", len(data[0]["vector"]))
              Data has 3 entities, each with fields:  dict_keys(['id', 'vector', 'text', 'subject'])
               Vector dim: 768
               
              @@ -223,7 +221,6 @@ data = [ print(res)
              -
              {'insert_count': 3, 'ids': [0, 1, 2], 'cost': 0}
               

You can perform semantic search by representing the search query text as a vector and running a vector similarity search on Milvus.

Milvus accepts one or more vector search requests at the same time. The value of the query_vectors variable is a list of vectors, where each vector is an array of float numbers.

              query_vectors = embedding_fn.encode_queries(["Who is Alan Turing?"])
               # If you don't have the embedding function you can use a fake vector to finish the demo:
               # query_vectors = [ [ random.uniform(-1, 1) for _ in range(768) ] ]
               
               res = client.search(
              -collection_name="demo_collection", # target collection
              -data=query_vectors, # query vectors
              -limit=2, # number of returned entities
              -output_fields=["text", "subject"], # specifies fields to be returned
              +    collection_name="demo_collection",  # target collection
              +    data=query_vectors,  # query vectors
              +    limit=2,  # number of returned entities
              +    output_fields=["text", "subject"],  # specifies fields to be returned
               )
               
               print(res)
               
              -
              data: ["[{'id': 2, 'distance': 0.5859944820404053, 'entity': {'text': 'Born in Maida Vale, London, Turing was raised in southern England.', 'subject': 'history'}}, {'id': 1, 'distance': 0.5118255615234375, 'entity': {'text': 'Alan Turing was the first person to conduct substantial research in AI.', 'subject': 'history'}}]"] , extra_info: {'cost': 0}
               

The output is a list of results, each mapping to a vector search query. Each query contains a list of results, where each result contains the entity's primary key, the distance to the query vector, and the entity details for the specified output_fields.
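
As a minimal sketch of walking this structure (assuming the res object returned by client.search() above), you could iterate it like this:

# Minimal sketch: iterate the nested search results. `res` is assumed to be
# the object returned by client.search() above: one list of hits per query.
for query_hits in res:          # one entry per query vector
    for hit in query_hits:      # one entry per returned entity
        print(hit["id"], hit["distance"], hit["entity"]["text"])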

@@ -275,7 +271,7 @@ output_fields=["text",

You can also conduct vector search while considering the values of the metadata (called "scalar" fields in Milvus, as scalar refers to non-vector data). This is done with a filter expression specifying certain criteria. Let's see how to search and filter with the subject field in the following example.

              # Insert more docs in another subject.
               docs = [
                   "Machine learning has been used for drug design.",
@@ -292,21 +288,20 @@ client.insert(collection_name="demo_collection"
               
               # This will exclude any text in "history" subject despite close to the query vector.
               res = client.search(
              -collection_name="demo_collection",
              -data=embedding_fn.encode_queries(["tell me AI related information"]),
              -filter="subject == 'biology'",
              -limit=2,
              -output_fields=["text", "subject"],
              +    collection_name="demo_collection",
              +    data=embedding_fn.encode_queries(["tell me AI related information"]),
              +    filter="subject == 'biology'",
              +    limit=2,
              +    output_fields=["text", "subject"],
               )
               
               print(res)
               
              -
              data: ["[{'id': 4, 'distance': 0.27030569314956665, 'entity': {'text': 'Computational synthesis with AI algorithms predicts molecular properties.', 'subject': 'biology'}}, {'id': 3, 'distance': 0.16425910592079163, 'entity': {'text': 'Machine learning has been used for drug design.', 'subject': 'biology'}}]"] , extra_info: {'cost': 0}
               

By default, scalar fields are not indexed. If you need to perform a metadata-filtered search on a large dataset, you can consider using a fixed schema and also turn on an index to improve search performance (a hedged sketch follows).
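
A hedged sketch of turning such an index on (assuming "subject" is declared in a fixed schema, since dynamic fields cannot be indexed directly, and that your Milvus version supports the INVERTED scalar index type):

# Hedged sketch: add an inverted index on the scalar "subject" field to
# speed up filtered search. Assumes "subject" is a declared schema field and
# that the INVERTED scalar index type is available in this Milvus version.
index_params = client.prepare_index_params()
index_params.add_index(field_name="subject", index_type="INVERTED")
client.create_index(collection_name="demo_collection", index_params=index_params)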

In addition to vector search, you can also perform other types of searches:

Query

A query() is an operation that retrieves all entities matching a criterion, such as a filter expression or matching some IDs.

For example, retrieving all entities whose scalar field has a particular value:

              res = client.query(
                   collection_name="demo_collection",
@@ -344,13 +339,12 @@ res = client.delete(collection_name="demo_collection",
               
               # Delete entities by a filter expression
               res = client.delete(
              -collection_name="demo_collection",
              -filter="subject == 'biology'",
              +    collection_name="demo_collection",
              +    filter="subject == 'biology'",
               )
               
               print(res)
               
              -
              [0, 2]
               [3, 4, 5]
               
              @@ -374,7 +368,6 @@ collection_name="demo_collection", client = MilvusClient("milvus_demo.db")
          -

Drop the collection

Milvus Lite is great for getting started with a local Python program. If you have large-scale data or want to use Milvus in production, you can learn about deploying Milvus on Docker and Kubernetes. All deployment modes of Milvus share the same API, so your client-side code does not need to change much when moving to another deployment mode. Simply specify the URI and token of a Milvus server deployed anywhere:

          client = MilvusClient(uri="http://localhost:19530", token="root:Milvus")
           

Milvus provides REST and gRPC APIs, with client libraries in languages such as Python, Java, Go, C#, and Node.js.

          diff --git a/localization/v2.4.x/site/ja/getstarted/run-milvus-docker/install_standalone-docker-compose.json b/localization/v2.4.x/site/ja/getstarted/run-milvus-docker/install_standalone-docker-compose.json index a11827dcf..58bd1f543 100644 --- a/localization/v2.4.x/site/ja/getstarted/run-milvus-docker/install_standalone-docker-compose.json +++ b/localization/v2.4.x/site/ja/getstarted/run-milvus-docker/install_standalone-docker-compose.json @@ -1,29 +1 @@ -{ - "codeList": [ - "# Download the configuration file\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml\n\n# Start Milvus\n$ sudo docker compose up -d\n\nCreating milvus-etcd ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n", - "$ sudo docker compose ps\n\n Name Command State Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd etcd -advertise-client-url ... Up 2379/tcp, 2380/tcp\nmilvus-minio /usr/bin/docker-entrypoint ... Up (healthy) 9000/tcp\nmilvus-standalone /tini -- milvus run standalone Up 0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n", - "# Stop Milvus\n$ sudo docker compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n" - ], - "headingContent": "", - "anchorList": [ - { - "label": "Docker ComposeでMilvusを実行する", - "href": "Run-Milvus-with-Docker-Compose", - "type": 1, - "isActive": false - }, - { - "label": "前提条件", - "href": "Prerequisites", - "type": 2, - "isActive": false - }, - { - "label": "Milvusを設置する", - "href": "Install-Milvus", - "type": 2, - "isActive": false - }, - { "label": "次の記事", "href": "Whats-next", "type": 2, "isActive": false } - ] -} +{"codeList":["# Download the configuration file\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml\n\n# Start Milvus\n$ sudo docker-compose up -d\n\nCreating milvus-etcd ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker-compose ps\n\n Name Command State Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd etcd -advertise-client-url ... Up 2379/tcp, 2380/tcp\nmilvus-minio /usr/bin/docker-entrypoint ... 
Up (healthy) 9000/tcp\nmilvus-standalone /tini -- milvus run standalone Up 0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","# Stop Milvus\n$ sudo docker-compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n"],"headingContent":"Run Milvus with Docker Compose","anchorList":[{"label":"Docker ComposeでMilvusを起動する","href":"Run-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"前提条件","href":"Prerequisites","type":2,"isActive":false},{"label":"Milvusのインストール","href":"Install-Milvus","type":2,"isActive":false},{"label":"次のステップ","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/getstarted/run-milvus-docker/install_standalone-docker-compose.md b/localization/v2.4.x/site/ja/getstarted/run-milvus-docker/install_standalone-docker-compose.md index 4abb91beb..e08c7027c 100644 --- a/localization/v2.4.x/site/ja/getstarted/run-milvus-docker/install_standalone-docker-compose.md +++ b/localization/v2.4.x/site/ja/getstarted/run-milvus-docker/install_standalone-docker-compose.md @@ -2,8 +2,8 @@ id: install_standalone-docker-compose.md label: Docker Compose related_key: Docker Compose -summary: Docker ComposeでMilvusをスタンドアロンインストールする方法をご紹介します。 -title: Docker ComposeでMilvusを実行する +summary: Docker Composeを使ってmilvusをスタンドアロンインストールする方法をご紹介します。 +title: Docker ComposeでMilvusを起動する ---

Run Milvus with Docker Compose

Milvus provides a Docker Compose configuration file in the Milvus repository. To install Milvus using Docker Compose, just run the following commands.

          # Download the configuration file
          -$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml
          +$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml
           
           # Start Milvus
          -$ sudo docker compose up -d
          +$ sudo docker-compose up -d
           
           Creating milvus-etcd  ... done
           Creating milvus-minio ... done
          @@ -69,19 +69,19 @@ Creating milvus-standalone ... done
           
• If you failed to run the above command, please check whether your system has Docker Compose V1 installed. If this is the case, you are advised to migrate to Docker Compose V2 according to the notes on this page.

• If you run into any issues pulling the image, please contact us at community@zilliz.com with details about the problem.

After Milvus starts:

• Containers named milvus-standalone, milvus-minio, and milvus-etcd are up.
  • The milvus-etcd container exposes no ports to the host and maps its data to volumes/etcd in the current folder.
  • The milvus-minio container serves ports 9090 and 9091 locally with the default authentication credentials and maps its data to volumes/minio in the current folder.
  • The milvus-standalone container serves port 19530 locally with the default settings and maps its data to volumes/milvus in the current folder.

You can check whether the containers are up and running with the following command:

-$ sudo docker compose ps
+$ sudo docker-compose ps
             
                   Name                     Command                  State                            Ports
             --------------------------------------------------------------------------------------------------------------------
            @@ -91,7 +91,7 @@ milvus-standalone   /tini -- milvus run standalone   Up             0.0.0.0:1953
             

You can stop and delete this container as follows.

            # Stop Milvus
            -$ sudo docker compose down
            +$ sudo docker-compose down
             
             # Delete service data
             $ sudo rm -rf volumes
            @@ -111,7 +111,7 @@ $ sudo rm
                       d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
                     >
                   
            -    

With Milvus installed in Docker, you can:

• Upgrade Milvus Using Helm Chart

• Scale your Milvus cluster

• Deploy your Milvus cluster on clouds:

• Explore Milvus Backup, an open-source tool for Milvus data backups.

• Explore Birdwatcher, an open-source tool for debugging Milvus and dynamic configuration updates.

• Explore Attu, an open-source GUI tool for intuitive Milvus management.

• Monitor Milvus with Prometheus.

          diff --git a/localization/v2.4.x/site/ja/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json b/localization/v2.4.x/site/ja/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json index f0980da01..63660c047 100644 --- a/localization/v2.4.x/site/ja/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json +++ b/localization/v2.4.x/site/ja/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json @@ -1,45 +1 @@ -{ - "codeList": [ - "$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml\n", - "...\nstandalone:\n ...\n deploy:\n resources:\n reservations:\n devices:\n - driver: nvidia\n capabilities: [\"gpu\"]\n device_ids: [\"0\"]\n...\n", - "...\nstandalone:\n ...\n deploy:\n resources:\n reservations:\n devices:\n - driver: nvidia\n capabilities: [\"gpu\"]\n device_ids: ['0', '1']\n...\n", - "$ sudo docker compose up -d\n\nCreating milvus-etcd ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n", - "$ sudo docker compose ps\n\n Name Command State Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd etcd -advertise-client-url ... Up 2379/tcp, 2380/tcp\nmilvus-minio /usr/bin/docker-entrypoint ... Up (healthy) 9000/tcp\nmilvus-standalone /tini -- milvus run standalone Up 0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n", - "$ CUDA_VISIBLE_DEVICES=0 ./milvus run standalone\n", - "$ CUDA_VISIBLE_DEVICES=0,1 ./milvus run standalone\n", - "# Stop Milvus\n$ sudo docker compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n", - "docker cp :/milvus/configs/milvus.yaml milvus.yaml\n", - "vim milvus.yaml\n", - "...\ngpu:\n initMemSize: 0\n maxMemSize: 0\n...\n", - "docker cp milvus.yaml :/milvus/configs/milvus.yaml\n", - "docker stop \ndocker start \n" - ], - "headingContent": "", - "anchorList": [ - { - "label": "Docker Composeを使ってGPUをサポートしたMilvusを実行する", - "href": "Run-Milvus-with-GPU-Support-Using-Docker-Compose", - "type": 1, - "isActive": false - }, - { - "label": "前提条件", - "href": "Prerequisites", - "type": 2, - "isActive": false - }, - { - "label": "Milvusを設置する", - "href": "Install-Milvus", - "type": 2, - "isActive": false - }, - { - "label": "メモリプールの設定", - "href": "Configure-memory-pool", - "type": 2, - "isActive": false - }, - { "label": "次の記事", "href": "Whats-next", "type": 2, "isActive": false } - ] -} +{"codeList":["$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml\n","...\nstandalone:\n ...\n deploy:\n resources:\n reservations:\n devices:\n - driver: nvidia\n capabilities: [\"gpu\"]\n device_ids: [\"0\"]\n...\n","...\nstandalone:\n ...\n deploy:\n resources:\n reservations:\n devices:\n - driver: nvidia\n capabilities: [\"gpu\"]\n device_ids: ['0', '1']\n...\n","$ sudo docker compose up -d\n\nCreating milvus-etcd ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker compose ps\n\n Name Command State Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd etcd -advertise-client-url ... Up 2379/tcp, 2380/tcp\nmilvus-minio /usr/bin/docker-entrypoint ... 
Up (healthy) 9000/tcp\nmilvus-standalone /tini -- milvus run standalone Up 0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","$ CUDA_VISIBLE_DEVICES=0 ./milvus run standalone\n","$ CUDA_VISIBLE_DEVICES=0,1 ./milvus run standalone\n","# Stop Milvus\n$ sudo docker compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n","docker cp :/milvus/configs/milvus.yaml milvus.yaml\n","vim milvus.yaml\n","...\ngpu:\n initMemSize: 0\n maxMemSize: 0\n...\n","docker cp milvus.yaml :/milvus/configs/milvus.yaml\n","docker stop \ndocker start \n"],"headingContent":"Run Milvus with GPU Support Using Docker Compose","anchorList":[{"label":"Docker Composeを使用したGPUサポート付きMilvusの実行","href":"Run-Milvus-with-GPU-Support-Using-Docker-Compose","type":1,"isActive":false},{"label":"前提条件","href":"Prerequisites","type":2,"isActive":false},{"label":"Milvusのインストール","href":"Install-Milvus","type":2,"isActive":false},{"label":"メモリプールの設定","href":"Configure-memory-pool","type":2,"isActive":false},{"label":"次の作業","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md b/localization/v2.4.x/site/ja/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md index 316c0dfc7..54ee00002 100644 --- a/localization/v2.4.x/site/ja/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md +++ b/localization/v2.4.x/site/ja/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md @@ -2,8 +2,8 @@ id: install_standalone-docker-compose-gpu.md label: Standalone (Docker Compose) related_key: Kubernetes -summary: KubernetesにMilvusクラスタをインストールする方法を学ぶ。 -title: Docker Composeを使ってGPUをサポートしたMilvusを実行する +summary: KubernetesにMilvusクラスタをインストールする方法をご紹介します。 +title: Docker Composeを使用したGPUサポート付きMilvusの実行 ---

Run Milvus with GPU Support Using Docker Compose

To install Milvus with GPU support using Docker Compose, follow the steps below.

1. Download and configure the YAML file

Download milvus-standalone-docker-compose-gpu.yml and save it as docker-compose.yml, either manually or with the following command.

-$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml
+$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml
           

You need to make the following changes to the environment variables of the standalone service in the YAML file:

• To assign a specific GPU device to Milvus, locate the deploy.resources.reservations.devices[0].devices_ids field in the definition of the standalone service and replace its value with the ID of the desired GPU. You can use the nvidia-smi tool, included with NVIDIA GPU display drivers, to determine the ID of a GPU device. Milvus supports multiple GPU devices.

Assign a single GPU device to Milvus:

          ...
          @@ -79,7 +79,7 @@ title: Docker Composeを使ってGPUをサポートしたMilvusを実行する
                       device_ids: ["0"]
           ...
           
Assign multiple GPU devices to Milvus:

          ...
           standalone:
             ...
          @@ -92,7 +92,7 @@ title: Docker Composeを使ってGPUをサポートしたMilvusを実行する
                       device_ids: ['0', '1']
           ...
           
2. Start Milvus

In the directory that holds docker-compose.yml, start Milvus by running:

          $ sudo docker compose up -d
           
           Creating milvus-etcd  ... done
          @@ -102,12 +102,12 @@ Creating milvus-standalone ... done
           

If you failed to run the above command, check whether your system has Docker Compose V1 installed. If this is the case, you are advised to migrate to Docker Compose V2 according to the notes on this page.

After Milvus starts:

• Containers named milvus-standalone, milvus-minio, and milvus-etcd are up.
  • The milvus-etcd container exposes no ports to the host and maps its data to volumes/etcd in the current folder.
  • The milvus-minio container serves ports 9090 and 9091 locally with the default authentication credentials and maps its data to volumes/minio in the current folder.
  • The milvus-standalone container serves port 19530 locally with the default settings and maps its data to volumes/milvus in the current folder.

You can check whether the containers are up and running with the following command:

          @@ -119,7 +119,7 @@ milvus-etcd etcd -advertise-client-url ... Up 2379/tcp, 23 milvus-minio /usr/bin/docker-entrypoint ... Up (healthy) 9000/tcp milvus-standalone /tini -- milvus run standalone Up 0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp
If you have assigned multiple GPU devices to Milvus in docker-compose.yml, you can specify which GPU devices are visible or available for use.

Make GPU device 0 visible to Milvus:

          $ CUDA_VISIBLE_DEVICES=0 ./milvus run standalone
           
          @@ -207,7 +207,7 @@ docker start <milvus_container_id>
• Hybrid Search

• Upgrade Milvus Using Helm Chart

• Scale your Milvus cluster

• Deploy your Milvus cluster on clouds:

• Explore Milvus Backup, an open-source tool for Milvus data backups.

• Explore Birdwatcher, an open-source tool for debugging Milvus and dynamic configuration updates.

• Explore Attu, an open-source GUI tool for intuitive Milvus management.

• Monitor Milvus with Prometheus.

diff --git a/localization/v2.4.x/site/ja/integrations/evaluation_with_deepeval.md b/localization/v2.4.x/site/ja/integrations/evaluation_with_deepeval.md index 4084dc74f..e82f9434d 100644 --- a/localization/v2.4.x/site/ja/integrations/evaluation_with_deepeval.md +++ b/localization/v2.4.x/site/ja/integrations/evaluation_with_deepeval.md @@ -20,7 +20,8 @@ title: DeepEvalによる評価

Open In Colab | GitHub Repository

This guide demonstrates how to use DeepEval to evaluate a Retrieval-Augmented Generation (RAG) pipeline built upon Milvus.

The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

DeepEval is a framework that helps you evaluate RAG pipelines. There are existing tools and frameworks that help you build these pipelines, but evaluating them and quantifying pipeline performance can be hard. This is where DeepEval comes in.

      @@ -183,7 +184,7 @@ my_rag = RAG(openai_client=openai_clie

As for the argument of MilvusClient (a minimal connection sketch follows the list below):

• Setting the uri as a local file, e.g. ./milvus.db, is the most convenient method.
• If you have a large scale of data, you can set up a more performant Milvus server on Docker or Kubernetes. In this setup, please use the server uri, e.g. http://localhost:19530, as your uri.
• If you want to use Zilliz Cloud, the fully managed cloud service for Milvus, adjust the uri and token, which correspond to the Public Endpoint and API key in Zilliz Cloud.
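
A minimal connection sketch covering the three modes above; the endpoint and token values are placeholders, not real credentials:

from pymilvus import MilvusClient

# Milvus Lite: store everything in a local file.
client = MilvusClient(uri="./milvus.db")

# Self-hosted Milvus server on Docker/Kubernetes (placeholder address):
# client = MilvusClient(uri="http://localhost:19530")

# Zilliz Cloud (placeholder endpoint and API key):
# client = MilvusClient(uri="https://<public-endpoint>", token="<api-key>")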
      @@ -366,9 +367,9 @@ result = evaluate(
      /Users/eureka/miniconda3/envs/zilliz/lib/python3.9/site-packages/deepeval/__init__.py:49: UserWarning: You are using deepeval version 1.1.6, however version 1.2.2 is available. You should consider upgrading via the "pip install --upgrade deepeval" command.
         warnings.warn(
       
Running DeepEval's latest Contextual Precision Metric! (using gpt-4o, strict=False, async_mode=True)...

Running DeepEval's latest Contextual Recall Metric! (using gpt-4o, strict=False, async_mode=True)...

Running DeepEval's latest Contextual Relevancy Metric! (using gpt-4o, strict=False, async_mode=True)...
      Event loop is already running. Applying nest_asyncio patch to allow async execution...
       
       
      @@ -422,12 +423,12 @@ result = evaluate(
           print_results=False,  # Change to True to see detailed metric results
       )
       
Running DeepEval's latest Answer Relevancy Metric! (using gpt-4o, strict=False, async_mode=True)...

Running DeepEval's latest Faithfulness Metric! (using gpt-4o, strict=False, async_mode=True)...
      Event loop is already running. Applying nest_asyncio patch to allow async execution...
       
       
       Evaluating 3 test case(s) in parallel: |██████████|100% (3/3) [Time Taken: 00:11,  3.97s/test case]
       
Tests finished! Run 'deepeval login' to view evaluation results on Confident AI.
‼️ NOTE: You can also run evaluations on all of deepeval's metrics directly on Confident AI instead.
diff --git a/localization/v2.4.x/site/ja/integrations/evaluation_with_phoenix.md b/localization/v2.4.x/site/ja/integrations/evaluation_with_phoenix.md index 68ac7e7d4..d44348075 100644 --- a/localization/v2.4.x/site/ja/integrations/evaluation_with_phoenix.md +++ b/localization/v2.4.x/site/ja/integrations/evaluation_with_phoenix.md @@ -20,7 +20,8 @@ title: Arize Pheonixによる評価

Open In Colab | GitHub Repository

This guide demonstrates how to use Arize Phoenix to evaluate a Retrieval-Augmented Generation (RAG) pipeline built upon Milvus.

The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

Arize Phoenix is a framework that helps you evaluate RAG pipelines. There are existing tools and frameworks that help you build these pipelines, but evaluating them and quantifying pipeline performance can be hard. This is where Arize Phoenix comes in.

      @@ -334,10 +335,10 @@ Answering questions: 100%|██████████| 3/3 [00:03<00:00,
• QA Eval: assesses the accuracy of the model's answers to input queries.

• QA Explanation: provides a detailed explanation of why an answer is correct or incorrect.
Overview of Phoenix Tracing

Phoenix provides OTEL-compatible tracing for LLM applications, with integrations for frameworks like Langchain and LlamaIndex and SDKs like OpenAI and Mistral. Tracing captures the entire request flow and provides insights into the following (a minimal instrumentation sketch follows the list):

• Application latency: identify and optimize slow LLM invocations and component performance.
• Token usage: break down token consumption for cost optimization.
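
A hedged sketch of enabling this tracing, based on the OpenAIInstrumentor().instrument() call used later in this guide; the openinference package name is an assumption, and some Phoenix versions also require registering an OTEL tracer provider first:

# Hedged sketch: launch the local Phoenix UI and auto-instrument OpenAI calls.
import phoenix as px
from openinference.instrumentation.openai import OpenAIInstrumentor

px.launch_app()                    # start the local Phoenix trace viewer
OpenAIInstrumentor().instrument()  # trace every OpenAI SDK call from here on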
    • @@ -359,7 +360,7 @@ OpenAIInstrumentor().instrument()


    import nest_asyncio
     
    diff --git a/localization/v2.4.x/site/ja/integrations/integrate_with_bentoml.json b/localization/v2.4.x/site/ja/integrations/integrate_with_bentoml.json
    index 52fa4dfaa..5d1eda2ee 100644
    --- a/localization/v2.4.x/site/ja/integrations/integrate_with_bentoml.json
    +++ b/localization/v2.4.x/site/ja/integrations/integrate_with_bentoml.json
    @@ -1 +1 @@
    -{"codeList":["$ pip install -U pymilvus bentoml\n","import bentoml\n\nBENTO_EMBEDDING_MODEL_END_POINT = \"BENTO_EMBEDDING_MODEL_END_POINT\"\nBENTO_API_TOKEN = \"BENTO_API_TOKEN\"\n\nembedding_client = bentoml.SyncHTTPClient(\n    BENTO_EMBEDDING_MODEL_END_POINT, token=BENTO_API_TOKEN\n)\n","# naively chunk on newlines\ndef chunk_text(filename: str) -> list:\n    with open(filename, \"r\") as f:\n        text = f.read()\n    sentences = text.split(\"\\n\")\n    return sentences\n","import os\nimport requests\nimport urllib.request\n\n# set up the data source\nrepo = \"ytang07/bento_octo_milvus_RAG\"\ndirectory = \"data\"\nsave_dir = \"./city_data\"\napi_url = f\"https://api.github.com/repos/{repo}/contents/{directory}\"\n\n\nresponse = requests.get(api_url)\ndata = response.json()\n\nif not os.path.exists(save_dir):\n    os.makedirs(save_dir)\n\nfor item in data:\n    if item[\"type\"] == \"file\":\n        file_url = item[\"download_url\"]\n        file_path = os.path.join(save_dir, item[\"name\"])\n        urllib.request.urlretrieve(file_url, file_path)\n","# please upload your data directory under this file's folder\ncities = os.listdir(\"city_data\")\n# store chunked text for each of the cities in a list of dicts\ncity_chunks = []\nfor city in cities:\n    chunked = chunk_text(f\"city_data/{city}\")\n    cleaned = []\n    for chunk in chunked:\n        if len(chunk) > 7:\n            cleaned.append(chunk)\n    mapped = {\"city_name\": city.split(\".\")[0], \"chunks\": cleaned}\n    city_chunks.append(mapped)\n","def get_embeddings(texts: list) -> list:\n    if len(texts) > 25:\n        splits = [texts[x : x + 25] for x in range(0, len(texts), 25)]\n        embeddings = []\n        for split in splits:\n            embedding_split = embedding_client.encode(sentences=split)\n            embeddings += embedding_split\n        return embeddings\n    return embedding_client.encode(\n        sentences=texts,\n    )\n","entries = []\nfor city_dict in city_chunks:\n    # No need for the embeddings list if get_embeddings already returns a list of lists\n    embedding_list = get_embeddings(city_dict[\"chunks\"])  # returns a list of lists\n    # Now match texts with embeddings and city name\n    for i, embedding in enumerate(embedding_list):\n        entry = {\n            \"embedding\": embedding,\n            \"sentence\": city_dict[\"chunks\"][\n                i\n            ],  # Assume \"chunks\" has the corresponding texts for the embeddings\n            \"city\": city_dict[\"city_name\"],\n        }\n        entries.append(entry)\n    print(entries)\n","from pymilvus import MilvusClient\n\nCOLLECTION_NAME = \"Bento_Milvus_RAG\"  # random name for your collection\nDIMENSION = 384\n\n# Initialize a Milvus Lite client\nmilvus_client = MilvusClient(\"milvus_demo.db\")\n","from pymilvus import connections\n\nconnections.connect(uri=\"milvus_demo.db\")\n","from pymilvus import MilvusClient, DataType, Collection\n\n# Create schema\nschema = MilvusClient.create_schema(\n    auto_id=True,\n    enable_dynamic_field=True,\n)\n\n# 3.2. 
Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"embedding\", datatype=DataType.FLOAT_VECTOR, dim=DIMENSION)\n","# prepare index parameters\nindex_params = milvus_client.prepare_index_params()\n\n# add index\nindex_params.add_index(\n    field_name=\"embedding\",\n    index_type=\"AUTOINDEX\",  # use autoindex instead of other complex indexing method\n    metric_type=\"COSINE\",  # L2, COSINE, or IP\n)\n\n# create collection\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n    collection_name=COLLECTION_NAME, schema=schema, index_params=index_params\n)\n\n# Outside the loop, now you upsert all the entries at once\nmilvus_client.insert(collection_name=COLLECTION_NAME, data=entries)\n","BENTO_LLM_END_POINT = \"BENTO_LLM_END_POINT\"\n\nllm_client = bentoml.SyncHTTPClient(BENTO_LLM_END_POINT, token=BENTO_API_TOKEN)\n","def dorag(question: str, context: str):\n\n    prompt = (\n        f\"You are a helpful assistant. The user has a question. Answer the user question based only on the context: {context}. \\n\"\n        f\"The user question is {question}\"\n    )\n\n    results = llm_client.generate(\n        max_tokens=1024,\n        prompt=prompt,\n    )\n\n    res = \"\"\n    for result in results:\n        res += result\n\n    return res\n","question = \"What state is Cambridge in?\"\n\n\ndef ask_a_question(question):\n    embeddings = get_embeddings([question])\n    res = milvus_client.search(\n        collection_name=COLLECTION_NAME,\n        data=embeddings,  # search for the one (1) embedding returned as a list of lists\n        anns_field=\"embedding\",  # Search across embeddings\n        limit=5,  # get me the top 5 results\n        output_fields=[\"sentence\"],  # get the sentence/chunk and city\n    )\n\n    sentences = []\n    for hits in res:\n        for hit in hits:\n            print(hit)\n            sentences.append(hit[\"entity\"][\"sentence\"])\n    context = \". \".join(sentences)\n    return context\n\n\ncontext = ask_a_question(question=question)\nprint(context)\n","print(dorag(question=question, context=context))\n"],"headingContent":"","anchorList":[{"label":"MilvusとBentoMLによる検索支援型生成(RAG)","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-BentoML","type":1,"isActive":false},{"label":"はじめに","href":"Introduction","type":2,"isActive":false},{"label":"始める前に","href":"Before-you-begin","type":2,"isActive":false},{"label":"BentoML/BentoCloudでエンベッディングを扱う","href":"Serving-Embeddings-with-BentoMLBentoCloud","type":2,"isActive":false},{"label":"データをベクターデータベースに挿入して検索する","href":"Inserting-Data-into-a-Vector-Database-for-Retrieval","type":2,"isActive":false},{"label":"Milvus Liteコレクションの作成","href":"Creating-Your-Milvus-Lite-Collection","type":2,"isActive":false},{"label":"RAGのためのLLMの設定","href":"Set-up-Your-LLM-for-RAG","type":2,"isActive":false},{"label":"LLMインストラクション","href":"LLM-Instructions","type":2,"isActive":false},{"label":"RAGの例","href":"A-RAG-Example","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ pip install -U pymilvus bentoml\n","import bentoml\n\nBENTO_EMBEDDING_MODEL_END_POINT = \"BENTO_EMBEDDING_MODEL_END_POINT\"\nBENTO_API_TOKEN = \"BENTO_API_TOKEN\"\n\nembedding_client = bentoml.SyncHTTPClient(\n    BENTO_EMBEDDING_MODEL_END_POINT, token=BENTO_API_TOKEN\n)\n","# naively chunk on newlines\ndef chunk_text(filename: str) -> list:\n    with open(filename, \"r\") as f:\n        text = f.read()\n    sentences = text.split(\"\\n\")\n    return sentences\n","import os\nimport requests\nimport urllib.request\n\n# set up the data source\nrepo = \"ytang07/bento_octo_milvus_RAG\"\ndirectory = \"data\"\nsave_dir = \"./city_data\"\napi_url = f\"https://api.github.com/repos/{repo}/contents/{directory}\"\n\n\nresponse = requests.get(api_url)\ndata = response.json()\n\nif not os.path.exists(save_dir):\n    os.makedirs(save_dir)\n\nfor item in data:\n    if item[\"type\"] == \"file\":\n        file_url = item[\"download_url\"]\n        file_path = os.path.join(save_dir, item[\"name\"])\n        urllib.request.urlretrieve(file_url, file_path)\n","# please upload your data directory under this file's folder\ncities = os.listdir(\"city_data\")\n# store chunked text for each of the cities in a list of dicts\ncity_chunks = []\nfor city in cities:\n    chunked = chunk_text(f\"city_data/{city}\")\n    cleaned = []\n    for chunk in chunked:\n        if len(chunk) > 7:\n            cleaned.append(chunk)\n    mapped = {\"city_name\": city.split(\".\")[0], \"chunks\": cleaned}\n    city_chunks.append(mapped)\n","def get_embeddings(texts: list) -> list:\n    if len(texts) > 25:\n        splits = [texts[x : x + 25] for x in range(0, len(texts), 25)]\n        embeddings = []\n        for split in splits:\n            embedding_split = embedding_client.encode(sentences=split)\n            embeddings += embedding_split\n        return embeddings\n    return embedding_client.encode(\n        sentences=texts,\n    )\n","entries = []\nfor city_dict in city_chunks:\n    # No need for the embeddings list if get_embeddings already returns a list of lists\n    embedding_list = get_embeddings(city_dict[\"chunks\"])  # returns a list of lists\n    # Now match texts with embeddings and city name\n    for i, embedding in enumerate(embedding_list):\n        entry = {\n            \"embedding\": embedding,\n            \"sentence\": city_dict[\"chunks\"][\n                i\n            ],  # Assume \"chunks\" has the corresponding texts for the embeddings\n            \"city\": city_dict[\"city_name\"],\n        }\n        entries.append(entry)\n    print(entries)\n","from pymilvus import MilvusClient\n\nCOLLECTION_NAME = \"Bento_Milvus_RAG\"  # random name for your collection\nDIMENSION = 384\n\n# Initialize a Milvus Lite client\nmilvus_client = MilvusClient(\"milvus_demo.db\")\n","from pymilvus import connections\n\nconnections.connect(uri=\"milvus_demo.db\")\n","from pymilvus import MilvusClient, DataType, Collection\n\n# Create schema\nschema = MilvusClient.create_schema(\n    auto_id=True,\n    enable_dynamic_field=True,\n)\n\n# 3.2. 
Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"embedding\", datatype=DataType.FLOAT_VECTOR, dim=DIMENSION)\n","# prepare index parameters\nindex_params = milvus_client.prepare_index_params()\n\n# add index\nindex_params.add_index(\n    field_name=\"embedding\",\n    index_type=\"AUTOINDEX\",  # use autoindex instead of other complex indexing method\n    metric_type=\"COSINE\",  # L2, COSINE, or IP\n)\n\n# create collection\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n    collection_name=COLLECTION_NAME, schema=schema, index_params=index_params\n)\n\n# Outside the loop, now you upsert all the entries at once\nmilvus_client.insert(collection_name=COLLECTION_NAME, data=entries)\n","BENTO_LLM_END_POINT = \"BENTO_LLM_END_POINT\"\n\nllm_client = bentoml.SyncHTTPClient(BENTO_LLM_END_POINT, token=BENTO_API_TOKEN)\n","def dorag(question: str, context: str):\n\n    prompt = (\n        f\"You are a helpful assistant. The user has a question. Answer the user question based only on the context: {context}. \\n\"\n        f\"The user question is {question}\"\n    )\n\n    results = llm_client.generate(\n        max_tokens=1024,\n        prompt=prompt,\n    )\n\n    res = \"\"\n    for result in results:\n        res += result\n\n    return res\n","question = \"What state is Cambridge in?\"\n\n\ndef ask_a_question(question):\n    embeddings = get_embeddings([question])\n    res = milvus_client.search(\n        collection_name=COLLECTION_NAME,\n        data=embeddings,  # search for the one (1) embedding returned as a list of lists\n        anns_field=\"embedding\",  # Search across embeddings\n        limit=5,  # get me the top 5 results\n        output_fields=[\"sentence\"],  # get the sentence/chunk and city\n    )\n\n    sentences = []\n    for hits in res:\n        for hit in hits:\n            print(hit)\n            sentences.append(hit[\"entity\"][\"sentence\"])\n    context = \". \".join(sentences)\n    return context\n\n\ncontext = ask_a_question(question=question)\nprint(context)\n","print(dorag(question=question, context=context))\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and BentoML","anchorList":[{"label":"MilvusとBentoMLによる検索支援型生成(RAG)","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-BentoML","type":1,"isActive":false},{"label":"はじめに","href":"Introduction","type":2,"isActive":false},{"label":"始める前に","href":"Before-you-begin","type":2,"isActive":false},{"label":"BentoML/BentoCloud でエンベッディングを提供する","href":"Serving-Embeddings-with-BentoMLBentoCloud","type":2,"isActive":false},{"label":"データをベクターデータベースに挿入して検索する","href":"Inserting-Data-into-a-Vector-Database-for-Retrieval","type":2,"isActive":false},{"label":"Milvus Liteコレクションの作成","href":"Creating-Your-Milvus-Lite-Collection","type":2,"isActive":false},{"label":"RAG用にLLMをセットアップする","href":"Set-up-Your-LLM-for-RAG","type":2,"isActive":false},{"label":"LLMの使い方","href":"LLM-Instructions","type":2,"isActive":false},{"label":"RAGの例","href":"A-RAG-Example","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/ja/integrations/integrate_with_bentoml.md b/localization/v2.4.x/site/ja/integrations/integrate_with_bentoml.md
    index a37fcbbef..c839816f7 100644
    --- a/localization/v2.4.x/site/ja/integrations/integrate_with_bentoml.md
    +++ b/localization/v2.4.x/site/ja/integrations/integrate_with_bentoml.md
    @@ -3,7 +3,7 @@ id: integrate_with_bentoml.md
     summary: >-
       このガイドでは、MilvusベクトルデータベースとBentoCloud上でオープンソースのエンベッディングモデルと大規模言語モデルを使用して、RAG(Retrieval
       Augmented Generation)アプリケーションを構築する方法を紹介します。
    -title: MilvusとBentoMLによる検索支援型生成(RAG)
    +title: MilvusとBentoMLによる検索支援型生成(RAG)
     ---
     

Retrieval-Augmented Generation (RAG) with Milvus and BentoML

Open In Colab | GitHub Repository

Introduction

This guide demonstrates how to build a RAG (Retrieval-Augmented Generation) application using open-source embedding models and large language models on BentoCloud together with the Milvus vector database. BentoCloud is an AI inference platform for fast-moving AI teams, offering fully managed infrastructure tailored for model inference. It works in conjunction with BentoML, an open-source model serving framework, to facilitate the easy creation and deployment of high-performance model services. In this demo, we use Milvus Lite as the vector database, the lightweight version of Milvus that can be embedded into your Python application.

Before you begin

Next, let's process each of the files we have.

    # please upload your data directory under this file's folder
     cities = os.listdir("city_data")
     # store chunked text for each of the cities in a list of dicts
    @@ -176,7 +177,7 @@ city_chunks = []
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

With our embeddings and data ready, we can insert the vectors together with metadata into Milvus Lite for vector search later. The first step in this section is to start a client by connecting to Milvus Lite. We import the MilvusClient module and initialize a Milvus Lite client that connects to your Milvus Lite vector database. The dimension size comes from the size of the embedding model; for example, the Sentence Transformer model all-MiniLM-L6-v2 produces vectors of 384 dimensions.

    from pymilvus import MilvusClient
     
     COLLECTION_NAME = "Bento_Milvus_RAG"  # random name for your collection
    @@ -213,7 +214,7 @@ connections.connect(uri=
           

Creating a collection with Milvus Lite involves two steps: defining the schema and defining the index. For this section we need one module: DataType tells us what type of data a field will hold, create_schema() creates the schema for the collection, and add_field() adds a field to the collection's schema.

    from pymilvus import MilvusClient, DataType, Collection
     
     # Create schema
    @@ -226,7 +227,7 @@ schema = MilvusClient.create_schema(
     schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
     schema.add_field(field_name="embedding", datatype=DataType.FLOAT_VECTOR, dim=DIMENSION)
     
Now that we have created the schema and successfully defined the data fields, we need to define the index. In terms of search, an "index" defines how we map our data out for retrieval. For this project we use the default AUTOINDEX to index the data.

Next, we create the collection with the previously given name, schema, and index. Finally, we insert the previously processed data.

    # prepare index parameters
     index_params = milvus_client.prepare_index_params()
    @@ -283,7 +284,7 @@ llm_client = bentoml.SyncHTTPClient(
           

Now we set up the LLM instructions with the prompt, context, and question. Below is the function that behaves as an LLM and returns the output from the client in string format.

    def dorag(question: str, context: str):
     
         prompt = (
    @@ -346,4 +347,4 @@ context = ask_a_question(question=question)
     

Implementing RAG

    print(dorag(question=question, context=context))
     
With our example question asking what state Cambridge is in, we can print the entire response from BentoML. However, if we take the time to parse it, it just looks nicer, and it should tell us that Cambridge is located in Massachusetts. A minimal way to tidy the raw output is sketched below.
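
A minimal sketch, assuming the dorag() helper and the question/context variables defined above:

# Minimal sketch: wrap the raw dorag() output for readability.
import textwrap

answer = dorag(question=question, context=context)
print(textwrap.fill(answer, width=80))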

    diff --git a/localization/v2.4.x/site/ja/integrations/integrate_with_camel.json b/localization/v2.4.x/site/ja/integrations/integrate_with_camel.json index f58566dfa..ff9b14683 100644 --- a/localization/v2.4.x/site/ja/integrations/integrate_with_camel.json +++ b/localization/v2.4.x/site/ja/integrations/integrate_with_camel.json @@ -1 +1 @@ -{"codeList":["$ pip install -U \"camel-ai[all]\" pymilvus\n","import os\nimport requests\n\nos.makedirs(\"local_data\", exist_ok=True)\n\nurl = \"https://arxiv.org/pdf/2303.17760.pdf\"\nresponse = requests.get(url)\nwith open(\"local_data/camel paper.pdf\", \"wb\") as file:\n file.write(response.content)\n","os.environ[\"OPENAI_API_KEY\"] = \"Your Key\"\n","from camel.embeddings import OpenAIEmbedding\n\nembedding_instance = OpenAIEmbedding()\n","from camel.storages import MilvusStorage\n\nstorage_instance = MilvusStorage(\n vector_dim=embedding_instance.get_output_dim(),\n url_and_api_key=(\n \"./milvus_demo.db\", # Your Milvus connection URI\n \"\", # Your Milvus token\n ),\n collection_name=\"camel_paper\",\n)\n","from camel.retrievers import VectorRetriever\n\nvector_retriever = VectorRetriever(\n embedding_model=embedding_instance, storage=storage_instance\n)\n","vector_retriever.process(content_input_path=\"local_data/camel paper.pdf\")\n","retrieved_info = vector_retriever.query(query=\"What is CAMEL?\", top_k=1)\nprint(retrieved_info)\n","retrieved_info_irrelevant = vector_retriever.query(\n query=\"Compared with dumpling and rice, which should I take for dinner?\", top_k=1\n)\n\nprint(retrieved_info_irrelevant)\n","from camel.retrievers import AutoRetriever\nfrom camel.types import StorageType\n\nauto_retriever = AutoRetriever(\n url_and_api_key=(\n \"./milvus_demo.db\", # Your Milvus connection URI\n \"\", # Your Milvus token\n ),\n storage_type=StorageType.MILVUS,\n embedding_model=embedding_instance,\n)\n\nretrieved_info = auto_retriever.run_vector_retriever(\n query=\"What is CAMEL-AI\",\n content_input_paths=[\n \"local_data/camel paper.pdf\", # example local path\n \"https://www.camel-ai.org/\", # example remote url\n ],\n top_k=1,\n return_detailed_info=True,\n)\n\nprint(retrieved_info)\n","from camel.agents import ChatAgent\nfrom camel.messages import BaseMessage\nfrom camel.types import RoleType\nfrom camel.retrievers import AutoRetriever\nfrom camel.types import StorageType\n\n\ndef single_agent(query: str) -> str:\n # Set agent role\n assistant_sys_msg = BaseMessage(\n role_name=\"Assistant\",\n role_type=RoleType.ASSISTANT,\n meta_dict=None,\n content=\"\"\"You are a helpful assistant to answer question,\n I will give you the Original Query and Retrieved Context,\n answer the Original Query based on the Retrieved Context,\n if you can't answer the question just say I don't know.\"\"\",\n )\n\n # Add auto retriever\n auto_retriever = AutoRetriever(\n url_and_api_key=(\n \"./milvus_demo.db\", # Your Milvus connection URI\n \"\", # Your Milvus token\n ),\n storage_type=StorageType.MILVUS,\n embedding_model=embedding_instance,\n )\n\n retrieved_info = auto_retriever.run_vector_retriever(\n query=query,\n content_input_paths=[\n \"local_data/camel paper.pdf\", # example local path\n \"https://www.camel-ai.org/\", # example remote url\n ],\n # vector_storage_local_path=\"storage_default_run\",\n top_k=1,\n return_detailed_info=True,\n )\n\n # Pass the retrieved infomation to agent\n user_msg = BaseMessage.make_user_message(role_name=\"User\", content=retrieved_info)\n agent = ChatAgent(assistant_sys_msg)\n\n # Get response\n 
assistant_response = agent.step(user_msg)\n return assistant_response.msg.content\n\n\nprint(single_agent(\"What is CAMEL-AI\"))\n","from typing import List\nfrom colorama import Fore\n\nfrom camel.agents.chat_agent import FunctionCallingRecord\nfrom camel.configs import ChatGPTConfig\nfrom camel.functions import (\n MATH_FUNCS,\n RETRIEVAL_FUNCS,\n)\nfrom camel.societies import RolePlaying\nfrom camel.types import ModelType\nfrom camel.utils import print_text_animated\n\n\ndef role_playing_with_rag(\n task_prompt, model_type=ModelType.GPT_4O, chat_turn_limit=10\n) -> None:\n task_prompt = task_prompt\n\n user_model_config = ChatGPTConfig(temperature=0.0)\n\n function_list = [\n *MATH_FUNCS,\n *RETRIEVAL_FUNCS,\n ]\n assistant_model_config = ChatGPTConfig(\n tools=function_list,\n temperature=0.0,\n )\n\n role_play_session = RolePlaying(\n assistant_role_name=\"Searcher\",\n user_role_name=\"Professor\",\n assistant_agent_kwargs=dict(\n model_type=model_type,\n model_config=assistant_model_config,\n tools=function_list,\n ),\n user_agent_kwargs=dict(\n model_type=model_type,\n model_config=user_model_config,\n ),\n task_prompt=task_prompt,\n with_task_specify=False,\n )\n\n print(\n Fore.GREEN\n + f\"AI Assistant sys message:\\n{role_play_session.assistant_sys_msg}\\n\"\n )\n print(Fore.BLUE + f\"AI User sys message:\\n{role_play_session.user_sys_msg}\\n\")\n\n print(Fore.YELLOW + f\"Original task prompt:\\n{task_prompt}\\n\")\n print(\n Fore.CYAN\n + f\"Specified task prompt:\\n{role_play_session.specified_task_prompt}\\n\"\n )\n print(Fore.RED + f\"Final task prompt:\\n{role_play_session.task_prompt}\\n\")\n\n n = 0\n input_msg = role_play_session.init_chat()\n while n < chat_turn_limit:\n n += 1\n assistant_response, user_response = role_play_session.step(input_msg)\n\n if assistant_response.terminated:\n print(\n Fore.GREEN\n + (\n \"AI Assistant terminated. Reason: \"\n f\"{assistant_response.info['termination_reasons']}.\"\n )\n )\n break\n if user_response.terminated:\n print(\n Fore.GREEN\n + (\n \"AI User terminated. \"\n f\"Reason: {user_response.info['termination_reasons']}.\"\n )\n )\n break\n\n # Print output from the user\n print_text_animated(Fore.BLUE + f\"AI User:\\n\\n{user_response.msg.content}\\n\")\n\n # Print output from the assistant, including any function\n # execution information\n print_text_animated(Fore.GREEN + \"AI Assistant:\")\n tool_calls: List[FunctionCallingRecord] = assistant_response.info[\"tool_calls\"]\n for func_record in tool_calls:\n print_text_animated(f\"{func_record}\")\n print_text_animated(f\"{assistant_response.msg.content}\\n\")\n\n if \"CAMEL_TASK_DONE\" in user_response.msg.content:\n break\n\n input_msg = assistant_response.msg\n","role_playing_with_rag(\n task_prompt=\"\"\"What is the main termination reasons for AI Society\n dataset, how many number of messages did camel decided to\n limit, what's the value plus 100? 
You should refer to the\n content in path camel/local_data/camel paper.pdf\"\"\"\n)\n"],"headingContent":"","anchorList":[{"label":"Milvusとキャメルによる検索機能付きジェネレーション(RAG)","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Camel","type":1,"isActive":false},{"label":"負荷データ","href":"Load-Data","type":2,"isActive":false},{"label":"1.カスタマイズされたRAG","href":"1-Customized-RAG","type":2,"isActive":false},{"label":"2.自動RAG","href":"2-Auto-RAG","type":2,"isActive":false},{"label":"3.自動RAG付きシングルエージェント","href":"3-Single-Agent-with-Auto-RAG","type":2,"isActive":false},{"label":"4.オートRAGによるロールプレイング","href":"4-Role-playing-with-Auto-RAG","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install -U \"camel-ai[all]\" pymilvus\n","import os\nimport requests\n\nos.makedirs(\"local_data\", exist_ok=True)\n\nurl = \"https://arxiv.org/pdf/2303.17760.pdf\"\nresponse = requests.get(url)\nwith open(\"local_data/camel paper.pdf\", \"wb\") as file:\n file.write(response.content)\n","os.environ[\"OPENAI_API_KEY\"] = \"Your Key\"\n","from camel.embeddings import OpenAIEmbedding\n\nembedding_instance = OpenAIEmbedding()\n","from camel.storages import MilvusStorage\n\nstorage_instance = MilvusStorage(\n vector_dim=embedding_instance.get_output_dim(),\n url_and_api_key=(\n \"./milvus_demo.db\", # Your Milvus connection URI\n \"\", # Your Milvus token\n ),\n collection_name=\"camel_paper\",\n)\n","from camel.retrievers import VectorRetriever\n\nvector_retriever = VectorRetriever(\n embedding_model=embedding_instance, storage=storage_instance\n)\n","vector_retriever.process(content_input_path=\"local_data/camel paper.pdf\")\n","retrieved_info = vector_retriever.query(query=\"What is CAMEL?\", top_k=1)\nprint(retrieved_info)\n","retrieved_info_irrelevant = vector_retriever.query(\n query=\"Compared with dumpling and rice, which should I take for dinner?\", top_k=1\n)\n\nprint(retrieved_info_irrelevant)\n","from camel.retrievers import AutoRetriever\nfrom camel.types import StorageType\n\nauto_retriever = AutoRetriever(\n url_and_api_key=(\n \"./milvus_demo.db\", # Your Milvus connection URI\n \"\", # Your Milvus token\n ),\n storage_type=StorageType.MILVUS,\n embedding_model=embedding_instance,\n)\n\nretrieved_info = auto_retriever.run_vector_retriever(\n query=\"What is CAMEL-AI\",\n content_input_paths=[\n \"local_data/camel paper.pdf\", # example local path\n \"https://www.camel-ai.org/\", # example remote url\n ],\n top_k=1,\n return_detailed_info=True,\n)\n\nprint(retrieved_info)\n","from camel.agents import ChatAgent\nfrom camel.messages import BaseMessage\nfrom camel.types import RoleType\nfrom camel.retrievers import AutoRetriever\nfrom camel.types import StorageType\n\n\ndef single_agent(query: str) -> str:\n # Set agent role\n assistant_sys_msg = BaseMessage(\n role_name=\"Assistant\",\n role_type=RoleType.ASSISTANT,\n meta_dict=None,\n content=\"\"\"You are a helpful assistant to answer question,\n I will give you the Original Query and Retrieved Context,\n answer the Original Query based on the Retrieved Context,\n if you can't answer the question just say I don't know.\"\"\",\n )\n\n # Add auto retriever\n auto_retriever = AutoRetriever(\n url_and_api_key=(\n \"./milvus_demo.db\", # Your Milvus connection URI\n \"\", # Your Milvus token\n ),\n storage_type=StorageType.MILVUS,\n embedding_model=embedding_instance,\n )\n\n retrieved_info = auto_retriever.run_vector_retriever(\n query=query,\n content_input_paths=[\n \"local_data/camel paper.pdf\", # example local path\n 
\"https://www.camel-ai.org/\", # example remote url\n ],\n # vector_storage_local_path=\"storage_default_run\",\n top_k=1,\n return_detailed_info=True,\n )\n\n # Pass the retrieved infomation to agent\n user_msg = BaseMessage.make_user_message(role_name=\"User\", content=retrieved_info)\n agent = ChatAgent(assistant_sys_msg)\n\n # Get response\n assistant_response = agent.step(user_msg)\n return assistant_response.msg.content\n\n\nprint(single_agent(\"What is CAMEL-AI\"))\n","from typing import List\nfrom colorama import Fore\n\nfrom camel.agents.chat_agent import FunctionCallingRecord\nfrom camel.configs import ChatGPTConfig\nfrom camel.functions import (\n MATH_FUNCS,\n RETRIEVAL_FUNCS,\n)\nfrom camel.societies import RolePlaying\nfrom camel.types import ModelType\nfrom camel.utils import print_text_animated\n\n\ndef role_playing_with_rag(\n task_prompt, model_type=ModelType.GPT_4O, chat_turn_limit=10\n) -> None:\n task_prompt = task_prompt\n\n user_model_config = ChatGPTConfig(temperature=0.0)\n\n function_list = [\n *MATH_FUNCS,\n *RETRIEVAL_FUNCS,\n ]\n assistant_model_config = ChatGPTConfig(\n tools=function_list,\n temperature=0.0,\n )\n\n role_play_session = RolePlaying(\n assistant_role_name=\"Searcher\",\n user_role_name=\"Professor\",\n assistant_agent_kwargs=dict(\n model_type=model_type,\n model_config=assistant_model_config,\n tools=function_list,\n ),\n user_agent_kwargs=dict(\n model_type=model_type,\n model_config=user_model_config,\n ),\n task_prompt=task_prompt,\n with_task_specify=False,\n )\n\n print(\n Fore.GREEN\n + f\"AI Assistant sys message:\\n{role_play_session.assistant_sys_msg}\\n\"\n )\n print(Fore.BLUE + f\"AI User sys message:\\n{role_play_session.user_sys_msg}\\n\")\n\n print(Fore.YELLOW + f\"Original task prompt:\\n{task_prompt}\\n\")\n print(\n Fore.CYAN\n + f\"Specified task prompt:\\n{role_play_session.specified_task_prompt}\\n\"\n )\n print(Fore.RED + f\"Final task prompt:\\n{role_play_session.task_prompt}\\n\")\n\n n = 0\n input_msg = role_play_session.init_chat()\n while n < chat_turn_limit:\n n += 1\n assistant_response, user_response = role_play_session.step(input_msg)\n\n if assistant_response.terminated:\n print(\n Fore.GREEN\n + (\n \"AI Assistant terminated. Reason: \"\n f\"{assistant_response.info['termination_reasons']}.\"\n )\n )\n break\n if user_response.terminated:\n print(\n Fore.GREEN\n + (\n \"AI User terminated. \"\n f\"Reason: {user_response.info['termination_reasons']}.\"\n )\n )\n break\n\n # Print output from the user\n print_text_animated(Fore.BLUE + f\"AI User:\\n\\n{user_response.msg.content}\\n\")\n\n # Print output from the assistant, including any function\n # execution information\n print_text_animated(Fore.GREEN + \"AI Assistant:\")\n tool_calls: List[FunctionCallingRecord] = assistant_response.info[\"tool_calls\"]\n for func_record in tool_calls:\n print_text_animated(f\"{func_record}\")\n print_text_animated(f\"{assistant_response.msg.content}\\n\")\n\n if \"CAMEL_TASK_DONE\" in user_response.msg.content:\n break\n\n input_msg = assistant_response.msg\n","role_playing_with_rag(\n task_prompt=\"\"\"What is the main termination reasons for AI Society\n dataset, how many number of messages did camel decided to\n limit, what's the value plus 100? 
You should refer to the\n content in path camel/local_data/camel paper.pdf\"\"\"\n)\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and Camel","anchorList":[{"label":"MilvusとCamelを使用した検索拡張ジェネレーション(RAG)","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Camel","type":1,"isActive":false},{"label":"データのロード","href":"Load-Data","type":2,"isActive":false},{"label":"1.カスタマイズされたRAG","href":"1-Customized-RAG","type":2,"isActive":false},{"label":"2.自動RAG","href":"2-Auto-RAG","type":2,"isActive":false},{"label":"3.自動RAGによるシングルエージェント","href":"3-Single-Agent-with-Auto-RAG","type":2,"isActive":false},{"label":"4.オート RAG によるロールプレイング","href":"4-Role-playing-with-Auto-RAG","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/integrations/integrate_with_camel.md b/localization/v2.4.x/site/ja/integrations/integrate_with_camel.md index 31c3f7b68..6fdb66be9 100644 --- a/localization/v2.4.x/site/ja/integrations/integrate_with_camel.md +++ b/localization/v2.4.x/site/ja/integrations/integrate_with_camel.md @@ -3,7 +3,7 @@ id: integrate_with_camel.md summary: >- このガイドでは、MilvusベクトルデータベースとBentoCloud上でオープンソースのエンベッディングモデルと大規模言語モデルを使用して、RAG(Retrieval Augmented Generation)アプリケーションを構築する方法を紹介します。 -title: MilvusとBentoMLによる検索支援型生成(RAG) +title: MilvusとBentoMLによる検索補強型生成(RAG) ---

Retrieval-Augmented Generation (RAG) with Milvus and CAMEL

Open In Colab | GitHub Repository

This guide demonstrates how to build a Retrieval-Augmented Generation (RAG) system using CAMEL and Milvus.

A RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

CAMEL is a multi-agent framework. Milvus is the world's most advanced open-source vector database, built for embedding similarity search and AI applications.

In this notebook, we show the usage of the CAMEL Retrieve Module in both customized and auto ways. We also show how to combine AutoRetriever with ChatAgent, and further combine AutoRetriever with RolePlaying by using Function Calling.

It includes four main parts:

• Customized RAG
• Auto RAG
• Single Agent with Auto RAG
• Role-playing with Auto RAG

@@ -102,7 +103,7 @@ storage_instance = MilvusStorage(

      url_and_api_key

• It is most convenient to use a local file, e.g. ./milvus.db, as the Milvus connection URI.
• If you have a large scale of data, you can set up a more performant Milvus server on Docker or Kubernetes. In this setup, use the server URI, e.g. http://localhost:19530, as your URL.
• If you want to use Zilliz Cloud, the fully managed cloud service for Milvus, adjust the connection URI and token so that they correspond to the Public Endpoint and API key of your Zilliz Cloud instance.
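To make the three options concrete, here is a minimal sketch of how each one maps onto the MilvusStorage constructor used in this guide. The server address, cloud endpoint, and API key below are placeholders, not values from the original document, and the make_storage helper exists only for illustration:

```python
from camel.embeddings import OpenAIEmbedding
from camel.storages import MilvusStorage

embedding_instance = OpenAIEmbedding()


# Illustrative helper: url_and_api_key takes a (connection URI, token) pair.
def make_storage(uri: str, token: str = "") -> MilvusStorage:
    return MilvusStorage(
        vector_dim=embedding_instance.get_output_dim(),
        url_and_api_key=(uri, token),
        collection_name="camel_paper",
    )


storage_lite = make_storage("./milvus_demo.db")  # Milvus Lite: a local file
storage_server = make_storage("http://localhost:19530")  # self-hosted server (placeholder address)
storage_cloud = make_storage("<PUBLIC_ENDPOINT>", "<API_KEY>")  # Zilliz Cloud (placeholders)
```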
      diff --git a/localization/v2.4.x/site/ja/integrations/integrate_with_dspy.json b/localization/v2.4.x/site/ja/integrations/integrate_with_dspy.json index 80fe2d0c7..6add84cad 100644 --- a/localization/v2.4.x/site/ja/integrations/integrate_with_dspy.json +++ b/localization/v2.4.x/site/ja/integrations/integrate_with_dspy.json @@ -1,43 +1 @@ -{ - "codeList": [ - "$ pip install \"dspy-ai[milvus]\"\n$ pip install -U pymilvus\n", - "from dspy.datasets import HotPotQA\n\n# Load the dataset.\ndataset = HotPotQA(\n train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0\n)\n\n# Tell DSPy that the 'question' field is the input. Any other fields are labels and/or metadata.\ntrainset = [x.with_inputs(\"question\") for x in dataset.train]\ndevset = [x.with_inputs(\"question\") for x in dataset.dev]\n", - "import requests\nimport os\n\nos.environ[\"OPENAI_API_KEY\"] = \"\"\nMILVUS_URI = \"example.db\"\nMILVUS_TOKEN = \"\"\n\nfrom pymilvus import MilvusClient, DataType, Collection\nfrom dspy.retrieve.milvus_rm import openai_embedding_function\n\nclient = MilvusClient(uri=MILVUS_URI, token=MILVUS_TOKEN)\n\nif \"dspy_example\" not in client.list_collections():\n client.create_collection(\n collection_name=\"dspy_example\",\n overwrite=True,\n dimension=1536,\n primary_field_name=\"id\",\n vector_field_name=\"embedding\",\n id_type=\"int\",\n metric_type=\"IP\",\n max_length=65535,\n enable_dynamic=True,\n )\ntext = requests.get(\n \"https://raw.githubusercontent.com/wxywb/dspy_dataset_sample/master/sample_data.txt\"\n).text\n\nfor idx, passage in enumerate(text.split(\"\\n\")):\n if len(passage) == 0:\n continue\n client.insert(\n collection_name=\"dspy_example\",\n data=[\n {\n \"id\": idx,\n \"embedding\": openai_embedding_function(passage)[0],\n \"text\": passage,\n }\n ],\n )\n", - "from dspy.retrieve.milvus_rm import MilvusRM\nimport dspy\n\nretriever_model = MilvusRM(\n collection_name=\"dspy_example\",\n uri=MILVUS_URI,\n token=MILVUS_TOKEN, # ignore this if no token is required for Milvus connection\n embedding_function=openai_embedding_function,\n)\nturbo = dspy.OpenAI(model=\"gpt-3.5-turbo\")\ndspy.settings.configure(lm=turbo)\n", - "class GenerateAnswer(dspy.Signature):\n \"\"\"Answer questions with short factoid answers.\"\"\"\n\n context = dspy.InputField(desc=\"may contain relevant facts\")\n question = dspy.InputField()\n answer = dspy.OutputField(desc=\"often between 1 and 5 words\")\n", - "class RAG(dspy.Module):\n def __init__(self, rm):\n super().__init__()\n self.retrieve = rm\n\n # This signature indicates the task imposed on the COT module.\n self.generate_answer = dspy.ChainOfThought(GenerateAnswer)\n\n def forward(self, question):\n # Use milvus_rm to retrieve context for the question.\n context = self.retrieve(question).passages\n # COT module takes \"context, query\" and output \"answer\".\n prediction = self.generate_answer(context=context, question=question)\n return dspy.Prediction(\n context=[item.long_text for item in context], answer=prediction.answer\n )\n", - "rag = RAG(retriever_model)\nprint(rag(\"who write At My Window\").answer)\n", - "from dspy.evaluate.evaluate import Evaluate\nfrom dspy.datasets import HotPotQA\n\nevaluate_on_hotpotqa = Evaluate(\n devset=devset, num_threads=1, display_progress=False, display_table=5\n)\n\nmetric = dspy.evaluate.answer_exact_match\nscore = evaluate_on_hotpotqa(rag, metric=metric)\nprint(\"rag:\", score)\n", - "from dspy.teleprompt import BootstrapFewShot\n\n# Validation logic: check that the predicted answer is 
correct.# Also check that the retrieved context does contain that answer.\n\n\ndef validate_context_and_answer(example, pred, trace=None):\n answer_EM = dspy.evaluate.answer_exact_match(example, pred)\n answer_PM = dspy.evaluate.answer_passage_match(example, pred)\n return answer_EM and answer_PM\n\n\n# Set up a basic teleprompter, which will compile our RAG program.\nteleprompter = BootstrapFewShot(metric=validate_context_and_answer)\n\n# Compile!\ncompiled_rag = teleprompter.compile(rag, trainset=trainset)\n\n# Now compiled_rag is optimized and ready to answer your new question!\n# Now, let’s evaluate the compiled RAG program.\nscore = evaluate_on_hotpotqa(compiled_rag, metric=metric)\nprint(score)\nprint(\"compile_rag:\", score)\n" - ], - "headingContent": "", - "anchorList": [ - { - "label": "MilvusとDSPyの統合", - "href": "Integrate-Milvus-with-DSPy", - "type": 1, - "isActive": false - }, - { - "label": "DSPyとは", - "href": "What-is-DSPy", - "type": 2, - "isActive": false - }, - { - "label": "DSPyを使うメリット", - "href": "Benefits-of-using-DSPy", - "type": 2, - "isActive": false - }, - { "label": "モジュール", "href": "Modules", "type": 2, "isActive": false }, - { - "label": "なぜDSPyでMilvusなのか", - "href": "Why-Milvus-in-DSPy", - "type": 2, - "isActive": false - }, - { "label": "例", "href": "Examples", "type": 2, "isActive": false }, - { "label": "概要", "href": "Summary", "type": 2, "isActive": false } - ] -} +{"codeList":["$ pip install \"dspy-ai[milvus]\"\n$ pip install -U pymilvus\n","from dspy.datasets import HotPotQA\n\n# Load the dataset.\ndataset = HotPotQA(\n train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0\n)\n\n# Tell DSPy that the 'question' field is the input. Any other fields are labels and/or metadata.\ntrainset = [x.with_inputs(\"question\") for x in dataset.train]\ndevset = [x.with_inputs(\"question\") for x in dataset.dev]\n","import requests\nimport os\n\nos.environ[\"OPENAI_API_KEY\"] = \"\"\nMILVUS_URI = \"example.db\"\nMILVUS_TOKEN = \"\"\n\nfrom pymilvus import MilvusClient, DataType, Collection\nfrom dspy.retrieve.milvus_rm import openai_embedding_function\n\nclient = MilvusClient(uri=MILVUS_URI, token=MILVUS_TOKEN)\n\nif \"dspy_example\" not in client.list_collections():\n client.create_collection(\n collection_name=\"dspy_example\",\n overwrite=True,\n dimension=1536,\n primary_field_name=\"id\",\n vector_field_name=\"embedding\",\n id_type=\"int\",\n metric_type=\"IP\",\n max_length=65535,\n enable_dynamic=True,\n )\ntext = requests.get(\n \"https://raw.githubusercontent.com/wxywb/dspy_dataset_sample/master/sample_data.txt\"\n).text\n\nfor idx, passage in enumerate(text.split(\"\\n\")):\n if len(passage) == 0:\n continue\n client.insert(\n collection_name=\"dspy_example\",\n data=[\n {\n \"id\": idx,\n \"embedding\": openai_embedding_function(passage)[0],\n \"text\": passage,\n }\n ],\n )\n","from dspy.retrieve.milvus_rm import MilvusRM\nimport dspy\n\nretriever_model = MilvusRM(\n collection_name=\"dspy_example\",\n uri=MILVUS_URI,\n token=MILVUS_TOKEN, # ignore this if no token is required for Milvus connection\n embedding_function=openai_embedding_function,\n)\nturbo = dspy.OpenAI(model=\"gpt-3.5-turbo\")\ndspy.settings.configure(lm=turbo)\n","class GenerateAnswer(dspy.Signature):\n \"\"\"Answer questions with short factoid answers.\"\"\"\n\n context = dspy.InputField(desc=\"may contain relevant facts\")\n question = dspy.InputField()\n answer = dspy.OutputField(desc=\"often between 1 and 5 words\")\n","class RAG(dspy.Module):\n def __init__(self, 
rm):\n super().__init__()\n self.retrieve = rm\n\n # This signature indicates the task imposed on the COT module.\n self.generate_answer = dspy.ChainOfThought(GenerateAnswer)\n\n def forward(self, question):\n # Use milvus_rm to retrieve context for the question.\n context = self.retrieve(question).passages\n # COT module takes \"context, query\" and output \"answer\".\n prediction = self.generate_answer(context=context, question=question)\n return dspy.Prediction(\n context=[item.long_text for item in context], answer=prediction.answer\n )\n","rag = RAG(retriever_model)\nprint(rag(\"who write At My Window\").answer)\n","from dspy.evaluate.evaluate import Evaluate\nfrom dspy.datasets import HotPotQA\n\nevaluate_on_hotpotqa = Evaluate(\n devset=devset, num_threads=1, display_progress=False, display_table=5\n)\n\nmetric = dspy.evaluate.answer_exact_match\nscore = evaluate_on_hotpotqa(rag, metric=metric)\nprint(\"rag:\", score)\n","from dspy.teleprompt import BootstrapFewShot\n\n# Validation logic: check that the predicted answer is correct.# Also check that the retrieved context does contain that answer.\n\n\ndef validate_context_and_answer(example, pred, trace=None):\n answer_EM = dspy.evaluate.answer_exact_match(example, pred)\n answer_PM = dspy.evaluate.answer_passage_match(example, pred)\n return answer_EM and answer_PM\n\n\n# Set up a basic teleprompter, which will compile our RAG program.\nteleprompter = BootstrapFewShot(metric=validate_context_and_answer)\n\n# Compile!\ncompiled_rag = teleprompter.compile(rag, trainset=trainset)\n\n# Now compiled_rag is optimized and ready to answer your new question!\n# Now, let’s evaluate the compiled RAG program.\nscore = evaluate_on_hotpotqa(compiled_rag, metric=metric)\nprint(score)\nprint(\"compile_rag:\", score)\n"],"headingContent":"Integrate Milvus with DSPy","anchorList":[{"label":"MilvusとDSPyの連携","href":"Integrate-Milvus-with-DSPy","type":1,"isActive":false},{"label":"DSPyとは","href":"What-is-DSPy","type":2,"isActive":false},{"label":"DSPyを使用するメリット","href":"Benefits-of-using-DSPy","type":2,"isActive":false},{"label":"モジュール","href":"Modules","type":2,"isActive":false},{"label":"DSPyでmilvusを使う理由","href":"Why-Milvus-in-DSPy","type":2,"isActive":false},{"label":"例","href":"Examples","type":2,"isActive":false},{"label":"まとめ","href":"Summary","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/integrations/integrate_with_dspy.md b/localization/v2.4.x/site/ja/integrations/integrate_with_dspy.md index c842ce6af..d04648cb1 100644 --- a/localization/v2.4.x/site/ja/integrations/integrate_with_dspy.md +++ b/localization/v2.4.x/site/ja/integrations/integrate_with_dspy.md @@ -1,7 +1,7 @@ --- id: integrate_with_dspy.md -summary: このガイドでは、DSPyのレトリーバーモジュールの1つであるMilvusRMを使用して、RAGプログラムを最適化する方法を説明します。 -title: MilvusとDSPyの統合 +summary: このガイドでは、DSPyのレトリーバーモジュールの1つであるMilvusRMを使用してRAGプログラムを最適化する方法を説明します。 +title: MilvusとDSPyの連携 ---

Integrate Milvus with DSPy

Open In Colab | GitHub Repository

What is DSPy

• Programming approach: DSPy provides a systematic programming approach for LM pipeline development by abstracting pipelines as text-transformation graphs instead of just prompting the LLMs. Its declarative modules enable structured design and optimization, replacing the trial-and-error of traditional prompt-template methods.
• Performance improvement: DSPy has demonstrated significant performance gains over existing methods. Through case studies, it outperforms standard prompting and expert-created demonstrations, showing its versatility and effectiveness even when compiled down to smaller LM models.
• Modularized abstraction: DSPy effectively abstracts away the intricate aspects of LM pipeline development, such as decomposition, fine-tuning, and model selection. With DSPy, a concise program can be seamlessly translated into instructions for various models such as GPT-4, Llama2-13b, or T5-base, streamlining development and improving performance.
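To make the notion of a declarative module concrete, here is the signature this guide defines later for its RAG example (reproduced from the code list in this file); DSPy compiles prompts from declarations like this instead of hand-written templates:

```python
import dspy


class GenerateAnswer(dspy.Signature):
    """Answer questions with short factoid answers."""

    context = dspy.InputField(desc="may contain relevant facts")
    question = dspy.InputField()
    answer = dspy.OutputField(desc="often between 1 and 5 words")
```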

Modules

Now let's walk through a simple example of how to leverage Milvus in DSPy to optimize a RAG application.

Prerequisites

Before building the RAG app, install DSPy and PyMilvus.

      $ pip install "dspy-ai[milvus]"
       $ pip install -U pymilvus
       
If you are using Google Colab, you may need to restart the runtime to enable the dependencies you just installed (click the "Runtime" menu at the top of the screen, and select "Restart session" from the dropdown menu).

Loading the dataset

In this example, we use HotPotQA, a collection of complex question-answer pairs, as our training dataset. It can be loaded through the HotPotQA class.

      from dspy.datasets import HotPotQA
       
       # Load the dataset.
      @@ -126,7 +127,7 @@ dataset = HotPotQA(
       trainset = [x.with_inputs("question") for x in dataset.train]
       devset = [x.with_inputs("question") for x in dataset.dev]
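
For reference, the complete loading step with the constructor parameters this guide uses:

```python
from dspy.datasets import HotPotQA

# Load the dataset: 20 training and 50 dev question-answer pairs, fixed by seed.
dataset = HotPotQA(
    train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0
)

# Tell DSPy that the 'question' field is the input; other fields are labels and/or metadata.
trainset = [x.with_inputs("question") for x in dataset.train]
devset = [x.with_inputs("question") for x in dataset.dev]
```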
       
Ingest the data into the Milvus vector database

Ingest the context information into the Milvus collection for vector search. The collection should have an embedding field and a text field. In this case, we use OpenAI's text-embedding-3-small model as the default query embedding function.

      import requests
       import os
       
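Only the first imports are visible above; the complete collection setup and ingestion used in this guide follows (the API key value is a placeholder, and the 1536-dimension setting matches text-embedding-3-small):

```python
import requests
import os

from pymilvus import MilvusClient
from dspy.retrieve.milvus_rm import openai_embedding_function

os.environ["OPENAI_API_KEY"] = "<YOUR_OPENAI_API_KEY>"  # placeholder
MILVUS_URI = "example.db"
MILVUS_TOKEN = ""

client = MilvusClient(uri=MILVUS_URI, token=MILVUS_TOKEN)

if "dspy_example" not in client.list_collections():
    client.create_collection(
        collection_name="dspy_example",
        overwrite=True,
        dimension=1536,  # embedding size of text-embedding-3-small
        primary_field_name="id",
        vector_field_name="embedding",
        id_type="int",
        metric_type="IP",  # inner-product similarity
        max_length=65535,
        enable_dynamic=True,
    )

# Download sample passages and insert them one by one.
text = requests.get(
    "https://raw.githubusercontent.com/wxywb/dspy_dataset_sample/master/sample_data.txt"
).text

for idx, passage in enumerate(text.split("\n")):
    if len(passage) == 0:
        continue
    client.insert(
        collection_name="dspy_example",
        data=[
            {
                "id": idx,
                "embedding": openai_embedding_function(passage)[0],
                "text": passage,
            }
        ],
    )
```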
      @@ -272,4 +273,4 @@ score = evaluate_on_hotpotqa(compiled_rag, metric=metric)

DSPy marks a leap forward in language model interactions through its programmable interface, which enables algorithmic and automated optimization of model prompts and weights. Leveraging DSPy for RAG implementations makes it easy to adapt to different language models and datasets, greatly reducing the need for tedious manual intervention.
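As a reminder of what that automated optimization looks like in code, this is the compile step from the example above, assuming the rag program and trainset defined earlier in the guide; BootstrapFewShot bootstraps few-shot demonstrations against a validation metric:

```python
import dspy
from dspy.teleprompt import BootstrapFewShot


# Validation logic: the predicted answer must match exactly,
# and the retrieved context must actually contain that answer.
def validate_context_and_answer(example, pred, trace=None):
    answer_EM = dspy.evaluate.answer_exact_match(example, pred)
    answer_PM = dspy.evaluate.answer_passage_match(example, pred)
    return answer_EM and answer_PM


# Set up a basic teleprompter and compile the RAG program on the training set.
teleprompter = BootstrapFewShot(metric=validate_context_and_answer)
compiled_rag = teleprompter.compile(rag, trainset=trainset)
```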

      diff --git a/localization/v2.4.x/site/ja/integrations/integrate_with_haystack.json b/localization/v2.4.x/site/ja/integrations/integrate_with_haystack.json index 570d75fb2..2b9900d4d 100644 --- a/localization/v2.4.x/site/ja/integrations/integrate_with_haystack.json +++ b/localization/v2.4.x/site/ja/integrations/integrate_with_haystack.json @@ -1 +1 @@ -{"codeList":["! pip install --upgrade --quiet pymilvus milvus-haystack markdown-it-py mdit_plain\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","import os\nimport urllib.request\n\nurl = \"https://www.gutenberg.org/cache/epub/7785/pg7785.txt\"\nfile_path = \"./davinci.txt\"\n\nif not os.path.exists(file_path):\n urllib.request.urlretrieve(url, file_path)\n","from haystack import Pipeline\nfrom haystack.components.converters import MarkdownToDocument\nfrom haystack.components.embedders import OpenAIDocumentEmbedder, OpenAITextEmbedder\nfrom haystack.components.preprocessors import DocumentSplitter\nfrom haystack.components.writers import DocumentWriter\nfrom haystack.utils import Secret\n\nfrom milvus_haystack import MilvusDocumentStore\nfrom milvus_haystack.milvus_embedding_retriever import MilvusEmbeddingRetriever\n\n\ndocument_store = MilvusDocumentStore(\n connection_args={\"uri\": \"./milvus.db\"},\n # connection_args={\"uri\": \"http://localhost:19530\"},\n # connection_args={\"uri\": YOUR_ZILLIZ_CLOUD_URI, \"token\": Secret.from_env_var(\"ZILLIZ_CLOUD_API_KEY\")},\n drop_old=True,\n)\n","indexing_pipeline = Pipeline()\nindexing_pipeline.add_component(\"converter\", MarkdownToDocument())\nindexing_pipeline.add_component(\n \"splitter\", DocumentSplitter(split_by=\"sentence\", split_length=2)\n)\nindexing_pipeline.add_component(\"embedder\", OpenAIDocumentEmbedder())\nindexing_pipeline.add_component(\"writer\", DocumentWriter(document_store))\nindexing_pipeline.connect(\"converter\", \"splitter\")\nindexing_pipeline.connect(\"splitter\", \"embedder\")\nindexing_pipeline.connect(\"embedder\", \"writer\")\nindexing_pipeline.run({\"converter\": {\"sources\": [file_path]}})\n\nprint(\"Number of documents:\", document_store.count_documents())\n","question = 'Where is the painting \"Warrior\" currently stored?'\n\nretrieval_pipeline = Pipeline()\nretrieval_pipeline.add_component(\"embedder\", OpenAITextEmbedder())\nretrieval_pipeline.add_component(\n \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nretrieval_pipeline.connect(\"embedder\", \"retriever\")\n\nretrieval_results = retrieval_pipeline.run({\"embedder\": {\"text\": question}})\n\nfor doc in retrieval_results[\"retriever\"][\"documents\"]:\n print(doc.content)\n print(\"-\" * 10)\n","from haystack.utils import Secret\nfrom haystack.components.builders import PromptBuilder\nfrom haystack.components.generators import OpenAIGenerator\n\nprompt_template = \"\"\"Answer the following query based on the provided context. 
If the context does\n not include an answer, reply with 'I don't know'.\\n\n Query: {{query}}\n Documents:\n {% for doc in documents %}\n {{ doc.content }}\n {% endfor %}\n Answer:\n \"\"\"\n\nrag_pipeline = Pipeline()\nrag_pipeline.add_component(\"text_embedder\", OpenAITextEmbedder())\nrag_pipeline.add_component(\n \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nrag_pipeline.add_component(\"prompt_builder\", PromptBuilder(template=prompt_template))\nrag_pipeline.add_component(\n \"generator\",\n OpenAIGenerator(\n api_key=Secret.from_token(os.getenv(\"OPENAI_API_KEY\")),\n generation_kwargs={\"temperature\": 0},\n ),\n)\nrag_pipeline.connect(\"text_embedder.embedding\", \"retriever.query_embedding\")\nrag_pipeline.connect(\"retriever.documents\", \"prompt_builder.documents\")\nrag_pipeline.connect(\"prompt_builder\", \"generator\")\n\nresults = rag_pipeline.run(\n {\n \"text_embedder\": {\"text\": question},\n \"prompt_builder\": {\"query\": question},\n }\n)\nprint(\"RAG answer:\", results[\"generator\"][\"replies\"][0])\n"],"headingContent":"","anchorList":[{"label":"MilvusとHaystackによる検索補強型ジェネレーション(RAG)","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Haystack","type":1,"isActive":false},{"label":"前提条件","href":"Prerequisites","type":2,"isActive":false},{"label":"データを準備する","href":"Prepare-the-data","type":2,"isActive":false},{"label":"インデックス作成パイプラインの作成","href":"Create-the-indexing-Pipeline","type":2,"isActive":false},{"label":"検索パイプラインの作成","href":"Create-the-retrieval-pipeline","type":2,"isActive":false},{"label":"RAGパイプラインの作成","href":"Create-the-RAG-pipeline","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["! pip install --upgrade --quiet pymilvus milvus-haystack markdown-it-py mdit_plain\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","import os\nimport urllib.request\n\nurl = \"https://www.gutenberg.org/cache/epub/7785/pg7785.txt\"\nfile_path = \"./davinci.txt\"\n\nif not os.path.exists(file_path):\n urllib.request.urlretrieve(url, file_path)\n","from haystack import Pipeline\nfrom haystack.components.converters import MarkdownToDocument\nfrom haystack.components.embedders import OpenAIDocumentEmbedder, OpenAITextEmbedder\nfrom haystack.components.preprocessors import DocumentSplitter\nfrom haystack.components.writers import DocumentWriter\nfrom haystack.utils import Secret\n\nfrom milvus_haystack import MilvusDocumentStore\nfrom milvus_haystack.milvus_embedding_retriever import MilvusEmbeddingRetriever\n\n\ndocument_store = MilvusDocumentStore(\n connection_args={\"uri\": \"./milvus.db\"},\n # connection_args={\"uri\": \"http://localhost:19530\"},\n # connection_args={\"uri\": YOUR_ZILLIZ_CLOUD_URI, \"token\": Secret.from_env_var(\"ZILLIZ_CLOUD_API_KEY\")},\n drop_old=True,\n)\n","indexing_pipeline = Pipeline()\nindexing_pipeline.add_component(\"converter\", MarkdownToDocument())\nindexing_pipeline.add_component(\n \"splitter\", DocumentSplitter(split_by=\"sentence\", split_length=2)\n)\nindexing_pipeline.add_component(\"embedder\", OpenAIDocumentEmbedder())\nindexing_pipeline.add_component(\"writer\", DocumentWriter(document_store))\nindexing_pipeline.connect(\"converter\", \"splitter\")\nindexing_pipeline.connect(\"splitter\", \"embedder\")\nindexing_pipeline.connect(\"embedder\", \"writer\")\nindexing_pipeline.run({\"converter\": {\"sources\": [file_path]}})\n\nprint(\"Number of documents:\", document_store.count_documents())\n","question = 'Where is the painting \"Warrior\" currently 
stored?'\n\nretrieval_pipeline = Pipeline()\nretrieval_pipeline.add_component(\"embedder\", OpenAITextEmbedder())\nretrieval_pipeline.add_component(\n \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nretrieval_pipeline.connect(\"embedder\", \"retriever\")\n\nretrieval_results = retrieval_pipeline.run({\"embedder\": {\"text\": question}})\n\nfor doc in retrieval_results[\"retriever\"][\"documents\"]:\n print(doc.content)\n print(\"-\" * 10)\n","from haystack.utils import Secret\nfrom haystack.components.builders import PromptBuilder\nfrom haystack.components.generators import OpenAIGenerator\n\nprompt_template = \"\"\"Answer the following query based on the provided context. If the context does\n not include an answer, reply with 'I don't know'.\\n\n Query: {{query}}\n Documents:\n {% for doc in documents %}\n {{ doc.content }}\n {% endfor %}\n Answer:\n \"\"\"\n\nrag_pipeline = Pipeline()\nrag_pipeline.add_component(\"text_embedder\", OpenAITextEmbedder())\nrag_pipeline.add_component(\n \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nrag_pipeline.add_component(\"prompt_builder\", PromptBuilder(template=prompt_template))\nrag_pipeline.add_component(\n \"generator\",\n OpenAIGenerator(\n api_key=Secret.from_token(os.getenv(\"OPENAI_API_KEY\")),\n generation_kwargs={\"temperature\": 0},\n ),\n)\nrag_pipeline.connect(\"text_embedder.embedding\", \"retriever.query_embedding\")\nrag_pipeline.connect(\"retriever.documents\", \"prompt_builder.documents\")\nrag_pipeline.connect(\"prompt_builder\", \"generator\")\n\nresults = rag_pipeline.run(\n {\n \"text_embedder\": {\"text\": question},\n \"prompt_builder\": {\"query\": question},\n }\n)\nprint(\"RAG answer:\", results[\"generator\"][\"replies\"][0])\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and Haystack","anchorList":[{"label":"MilvusとHaystackを使用した検索拡張生成(RAG)","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Haystack","type":1,"isActive":false},{"label":"前提条件","href":"Prerequisites","type":2,"isActive":false},{"label":"データの準備","href":"Prepare-the-data","type":2,"isActive":false},{"label":"インデックス作成パイプラインの作成","href":"Create-the-indexing-Pipeline","type":2,"isActive":false},{"label":"検索パイプラインの作成","href":"Create-the-retrieval-pipeline","type":2,"isActive":false},{"label":"RAGパイプラインの作成","href":"Create-the-RAG-pipeline","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/integrations/integrate_with_haystack.md b/localization/v2.4.x/site/ja/integrations/integrate_with_haystack.md index e4a84826f..c737cd4ea 100644 --- a/localization/v2.4.x/site/ja/integrations/integrate_with_haystack.md +++ b/localization/v2.4.x/site/ja/integrations/integrate_with_haystack.md @@ -1,9 +1,9 @@ --- id: integrate_with_haystack.md summary: >- - このガイドでは、HaystackとMilvusを使用したRAG(Retrieval-Augmented + このガイドでは、Haystackとmilvusを使用したRAG(Retrieval-Augmented Generation)システムの構築方法を紹介します。 -title: MilvusとHaystackによる検索補強型ジェネレーション(RAG) +title: MilvusとHaystackを使用した検索拡張生成(RAG) ---

Retrieval-Augmented Generation (RAG) with Milvus and Haystack

Open In Colab | GitHub Repository

This guide demonstrates how to build a Retrieval-Augmented Generation (RAG) system using Haystack and Milvus.

A RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

Haystack is an open-source Python framework by deepset for building custom apps with large language models (LLMs). Milvus is the world's most advanced open-source vector database, built for embedding similarity search and AI applications.
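As a minimal sketch of what this guide wires together, the Milvus document store for Haystack is created like this, assuming a local Milvus Lite file (a server URI or Zilliz Cloud endpoint works in the same place):

```python
from milvus_haystack import MilvusDocumentStore

# Milvus Lite keeps all data in a local file; drop_old clears any previous run.
document_store = MilvusDocumentStore(
    connection_args={"uri": "./milvus.db"},
    drop_old=True,
)

print("Number of documents:", document_store.count_documents())
```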

Prerequisites

      diff --git a/localization/v2.4.x/site/ja/integrations/integrate_with_hugging-face.json b/localization/v2.4.x/site/ja/integrations/integrate_with_hugging-face.json index d7a7a75b5..e26e38900 100644 --- a/localization/v2.4.x/site/ja/integrations/integrate_with_hugging-face.json +++ b/localization/v2.4.x/site/ja/integrations/integrate_with_hugging-face.json @@ -1 +1 @@ -{"codeList":["$ pip install --upgrade pymilvus transformers datasets torch\n","from datasets import load_dataset\n\n\nDATASET = \"squad\" # Name of dataset from HuggingFace Datasets\nINSERT_RATIO = 0.001 # Ratio of example dataset to be inserted\n\ndata = load_dataset(DATASET, split=\"validation\")\n# Generates a fixed subset. To generate a random subset, remove the seed.\ndata = data.train_test_split(test_size=INSERT_RATIO, seed=42)[\"test\"]\n# Clean up the data structure in the dataset.\ndata = data.map(\n lambda val: {\"answer\": val[\"answers\"][\"text\"][0]},\n remove_columns=[\"id\", \"answers\", \"context\"],\n)\n\n# View summary of example data\nprint(data)\n","from transformers import AutoTokenizer, AutoModel\nimport torch\n\nMODEL = (\n \"sentence-transformers/all-MiniLM-L6-v2\" # Name of model from HuggingFace Models\n)\nINFERENCE_BATCH_SIZE = 64 # Batch size of model inference\n\n# Load tokenizer & model from HuggingFace Hub\ntokenizer = AutoTokenizer.from_pretrained(MODEL)\nmodel = AutoModel.from_pretrained(MODEL)\n\n\ndef encode_text(batch):\n # Tokenize sentences\n encoded_input = tokenizer(\n batch[\"question\"], padding=True, truncation=True, return_tensors=\"pt\"\n )\n\n # Compute token embeddings\n with torch.no_grad():\n model_output = model(**encoded_input)\n\n # Perform pooling\n token_embeddings = model_output[0]\n attention_mask = encoded_input[\"attention_mask\"]\n input_mask_expanded = (\n attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n )\n sentence_embeddings = torch.sum(\n token_embeddings * input_mask_expanded, 1\n ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n\n # Normalize embeddings\n batch[\"question_embedding\"] = torch.nn.functional.normalize(\n sentence_embeddings, p=2, dim=1\n )\n return batch\n\n\ndata = data.map(encode_text, batched=True, batch_size=INFERENCE_BATCH_SIZE)\ndata_list = data.to_list()\n","from pymilvus import MilvusClient\n\n\nMILVUS_URI = \"./huggingface_milvus_test.db\" # Connection URI\nCOLLECTION_NAME = \"huggingface_test\" # Collection name\nDIMENSION = 384 # Embedding dimension depending on model\n\nmilvus_client = MilvusClient(MILVUS_URI)\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n collection_name=COLLECTION_NAME,\n dimension=DIMENSION,\n auto_id=True, # Enable auto id\n enable_dynamic_field=True, # Enable dynamic fields\n vector_field_name=\"question_embedding\", # Map vector field name and embedding column in dataset\n consistency_level=\"Strong\", # To enable search with latest data\n)\n","milvus_client.insert(collection_name=COLLECTION_NAME, data=data_list)\n","questions = {\n \"question\": [\n \"What is LGM?\",\n \"When did Massachusetts first mandate that children be educated in schools?\",\n ]\n}\n\n# Generate question embeddings\nquestion_embeddings = [v.tolist() for v in encode_text(questions)[\"question_embedding\"]]\n\n# Search across Milvus\nsearch_results = milvus_client.search(\n collection_name=COLLECTION_NAME,\n data=question_embeddings,\n limit=3, # How many search results to output\n 
output_fields=[\"answer\", \"question\"], # Include these fields in search results\n)\n\n# Print out results\nfor q, res in zip(questions[\"question\"], search_results):\n print(\"Question:\", q)\n for r in res:\n print(\n {\n \"answer\": r[\"entity\"][\"answer\"],\n \"score\": r[\"distance\"],\n \"original question\": r[\"entity\"][\"question\"],\n }\n )\n print(\"\\n\")\n"],"headingContent":"","anchorList":[{"label":"Milvusとハグ顔を使った質問応答","href":"Question-Answering-Using-Milvus-and-Hugging-Face","type":1,"isActive":false},{"label":"始める前に","href":"Before-you-begin","type":2,"isActive":false},{"label":"データの準備","href":"Prepare-data","type":2,"isActive":false},{"label":"データを挿入する","href":"Insert-data","type":2,"isActive":false},{"label":"質問する","href":"Ask-questions","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install --upgrade pymilvus transformers datasets torch\n","from datasets import load_dataset\n\n\nDATASET = \"squad\" # Name of dataset from HuggingFace Datasets\nINSERT_RATIO = 0.001 # Ratio of example dataset to be inserted\n\ndata = load_dataset(DATASET, split=\"validation\")\n# Generates a fixed subset. To generate a random subset, remove the seed.\ndata = data.train_test_split(test_size=INSERT_RATIO, seed=42)[\"test\"]\n# Clean up the data structure in the dataset.\ndata = data.map(\n lambda val: {\"answer\": val[\"answers\"][\"text\"][0]},\n remove_columns=[\"id\", \"answers\", \"context\"],\n)\n\n# View summary of example data\nprint(data)\n","from transformers import AutoTokenizer, AutoModel\nimport torch\n\nMODEL = (\n \"sentence-transformers/all-MiniLM-L6-v2\" # Name of model from HuggingFace Models\n)\nINFERENCE_BATCH_SIZE = 64 # Batch size of model inference\n\n# Load tokenizer & model from HuggingFace Hub\ntokenizer = AutoTokenizer.from_pretrained(MODEL)\nmodel = AutoModel.from_pretrained(MODEL)\n\n\ndef encode_text(batch):\n # Tokenize sentences\n encoded_input = tokenizer(\n batch[\"question\"], padding=True, truncation=True, return_tensors=\"pt\"\n )\n\n # Compute token embeddings\n with torch.no_grad():\n model_output = model(**encoded_input)\n\n # Perform pooling\n token_embeddings = model_output[0]\n attention_mask = encoded_input[\"attention_mask\"]\n input_mask_expanded = (\n attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n )\n sentence_embeddings = torch.sum(\n token_embeddings * input_mask_expanded, 1\n ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n\n # Normalize embeddings\n batch[\"question_embedding\"] = torch.nn.functional.normalize(\n sentence_embeddings, p=2, dim=1\n )\n return batch\n\n\ndata = data.map(encode_text, batched=True, batch_size=INFERENCE_BATCH_SIZE)\ndata_list = data.to_list()\n","from pymilvus import MilvusClient\n\n\nMILVUS_URI = \"./huggingface_milvus_test.db\" # Connection URI\nCOLLECTION_NAME = \"huggingface_test\" # Collection name\nDIMENSION = 384 # Embedding dimension depending on model\n\nmilvus_client = MilvusClient(MILVUS_URI)\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n collection_name=COLLECTION_NAME,\n dimension=DIMENSION,\n auto_id=True, # Enable auto id\n enable_dynamic_field=True, # Enable dynamic fields\n vector_field_name=\"question_embedding\", # Map vector field name and embedding column in dataset\n consistency_level=\"Strong\", # To enable search with latest data\n)\n","milvus_client.insert(collection_name=COLLECTION_NAME, 
data=data_list)\n","questions = {\n \"question\": [\n \"What is LGM?\",\n \"When did Massachusetts first mandate that children be educated in schools?\",\n ]\n}\n\n# Generate question embeddings\nquestion_embeddings = [v.tolist() for v in encode_text(questions)[\"question_embedding\"]]\n\n# Search across Milvus\nsearch_results = milvus_client.search(\n collection_name=COLLECTION_NAME,\n data=question_embeddings,\n limit=3, # How many search results to output\n output_fields=[\"answer\", \"question\"], # Include these fields in search results\n)\n\n# Print out results\nfor q, res in zip(questions[\"question\"], search_results):\n print(\"Question:\", q)\n for r in res:\n print(\n {\n \"answer\": r[\"entity\"][\"answer\"],\n \"score\": r[\"distance\"],\n \"original question\": r[\"entity\"][\"question\"],\n }\n )\n print(\"\\n\")\n"],"headingContent":"Question Answering Using Milvus and Hugging Face","anchorList":[{"label":"milvusとハグ顔を使った質問応答","href":"Question-Answering-Using-Milvus-and-Hugging-Face","type":1,"isActive":false},{"label":"始める前に","href":"Before-you-begin","type":2,"isActive":false},{"label":"データの準備","href":"Prepare-data","type":2,"isActive":false},{"label":"データを挿入する","href":"Insert-data","type":2,"isActive":false},{"label":"質問する","href":"Ask-questions","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/integrations/integrate_with_hugging-face.md b/localization/v2.4.x/site/ja/integrations/integrate_with_hugging-face.md index 0e5efbc17..44f19ab33 100644 --- a/localization/v2.4.x/site/ja/integrations/integrate_with_hugging-face.md +++ b/localization/v2.4.x/site/ja/integrations/integrate_with_hugging-face.md @@ -1,11 +1,11 @@ --- id: integrate_with_hugging-face.md summary: >- - このチュートリアルでは、データ処理のためのデータローダーと埋め込みジェネレーターとしてHugging + このチュートリアルでは、データ処理のためのデータローダーとエンベッディングジェネレーターとしてHugging Faceを、意味検索のためのベクトルデータベースとしてMilvusを使用して、質問応答システムを構築する方法を示します。 -title: Milvusとハグ顔を使った質問応答 +title: milvusとハグ顔を使った質問応答 --- -

Question Answering Using Milvus and Hugging Face

Open In Colab | GitHub Repository

A question answering system based on semantic search works by finding, for a given query question, the most similar question in a dataset of question-answer pairs. Once the most similar question is identified, the corresponding answer from the dataset is taken as the answer to the query. This approach relies on semantic similarity measures to determine the similarity between questions and retrieve relevant answers.

This tutorial shows how to build a question answering system using Hugging Face as the data loader and embedding generator for data processing, and Milvus as the vector database for semantic search.
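The heart of that flow, once the embeddings are stored, is a single similarity search. Here is a minimal sketch, assuming the encode_text helper, the milvus_client, and the huggingface_test collection set up later in this tutorial:

```python
# Questions to answer, in the batch format expected by encode_text.
questions = {
    "question": [
        "What is LGM?",
        "When did Massachusetts first mandate that children be educated in schools?",
    ]
}

# Embed the incoming questions with the same model used at indexing time.
question_embeddings = [
    v.tolist() for v in encode_text(questions)["question_embedding"]
]

# Retrieve the closest stored questions together with their paired answers.
search_results = milvus_client.search(
    collection_name="huggingface_test",  # collection created in this tutorial
    data=question_embeddings,
    limit=3,  # top-3 most similar questions
    output_fields=["answer", "question"],  # return the paired answer too
)
```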

      始める前に

Make sure all required dependencies are installed:

• pymilvus: the Python package that works with the vector database service powered by Milvus or Zilliz Cloud.
• datasets, transformers: Hugging Face packages for managing data and using models.
• torch: a powerful library providing efficient tensor computation and deep learning tools.
      $ pip install --upgrade pymilvus transformers datasets torch
       
If you are using Google Colab, you may need to restart the runtime to enable the dependencies you just installed (click the "Runtime" menu at the top of the screen, and select "Restart session" from the dropdown menu).

Prepare data

Once all the data is inserted into Milvus, we can ask questions and see what the closest answers are.

      questions = {
           "question": [
               "What is LGM?",
      diff --git a/localization/v2.4.x/site/ja/integrations/integrate_with_jina.json b/localization/v2.4.x/site/ja/integrations/integrate_with_jina.json
      index 7b15eedfc..aa3bb8645 100644
      --- a/localization/v2.4.x/site/ja/integrations/integrate_with_jina.json
      +++ b/localization/v2.4.x/site/ja/integrations/integrate_with_jina.json
      @@ -1 +1 @@
      -{"codeList":["$ pip install -U pymilvus\n$ pip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_api_key = \"\"\nef = JinaEmbeddingFunction(\"jina-embeddings-v2-base-en\", jina_api_key)\n\nquery = \"what is information retrieval?\"\ndoc = \"Information retrieval is the process of finding relevant information from a large collection of data or documents.\"\n\nqvecs = ef.encode_queries([query])\ndvecs = ef.encode_documents([doc])\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_api_key = \"\"\nef = JinaEmbeddingFunction(\"jina-embeddings-v2-base-de\", jina_api_key)\n\nquery = \"what is information retrieval?\"\ndoc = \"Information Retrieval ist der Prozess, relevante Informationen aus einer großen Sammlung von Daten oder Dokumenten zu finden.\"\n\nqvecs = ef.encode_queries([query])\ndvecs = ef.encode_documents([doc])\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_api_key = \"\"\nef = JinaEmbeddingFunction(\"jina-embeddings-v2-base-code\", jina_api_key)\n\n# Case1: Enhanced Code Navigation\n# query: text description of the functionality\n# document: relevant code snippet\n\nquery = \"function to calculate average in Python.\"\ndoc = \"\"\"\ndef calculate_average(numbers):\n    total = sum(numbers)\n    count = len(numbers)\n    return total / count\n\"\"\"\n\n# Case2: Streamlined Code Review\n# query: text description of the programming concept\n# document: relevante code snippet or PR\n\nquery = \"pull quest related to Collection\"\ndoc = \"fix:[restful v2] parameters of create collection ...\"\n\n# Case3: Automatic Documentation Assistance\n# query: code snippet you need explanation\n# document: relevante document or DocsString\n\nquery = \"What is Collection in Milvus\"\ndoc = \"\"\"\nIn Milvus, you store your vector embeddings in collections. All vector embeddings within a collection share the same dimensionality and distance metric for measuring similarity.\nMilvus collections support dynamic fields (i.e., fields not pre-defined in the schema) and automatic incrementation of primary keys.\n\"\"\"\n\nqvecs = ef.encode_queries([query])\ndvecs = ef.encode_documents([doc])\n","from pymilvus.model.dense import JinaEmbeddingFunction\nfrom pymilvus import MilvusClient\n\njina_api_key = \"\"\nef = JinaEmbeddingFunction(\"jina-embeddings-v2-base-en\", jina_api_key)\nDIMENSION = 768  # size of jina-embeddings-v2-base-en\n\ndoc = [\n    \"In 1950, Alan Turing published his seminal paper, 'Computing Machinery and Intelligence,' proposing the Turing Test as a criterion of intelligence, a foundational concept in the philosophy and development of artificial intelligence.\",\n    \"The Dartmouth Conference in 1956 is considered the birthplace of artificial intelligence as a field; here, John McCarthy and others coined the term 'artificial intelligence' and laid out its basic goals.\",\n    \"In 1951, British mathematician and computer scientist Alan Turing also developed the first program designed to play chess, demonstrating an early example of AI in game strategy.\",\n    \"The invention of the Logic Theorist by Allen Newell, Herbert A. 
Simon, and Cliff Shaw in 1955 marked the creation of the first true AI program, which was capable of solving logic problems, akin to proving mathematical theorems.\",\n]\n\ndvecs = ef.encode_documents(doc)\n\ndata = [\n    {\"id\": i, \"vector\": dvecs[i], \"text\": doc[i], \"subject\": \"history\"}\n    for i in range(len(dvecs))\n]\n\nmilvus_client = MilvusClient(\"./milvus_jina_demo.db\")\nCOLLECTION_NAME = \"demo_collection\"  # Milvus collection name\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\nres = milvus_client.insert(collection_name=COLLECTION_NAME, data=data)\n\nprint(res[\"insert_count\"])\n","queries = \"What event in 1956 marked the official birth of artificial intelligence as a discipline?\"\nqvecs = ef.encode_queries([queries])\n\nres = milvus_client.search(\n    collection_name=COLLECTION_NAME,  # target collection\n    data=[qvecs[0]],  # query vectors\n    limit=3,  # number of returned entities\n    output_fields=[\"text\", \"subject\"],  # specifies fields to be returned\n)[0]\n\nfor result in res:\n    print(result)\n","from pymilvus.model.reranker import JinaRerankFunction\n\njina_api_key = \"\"\n\nrf = JinaRerankFunction(\"jina-reranker-v1-base-en\", jina_api_key)\n\nquery = \"What event in 1956 marked the official birth of artificial intelligence as a discipline?\"\n\ndocuments = [\n    \"In 1950, Alan Turing published his seminal paper, 'Computing Machinery and Intelligence,' proposing the Turing Test as a criterion of intelligence, a foundational concept in the philosophy and development of artificial intelligence.\",\n    \"The Dartmouth Conference in 1956 is considered the birthplace of artificial intelligence as a field; here, John McCarthy and others coined the term 'artificial intelligence' and laid out its basic goals.\",\n    \"In 1951, British mathematician and computer scientist Alan Turing also developed the first program designed to play chess, demonstrating an early example of AI in game strategy.\",\n    \"The invention of the Logic Theorist by Allen Newell, Herbert A. Simon, and Cliff Shaw in 1955 marked the creation of the first true AI program, which was capable of solving logic problems, akin to proving mathematical theorems.\",\n]\n\nrf(query, documents)\n"],"headingContent":"","anchorList":[{"label":"Jina AIとMilvusの統合","href":"Integrate-Milvus-with-Jina-AI","type":1,"isActive":false},{"label":"Jina AIとは","href":"Who-is-Jina-AI","type":2,"isActive":false},{"label":"MilvusとJina AIのエンベッディング","href":"Milvus-and-Jina-AIs-Embedding","type":2,"isActive":false},{"label":"例","href":"Examples","type":2,"isActive":false},{"label":"汎用エンベッディング","href":"General-Purpose-Embedding","type":2,"isActive":false},{"label":"バイリンガル埋め込み","href":"Bilingual-Embeddings","type":2,"isActive":false},{"label":"コード埋め込み","href":"Code-Embeddings","type":2,"isActive":false},{"label":"JinaとMilvusのセマンティック検索","href":"Semantic-Search-with-Jina--Milvus","type":2,"isActive":false},{"label":"ジーナ再ランカー","href":"Jina-Reranker","type":2,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":["$ pip install -U pymilvus\n$ pip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_api_key = \"\"\nef = JinaEmbeddingFunction(\n    \"jina-embeddings-v3\", \n    jina_api_key,\n    task=\"retrieval.passage\",\n    dimensions=1024\n)\n\nquery = \"what is information retrieval?\"\ndoc = \"Information retrieval is the process of finding relevant information from a large collection of data or documents.\"\n\nqvecs = ef.encode_queries([query])  # This method uses `retrieval.query` as the task\ndvecs = ef.encode_documents([doc])  # This method uses `retrieval.passage` as the task\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_api_key = \"\"\nef = JinaEmbeddingFunction(\"jina-embeddings-v2-base-de\", jina_api_key)\n\nquery = \"what is information retrieval?\"\ndoc = \"Information Retrieval ist der Prozess, relevante Informationen aus einer großen Sammlung von Daten oder Dokumenten zu finden.\"\n\nqvecs = ef.encode_queries([query])\ndvecs = ef.encode_documents([doc])\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_api_key = \"\"\nef = JinaEmbeddingFunction(\"jina-embeddings-v2-base-code\", jina_api_key)\n\n# Case1: Enhanced Code Navigation\n# query: text description of the functionality\n# document: relevant code snippet\n\nquery = \"function to calculate average in Python.\"\ndoc = \"\"\"\ndef calculate_average(numbers):\n    total = sum(numbers)\n    count = len(numbers)\n    return total / count\n\"\"\"\n\n# Case2: Streamlined Code Review\n# query: text description of the programming concept\n# document: relevante code snippet or PR\n\nquery = \"pull quest related to Collection\"\ndoc = \"fix:[restful v2] parameters of create collection ...\"\n\n# Case3: Automatic Documentation Assistance\n# query: code snippet you need explanation\n# document: relevante document or DocsString\n\nquery = \"What is Collection in Milvus\"\ndoc = \"\"\"\nIn Milvus, you store your vector embeddings in collections. All vector embeddings within a collection share the same dimensionality and distance metric for measuring similarity.\nMilvus collections support dynamic fields (i.e., fields not pre-defined in the schema) and automatic incrementation of primary keys.\n\"\"\"\n\nqvecs = ef.encode_queries([query])\ndvecs = ef.encode_documents([doc])\n","from pymilvus.model.dense import JinaEmbeddingFunction\nfrom pymilvus import MilvusClient\n\njina_api_key = \"\"\nDIMENSION = 1024  # `jina-embeddings-v3` supports flexible embedding sizes (32, 64, 128, 256, 512, 768, 1024), allowing for truncating embeddings to fit your application. \nef = JinaEmbeddingFunction(\n    \"jina-embeddings-v3\", \n    jina_api_key,\n    task=\"retrieval.passage\",\n    dimensions=DIMENSION,\n)\n\n\ndoc = [\n    \"In 1950, Alan Turing published his seminal paper, 'Computing Machinery and Intelligence,' proposing the Turing Test as a criterion of intelligence, a foundational concept in the philosophy and development of artificial intelligence.\",\n    \"The Dartmouth Conference in 1956 is considered the birthplace of artificial intelligence as a field; here, John McCarthy and others coined the term 'artificial intelligence' and laid out its basic goals.\",\n    \"In 1951, British mathematician and computer scientist Alan Turing also developed the first program designed to play chess, demonstrating an early example of AI in game strategy.\",\n    \"The invention of the Logic Theorist by Allen Newell, Herbert A. 
Simon, and Cliff Shaw in 1955 marked the creation of the first true AI program, which was capable of solving logic problems, akin to proving mathematical theorems.\",\n]\n\ndvecs = ef.encode_documents(doc) # This method uses `retrieval.passage` as the task\n\ndata = [\n    {\"id\": i, \"vector\": dvecs[i], \"text\": doc[i], \"subject\": \"history\"}\n    for i in range(len(dvecs))\n]\n\nmilvus_client = MilvusClient(\"./milvus_jina_demo.db\")\nCOLLECTION_NAME = \"demo_collection\"  # Milvus collection name\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\nres = milvus_client.insert(collection_name=COLLECTION_NAME, data=data)\n\nprint(res[\"insert_count\"])\n","queries = \"What event in 1956 marked the official birth of artificial intelligence as a discipline?\"\nqvecs = ef.encode_queries([queries]) # This method uses `retrieval.query` as the task\n\nres = milvus_client.search(\n    collection_name=COLLECTION_NAME,  # target collection\n    data=[qvecs[0]],  # query vectors\n    limit=3,  # number of returned entities\n    output_fields=[\"text\", \"subject\"],  # specifies fields to be returned\n)[0]\n\nfor result in res:\n    print(result)\n","from pymilvus.model.reranker import JinaRerankFunction\n\njina_api_key = \"\"\n\nrf = JinaRerankFunction(\"jina-reranker-v1-base-en\", jina_api_key)\n\nquery = \"What event in 1956 marked the official birth of artificial intelligence as a discipline?\"\n\ndocuments = [\n    \"In 1950, Alan Turing published his seminal paper, 'Computing Machinery and Intelligence,' proposing the Turing Test as a criterion of intelligence, a foundational concept in the philosophy and development of artificial intelligence.\",\n    \"The Dartmouth Conference in 1956 is considered the birthplace of artificial intelligence as a field; here, John McCarthy and others coined the term 'artificial intelligence' and laid out its basic goals.\",\n    \"In 1951, British mathematician and computer scientist Alan Turing also developed the first program designed to play chess, demonstrating an early example of AI in game strategy.\",\n    \"The invention of the Logic Theorist by Allen Newell, Herbert A. Simon, and Cliff Shaw in 1955 marked the creation of the first true AI program, which was capable of solving logic problems, akin to proving mathematical theorems.\",\n]\n\nrf(query, documents)\n"],"headingContent":"Integrate Milvus with Jina AI","anchorList":[{"label":"MilvusとJina AIを統合する","href":"Integrate-Milvus-with-Jina-AI","type":1,"isActive":false},{"label":"Jina AIとは","href":"Who-is-Jina-AI","type":2,"isActive":false},{"label":"MilvusとJina AIのエンベッディング","href":"Milvus-and-Jina-AIs-Embedding","type":2,"isActive":false},{"label":"例","href":"Examples","type":2,"isActive":false},{"label":"汎用エンベッディング","href":"General-Purpose-Embedding","type":2,"isActive":false},{"label":"バイリンガル埋め込み","href":"Bilingual-Embeddings","type":2,"isActive":false},{"label":"コード埋め込み","href":"Code-Embeddings","type":2,"isActive":false},{"label":"ジーナとmilvusのセマンティック検索","href":"Semantic-Search-with-Jina--Milvus","type":2,"isActive":false},{"label":"ジーナ再ランカー","href":"Jina-Reranker","type":2,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/ja/integrations/integrate_with_jina.md b/localization/v2.4.x/site/ja/integrations/integrate_with_jina.md
      index 02566143f..7ab2d5c7f 100644
      --- a/localization/v2.4.x/site/ja/integrations/integrate_with_jina.md
      +++ b/localization/v2.4.x/site/ja/integrations/integrate_with_jina.md
      @@ -3,7 +3,7 @@ id: integrate_with_jina.md
       summary: このガイドでは、JinaエンベッディングとMilvusを使って類似検索を行う方法を説明します。
       title: MilvusとJinaの統合
       ---
Integrate Milvus with Jina AI

Open In Colab | GitHub Repository

This guide demonstrates how to use Jina AI embeddings and Milvus to conduct similarity search tasks.

Who is Jina AI

Jina embeddings are integrated into the PyMilvus model library. Below are code examples of how to use Jina embeddings in action.

Before we start, we need to install the model library for PyMilvus.

      $ pip install -U pymilvus
       $ pip install "pymilvus[model]"
      @@ -91,16 +92,21 @@ $ pip install "pymilvus[model]"

Jina AI's core embedding models excel at understanding detailed text, making them ideal for semantic search, content classification, advanced sentiment analysis, text summarization, and personalized recommendation systems.

from pymilvus.model.dense import JinaEmbeddingFunction
       
       jina_api_key = "<YOUR_JINA_API_KEY>"
      -ef = JinaEmbeddingFunction("jina-embeddings-v2-base-en", jina_api_key)
      +ef = JinaEmbeddingFunction(
      +    "jina-embeddings-v3", 
      +    jina_api_key,
      +    task="retrieval.passage",
      +    dimensions=1024
      +)
       
       query = "what is information retrieval?"
       doc = "Information retrieval is the process of finding relevant information from a large collection of data or documents."
       
      -qvecs = ef.encode_queries([query])
      -dvecs = ef.encode_documents([doc])
      +qvecs = ef.encode_queries([query])  # This method uses `retrieval.query` as the task
      +dvecs = ef.encode_documents([doc])  # This method uses `retrieval.passage` as the task
       

Bilingual Embeddings

Semantic Search with Jina & Milvus

With the powerful vector embedding capability, we can combine the embeddings obtained from Jina AI models with the Milvus Lite vector database to perform semantic search.

      from pymilvus.model.dense import JinaEmbeddingFunction
       from pymilvus import MilvusClient
       
       jina_api_key = "<YOUR_JINA_API_KEY>"
      -ef = JinaEmbeddingFunction("jina-embeddings-v2-base-en", jina_api_key)
      -DIMENSION = 768  # size of jina-embeddings-v2-base-en
      +DIMENSION = 1024  # `jina-embeddings-v3` supports flexible embedding sizes (32, 64, 128, 256, 512, 768, 1024), allowing for truncating embeddings to fit your application. 
      +ef = JinaEmbeddingFunction(
      +    "jina-embeddings-v3", 
      +    jina_api_key,
      +    task="retrieval.passage",
      +    dimensions=DIMENSION,
      +)
      +
       
       doc = [
           "In 1950, Alan Turing published his seminal paper, 'Computing Machinery and Intelligence,' proposing the Turing Test as a criterion of intelligence, a foundational concept in the philosophy and development of artificial intelligence.",
      @@ -212,7 +224,7 @@ doc = [
           "The invention of the Logic Theorist by Allen Newell, Herbert A. Simon, and Cliff Shaw in 1955 marked the creation of the first true AI program, which was capable of solving logic problems, akin to proving mathematical theorems.",
       ]
       
      -dvecs = ef.encode_documents(doc)
      +dvecs = ef.encode_documents(doc) # This method uses `retrieval.passage` as the task
       
       data = [
           {"id": i, "vector": dvecs[i], "text": doc[i], "subject": "history"}
      @@ -232,14 +244,14 @@ res = milvus_client.insert(collection_name=COLLECTION_NAME, data=data)
       

As for the arguments of MilvusClient (a short sketch of all three options follows this list):

• Setting the uri as a local file, e.g. ./milvus.db, is the most convenient method, as it automatically utilizes Milvus Lite to store all data in this file.
• If you have a large scale of data, you can set up a more performant Milvus server on Docker or Kubernetes. In this setup, please use the server uri, e.g. http://localhost:19530, as your uri.
• If you want to use Zilliz Cloud, the fully managed cloud service for Milvus, adjust the uri and token, which correspond to the Public Endpoint and API key in Zilliz Cloud.
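A minimal sketch of the three options; the server address, cloud endpoint, and API key are placeholders:

```python
from pymilvus import MilvusClient

# Option 1: Milvus Lite — all data lives in a local file.
client_lite = MilvusClient("./milvus_jina_demo.db")

# Option 2: a self-hosted Milvus server (placeholder address).
client_server = MilvusClient(uri="http://localhost:19530")

# Option 3: Zilliz Cloud (placeholder endpoint and API key).
client_cloud = MilvusClient(uri="<PUBLIC_ENDPOINT>", token="<API_KEY>")
```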
Now that all the data is in the Milvus vector database, we can perform semantic search by generating a vector embedding for the query and conducting a vector search.

      queries = "What event in 1956 marked the official birth of artificial intelligence as a discipline?"
      -qvecs = ef.encode_queries([queries])
      +qvecs = ef.encode_queries([queries]) # This method uses `retrieval.query` as the task
       
       res = milvus_client.search(
           collection_name=COLLECTION_NAME,  # target collection
      diff --git a/localization/v2.4.x/site/ja/integrations/integrate_with_llamaindex.json b/localization/v2.4.x/site/ja/integrations/integrate_with_llamaindex.json
      index c92525ef6..cd976f958 100644
      --- a/localization/v2.4.x/site/ja/integrations/integrate_with_llamaindex.json
      +++ b/localization/v2.4.x/site/ja/integrations/integrate_with_llamaindex.json
      @@ -1 +1 @@
      -{"codeList":["$ pip install pymilvus>=2.4.2\n","$ pip install llama-index-vector-stores-milvus\n","$ pip install llama-index\n","import openai\n\nopenai.api_key = \"sk-***********\"\n","! mkdir -p 'data/'\n! wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham_essay.txt'\n! wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/10k/uber_2021.pdf' -O 'data/uber_2021.pdf'\n","from llama_index.core import SimpleDirectoryReader\n\n# load documents\ndocuments = SimpleDirectoryReader(\n    input_files=[\"./data/paul_graham_essay.txt\"]\n).load_data()\n\nprint(\"Document ID:\", documents[0].doc_id)\n","# Create an index over the documents\nfrom llama_index.core import VectorStoreIndex, StorageContext\nfrom llama_index.vector_stores.milvus import MilvusVectorStore\n\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", dim=1536, overwrite=True)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(documents, storage_context=storage_context)\n","query_engine = index.as_query_engine()\nres = query_engine.query(\"What did the author learn?\")\nprint(res)\n","res = query_engine.query(\"What challenges did the disease pose for the author?\")\nprint(res)\n","from llama_index.core import Document\n\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", dim=1536, overwrite=True)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(\n    [Document(text=\"The number that is being searched for is ten.\")],\n    storage_context,\n)\nquery_engine = index.as_query_engine()\nres = query_engine.query(\"Who is the author?\")\nprint(res)\n","del index, vector_store, storage_context, query_engine\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", overwrite=False)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(documents, storage_context=storage_context)\nquery_engine = index.as_query_engine()\nres = query_engine.query(\"What is the number?\")\nprint(res)\n","res = query_engine.query(\"Who is the author?\")\nprint(res)\n","from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters\n\n# Load all the two documents loaded before\ndocuments_all = SimpleDirectoryReader(\"./data/\").load_data()\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", dim=1536, overwrite=True)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(documents_all, storage_context)\n","filters = MetadataFilters(\n    filters=[ExactMatchFilter(key=\"file_name\", value=\"uber_2021.pdf\")]\n)\nquery_engine = index.as_query_engine(filters=filters)\nres = query_engine.query(\"What challenges did the disease pose for the author?\")\n\nprint(res)\n","filters = MetadataFilters(\n    filters=[ExactMatchFilter(key=\"file_name\", value=\"paul_graham_essay.txt\")]\n)\nquery_engine = index.as_query_engine(filters=filters)\nres = query_engine.query(\"What challenges did the disease pose for the 
author?\")\n\nprint(res)\n"],"headingContent":"","anchorList":[{"label":"MilvusとLlamaIndexによる検索補強型生成(RAG)","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-LlamaIndex","type":1,"isActive":false},{"label":"始める前に","href":"Before-you-begin","type":2,"isActive":false},{"label":"はじめに","href":"Getting-Started","type":2,"isActive":false},{"label":"メタデータのフィルタリング","href":"Metadata-filtering","type":2,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":["$ pip install pymilvus>=2.4.2\n","$ pip install llama-index-vector-stores-milvus\n","$ pip install llama-index\n","import openai\n\nopenai.api_key = \"sk-***********\"\n","! mkdir -p 'data/'\n! wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham_essay.txt'\n! wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/10k/uber_2021.pdf' -O 'data/uber_2021.pdf'\n","from llama_index.core import SimpleDirectoryReader\n\n# load documents\ndocuments = SimpleDirectoryReader(\n    input_files=[\"./data/paul_graham_essay.txt\"]\n).load_data()\n\nprint(\"Document ID:\", documents[0].doc_id)\n","# Create an index over the documents\nfrom llama_index.core import VectorStoreIndex, StorageContext\nfrom llama_index.vector_stores.milvus import MilvusVectorStore\n\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", dim=1536, overwrite=True)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(documents, storage_context=storage_context)\n","query_engine = index.as_query_engine()\nres = query_engine.query(\"What did the author learn?\")\nprint(res)\n","res = query_engine.query(\"What challenges did the disease pose for the author?\")\nprint(res)\n","from llama_index.core import Document\n\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", dim=1536, overwrite=True)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(\n    [Document(text=\"The number that is being searched for is ten.\")],\n    storage_context,\n)\nquery_engine = index.as_query_engine()\nres = query_engine.query(\"Who is the author?\")\nprint(res)\n","del index, vector_store, storage_context, query_engine\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", overwrite=False)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(documents, storage_context=storage_context)\nquery_engine = index.as_query_engine()\nres = query_engine.query(\"What is the number?\")\nprint(res)\n","res = query_engine.query(\"Who is the author?\")\nprint(res)\n","from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters\n\n# Load all the two documents loaded before\ndocuments_all = SimpleDirectoryReader(\"./data/\").load_data()\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", dim=1536, overwrite=True)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(documents_all, storage_context)\n","filters = MetadataFilters(\n    filters=[ExactMatchFilter(key=\"file_name\", value=\"uber_2021.pdf\")]\n)\nquery_engine = index.as_query_engine(filters=filters)\nres = query_engine.query(\"What challenges did the disease pose for the author?\")\n\nprint(res)\n","filters = MetadataFilters(\n    filters=[ExactMatchFilter(key=\"file_name\", value=\"paul_graham_essay.txt\")]\n)\nquery_engine = index.as_query_engine(filters=filters)\nres = query_engine.query(\"What challenges did the disease pose for the author?\")\n\nprint(res)\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and 
LlamaIndex","anchorList":[{"label":"MilvusとLlamaIndexによる検索拡張生成(RAG)","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-LlamaIndex","type":1,"isActive":false},{"label":"始める前に","href":"Before-you-begin","type":2,"isActive":false},{"label":"はじめに","href":"Getting-Started","type":2,"isActive":false},{"label":"メタデータのフィルタリング","href":"Metadata-filtering","type":2,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/ja/integrations/integrate_with_llamaindex.md b/localization/v2.4.x/site/ja/integrations/integrate_with_llamaindex.md
      index 69e2b16f5..8a57311a9 100644
      --- a/localization/v2.4.x/site/ja/integrations/integrate_with_llamaindex.md
      +++ b/localization/v2.4.x/site/ja/integrations/integrate_with_llamaindex.md
      @@ -1,11 +1,11 @@
       ---
       id: integrate_with_llamaindex.md
       summary: >-
      -  このガイドでは、LlamaIndexとMilvusを使用したRAG(Retrieval-Augmented
+  このガイドでは、LlamaIndexとMilvusを使用したRAG(Retrieval-Augmented
         Generation)システムの構築方法を説明します。
      -title: MilvusとLlamaIndexによる検索補強型生成(RAG)
      +title: MilvusとLlamaIndexによる検索拡張生成(RAG)
       ---
      -

      MilvusとLlamaIndexによる検索補強型生成(RAG)

      Open In Colab

      +

      Open In Colab +GitHub Repository

      このガイドでは、LlamaIndexとMilvusを使ったRAG(Retrieval-Augmented Generation)システムの構築方法を説明する。

      -

      RAGシステムは検索システムと生成モデルを組み合わせ、与えられたプロンプトに基づいて新しいテキストを生成する。このシステムはまずMilvusを使ってコーパスから関連文書を検索し、次に生成モデルを使って検索された文書に基づいて新しいテキストを生成する。

      -

      LlamaIndexは、カスタムデータソースを大規模言語モデル(LLM)に接続するためのシンプルで柔軟なデータフレームワークである。Milvusは世界で最も先進的なオープンソースのベクトル・データベースで、埋め込み類似検索やAIアプリケーションのために構築されている。

      -

      このノートブックでは、MilvusVectorStoreの簡単なデモを紹介します。

      +

      RAGシステムは検索システムと生成モデルを組み合わせ、与えられたプロンプトに基づいて新しいテキストを生成する。システムはまずMilvusを使ってコーパスから関連文書を検索し、次に生成モデルを使って検索された文書に基づいて新しいテキストを生成する。

      +

      LlamaIndexは、カスタムデータソースを大規模言語モデル(LLM)に接続するためのシンプルで柔軟なデータフレームワークである。Milvusは世界で最も先進的なオープンソースのベクトル・データベースであり、埋め込み類似検索やAIアプリケーションのために構築されている。

      +

      このノートブックでは、MilvusVectorStoreの簡単なデモをお見せします。

      始める前に

      データを横断するインデックスの作成

      文書ができたので、インデックスを作成し、文書を挿入することができます。

      -

      なお、Milvus Liteには pymilvus>=2.4.2 が必要です。

      +

      Milvus Liteには pymilvus>=2.4.2 が必要です。

      # Create an index over the documents
       from llama_index.core import VectorStoreIndex, StorageContext
      @@ -103,7 +104,7 @@ index = VectorStoreIndex.from_documents(documents, storage_context=storage_conte
       

      MilvusVectorStore

-- ./milvus.db のようにuri をローカルファイルとして設定すると、自動的にMilvus Liteを利用してすべてのデータをこのファイルに格納するため、最も便利な方法です。
+- uri をローカルファイル、例えば./milvus.db に設定するのが最も便利な方法です。このファイルにすべてのデータが格納されるため、Milvus Lite が自動的に利用されます。
 - データ規模が大きい場合は、dockerやkubernetes上に、よりパフォーマンスの高いMilvusサーバを構築することができます。このセットアップでは、http://localhost:19530 などのサーバ uri をuri として使用してください。
 - MilvusのフルマネージドクラウドサービスであるZilliz Cloudを利用する場合は、Zilliz CloudのPublic EndpointとApi keyに対応するuri とtoken を調整してください。

3通りの接続方法の対比は、以下のスケッチを参照してください。
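上記の3通りの接続方法を対比した最小のスケッチです。エンドポイントとAPIキーはプレースホルダ(仮定)であり、dim=1536 はOpenAIの text-embedding-3-small を想定した値です。

```python
from llama_index.vector_stores.milvus import MilvusVectorStore

# Milvus Lite: all data lives in a local file (most convenient for demos).
store_lite = MilvusVectorStore(uri="./milvus_demo.db", dim=1536, overwrite=True)

# Self-hosted Milvus server on Docker/Kubernetes (address is an assumption).
store_server = MilvusVectorStore(uri="http://localhost:19530", dim=1536)

# Zilliz Cloud: replace the placeholders with your Public Endpoint and API key.
store_cloud = MilvusVectorStore(
    uri="https://<public-endpoint>.zillizcloud.com",  # placeholder
    token="<api-key>",                                # placeholder
    dim=1536,
)
```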
      @@ -189,7 +190,7 @@ res = query_engine.query("What challenges did the
      The disease posed challenges related to the adverse impact on the business and operations, including reduced demand for Mobility offerings globally, affecting travel behavior and demand. Additionally, the pandemic led to driver supply constraints, impacted by concerns regarding COVID-19, with uncertainties about when supply levels would return to normal. The rise of the Omicron variant further affected travel, resulting in advisories and restrictions that could adversely impact both driver supply and consumer demand for Mobility offerings.
       
      -

      こ こ で フ ァ イ ルpaul_graham_essay.txt か ら 文書を取得す る と 、 別の結果が得 ら れます。

      +

      今度はファイルpaul_graham_essay.txt から取得すると、異なる結果が得られます。

      filters = MetadataFilters(
           filters=[ExactMatchFilter(key="file_name", value="paul_graham_essay.txt")]
       )
      diff --git a/localization/v2.4.x/site/ja/integrations/integrate_with_openai.json b/localization/v2.4.x/site/ja/integrations/integrate_with_openai.json
      index 4819fb701..a6af49045 100644
      --- a/localization/v2.4.x/site/ja/integrations/integrate_with_openai.json
      +++ b/localization/v2.4.x/site/ja/integrations/integrate_with_openai.json
      @@ -1 +1 @@
      -{"codeList":["pip install --upgrade openai pymilvus\n","from openai import OpenAI\nfrom pymilvus import MilvusClient\n\nMODEL_NAME = \"text-embedding-3-small\"  # Which model to use, please check https://platform.openai.com/docs/guides/embeddings for available models\nDIMENSION = 1536  # Dimension of vector embedding\n\n# Connect to OpenAI with API Key.\nopenai_client = OpenAI(api_key=\"\")\n\ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = [\n    vec.embedding\n    for vec in openai_client.embeddings.create(input=docs, model=MODEL_NAME).data\n]\n\n# Prepare data to be stored in Milvus vector database.\n# We can store the id, vector representation, raw text and labels such as \"subject\" in this case in Milvus.\ndata = [\n    {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n    for i in range(len(docs))\n]\n\n\n# Connect to Milvus, all data is stored in a local file named \"milvus_openai_demo.db\"\n# in current directory. You can also connect to a remote Milvus server following this\n# instruction: https://milvus.io/docs/install_standalone-docker.md.\nmilvus_client = MilvusClient(uri=\"milvus_openai_demo.db\")\nCOLLECTION_NAME = \"demo_collection\"  # Milvus collection name\n# Create a collection to store the vectors and text.\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\n# Insert all data into Milvus vector database.\nres = milvus_client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res[\"insert_count\"])\n","queries = [\"When was artificial intelligence founded?\"]\n\nquery_vectors = [\n    vec.embedding\n    for vec in openai_client.embeddings.create(input=queries, model=MODEL_NAME).data\n]\n\nres = milvus_client.search(\n    collection_name=COLLECTION_NAME,  # target collection\n    data=query_vectors,  # query vectors\n    limit=2,  # number of returned entities\n    output_fields=[\"text\", \"subject\"],  # specifies fields to be returned\n)\n\nfor q in queries:\n    print(\"Query:\", q)\n    for result in res:\n        print(result)\n    print(\"\\n\")\n","[\n    {\n        \"id\": 0,\n        \"distance\": -0.772376537322998,\n        \"entity\": {\n            \"text\": \"Artificial intelligence was founded as an academic discipline in 1956.\",\n            \"subject\": \"history\",\n        },\n    },\n    {\n        \"id\": 1,\n        \"distance\": -0.58596271276474,\n        \"entity\": {\n            \"text\": \"Alan Turing was the first person to conduct substantial research in AI.\",\n            \"subject\": \"history\",\n        },\n    },\n]\n"],"headingContent":"","anchorList":[{"label":"MilvusとOpenAIによるセマンティック検索","href":"Semantic-Search-with-Milvus-and-OpenAI","type":1,"isActive":false},{"label":"はじめに","href":"Getting-started","type":2,"isActive":false},{"label":"OpenAI & Milvusで本のタイトルを検索する","href":"Searching-book-titles-with-OpenAI--Milvus","type":2,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":["pip install --upgrade openai pymilvus\n","from openai import OpenAI\nfrom pymilvus import MilvusClient\n\nMODEL_NAME = \"text-embedding-3-small\"  # Which model to use, please check https://platform.openai.com/docs/guides/embeddings for available models\nDIMENSION = 1536  # Dimension of vector embedding\n\n# Connect to OpenAI with API Key.\nopenai_client = OpenAI(api_key=\"\")\n\ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = [\n    vec.embedding\n    for vec in openai_client.embeddings.create(input=docs, model=MODEL_NAME).data\n]\n\n# Prepare data to be stored in Milvus vector database.\n# We can store the id, vector representation, raw text and labels such as \"subject\" in this case in Milvus.\ndata = [\n    {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n    for i in range(len(docs))\n]\n\n\n# Connect to Milvus, all data is stored in a local file named \"milvus_openai_demo.db\"\n# in current directory. You can also connect to a remote Milvus server following this\n# instruction: https://milvus.io/docs/install_standalone-docker.md.\nmilvus_client = MilvusClient(uri=\"milvus_openai_demo.db\")\nCOLLECTION_NAME = \"demo_collection\"  # Milvus collection name\n# Create a collection to store the vectors and text.\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\n# Insert all data into Milvus vector database.\nres = milvus_client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res[\"insert_count\"])\n","queries = [\"When was artificial intelligence founded?\"]\n\nquery_vectors = [\n    vec.embedding\n    for vec in openai_client.embeddings.create(input=queries, model=MODEL_NAME).data\n]\n\nres = milvus_client.search(\n    collection_name=COLLECTION_NAME,  # target collection\n    data=query_vectors,  # query vectors\n    limit=2,  # number of returned entities\n    output_fields=[\"text\", \"subject\"],  # specifies fields to be returned\n)\n\nfor q in queries:\n    print(\"Query:\", q)\n    for result in res:\n        print(result)\n    print(\"\\n\")\n","[\n    {\n        \"id\": 0,\n        \"distance\": -0.772376537322998,\n        \"entity\": {\n            \"text\": \"Artificial intelligence was founded as an academic discipline in 1956.\",\n            \"subject\": \"history\",\n        },\n    },\n    {\n        \"id\": 1,\n        \"distance\": -0.58596271276474,\n        \"entity\": {\n            \"text\": \"Alan Turing was the first person to conduct substantial research in AI.\",\n            \"subject\": \"history\",\n        },\n    },\n]\n"],"headingContent":"Semantic Search with Milvus and OpenAI","anchorList":[{"label":"MilvusとOpenAIによるセマンティック検索","href":"Semantic-Search-with-Milvus-and-OpenAI","type":1,"isActive":false},{"label":"はじめに","href":"Getting-started","type":2,"isActive":false},{"label":"OpenAI & Milvusで本のタイトルを検索する","href":"Searching-book-titles-with-OpenAI--Milvus","type":2,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/ja/integrations/integrate_with_openai.md b/localization/v2.4.x/site/ja/integrations/integrate_with_openai.md
      index 1913b0a29..fd8a5ab0c 100644
      --- a/localization/v2.4.x/site/ja/integrations/integrate_with_openai.md
      +++ b/localization/v2.4.x/site/ja/integrations/integrate_with_openai.md
      @@ -18,8 +18,9 @@ summary: このページでは、OpenAIのエンベッディングAPIを使っ
                 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
               >
             
      -    

      Open In Colab

      -

      このガイドでは、OpenAIのEmbedding APIとMilvusベクトルデータベースを使って、テキストをセマンティック検索する方法を紹介します。

      +

      Open In Colab +GitHub Repository

      +

      このガイドでは、OpenAIのEmbedding APIをMilvusベクトルデータベースとどのように組み合わせて、テキストのセマンティック検索を行うかを紹介します。
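本ガイドの完全なコードを要約した参考スケッチです(APIキーはプレースホルダ)。OpenAIでテキストを埋め込み、Milvus Liteのローカルファイルに保存するまでの流れを示します。

```python
from openai import OpenAI
from pymilvus import MilvusClient

openai_client = OpenAI(api_key="sk-***********")           # placeholder key
milvus_client = MilvusClient(uri="milvus_openai_demo.db")  # local Milvus Lite file

docs = ["Artificial intelligence was founded as an academic discipline in 1956."]

# Embed the documents with OpenAI (text-embedding-3-small is 1536-dimensional).
vectors = [
    v.embedding
    for v in openai_client.embeddings.create(
        input=docs, model="text-embedding-3-small"
    ).data
]

# Store id, vector, and raw text in Milvus.
data = [{"id": i, "vector": vectors[i], "text": docs[i]} for i in range(len(docs))]
milvus_client.create_collection(collection_name="demo_collection", dimension=1536)
milvus_client.insert(collection_name="demo_collection", data=data)
```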

      はじめに

      Open In Colab

      +

      Open In Colab +GitHub Repository

      このガイドでは、Milvusをベースに構築されたRAG(Retrieval-Augmented Generation)パイプラインを評価するためにRagasを使用する方法を示します。

      RAGシステムは検索システムと生成モデルを組み合わせ、与えられたプロンプトに基づいて新しいテキストを生成します。システムはまずMilvusを使ってコーパスから関連文書を検索し、次に生成モデルを使って検索された文書に基づいて新しいテキストを生成する。

      RagasはRAGパイプラインの評価を支援するフレームワークである。パイプラインの構築を支援する既存のツールやフレームワークはありますが、パイプラインを評価し、パイプラインのパフォーマンスを定量化することは困難です。そこでRagas(RAGアセスメント)の登場です。
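Ragasによる評価の雰囲気を示す、仮定ベースの最小スケッチです。関数名とカラム名(question / answer / contexts)はRagas 0.1系のAPIを想定しており、バージョンによって異なる可能性があります。

```python
from datasets import Dataset
from ragas import evaluate
from ragas.metrics import answer_relevancy, faithfulness

# Columns assumed by Ragas: question, answer, contexts (optionally ground_truth).
ds = Dataset.from_dict(
    {
        "question": ["What is Milvus?"],
        "answer": ["Milvus is an open-source vector database."],
        "contexts": [["Milvus is a vector database built for AI applications."]],
    }
)

# Scores each sample with the selected RAG metrics (needs an OpenAI API key set).
result = evaluate(ds, metrics=[faithfulness, answer_relevancy])
print(result)
```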

      @@ -65,7 +66,7 @@ os.environ["OP d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

      ベクトルストアとしてMilvus、LLMとしてOpenAIを使用するRAGクラスを定義します。このクラスには、テキストデータをMilvusにロードするload メソッド、与えられた質問に最も類似したテキストデータを検索するretrieve メソッド、検索された知識を使って与えられた質問に回答するanswer メソッドが含まれます。

      +

      Milvusをベクトルストアとして、OpenAIをLLMとして使用するRAGクラスを定義します。このクラスには、テキストデータをMilvusにロードするload メソッド、与えられた質問に最も類似したテキストデータを検索するretrieve メソッド、検索された知識を用いて与えられた質問に回答するanswer メソッドが含まれます。

      from typing import List
       from tqdm import tqdm
       from openai import OpenAI
      @@ -174,7 +175,7 @@ Use the following pieces of information enclosed in <context> tags to prov
               else:
                   return response.choices[0].message.content, retrieved_texts
       
      -

      OpenAIとMilvusのクライアントでRAGクラスを初期化しよう。

      +

      RAGクラスをOpenAIとMilvusクライアントで初期化してみよう。

      openai_client = OpenAI()
       milvus_client = MilvusClient(uri="./milvus_demo.db")
       
      @@ -183,7 +184,7 @@ my_rag = RAG(openai_client=openai_clie
       

      MilvusClient の引数については以下の通り:

-- uri の引数をローカルファイル、例えば./milvus.db に設定するのが最も便利です。
+- uri の引数をローカルファイル、例えば./milvus.db に設定するのが最も便利である。
 - データ規模が大きい場合は、dockerやkubernetes上に、よりパフォーマンスの高いMilvusサーバを構築することができます。このセットアップでは、http://localhost:19530 などのサーバ uri をuri として使用してください。
 - MilvusのフルマネージドクラウドサービスであるZilliz Cloudを使用する場合は、Zilliz CloudのPublic EndpointとApi keyに対応するuri とtoken を調整してください。
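上で説明したload /retrieve /answer の利用イメージです。メソッド名は本文の説明に基づきますが、return_retrieved_text などの引数名は説明用の仮定です。

```python
# Hypothetical usage of the RAG class described in this guide.
my_rag.load(["Milvus is a vector database built for AI applications."])  # store chunks
context = my_rag.retrieve("What is Milvus?")  # most similar text chunks

# The answer method uses the retrieved knowledge to answer the question;
# the keyword argument name below is an assumption for illustration.
answer, retrieved = my_rag.answer("What is Milvus?", return_retrieved_text=True)
print(answer)
```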
@@ -203,7 +204,7 @@ my_rag = RAG(openai_client=openai_clie

      Milvusの開発ガイドをRAGのプライベート知識として使用します。

      +

      Milvusの開発ガイドをRAGのプライベートナレッジとして使用します。

      ダウンロードし、RAGパイプラインにロードする。

      import os
       import urllib.request
      diff --git a/localization/v2.4.x/site/ja/integrations/integrate_with_vanna.json b/localization/v2.4.x/site/ja/integrations/integrate_with_vanna.json
      index 28567287e..27fe13abf 100644
      --- a/localization/v2.4.x/site/ja/integrations/integrate_with_vanna.json
      +++ b/localization/v2.4.x/site/ja/integrations/integrate_with_vanna.json
      @@ -1 +1 @@
      -{"codeList":["$ pip install \"vanna[milvus,openai]\"\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","from pymilvus import MilvusClient, model\nfrom vanna.milvus import Milvus_VectorStore\nfrom vanna.openai import OpenAI_Chat\n\n\nclass VannaMilvus(Milvus_VectorStore, OpenAI_Chat):\n    def __init__(self, config=None):\n        Milvus_VectorStore.__init__(self, config=config)\n        OpenAI_Chat.__init__(self, config=config)\n","milvus_uri = \"./milvus_vanna.db\"\n\nmilvus_client = MilvusClient(uri=milvus_uri)\n\nvn_milvus = VannaMilvus(\n    config={\n        \"api_key\": os.getenv(\"OPENAI_API_KEY\"),\n        \"model\": \"gpt-3.5-turbo\",\n        \"milvus_client\": milvus_client,\n        \"embedding_function\": model.DefaultEmbeddingFunction(),\n        \"n_results\": 2,  # The number of results to return from Milvus semantic search.\n    }\n)\n","import sqlite3\n\nsqlite_path = \"./my-database.sqlite\"\nsql_connect = sqlite3.connect(sqlite_path)\nc = sql_connect.cursor()\n\ninit_sqls = \"\"\"\nCREATE TABLE IF NOT EXISTS Customer (\n    ID INTEGER PRIMARY KEY AUTOINCREMENT,\n    Name TEXT NOT NULL,\n    Company TEXT NOT NULL,\n    City TEXT NOT NULL,\n    Phone TEXT NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS Company (\n    ID INTEGER PRIMARY KEY AUTOINCREMENT,\n    Name TEXT NOT NULL,\n    Industry TEXT NOT NULL,\n    Location TEXT NOT NULL,\n    EmployeeCount INTEGER NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS User (\n    ID INTEGER PRIMARY KEY AUTOINCREMENT,\n    Username TEXT NOT NULL UNIQUE,\n    Email TEXT NOT NULL UNIQUE\n);\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('John Doe', 'ABC Corp', 'New York', '123-456-7890');\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('Jane Smith', 'XYZ Inc', 'Los Angeles', '098-765-4321');\n\nINSERT INTO Company (Name, Industry, Location, EmployeeCount)\nVALUES ('ABC Corp', 'cutting-edge technology', 'New York', 100);\n\nINSERT INTO User (Username, Email)\nVALUES ('johndoe123', 'johndoe123@example.com');\n\"\"\"\n\nfor sql in init_sqls.split(\";\"):\n    c.execute(sql)\n\nsql_connect.commit()\n\n# Connect to the SQLite database\nvn_milvus.connect_to_sqlite(sqlite_path)\n","# If there exists training data, we should remove it before training.\nexisting_training_data = vn_milvus.get_training_data()\nif len(existing_training_data) > 0:\n    for _, training_data in existing_training_data.iterrows():\n        vn_milvus.remove_training_data(training_data[\"id\"])\n\n# Get the DDL of the SQLite database\ndf_ddl = vn_milvus.run_sql(\"SELECT type, sql FROM sqlite_master WHERE sql is not null\")\n\n# Train the model on the DDL data\nfor ddl in df_ddl[\"sql\"].to_list():\n    vn_milvus.train(ddl=ddl)\n","# Add documentation about your business terminology or definitions.\nvn_milvus.train(\n    documentation=\"ABC Corp specializes in cutting-edge technology solutions and innovation.\"\n)\nvn_milvus.train(\n    documentation=\"XYZ Inc is a global leader in manufacturing and supply chain management.\"\n)\n\n# You can also add SQL queries to your training data.\nvn_milvus.train(sql=\"SELECT * FROM Customer WHERE Name = 'John Doe'\")\n","training_data = vn_milvus.get_training_data()\ntraining_data\n","sql = vn_milvus.generate_sql(\"what is the phone number of John Doe?\")\nvn_milvus.run_sql(sql)\n","sql = vn_milvus.generate_sql(\"which customer works for a manufacturing corporation?\")\nvn_milvus.run_sql(sql)\n","sql_connect.close()\nmilvus_client.close()\n\nos.remove(sqlite_path)\nif 
os.path.exists(milvus_uri):\n    os.remove(milvus_uri)\n"],"headingContent":"","anchorList":[{"label":"VannaとMilvusでSQLを書く","href":"Write-SQL-with-Vanna-and-Milvus","type":1,"isActive":false},{"label":"前提条件","href":"Prerequisites","type":2,"isActive":false},{"label":"データの準備","href":"Data-preparation","type":2,"isActive":false},{"label":"データで訓練する","href":"Train-with-data","type":2,"isActive":false},{"label":"SQLの生成と実行","href":"Generate-SQLs-and-execute-them","type":2,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":["$ pip install \"vanna[milvus,openai]\"\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","from pymilvus import MilvusClient, model\nfrom vanna.milvus import Milvus_VectorStore\nfrom vanna.openai import OpenAI_Chat\n\n\nclass VannaMilvus(Milvus_VectorStore, OpenAI_Chat):\n    def __init__(self, config=None):\n        Milvus_VectorStore.__init__(self, config=config)\n        OpenAI_Chat.__init__(self, config=config)\n","milvus_uri = \"./milvus_vanna.db\"\n\nmilvus_client = MilvusClient(uri=milvus_uri)\n\nvn_milvus = VannaMilvus(\n    config={\n        \"api_key\": os.getenv(\"OPENAI_API_KEY\"),\n        \"model\": \"gpt-3.5-turbo\",\n        \"milvus_client\": milvus_client,\n        \"embedding_function\": model.DefaultEmbeddingFunction(),\n        \"n_results\": 2,  # The number of results to return from Milvus semantic search.\n    }\n)\n","import sqlite3\n\nsqlite_path = \"./my-database.sqlite\"\nsql_connect = sqlite3.connect(sqlite_path)\nc = sql_connect.cursor()\n\ninit_sqls = \"\"\"\nCREATE TABLE IF NOT EXISTS Customer (\n    ID INTEGER PRIMARY KEY AUTOINCREMENT,\n    Name TEXT NOT NULL,\n    Company TEXT NOT NULL,\n    City TEXT NOT NULL,\n    Phone TEXT NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS Company (\n    ID INTEGER PRIMARY KEY AUTOINCREMENT,\n    Name TEXT NOT NULL,\n    Industry TEXT NOT NULL,\n    Location TEXT NOT NULL,\n    EmployeeCount INTEGER NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS User (\n    ID INTEGER PRIMARY KEY AUTOINCREMENT,\n    Username TEXT NOT NULL UNIQUE,\n    Email TEXT NOT NULL UNIQUE\n);\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('John Doe', 'ABC Corp', 'New York', '123-456-7890');\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('Jane Smith', 'XYZ Inc', 'Los Angeles', '098-765-4321');\n\nINSERT INTO Company (Name, Industry, Location, EmployeeCount)\nVALUES ('ABC Corp', 'cutting-edge technology', 'New York', 100);\n\nINSERT INTO User (Username, Email)\nVALUES ('johndoe123', 'johndoe123@example.com');\n\"\"\"\n\nfor sql in init_sqls.split(\";\"):\n    c.execute(sql)\n\nsql_connect.commit()\n\n# Connect to the SQLite database\nvn_milvus.connect_to_sqlite(sqlite_path)\n","# If there exists training data, we should remove it before training.\nexisting_training_data = vn_milvus.get_training_data()\nif len(existing_training_data) > 0:\n    for _, training_data in existing_training_data.iterrows():\n        vn_milvus.remove_training_data(training_data[\"id\"])\n\n# Get the DDL of the SQLite database\ndf_ddl = vn_milvus.run_sql(\"SELECT type, sql FROM sqlite_master WHERE sql is not null\")\n\n# Train the model on the DDL data\nfor ddl in df_ddl[\"sql\"].to_list():\n    vn_milvus.train(ddl=ddl)\n","# Add documentation about your business terminology or definitions.\nvn_milvus.train(\n    documentation=\"ABC Corp specializes in cutting-edge technology solutions and innovation.\"\n)\nvn_milvus.train(\n    documentation=\"XYZ Inc is a global leader in manufacturing and supply chain management.\"\n)\n\n# You can also add SQL queries to your training data.\nvn_milvus.train(sql=\"SELECT * FROM Customer WHERE Name = 'John Doe'\")\n","training_data = vn_milvus.get_training_data()\ntraining_data\n","sql = vn_milvus.generate_sql(\"what is the phone number of John Doe?\")\nvn_milvus.run_sql(sql)\n","sql = vn_milvus.generate_sql(\"which customer works for a manufacturing corporation?\")\nvn_milvus.run_sql(sql)\n","sql_connect.close()\nmilvus_client.close()\n\nos.remove(sqlite_path)\nif 
os.path.exists(milvus_uri):\n    os.remove(milvus_uri)\n"],"headingContent":"Write SQL with Vanna and Milvus","anchorList":[{"label":"VannaとMilvusでSQLを書く","href":"Write-SQL-with-Vanna-and-Milvus","type":1,"isActive":false},{"label":"前提条件","href":"Prerequisites","type":2,"isActive":false},{"label":"データの準備","href":"Data-preparation","type":2,"isActive":false},{"label":"データで訓練する","href":"Train-with-data","type":2,"isActive":false},{"label":"SQLの生成と実行","href":"Generate-SQLs-and-execute-them","type":2,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/ja/integrations/integrate_with_vanna.md b/localization/v2.4.x/site/ja/integrations/integrate_with_vanna.md
      index 7198577f0..8e6d6f5bf 100644
      --- a/localization/v2.4.x/site/ja/integrations/integrate_with_vanna.md
      +++ b/localization/v2.4.x/site/ja/integrations/integrate_with_vanna.md
      @@ -1,10 +1,9 @@
       ---
       id: integrate_with_vanna.md
       summary: このガイドでは、Vannaを使用して、データベースに格納されたデータに基づいてSQLクエリを生成し、実行する方法を示します。
      -title: バンナとMilvusとSQLを書く
+title: VannaとMilvusでSQLを書く
       ---
      -
      -

      VannaとMilvusでSQLを書く

      VannaはSQL生成と関連機能のためのオープンソースのPython RAG (Retrieval-Augmented Generation)フレームワークです。Milvusは世界で最も先進的なオープンソースのベクトル・データベースで、埋め込み類似検索やAIアプリケーションのために構築されています。

      +

      Open In Colab +GitHub Repository

      +

      VannaはSQL生成と関連機能のためのオープンソースのPython RAG (Retrieval-Augmented Generation)フレームワークです。Milvusは世界で最も先進的なオープンソースのベクターデータベースで、埋め込み類似検索やAIアプリケーションのために構築されています。

      Vannaは2つの簡単なステップで動作します - あなたのデータ上でRAG「モデル」を訓練し、次にあなたのデータベース上で実行するように設定することができるSQLクエリを返す質問をします。このガイドでは、Vannaを使用して、データベースに保存されたデータに基づいてSQLクエリを生成し、実行する方法を示します。
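この2ステップ(学習→質問)を本ガイドのコードから抜き出した最小スケッチです。

```python
# Step 1: train the RAG "model" on your data (DDL, documentation, or example SQL).
vn_milvus.train(ddl="CREATE TABLE Customer (ID INTEGER PRIMARY KEY, Name TEXT)")
vn_milvus.train(
    documentation="ABC Corp specializes in cutting-edge technology solutions."
)

# Step 2: ask a question; Vanna returns SQL that you can run on your database.
sql = vn_milvus.generate_sql("what is the phone number of John Doe?")
vn_milvus.run_sql(sql)
```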

      前提条件

      -

      データの準備

      -

      VannaMilvus クラスを必要な設定パラメータで初期化する。エンベッディングの格納にはmilvus_client のインスタンスを使い、エンベッディングの生成にはmilvus_modelから初期化したmodel.DefaultEmbeddingFunction() を使います。

+class VannaMilvus(Milvus_VectorStore, OpenAI_Chat):
+    def __init__(self, config=None):
+        Milvus_VectorStore.__init__(self, config=config)
+        OpenAI_Chat.__init__(self, config=config)
      +

      VannaMilvus クラスを必要な設定パラメータで初期化する。エンベッディングの保存にはmilvus_client のインスタンスを使い、エンベッディングの生成にはmilvus_modelから初期化したmodel.DefaultEmbeddingFunction() を使います。

      MilvusClient の引数については、次のようにします:

-- uri をローカルファイル、例えば./milvus.db に設定するのが最も便利な方法です。
-- データ規模が大きい場合は、dockerやkubernetes上に、よりパフォーマンスの高いMilvusサーバを構築することができます。このセットアップでは、http://localhost:19530 などのサーバ uri をuri として使用してください。
+- uri をローカルファイル、例えば./milvus.db に設定するのが最も便利である。
+- データ規模が大きい場合は、dockerやkubernetes上に、よりパフォーマンスの高いMilvusサーバを構築することができます。このセットアップでは、サーバの uri、例えばhttp://localhost:19530 をuri として使用してください。
 - MilvusのフルマネージドクラウドサービスであるZilliz Cloudを利用する場合は、Zilliz CloudのPublic EndpointとApi keyに対応するuri とtoken を調整してください。
@@ -88,16 +88,15 @@ OpenAI_Chat.__init__(self, config=c
 milvus_client = MilvusClient(uri=milvus_uri)
 
 vn_milvus = VannaMilvus(
-config={
-"api_key": os.getenv("OPENAI_API_KEY"),
-"model": "gpt-3.5-turbo",
-"milvus_client": milvus_client,
-"embedding_function": model.DefaultEmbeddingFunction(),
-"n_results": 2,  # The number of results to return from Milvus semantic search.
-}
+    config={
+        "api_key": os.getenv("OPENAI_API_KEY"),
+        "model": "gpt-3.5-turbo",
+        "milvus_client": milvus_client,
+        "embedding_function": model.DefaultEmbeddingFunction(),
+        "n_results": 2,  # The number of results to return from Milvus semantic search.
+    }
 )
      -

      これは数サンプルのデータしかない単純な例なので、n_results を 2 に設定し、最も類似した上位2つの結果を検索するようにしています。実際には、より大きなトレーニングデータセットを扱う場合は、n_results をより大きな値に設定する必要があります。

      いくつかのサンプルデータを含むいくつかのテーブルを持つサンプルSQLiteデータベースを使用します。

      import sqlite3
      @@ -108,31 +107,31 @@ c = sql_connect.cursor()
       
       init_sqls = """
       CREATE TABLE IF NOT EXISTS Customer (
      -ID INTEGER PRIMARY KEY AUTOINCREMENT,
      -Name TEXT NOT NULL,
      -Company TEXT NOT NULL,
      -City TEXT NOT NULL,
      -Phone TEXT NOT NULL
      +    ID INTEGER PRIMARY KEY AUTOINCREMENT,
      +    Name TEXT NOT NULL,
      +    Company TEXT NOT NULL,
      +    City TEXT NOT NULL,
      +    Phone TEXT NOT NULL
       );
       
       CREATE TABLE IF NOT EXISTS Company (
      -ID INTEGER PRIMARY KEY AUTOINCREMENT,
      -Name TEXT NOT NULL,
      -Industry TEXT NOT NULL,
      -Location TEXT NOT NULL,
      -EmployeeCount INTEGER NOT NULL
      +    ID INTEGER PRIMARY KEY AUTOINCREMENT,
      +    Name TEXT NOT NULL,
      +    Industry TEXT NOT NULL,
      +    Location TEXT NOT NULL,
      +    EmployeeCount INTEGER NOT NULL
       );
       
       CREATE TABLE IF NOT EXISTS User (
      -ID INTEGER PRIMARY KEY AUTOINCREMENT,
      -Username TEXT NOT NULL UNIQUE,
      -Email TEXT NOT NULL UNIQUE
      +    ID INTEGER PRIMARY KEY AUTOINCREMENT,
      +    Username TEXT NOT NULL UNIQUE,
      +    Email TEXT NOT NULL UNIQUE
       );
       
      -INSERT INTO Customer (Name, Company, City, Phone)
      +INSERT INTO Customer (Name, Company, City, Phone) 
       VALUES ('John Doe', 'ABC Corp', 'New York', '123-456-7890');
       
      -INSERT INTO Customer (Name, Company, City, Phone)
      +INSERT INTO Customer (Name, Company, City, Phone) 
       VALUES ('Jane Smith', 'XYZ Inc', 'Los Angeles', '098-765-4321');
       
       INSERT INTO Company (Name, Industry, Location, EmployeeCount)
      @@ -143,14 +142,13 @@ VALUES ('johndoe123', 'johndoe123@example.com');
       """
       
       for sql in init_sqls.split(";"):
      -c.execute(sql)
      +    c.execute(sql)
       
       sql_connect.commit()
       
       # Connect to the SQLite database
       vn_milvus.connect_to_sqlite(sqlite_path)
       
      -

      データで訓練する

      -
      Adding ddl: CREATE TABLE Customer (
           ID INTEGER PRIMARY KEY AUTOINCREMENT,
           Name TEXT NOT NULL,
      @@ -212,9 +209,8 @@ vn_milvus.train(
       )
       
       # You can also add SQL queries to your training data.
      -vn_milvus.train(sql="SELECT \* FROM Customer WHERE Name = 'John Doe'")
      +vn_milvus.train(sql="SELECT * FROM Customer WHERE Name = 'John Doe'")
       
      -
      Adding documentation....
       Adding documentation....
       Using model gpt-3.5-turbo for 65.0 tokens (approx)
      @@ -367,5 +363,5 @@ milvus_client.close()
       
       os.remove(sqlite_path)
       if os.path.exists(milvus_uri):
      -os.remove(milvus_uri)
      +    os.remove(milvus_uri)
       
      diff --git a/localization/v2.4.x/site/ja/integrations/integrate_with_voyageai.json b/localization/v2.4.x/site/ja/integrations/integrate_with_voyageai.json index 1add93052..22746dcc8 100644 --- a/localization/v2.4.x/site/ja/integrations/integrate_with_voyageai.json +++ b/localization/v2.4.x/site/ja/integrations/integrate_with_voyageai.json @@ -1 +1 @@ -{"codeList":["$ pip install --upgrade voyageai pymilvus\n","import voyageai\nfrom pymilvus import MilvusClient\n\nMODEL_NAME = \"voyage-law-2\" # Which model to use, please check https://docs.voyageai.com/docs/embeddings for available models\nDIMENSION = 1024 # Dimension of vector embedding\n\n# Connect to VoyageAI with API Key.\nvoyage_client = voyageai.Client(api_key=\"\")\n\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = voyage_client.embed(texts=docs, model=MODEL_NAME, truncation=False).embeddings\n\n# Prepare data to be stored in Milvus vector database.\n# We can store the id, vector representation, raw text and labels such as \"subject\" in this case in Milvus.\ndata = [\n {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n for i in range(len(docs))\n]\n\n\n# Connect to Milvus, all data is stored in a local file named \"milvus_voyage_demo.db\"\n# in current directory. You can also connect to a remote Milvus server following this\n# instruction: https://milvus.io/docs/install_standalone-docker.md.\nmilvus_client = MilvusClient(uri=\"milvus_voyage_demo.db\")\nCOLLECTION_NAME = \"demo_collection\" # Milvus collection name\n# Create a collection to store the vectors and text.\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\n# Insert all data into Milvus vector database.\nres = milvus_client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res[\"insert_count\"])\n","queries = [\"When was artificial intelligence founded?\"]\n\nquery_vectors = voyage_client.embed(\n texts=queries, model=MODEL_NAME, truncation=False\n).embeddings\n\nres = milvus_client.search(\n collection_name=COLLECTION_NAME, # target collection\n data=query_vectors, # query vectors\n limit=2, # number of returned entities\n output_fields=[\"text\", \"subject\"], # specifies fields to be returned\n)\n\nfor q in queries:\n print(\"Query:\", q)\n for result in res:\n print(result)\n print(\"\\n\")\n"],"headingContent":"","anchorList":[{"label":"MilvusとVoyageAIによるセマンティック検索","href":"Semantic-Search-with-Milvus-and-VoyageAI","type":1,"isActive":false},{"label":"はじめに","href":"Getting-started","type":2,"isActive":false},{"label":"VoyageAI & Milvusによる書籍タイトルの検索","href":"Searching-book-titles-with-VoyageAI--Milvus","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install --upgrade voyageai pymilvus\n","import voyageai\nfrom pymilvus import MilvusClient\n\nMODEL_NAME = \"voyage-law-2\" # Which model to use, please check https://docs.voyageai.com/docs/embeddings for available models\nDIMENSION = 1024 # Dimension of vector embedding\n\n# Connect to VoyageAI with API Key.\nvoyage_client = voyageai.Client(api_key=\"\")\n\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first 
person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = voyage_client.embed(texts=docs, model=MODEL_NAME, truncation=False).embeddings\n\n# Prepare data to be stored in Milvus vector database.\n# We can store the id, vector representation, raw text and labels such as \"subject\" in this case in Milvus.\ndata = [\n {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n for i in range(len(docs))\n]\n\n\n# Connect to Milvus, all data is stored in a local file named \"milvus_voyage_demo.db\"\n# in current directory. You can also connect to a remote Milvus server following this\n# instruction: https://milvus.io/docs/install_standalone-docker.md.\nmilvus_client = MilvusClient(uri=\"milvus_voyage_demo.db\")\nCOLLECTION_NAME = \"demo_collection\" # Milvus collection name\n# Create a collection to store the vectors and text.\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\n# Insert all data into Milvus vector database.\nres = milvus_client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res[\"insert_count\"])\n","queries = [\"When was artificial intelligence founded?\"]\n\nquery_vectors = voyage_client.embed(\n texts=queries, model=MODEL_NAME, truncation=False\n).embeddings\n\nres = milvus_client.search(\n collection_name=COLLECTION_NAME, # target collection\n data=query_vectors, # query vectors\n limit=2, # number of returned entities\n output_fields=[\"text\", \"subject\"], # specifies fields to be returned\n)\n\nfor q in queries:\n print(\"Query:\", q)\n for result in res:\n print(result)\n print(\"\\n\")\n"],"headingContent":"Semantic Search with Milvus and VoyageAI","anchorList":[{"label":"MilvusとVoyageAIによるセマンティック検索","href":"Semantic-Search-with-Milvus-and-VoyageAI","type":1,"isActive":false},{"label":"はじめに","href":"Getting-started","type":2,"isActive":false},{"label":"VoyageAI & Milvusによる書籍タイトルの検索","href":"Searching-book-titles-with-VoyageAI--Milvus","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/integrations/integrate_with_voyageai.md b/localization/v2.4.x/site/ja/integrations/integrate_with_voyageai.md index e732a99bc..ae0019198 100644 --- a/localization/v2.4.x/site/ja/integrations/integrate_with_voyageai.md +++ b/localization/v2.4.x/site/ja/integrations/integrate_with_voyageai.md @@ -18,8 +18,9 @@ summary: このページでは、VoyageAIのエンベッディングAPIを使っ d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

      Open In Colab

      -

      このガイドでは、VoyageAIのEmbedding APIとMilvusベクトルデータベースを使用して、テキストをセマンティック検索する方法を紹介します。

      +

      Open In Colab +GitHub Repository

      +

      このガイドでは、MilvusベクトルデータベースとVoyageAIのEmbedding APIを使用して、テキストをセマンティック検索する方法を紹介します。
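本ガイドのコードに基づき、VoyageAIで埋め込みを生成する部分だけを抜き出した参考スケッチです(APIキーはプレースホルダ)。

```python
import voyageai
from pymilvus import MilvusClient

voyage_client = voyageai.Client(api_key="<api-key>")  # placeholder key

# voyage-law-2 produces 1024-dimensional embeddings.
vectors = voyage_client.embed(
    texts=["Alan Turing was the first person to conduct substantial research in AI."],
    model="voyage-law-2",
    truncation=False,
).embeddings

# Store them in a local Milvus Lite file, as in the full guide.
milvus_client = MilvusClient(uri="milvus_voyage_demo.db")
```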

      はじめに

      Open In Colab

      +

      Open In Colab +GitHub Repository

      このガイドではLangChainとMilvusを使ったRAG(Retrieval-Augmented Generation)システムの構築方法を説明します。

      RAGシステムは検索システムと生成モデルを組み合わせ、与えられたプロンプトに基づいて新しいテキストを生成します。システムはまずMilvusを使ってコーパスから関連文書を検索し、次に生成モデルを使って検索された文書に基づいて新しいテキストを生成する。

      LangChainは大規模言語モデル(LLM)を利用したアプリケーション開発のためのフレームワークである。Milvusは世界で最も先進的なオープンソースのベクトルデータベースであり、埋め込み類似検索やAIアプリケーションのために構築されています。
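LangChainとMilvusの組み合わせ方を示す仮定ベースの最小スケッチです。パッケージ名(langchain_milvus / langchain_openai)とその呼び出しは一般的なAPIを想定したもので、本ガイドの実際のコードとは異なる場合があります。

```python
from langchain_openai import OpenAIEmbeddings
from langchain_milvus import Milvus

# Build a vector store backed by a local Milvus Lite file.
vectorstore = Milvus.from_texts(
    texts=["Milvus is a vector database built for AI applications."],
    embedding=OpenAIEmbeddings(),
    connection_args={"uri": "./milvus_demo.db"},
)

# Retrieve the most relevant chunks for a question.
retriever = vectorstore.as_retriever()
docs = retriever.invoke("What is Milvus?")
```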

      @@ -96,7 +97,7 @@ docs[1]
      Document(page_content='Fig. 1. Overview of a LLM-powered autonomous agent system.\nComponent One: Planning#\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\nTask Decomposition#\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\nTask decomposition can be done (1) by LLM with simple prompting like "Steps for XYZ.\\n1.", "What are the subgoals for achieving XYZ?", (2) by using task-specific instructions; e.g. "Write a story outline." for writing a novel, or (3) with human inputs.\nAnother quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\nSelf-Reflection#', metadata={'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/'})
       

      ご覧のように、ドキュメントはすでにチャンクに分割されています。そして、データの内容はAIエージェントに関するものである。

      -

      Milvus Vector Storeを使ったRAGチェーンの構築

      @@ -267,7 +268,7 @@ rag_chain2.with_config(
    "Self-reflection of an AI agent involves the process of synthesizing memories into higher-level inferences over time to guide the agent's future behavior. It serves as a mechanism to create higher-level summaries of past events. One approach to self-reflection involves prompting the language model with the 100 most recent observations and asking it to generate the 3 most salient high-level questions based on those observations. This process helps the AI agent optimize believability in the current moment and over time."
     
    -

    検索条件を変更し、2つ目のソースでドキュメントをフィルタリングすると、このブログソースのコンテンツはクエリの質問とは何の関係もないため、関連する情報のない回答が得られます。

    +

    検索条件を変更し、2つ目のソースによってドキュメントをフィルタリングすると、このブログソースのコンテンツはクエリの質問とは無関係であるため、関連する情報のない回答が得られます。

    rag_chain2.with_config(
         configurable={
             "retriever_search_kwargs": dict(
    @@ -279,4 +280,4 @@ rag_chain2.with_config(
     
    "I'm sorry, but based on the provided context, there is no specific information or statistical data available regarding the self-reflection of an AI agent."
     

    -

    このチュートリアルでは、Milvus LangChain統合の基本的な使用方法とシンプルなRAGアプローチに焦点を当てています。より高度なRAGテクニックについては、advanced rag bootcampを参照してください。

    +

    このチュートリアルでは、Milvus LangChain統合の基本的な使い方とシンプルなRAGアプローチに焦点を当てています。より高度なRAGテクニックについては、advanced rag bootcampを参照してください。

diff --git a/localization/v2.4.x/site/ja/menuStructure/ja.json b/localization/v2.4.x/site/ja/menuStructure/ja.json
index 6f3136b8b..1c0ad3261 100644
--- a/localization/v2.4.x/site/ja/menuStructure/ja.json
+++ b/localization/v2.4.x/site/ja/menuStructure/ja.json
@@ -311,7 +311,7 @@
       "children": []
     },
     {
-      "label": "インメモリーレプリカ",
+      "label": "インメモリ・レプリカ",
       "id": "replica.md",
       "order": 11,
       "children": []
@@ -569,7 +569,7 @@
       "children": []
     },
     {
-      "label": "インストラクター",
+      "label": "講師",
       "id": "embed-with-instructor.md",
       "order": 9,
       "children": []
@@ -641,7 +641,7 @@
     ]
   },
   {
-    "label": "Milvus移住",
+    "label": "Milvusマイグレーション",
     "id": "milvus_migration",
     "isMenu": true,
     "order": 5,
@@ -1585,7 +1585,7 @@
       "children": []
     },
     {
-      "label": "推薦システム",
+      "label": "レコメンダー・システム",
       "id": "recommendation_system.md",
       "order": 6,
       "children": []
diff --git a/localization/v2.4.x/site/ja/reference/architecture/architecture_overview.json b/localization/v2.4.x/site/ja/reference/architecture/architecture_overview.json
index 3881744e4..bf1c1ac37 100644
--- a/localization/v2.4.x/site/ja/reference/architecture/architecture_overview.json
+++ b/localization/v2.4.x/site/ja/reference/architecture/architecture_overview.json
@@ -1 +1 @@
-{"codeList":[],"headingContent":"","anchorList":[{"label":"Milvusアーキテクチャの概要","href":"Milvus-Architecture-Overview","type":1,"isActive":false},{"label":"次のページ","href":"Whats-next","type":2,"isActive":false}]}
\ No newline at end of file
+{"codeList":[],"headingContent":"Milvus Architecture Overview","anchorList":[{"label":"Milvusアーキテクチャの概要","href":"Milvus-Architecture-Overview","type":1,"isActive":false},{"label":"次のページ","href":"Whats-next","type":2,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.4.x/site/ja/reference/architecture/architecture_overview.md b/localization/v2.4.x/site/ja/reference/architecture/architecture_overview.md
index bf96a219c..160ece79e 100644
--- a/localization/v2.4.x/site/ja/reference/architecture/architecture_overview.md
+++ b/localization/v2.4.x/site/ja/reference/architecture/architecture_overview.md
@@ -1,9 +1,8 @@
 ---
 id: architecture_overview.md
 summary: Milvusは、類似検索と人工知能のために特別に構築された、高速で信頼性が高く、安定したベクトルデータベースを提供します。
-title: Milvus・アーキテクチャの概要
+title: Milvusアーキテクチャの概要
 ---
-

    Milvusアーキテクチャの概要

    Faiss、HNSW、DiskANN、SCANNなどの一般的なベクトル検索ライブラリの上に構築されたMilvusは、数百万、数十億、あるいは数兆のベクトルを含む高密度ベクトルデータセットの類似検索のために設計されました。先に進む前に、埋め込み検索の基本原理をよく理解してください。

    +

    Milvusは、Faiss、HNSW、DiskANN、SCANNなどの一般的なベクトル検索ライブラリの上に構築されており、数百万、数十億、あるいは数兆のベクトルを含む高密度ベクトルデータセットの類似検索のために設計されています。先に進む前に、埋め込み検索の基本原理をよく理解してください。

    Milvusはまた、データのシャーディング、ストリーミングデータの取り込み、動的スキーマ、ベクトルとスカラーデータを組み合わせた検索、マルチベクトルとハイブリッド検索、スパースベクトル、その他多くの高度な機能をサポートしています。このプラットフォームはオンデマンドでパフォーマンスを提供し、あらゆる埋め込み検索シナリオに合わせて最適化することができます。最適な可用性と弾力性のために、Kubernetesを使用してMilvusをデプロイすることをお勧めします。

    Milvusは、ストレージとコンピューティングの分離とコンピューティングノードの水平スケーラビリティを特徴とする共有ストレージアーキテクチャを採用しています。データプレーンとコントロールプレーンの分離という原則に従い、Milvusはアクセスレイヤー、コーディネータサービス、ワーカーノード、ストレージという4つのレイヤーで構成されている。これらのレイヤーは、スケーリングやディザスタリカバリに関しては相互に独立している。

Architecture_diagram(アーキテクチャ図)

    +

    図によると、インターフェースは以下のカテゴリーに分類できる:

    +
+- DDL / DCL:createCollection / createPartition / dropCollection / dropPartition / hasCollection / hasPartition
+- DML / プロデュース:insert / delete / upsert
+- DQL:search / query

各分類に対応する呼び出し例は、以下のスケッチを参照してください。
    次のページ

diff --git a/localization/v2.4.x/site/ja/reference/disk_index.json b/localization/v2.4.x/site/ja/reference/disk_index.json
index 79db8adfa..0f0395b02 100644
--- a/localization/v2.4.x/site/ja/reference/disk_index.json
+++ b/localization/v2.4.x/site/ja/reference/disk_index.json
@@ -1 +1 @@
-{"codeList":["...\nDiskIndex:\n MaxDegree: 56\n SearchListSize: 100\n PQCodeBugetGBRatio: 0.125\n SearchCacheBudgetGBRatio: 0.125\n BeamWidthRatio: 4.0\n...\n"],"headingContent":"","anchorList":[{"label":"ディスク上のインデックス","href":"On-disk-Index","type":1,"isActive":false},{"label":"前提条件","href":"Prerequisites","type":2,"isActive":false},{"label":"制限","href":"Limits","type":2,"isActive":false},{"label":"インデックスと検索の設定","href":"Index-and-search-settings","type":2,"isActive":false},{"label":"DiskANN関連のMilvus設定","href":"DiskANN-related-Milvus-configurations","type":2,"isActive":false},{"label":"トラブルシューティング","href":"Troubleshooting","type":2,"isActive":false}]}
\ No newline at end of file
+{"codeList":["...\nDiskIndex:\n MaxDegree: 56\n SearchListSize: 100\n PQCodeBugetGBRatio: 0.125\n SearchCacheBudgetGBRatio: 0.125\n BeamWidthRatio: 4.0\n...\n"],"headingContent":"On-disk Index","anchorList":[{"label":"ディスク上のインデックス","href":"On-disk-Index","type":1,"isActive":false},{"label":"前提条件","href":"Prerequisites","type":2,"isActive":false},{"label":"制限","href":"Limits","type":2,"isActive":false},{"label":"インデックスと検索の設定","href":"Index-and-search-settings","type":2,"isActive":false},{"label":"DiskANN関連のMilvus設定","href":"DiskANN-related-Milvus-configurations","type":2,"isActive":false},{"label":"トラブルシューティング","href":"Troubleshooting","type":2,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.4.x/site/ja/reference/disk_index.md b/localization/v2.4.x/site/ja/reference/disk_index.md
index 13f3a1b7d..4b4794b69 100644
--- a/localization/v2.4.x/site/ja/reference/disk_index.md
+++ b/localization/v2.4.x/site/ja/reference/disk_index.md
@@ -22,7 +22,7 @@ title: ディスク上のインデックス

    この記事では、DiskANN と名付けられたディスク上のインデックス作成アルゴリズムを紹介する。Vamana グラフに基づき、DiskANN は大規模データセット内の効率的な検索を可能にします。

    クエリー性能を向上させるため、各ベクトルフィールドにインデックスタイプを指定することができます。

    -現在、ベクトルフィールドは1つのインデックスタイプしかサポートしていません。インデックスタイプを切り替えると、Milvusは古いインデックスを自動的に削除します。
    +現在、ベクトルフィールドは1つのインデックスタイプしかサポートしていません。Milvusはインデックスタイプを切り替えると、古いインデックスを自動的に削除します。

    前提条件

    DiskANNを使用するには、以下の点に注意してください。

-- DiskANNはデフォルトで有効になっています。オンディスクインデックスよりもインメモリインデックスを好む場合は、より良いパフォーマンスのためにこの機能を無効にすることをお勧めします。
-    - 無効にするには、milvus設定ファイルのqueryNode.enableDiskをfalse に変更します。
+- DiskANNはデフォルトで有効になっています。オンディスクインデックスよりもインメモリインデックスを使用したい場合は、パフォーマンスを向上させるためにこの機能を無効にすることをお勧めします。
+    - 無効にするには、Milvus設定ファイルのqueryNode.enableDiskをfalse に変更してください。
     - 再び有効にするには、queryNode.enableDiskをtrue に設定します。
-- MilvusインスタンスはUbuntu 18.04.6またはそれ以降のリリースで動作します。
+- MilvusインスタンスはUbuntu 18.04.6またはそれ以降のリリースで動作します。
 - MilvusデータパスはNVMe SSDにマウントしてください:
     - Milvusスタンドアロンインスタンスの場合、データパスはインスタンスが動作するコンテナ内の/var/lib/milvus/dataにする。
     - Milvusクラスタインスタンスの場合、データパスはQueryNodesおよびIndexNodesが実行されるコンテナ内の/var/lib/milvus/dataである必要があります。

@@ -68,7 +68,7 @@ title: ディスク上のインデックス

          DiskANNを使用するには、以下を確認してください。

 - データに少なくとも 1 次元の浮動小数点ベクトルだけを使用する。
-- ベクトル間の距離を測定するためにユークリッド距離 (L2) または内積 (IP) のみを使用する。
+- ベクトル間の距離の測定にはユークリッド距離 (L2)、内積 (IP)、または COSINE のみを使用する。

これらの要件を満たすインデックス作成の例は、以下のスケッチを参照してください。
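これらの要件を満たす形でDISKANNインデックスを作成する参考スケッチです(接続先・コレクション名・フィールド名は説明用の仮定)。

```python
from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")  # assumed server address

index_params = client.prepare_index_params()
index_params.add_index(
    field_name="vector",   # a float-vector field (assumed name)
    index_type="DISKANN",
    metric_type="L2",      # L2, IP, or COSINE per the requirements above
)
client.create_index(collection_name="demo", index_params=index_params)
```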

          インデックスと検索の設定

          このトピックでは、パフォーマンスと可用性を向上させるためにワーキングメモリ内で複数のセグメント複製を可能にするMilvusのインメモリレプリカ(複製)メカニズムについて紹介します。

          +

          このトピックでは、Milvusのインメモリレプリカ(レプリケーション)メカニズムについて紹介します。このメカニズムでは、パフォーマンスと可用性を向上させるためにワーキングメモリ内で複数のセグメントレプリケーションを可能にします。

          インメモリ・レプリカの設定方法については、クエリ・ノード関連の設定を参照してください。
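レプリカ数を指定してコレクションをロードする参考スケッチです(接続先とコレクション名は仮定)。replica_number はpymilvus ORMのload() の引数で、指定したレプリカ数を収容できるだけのクエリノードが必要になる点に注意してください。

```python
from pymilvus import Collection, connections

connections.connect(uri="http://localhost:19530")  # assumed server address

collection = Collection("demo")    # an existing collection (assumed name)
collection.load(replica_number=2)  # keep two in-memory replicas of the segments
```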

          概要

          インメモリーレプリカはレプリカグループとして構成される。各レプリカグループにはシャードレプリカが含まれる。各シャードレプリカは、シャード内の成長セグメントと封印セグメント(つまりDMLチャネル)に対応するストリーミングレプリカとヒストリカルレプリカを持つ。

An illustration of how in-memory replica works(インメモリ・レプリカの動作の例)

          レプリカグループ

          レプリカグループは、履歴データとレプリカの処理を担当する複数のクエリノードで構成されます。

          シャード・レプリカ

          シャードレプリカは、同じシャードに属するストリーミングレプリカとヒストリカルレプリカから構成されます。レプリカグループ内のシャードレプリカの数は、指定されたコレクション内のシャードの数によって決まります。

@@ -83,9 +83,9 @@ title: インメモリ・レプリカ

          バランス

          ロードが必要な新しいセグメントは、複数の異なるクエリノードに割り当てられる。少なくとも1つのレプリカが正常にロードされれば、検索リクエストを処理できる。

          -

          キャッシュ

          プロキシはセグメントをクエリノードにマップするキャッシュを維持し、定期的に更新します。プロキシがリクエストを受け取ると、Milvus は検索が必要なすべてのセグメントをキャッシュから取得し、クエリノードに均等に割り当てようとします。

          +

          キャッシュ

プロキシはセグメントをクエリノードにマップするキャッシュを維持し、定期的に更新します。プロキシがリクエストを受け取ると、Milvusは検索が必要なすべてのセグメントをキャッシュから取得し、それらをクエリノードに均等に割り当てようとします。

          成長しているセグメントについては、プロキシはチャネルからクエリノードへのキャッシュも保持し、対応するクエリノードにリクエストを送信する。

          フェイルオーバー

プロキシのキャッシュは常に最新というわけではありません。リクエストが来たときに、いくつかのセグメントやチャンネルが他のクエリノードに移動しているかもしれません。この場合、プロキシはエラー応答を受け取り、キャッシュを更新し、別のクエリノードに割り当てようとします。

プロキシがキャッシュを更新した後もそれを見つけられない場合、セグメントは無視される。これは、セグメントがコンパクト化されている場合に起こりうる。

          -

          キャッシュが正確でない場合、プロキシはセグメントを見逃すことがある。DML チャンネルを持つクエリノード (成長しているセグメント) は、プロキシがキャッシュを比較して更新できる、信頼できるセグメントのリストとともに検索応答を返します。

          +

          キャッシュが正確でない場合、プロキシはセグメントを見逃すことがある。DML チャンネルを持つクエリノード (成長するセグメント) は、プロキシが比較しキャッシュを更新できる、信頼できるセグメントのリストとともに検索応答を返します。

          機能拡張

          プロキシは検索リクエストをクエリノードに完全に均等に割り当てることはできません。リソースのロングテール分布を避けるために、プロキシは他のクエリノード上のアクティブなセグメントを、これらのセグメントも持っているアイドルクエリノードに割り当てます。

diff --git a/localization/v2.4.x/site/ja/release_notes.json b/localization/v2.4.x/site/ja/release_notes.json
index 562d7eaf7..655592892 100644
--- a/localization/v2.4.x/site/ja/release_notes.json
+++ b/localization/v2.4.x/site/ja/release_notes.json
@@ -1 +1 @@
-{"codeList":[],"headingContent":"Release Notes","anchorList":[{"label":"リリースノート","href":"Release-Notes","type":1,"isActive":false},{"label":"v2.4.11","href":"v2411","type":2,"isActive":false},{"label":"v2.4.10","href":"v2410","type":2,"isActive":false},{"label":"v2.4.9","href":"v249","type":2,"isActive":false},{"label":"v2.4.8","href":"v248","type":2,"isActive":false},{"label":"v2.4.6","href":"v246","type":2,"isActive":false},{"label":"v2.4.5","href":"v245","type":2,"isActive":false},{"label":"v2.4.4","href":"v244","type":2,"isActive":false},{"label":"v2.4.3","href":"v243","type":2,"isActive":false},{"label":"v2.4.1","href":"v241","type":2,"isActive":false},{"label":"v2.4.0","href":"v240","type":2,"isActive":false},{"label":"v2.4.0-rc.1","href":"v240-rc1","type":2,"isActive":false}]}
\ No newline at end of file
+{"codeList":[],"headingContent":"Release Notes","anchorList":[{"label":"リリースノート","href":"Release-Notes","type":1,"isActive":false},{"label":"v2.4.13-ホットフィックス","href":"v2413-hotfix","type":2,"isActive":false},{"label":"[非推奨] v2.4.13","href":"Deprecated-v2413","type":2,"isActive":false},{"label":"v2.4.12","href":"v2412","type":2,"isActive":false},{"label":"v2.4.11","href":"v2411","type":2,"isActive":false},{"label":"v2.4.10","href":"v2410","type":2,"isActive":false},{"label":"v2.4.9","href":"v249","type":2,"isActive":false},{"label":"v2.4.8","href":"v248","type":2,"isActive":false},{"label":"v2.4.6","href":"v246","type":2,"isActive":false},{"label":"v2.4.5","href":"v245","type":2,"isActive":false},{"label":"v2.4.4","href":"v244","type":2,"isActive":false},{"label":"v2.4.3","href":"v243","type":2,"isActive":false},{"label":"v2.4.1","href":"v241","type":2,"isActive":false},{"label":"v2.4.0","href":"v240","type":2,"isActive":false},{"label":"v2.4.0-rc.1","href":"v240-rc1","type":2,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.4.x/site/ja/release_notes.md b/localization/v2.4.x/site/ja/release_notes.md
index f76cf3a83..eb205f395 100644
--- a/localization/v2.4.x/site/ja/release_notes.md
+++ b/localization/v2.4.x/site/ja/release_notes.md
@@ -19,6 +19,163 @@ title: リリースノート

          Milvusの新機能をご確認ください!このページでは、各リリースの新機能、改善点、既知の問題、バグ修正についてまとめています。v2.4.0以降の各バージョンのリリースノートはこのセクションにあります。定期的にこのページをご覧いただき、アップデート情報をご確認ください。

          +

          v2.4.13-ホットフィックス

リリース日:2024年10月17日

| Milvusバージョン | Python SDKバージョン | Java SDKバージョン | Node.js SDKバージョン |
| --- | --- | --- | --- |
| 2.4.13-hotfix | 2.4.8 | 2.4.5 | 2.4.9 |
          +

          Milvus v2.4.13-hotfixでは、v2.4.13特有の致命的な問題である、すべてのMetaKVスナップショットがガベージコレクションされている場合にMilvusが再起動後にコレクション情報を取得できない場合がある問題が修正されました(#36933)。現在v2.4.13を実行しているユーザは、できるだけ早い機会にv2.4.13-hotfixにアップグレードすることを推奨します

          +

          重要な修正

+- タイムスタンプがMaxTimestampの場合にオリジナルキーをロードするようにした (#36935)。

          [非推奨] v2.4.13

リリース日:2024年10月12日

| Milvusバージョン | Python SDKバージョン | Java SDKバージョン | Node.js SDKバージョン |
| --- | --- | --- | --- |
| 2.4.13 | 2.4.8 | 2.4.5 | 2.4.9 |
          +

          Milvus 2.4.13では、ダイナミックレプリカロードが導入され、ユーザーはコレクションをリリースして再ロードすることなく、コレクションレプリカの数を調整できるようになりました。また、このバージョンでは、バルクインポート、式の解析、ロードバランシング、および障害回復に関連するいくつかの重大なバグに対処しています。さらに、MMAPリソースの使用とインポートのパフォーマンスが大幅に改善され、システム全体の効率が向上しました。パフォーマンスと安定性を向上させるために、このリリースへのアップグレードを強くお勧めします。

          +

          機能

+- ロードされたコレクションの動的レプリカ調整(#36417)
+- 成長するセグメントタイプにおけるスパースベクトルMMAP(#36565)

          バグ修正

+- フラッシュ性能に関する問題を修正した(#36741)。
+- "[]"内のJSON式のバグを修正(#36722)
+- コンパクトなターゲットがインデックスされていない場合、ネイバーを削除(#36694)
+- Rocksmqのチャネルが一杯の場合のパフォーマンスを改善(#36618)
+- アンピン時のエラーが遅延されない問題を修正した(#36665)。
+- セグメントマネージャで、インポートされたセグメントのメモリリークを修正した(#36631)
+- プロキシ内のクエリノードの不要なヘルスチェックをスキップするようにした(#36553)
+- 条件式のオーバーフローの問題を修正した(#36534)
+- タスクの誤割り当てを防ぐため、タスクを割り当てる前にノードIDを記録するようにした(#36493)
+- クラスタリングコンパクションにおけるデータ競合の問題を解決(#36499)
+- 型照合後の文字列配列の最大長のチェックを追加した(#36497)
+- 混合モードまたはスタンドアロンモードでの競合状態に対処(#36459)
+- ロードとリリースの繰り返し操作後のセグメントの不均衡を修正した(#36543)
+- 停止しているノードからセグメントを移動できない問題を修正(#36475)
+- 一部のセグメントが欠落していた場合でも、セグメント情報を適切に更新するようにした(#36729)
+- スナップショットKVにおいて、etcdトランザクションが最大制限を超えないようにした(#36773)

Improvements

- Enhanced MMAP resource estimation:
  - Improved MMAP-related code in column.h (#36521)
  - Refined resource estimation when loading collections (#36728)
- Performance improvements:
  - Improved expression parsing efficiency by converting Unicode to ASCII (#36676)
  - Enabled parallel production of messages for multiple topics (#36462)
  - Reduced CPU overhead when calculating index file sizes (#36580)
  - Retrieved the message type from the header to minimize unmarshalling (#36454)
  - Optimized the workload-based replica selection policy (#36384)
- Split delete task messages to fit within the maximum message size limit (#36574)
- Added a new RESTful URL for describing import jobs (#36754)
- Optimized import scheduling and added a time-cost metric (#36684)
- Added a balance report log for the query coordinator balancer (#36749)
- Switched to using common GC settings (#36670)
- Added a streaming forward policy switch for the delegator (#36712)
- Enabled manual compaction for collections without indexes (#36581)
- Enabled load balancing across query nodes with different memory capacities (#36625)
- Unified the case of incoming labels in metrics.label (#36616)
- Made transfer channel/segment operations idempotent (#36552)
- Added metrics to monitor import throughput and imported row counts (#36588)
- Prevented the creation of multiple timer objects on targets (#36573)
- Updated the expression version and the HTTP response format (#36467)
- Enhanced garbage collection in snapshot KV (#36793)
- Supported executing methods with context parameters (#36798)

v2.4.12

Release date: September 26, 2024

| Milvus version | Python SDK version | Java SDK version | Node.js SDK version |
| --- | --- | --- | --- |
| 2.4.12 | 2.4.7 | 2.4.4 | 2.4.9 |

Milvus 2.4.12 brings significant enhancements and critical bug fixes. This release resolves data duplication issues and improves failure recovery speed, particularly when handling large numbers of deletions. However, a known issue remains where failure recovery can be slow when large amounts of data are deleted; we are actively working on resolving it.

Improvements

- Implemented graceful stop for the flowgraph manager (#36358)
- Disabled index checks for vector fields that are not loaded (#36280)
- Filtered out delete records that are not hit during delta loading (#36272)
- Improved error handling for std::stoi exceptions (#36296)
- Disallowed keywords as field names or dynamic field names (#36108)
- Added metrics for delete entries in L0 segments (#36227)
- Implemented an L0 forward policy to support remote loading (#36208)
- Added ANN field load checks in the proxy (#36194)
- Enabled support for empty sparse rows (#36061)
- Fixed a security vulnerability (#36156)
- Implemented a stats handler for request/response size metrics (#36118)
- Fixed size estimation for encoded array data (#36379)

Bug fixes

- Fixed a metric type error for collections with two vector fields (#36473)
- Fixed a long-buffering issue that caused message queue receive failures (#36425)
- Implemented a proper compact-to-segments fallback after splits (#36429)
- Resolved a data race issue in the node ID check goroutine (#36377)
- Removed the element type check (#36324)
- Fixed a concurrent access issue for growing and sealed segments (#36288)
- Implemented stateful locks for futures (#36333)
- Fixed offset usage in HybridSearch (#36287, #36253)
- Fixed dirty segment/channel leaks in QueryNode (#36259)
- Fixed duplicate primary key handling (#36274)
- Enforced the metric type setting in search requests (#36279)
- Fixed a metric-clearing issue for stored_index_files_size (#36161)
- Fixed read/write privilege group behavior for global API access (#36145)

          v2.4.11

Bug fixes

- Fixed a panic in the rate-limit interceptor when the database did not exist (#33308)
- Fixed quotacenter metrics collection failing due to incorrect parameters (#33399)
- Fixed a panic when processactivestandby returned an error (#33372)
- Fixed truncated search results in RESTful v2 when nq > 1 (#33363)
- Added a database name field to role operations in RESTful v2 (#33291)
- Fixed the global rate limit not working (#33336)
- Fixed a panic caused by index build failures (#33314)
- Added sparse vector validation in segcore to ensure legality (#33312)
- Removed tasks from syncmgr after task completion (#33303)
- Fixed a partition-key filtering failure during data import (#33277)
- Fixed traceID generation failing when using the noop exporter (#33208)
- Improved query result retrieval (#33179)
- Dropped channel checkpoints to prevent checkpoint-lag metric leaks (#33201)
- Fixed query nodes getting stuck while stopping progress (#33154)
- Fixed segments missing from flush responses (#33061)
- Made submit operations idempotent (#33053)
- Allocated new slices per batch in the streaming reader (#33360)
- Removed offline nodes from resource groups after QueryCoord restart (#33233)
- Removed the l0 compactor from completedCompactor (#33216)
- Reset quota values when initializing the limiter (#33152)
- Fixed an issue where the etcd limit was exceeded (#33041)
- Resolved etcd transaction limit overruns caused by too many fields (#33040)
- Removed RLock re-entry in GetNumRowsOfPartition (#33045)
- Started LeaderCacheObserver before SyncAll (#33035)
- Enabled balancing of released standby channels (#32986)
- Initialized the access logger before server initialization (#32976)
- Enabled the compactor to clear empty segments (#32821)
- Filled delta-log entry numbers and time ranges in l0 compaction (#33004)
- Fixed a proxy crash caused by a shard-leader cache data race (#32971)
- Fixed the time unit of the load-index metric (#32935)
- Fixed an issue where segments on stopping query nodes could not be released successfully (#32929)
- Fixed index resource estimation (#32842)
- Set channel checkpoints to the delta position (#32878)
- Made syncmgr lock keys before returning futures (#32865)
- Ensured inverted indexes only have one segment (#32858)
- Fixed the compaction trigger selecting the same two segments (#32800)
- Fixed an issue where partition names could not be specified in binlog imports (#32730, #33027)
- Made dynamic columns optional in parquet imports (#32738)
- Skipped auto-ID checks when inserting data (#32775)
- Validated row counts of inserted field data against the schema (#32770)
- Added Wrapper and Keepalive for CTraceContext IDs (#32746)
- Fixed the database name not being found in datacoord meta objects (#33412)
- Synced dropped segments for dropped partitions (#33332)
- Fixed quotaCenter metrics collection failing due to improper parameters (#33399)

        v2.4.1

      @@ -704,11 +861,11 @@ title: リリースノート
- Fixed a crash issue in ARM-based GPU images (#31980)
- Fixed regex queries failing to handle text containing newlines (#32569)
- Fixed empty search results caused by GetShardLeaders returning an empty node list (#32685)
- Fixed BulkInsert raising errors when encountering dynamic fields in numpy files (#32596)
- Fixed bugs in the RESTfulV2 interface, including an important fix so that numeric parameters in requests accept numeric input instead of string types (#32485, #32355)
- Fixed a proxy memory leak caused by removing the rate limiter's config-watching events (#32313)
- Fixed the rate limiter incorrectly reporting that a partition could not be found when partitionName was not specified (#32647)
- Added detection of collections in recovering state and not-loaded state to error types (#32447)
- Fixed the negative queryable num entities metric (#32361)

    v2.4.0

Open In Colab | GitHub Repository

This tutorial demonstrates how to build a Retrieval-Augmented Generation (RAG) pipeline with Milvus.

The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

Preparation

Prepare the data

We use the FAQ pages from the Milvus Documentation 2.4.x as the private knowledge in our RAG.

Download the zip file and extract the documents to the folder milvus_docs.

    $ wget https://github.com/milvus-io/milvus-docs/releases/download/v2.4.6-preview/milvus_docs_2.4.x_en.zip
@@ -59,20 +58,17 @@
$ unzip -q milvus_docs_2.4.x_en.zip -d milvus_docs

from glob import glob

text_lines = []

for file_path in glob("milvus_docs/en/faq/*.md", recursive=True):
    with open(file_path, "r") as file:
        file_text = file.read()

    text_lines += file_text.split("# ")
    -

Prepare the embedding model

We initialize the OpenAI client to prepare the embedding model.

from openai import OpenAI

openai_client = OpenAI()

Define a function to generate text embeddings using the OpenAI client. We use the text-embedding-3-small model as an example.

def emb_text(text):
    return (
        openai_client.embeddings.create(input=text, model="text-embedding-3-small")
        .data[0]
        .embedding
    )

@@ -107,17 +103,16 @@
embedding_dim = len(test_embedding)

Create the collection

from pymilvus import MilvusClient

milvus_client = MilvusClient(uri="./milvus_demo.db")

collection_name = "my_rag_collection"

As for the argument of MilvusClient (the sketch after this list illustrates all three options):

- Setting the uri as a local file, e.g. ./milvus.db, is the most convenient method, as it automatically utilizes Milvus Lite to store all data in this file.
- If you have a large scale of data, you can set up a more performant Milvus server on Docker or Kubernetes. In this setup, please use the server uri, e.g. http://localhost:19530, as your uri.
- If you want to use Zilliz Cloud, the fully managed cloud service for Milvus, adjust the uri and token, which correspond to the Public Endpoint and API key in Zilliz Cloud.
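As a minimal sketch of these three connection options (the endpoint and key values below are placeholders, not real credentials):

from pymilvus import MilvusClient

# Option 1: Milvus Lite -- all data is stored in a local file.
client_lite = MilvusClient(uri="./milvus.db")

# Option 2: a standalone/distributed Milvus server on Docker or Kubernetes.
client_server = MilvusClient(uri="http://localhost:19530")

# Option 3: Zilliz Cloud -- use your cluster's Public Endpoint and API key.
client_cloud = MilvusClient(
    uri="https://<your-public-endpoint>",  # placeholder
    token="<your-api-key>",  # placeholder
)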

Check whether the collection already exists, and drop it if it does.

@@ -133,18 +128,17 @@ collection_name = "my_rag_collection"
    consistency_level="Strong",  # Strong consistency level
)
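The hunk above only shows the tail of the collection-creation call. Below is a hedged reconstruction of that step, assuming the inner-product metric and the embedding_dim computed earlier (both assumptions beyond what the diff shows):

# Sketch of the collection-creation step implied by the hunk above.
if milvus_client.has_collection(collection_name):
    milvus_client.drop_collection(collection_name)

milvus_client.create_collection(
    collection_name=collection_name,
    dimension=embedding_dim,  # from the test embedding computed earlier
    metric_type="IP",  # assumption: inner-product distance
    consistency_level="Strong",  # Strong consistency level, as shown in the diff
)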
Insert data

Iterate through the text lines, create embeddings, and then insert the data into Milvus.

Here is a new field, text, which is not defined in the collection schema. It will be automatically added to the reserved JSON dynamic field, which can be treated as a normal field at a high level.

from tqdm import tqdm

data = []

for i, line in enumerate(tqdm(text_lines, desc="Creating embeddings")):
    data.append({"id": i, "vector": emb_text(line), "text": line})

milvus_client.insert(collection_name=collection_name, data=data)

Creating embeddings: 100%|██████████| 72/72 [00:27<00:00,  2.67it/s]
     
     
    @@ -188,11 +182,10 @@ milvus_client.insert(collection_name=collection_name, data=data)
     
import json

retrieved_lines_with_distances = [
    (res["entity"]["text"], res["distance"]) for res in search_res[0]
]
print(json.dumps(retrieved_lines_with_distances, indent=4))
    [
         [
             " Where does Milvus store data?\n\nMilvus deals with two types of data, inserted data and metadata. \n\nInserted data, including vector data, scalar data, and collection-specific schema, are stored in persistent storage as incremental log. Milvus supports multiple object storage backends, including [MinIO](https://min.io/), [AWS S3](https://aws.amazon.com/s3/?nc1=h_ls), [Google Cloud Storage](https://cloud.google.com/storage?hl=en#object-storage-for-companies-of-all-sizes) (GCS), [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs), [Alibaba Cloud OSS](https://www.alibabacloud.com/product/object-storage-service), and [Tencent Cloud Object Storage](https://www.tencentcloud.com/products/cos) (COS).\n\nMetadata are generated within Milvus. Each Milvus module has its own metadata that are stored in etcd.\n\n###",
    @@ -213,7 +206,7 @@ retrieved_lines_with_distances = [
         [line_with_distance[0] for line_with_distance in retrieved_lines_with_distances]
     )
     
Define the system and user prompts for the language model. This prompt is assembled with the documents retrieved from Milvus; a plausible sketch of the user prompt follows the system prompt below.

    SYSTEM_PROMPT = """
     Human: You are an AI assistant. You are able to find answers to the questions from the contextual passage snippets provided.
     """
    diff --git a/localization/v2.4.x/site/ja/tutorials/graph_rag_with_milvus.md b/localization/v2.4.x/site/ja/tutorials/graph_rag_with_milvus.md
    index 5581cb6c7..0f106225b 100644
    --- a/localization/v2.4.x/site/ja/tutorials/graph_rag_with_milvus.md
    +++ b/localization/v2.4.x/site/ja/tutorials/graph_rag_with_milvus.md
    @@ -18,7 +18,8 @@ title: MilvusによるグラフRAG
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           

Open In Colab | GitHub Repository

The widespread adoption of large language models highlights the importance of improving the accuracy and relevance of their responses. Retrieval-Augmented Generation (RAG) enhances models with external knowledge bases, providing more contextual information and mitigating problems such as hallucination and insufficient knowledge. However, relying solely on the naive RAG paradigm has its limitations, especially when dealing with complex entity relationships and multi-hop questions, where the model often struggles to provide accurate answers.

Introducing knowledge graphs (KGs) into the RAG system offers a new solution. KGs present entities and their relationships in a structured way, providing more precise retrieval information and helping RAG handle complex question-answering tasks better. KG-RAG is still in its early stages, and there is no consensus on how to effectively retrieve entities and relationships from KGs, or how to integrate vector similarity search with graph structures.

In this notebook, we introduce a simple yet powerful approach that greatly improves performance in this scenario. It is a straightforward RAG paradigm of multi-way retrieval followed by reranking, but it implements graph RAG logically and achieves state-of-the-art performance in handling multi-hop questions. Let's see how it is implemented.

    @@ -79,7 +80,7 @@ embedding_model = OpenAIEmbeddings(model=

As for the args of MilvusClient:

- Setting the uri as a local file, e.g. ./milvus.db, is the most convenient method, as it automatically utilizes Milvus Lite to store all data in this file.
- If you have a large scale of data, you can set up a more performant Milvus server on Docker or Kubernetes. In this setup, please use the server uri, e.g. http://localhost:19530, as your uri.
- If you want to use Zilliz Cloud, the fully managed cloud service for Milvus, adjust the uri and token, which correspond to the Public Endpoint and API key in Zilliz Cloud.
@@ -98,7 +99,7 @@

Data Preparation

As an example, we use a nano dataset that introduces the relationships within the Bernoulli family and with Euler. The nano dataset contains four passages and a corresponding set of triplets, where each triplet contains a subject, a predicate, and an object. In practice, you can use any approach to extract the triplets from your own corpus.

    nano_dataset = [
         {
             "passage": "Jakob Bernoulli (1654–1705): Jakob was one of the earliest members of the Bernoulli family to gain prominence in mathematics. He made significant contributions to calculus, particularly in the development of the theory of probability. He is known for the Bernoulli numbers and the Bernoulli theorem, a precursor to the law of large numbers. He was the older brother of Johann Bernoulli, another influential mathematician, and the two had a complex relationship that involved both collaboration and rivalry.",
    @@ -162,7 +163,7 @@ embedding_model = OpenAIEmbeddings(model=
     

We construct the entities and relations as follows:

- Entities are the subject or object in a triplet, so we directly extract them from the triplets.
- Here we construct the concept of relation by directly concatenating the subject, predicate, and object with a space in between.

We also prepare a dict to map entity IDs to relation IDs, and another dict to map relation IDs to passage IDs for later use; a minimal sketch of this bookkeeping follows.
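A minimal sketch of that bookkeeping, assuming the hypothetical names entityid_2_relationids and relationid_2_passageids (IDs here are simply list indices built from the triplets):

from collections import defaultdict

entities = []
relations = []
passages = []
entityid_2_relationids = defaultdict(list)
relationid_2_passageids = defaultdict(list)

for passage_id, item in enumerate(nano_dataset):
    passages.append(item["passage"])
    for subj, pred, obj in item["triplets"]:
        for ent in (subj, obj):
            if ent not in entities:
                entities.append(ent)
        # Build the relation text by joining subject, predicate, and object.
        relation = " ".join((subj, pred, obj))
        if relation not in relations:
            relations.append(relation)
            relation_id = len(relations) - 1
            entityid_2_relationids[entities.index(subj)].append(relation_id)
            entityid_2_relationids[entities.index(obj)].append(relation_id)
        relationid_2_passageids[relations.index(relation)].append(passage_id)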

@@ -212,7 +213,7 @@
create_milvus_collection(entity_col_name)
create_milvus_collection(relation_col_name)
create_milvus_collection(passage_col_name)
Insert the data into the Milvus collections along with their metadata information. The metadata includes the passage ID and the adjacent entity or relation IDs.

    def milvus_insert(
         collection_name: str,
         text_list: list[str],
    @@ -305,7 +306,7 @@ relation_search_res = milvus_client.search(
         
       
     

Here we construct the adjacency matrix and use matrix multiplication to calculate the adjacency mapping information within a few degrees. In this way, we can quickly obtain information about any degree of expansion.

    # Construct the adjacency matrix of entities and relations where the value of the adjacency matrix is 1 if an entity is related to a relation, otherwise 0.
     entity_relation_adj = np.zeros((len(entities), len(relations)))
     for entity_id, entity in enumerate(entities):
    @@ -332,7 +333,7 @@ relation_adj_target_degree = relation_adj_1_degree
     
     entity_relation_adj_target_degree = entity_adj_target_degree @ entity_relation_adj
     
By taking values from the target-degree expansion matrix and expanding the corresponding degrees from the retrieved entities and relations, we can easily obtain all the relations of the subgraph.

    expanded_relations_from_relation = set()
     expanded_relations_from_entity = set()
     # You can set the similarity threshold here to guarantee the quality of the retrieved ones.
    diff --git a/localization/v2.4.x/site/ja/tutorials/hybrid_search_with_milvus.json b/localization/v2.4.x/site/ja/tutorials/hybrid_search_with_milvus.json
    index 6c13e9a9f..c60a22c22 100644
    --- a/localization/v2.4.x/site/ja/tutorials/hybrid_search_with_milvus.json
    +++ b/localization/v2.4.x/site/ja/tutorials/hybrid_search_with_milvus.json
    @@ -1 +1 @@
    -{"codeList":["$ pip install --upgrade pymilvus \"pymilvus[model]\"\n","# Run this cell to download the dataset\n$ wget http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv\n","import pandas as pd\n\nfile_path = \"quora_duplicate_questions.tsv\"\ndf = pd.read_csv(file_path, sep=\"\\t\")\nquestions = set()\nfor _, row in df.iterrows():\n    obj = row.to_dict()\n    questions.add(obj[\"question1\"][:512])\n    questions.add(obj[\"question2\"][:512])\n    if len(questions) > 500:  # Skip this if you want to use the full dataset\n        break\n\ndocs = list(questions)\n\n# example question\nprint(docs[0])\n","from milvus_model.hybrid import BGEM3EmbeddingFunction\n\nef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\ndense_dim = ef.dim[\"dense\"]\n\n# Generate embeddings using BGE-M3 model\ndocs_embeddings = ef(docs)\n","from pymilvus import (\n    connections,\n    utility,\n    FieldSchema,\n    CollectionSchema,\n    DataType,\n    Collection,\n)\n\n# Connect to Milvus given URI\nconnections.connect(uri=\"./milvus.db\")\n\n# Specify the data schema for the new Collection\nfields = [\n    # Use auto generated id as primary key\n    FieldSchema(\n        name=\"pk\", dtype=DataType.VARCHAR, is_primary=True, auto_id=True, max_length=100\n    ),\n    # Store the original text to retrieve based on semantically distance\n    FieldSchema(name=\"text\", dtype=DataType.VARCHAR, max_length=512),\n    # Milvus now supports both sparse and dense vectors,\n    # we can store each in a separate field to conduct hybrid search on both vectors\n    FieldSchema(name=\"sparse_vector\", dtype=DataType.SPARSE_FLOAT_VECTOR),\n    FieldSchema(name=\"dense_vector\", dtype=DataType.FLOAT_VECTOR, dim=dense_dim),\n]\nschema = CollectionSchema(fields)\n\n# Create collection (drop the old one if exists)\ncol_name = \"hybrid_demo\"\nif utility.has_collection(col_name):\n    Collection(col_name).drop()\ncol = Collection(col_name, schema, consistency_level=\"Strong\")\n\n# To make vector search efficient, we need to create indices for the vector fields\nsparse_index = {\"index_type\": \"SPARSE_INVERTED_INDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"sparse_vector\", sparse_index)\ndense_index = {\"index_type\": \"AUTOINDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"dense_vector\", dense_index)\ncol.load()\n","# For efficiency, we insert 50 records in each small batch\nfor i in range(0, len(docs), 50):\n    batched_entities = [\n        docs[i : i + 50],\n        docs_embeddings[\"sparse\"][i : i + 50],\n        docs_embeddings[\"dense\"][i : i + 50],\n    ]\n    col.insert(batched_entities)\nprint(\"Number of entities inserted:\", col.num_entities)\n","# Enter your search query\nquery = input(\"Enter your search query: \")\nprint(query)\n\n# Generate embeddings for the query\nquery_embeddings = ef([query])\n# print(query_embeddings)\n","from pymilvus import (\n    AnnSearchRequest,\n    WeightedRanker,\n)\n\n\ndef dense_search(col, query_dense_embedding, limit=10):\n    search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    res = col.search(\n        [query_dense_embedding],\n        anns_field=\"dense_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef sparse_search(col, query_sparse_embedding, limit=10):\n    search_params = {\n        \"metric_type\": \"IP\",\n        \"params\": {},\n    }\n    res = col.search(\n        [query_sparse_embedding],\n        
anns_field=\"sparse_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef hybrid_search(\n    col,\n    query_dense_embedding,\n    query_sparse_embedding,\n    sparse_weight=1.0,\n    dense_weight=1.0,\n    limit=10,\n):\n    dense_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    dense_req = AnnSearchRequest(\n        [query_dense_embedding], \"dense_vector\", dense_search_params, limit=limit\n    )\n    sparse_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    sparse_req = AnnSearchRequest(\n        [query_sparse_embedding], \"sparse_vector\", sparse_search_params, limit=limit\n    )\n    rerank = WeightedRanker(sparse_weight, dense_weight)\n    res = col.hybrid_search(\n        [sparse_req, dense_req], rerank=rerank, limit=limit, output_fields=[\"text\"]\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n","dense_results = dense_search(col, query_embeddings[\"dense\"][0])\nsparse_results = sparse_search(col, query_embeddings[\"sparse\"][0])\nhybrid_results = hybrid_search(\n    col,\n    query_embeddings[\"dense\"][0],\n    query_embeddings[\"sparse\"][0],\n    sparse_weight=0.7,\n    dense_weight=1.0,\n)\n","def doc_text_formatting(ef, query, docs):\n    tokenizer = ef.model.tokenizer\n    query_tokens_ids = tokenizer.encode(query, return_offsets_mapping=True)\n    query_tokens = tokenizer.convert_ids_to_tokens(query_tokens_ids)\n    formatted_texts = []\n\n    for doc in docs:\n        ldx = 0\n        landmarks = []\n        encoding = tokenizer.encode_plus(doc, return_offsets_mapping=True)\n        tokens = tokenizer.convert_ids_to_tokens(encoding[\"input_ids\"])[1:-1]\n        offsets = encoding[\"offset_mapping\"][1:-1]\n        for token, (start, end) in zip(tokens, offsets):\n            if token in query_tokens:\n                if len(landmarks) != 0 and start == landmarks[-1]:\n                    landmarks[-1] = end\n                else:\n                    landmarks.append(start)\n                    landmarks.append(end)\n        close = False\n        formatted_text = \"\"\n        for i, c in enumerate(doc):\n            if ldx == len(landmarks):\n                pass\n            elif i == landmarks[ldx]:\n                if close:\n                    formatted_text += \"\"\n                else:\n                    formatted_text += \"\"\n                close = not close\n                ldx = ldx + 1\n            formatted_text += c\n        if close is True:\n            formatted_text += \"\"\n        formatted_texts.append(formatted_text)\n    return formatted_texts\n","from IPython.display import Markdown, display\n\n# Dense search results\ndisplay(Markdown(\"**Dense Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, dense_results)\nfor result in dense_results:\n    display(Markdown(result))\n\n# Sparse search results\ndisplay(Markdown(\"\\n**Sparse Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, sparse_results)\nfor result in formatted_results:\n    display(Markdown(result))\n\n# Hybrid search results\ndisplay(Markdown(\"\\n**Hybrid Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, hybrid_results)\nfor result in formatted_results:\n    display(Markdown(result))\n"],"headingContent":"","anchorList":[{"label":"Milvusによるハイブリッド検索","href":"Hybrid-Search-with-Milvus","type":1,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ pip install --upgrade pymilvus \"pymilvus[model]\"\n","# Run this cell to download the dataset\n$ wget http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv\n","import pandas as pd\n\nfile_path = \"quora_duplicate_questions.tsv\"\ndf = pd.read_csv(file_path, sep=\"\\t\")\nquestions = set()\nfor _, row in df.iterrows():\n    obj = row.to_dict()\n    questions.add(obj[\"question1\"][:512])\n    questions.add(obj[\"question2\"][:512])\n    if len(questions) > 500:  # Skip this if you want to use the full dataset\n        break\n\ndocs = list(questions)\n\n# example question\nprint(docs[0])\n","from milvus_model.hybrid import BGEM3EmbeddingFunction\n\nef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\ndense_dim = ef.dim[\"dense\"]\n\n# Generate embeddings using BGE-M3 model\ndocs_embeddings = ef(docs)\n","from pymilvus import (\n    connections,\n    utility,\n    FieldSchema,\n    CollectionSchema,\n    DataType,\n    Collection,\n)\n\n# Connect to Milvus given URI\nconnections.connect(uri=\"./milvus.db\")\n\n# Specify the data schema for the new Collection\nfields = [\n    # Use auto generated id as primary key\n    FieldSchema(\n        name=\"pk\", dtype=DataType.VARCHAR, is_primary=True, auto_id=True, max_length=100\n    ),\n    # Store the original text to retrieve based on semantically distance\n    FieldSchema(name=\"text\", dtype=DataType.VARCHAR, max_length=512),\n    # Milvus now supports both sparse and dense vectors,\n    # we can store each in a separate field to conduct hybrid search on both vectors\n    FieldSchema(name=\"sparse_vector\", dtype=DataType.SPARSE_FLOAT_VECTOR),\n    FieldSchema(name=\"dense_vector\", dtype=DataType.FLOAT_VECTOR, dim=dense_dim),\n]\nschema = CollectionSchema(fields)\n\n# Create collection (drop the old one if exists)\ncol_name = \"hybrid_demo\"\nif utility.has_collection(col_name):\n    Collection(col_name).drop()\ncol = Collection(col_name, schema, consistency_level=\"Strong\")\n\n# To make vector search efficient, we need to create indices for the vector fields\nsparse_index = {\"index_type\": \"SPARSE_INVERTED_INDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"sparse_vector\", sparse_index)\ndense_index = {\"index_type\": \"AUTOINDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"dense_vector\", dense_index)\ncol.load()\n","# For efficiency, we insert 50 records in each small batch\nfor i in range(0, len(docs), 50):\n    batched_entities = [\n        docs[i : i + 50],\n        docs_embeddings[\"sparse\"][i : i + 50],\n        docs_embeddings[\"dense\"][i : i + 50],\n    ]\n    col.insert(batched_entities)\nprint(\"Number of entities inserted:\", col.num_entities)\n","# Enter your search query\nquery = input(\"Enter your search query: \")\nprint(query)\n\n# Generate embeddings for the query\nquery_embeddings = ef([query])\n# print(query_embeddings)\n","from pymilvus import (\n    AnnSearchRequest,\n    WeightedRanker,\n)\n\n\ndef dense_search(col, query_dense_embedding, limit=10):\n    search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    res = col.search(\n        [query_dense_embedding],\n        anns_field=\"dense_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef sparse_search(col, query_sparse_embedding, limit=10):\n    search_params = {\n        \"metric_type\": \"IP\",\n        \"params\": {},\n    }\n    res = col.search(\n        [query_sparse_embedding],\n        
anns_field=\"sparse_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef hybrid_search(\n    col,\n    query_dense_embedding,\n    query_sparse_embedding,\n    sparse_weight=1.0,\n    dense_weight=1.0,\n    limit=10,\n):\n    dense_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    dense_req = AnnSearchRequest(\n        [query_dense_embedding], \"dense_vector\", dense_search_params, limit=limit\n    )\n    sparse_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    sparse_req = AnnSearchRequest(\n        [query_sparse_embedding], \"sparse_vector\", sparse_search_params, limit=limit\n    )\n    rerank = WeightedRanker(sparse_weight, dense_weight)\n    res = col.hybrid_search(\n        [sparse_req, dense_req], rerank=rerank, limit=limit, output_fields=[\"text\"]\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n","dense_results = dense_search(col, query_embeddings[\"dense\"][0])\nsparse_results = sparse_search(col, query_embeddings[\"sparse\"]._getrow(0))\nhybrid_results = hybrid_search(\n    col,\n    query_embeddings[\"dense\"][0],\n    query_embeddings[\"sparse\"]._getrow(0),\n    sparse_weight=0.7,\n    dense_weight=1.0,\n)\n","def doc_text_formatting(ef, query, docs):\n    tokenizer = ef.model.tokenizer\n    query_tokens_ids = tokenizer.encode(query, return_offsets_mapping=True)\n    query_tokens = tokenizer.convert_ids_to_tokens(query_tokens_ids)\n    formatted_texts = []\n\n    for doc in docs:\n        ldx = 0\n        landmarks = []\n        encoding = tokenizer.encode_plus(doc, return_offsets_mapping=True)\n        tokens = tokenizer.convert_ids_to_tokens(encoding[\"input_ids\"])[1:-1]\n        offsets = encoding[\"offset_mapping\"][1:-1]\n        for token, (start, end) in zip(tokens, offsets):\n            if token in query_tokens:\n                if len(landmarks) != 0 and start == landmarks[-1]:\n                    landmarks[-1] = end\n                else:\n                    landmarks.append(start)\n                    landmarks.append(end)\n        close = False\n        formatted_text = \"\"\n        for i, c in enumerate(doc):\n            if ldx == len(landmarks):\n                pass\n            elif i == landmarks[ldx]:\n                if close:\n                    formatted_text += \"\"\n                else:\n                    formatted_text += \"\"\n                close = not close\n                ldx = ldx + 1\n            formatted_text += c\n        if close is True:\n            formatted_text += \"\"\n        formatted_texts.append(formatted_text)\n    return formatted_texts\n","from IPython.display import Markdown, display\n\n# Dense search results\ndisplay(Markdown(\"**Dense Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, dense_results)\nfor result in dense_results:\n    display(Markdown(result))\n\n# Sparse search results\ndisplay(Markdown(\"\\n**Sparse Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, sparse_results)\nfor result in formatted_results:\n    display(Markdown(result))\n\n# Hybrid search results\ndisplay(Markdown(\"\\n**Hybrid Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, hybrid_results)\nfor result in formatted_results:\n    display(Markdown(result))\n"],"headingContent":"Hybrid Search with Milvus","anchorList":[{"label":"Milvusを使ったハイブリッド検索","href":"Hybrid-Search-with-Milvus","type":1,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/ja/tutorials/hybrid_search_with_milvus.md b/localization/v2.4.x/site/ja/tutorials/hybrid_search_with_milvus.md
    index b87fa92cc..2cfa6019b 100644
    --- a/localization/v2.4.x/site/ja/tutorials/hybrid_search_with_milvus.md
    +++ b/localization/v2.4.x/site/ja/tutorials/hybrid_search_with_milvus.md
    @@ -1,10 +1,9 @@
     ---
     id: hybrid_search_with_milvus.md
summary: Hybrid search with Milvus
-title: Hybrid search with Milvus
+title: Hybrid search using Milvus
     ---

Hybrid Search with Milvus

Open In Colab | GitHub Repository

In this tutorial, we demonstrate how to conduct hybrid search with Milvus and the BGE-M3 model. The BGE-M3 model can convert text into dense and sparse vectors. Milvus supports storing both types of vectors in one collection, allowing for hybrid search that enhances result relevance.

Milvus supports dense, sparse, and hybrid retrieval methods:

- Dense retrieval: utilizes semantic context to understand the meaning behind queries.
- Sparse retrieval: emphasizes keyword matching to find results based on specific terms, equivalent to full-text search.
- Hybrid retrieval: combines both dense and sparse approaches, capturing the full context and specific keywords for comprehensive search results. A conceptual sketch of the weighted fusion it relies on follows this list.
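As a conceptual sketch only (not the exact normalization Milvus's WeightedRanker applies internally), the weighted re-ranking used by hybrid retrieval can be thought of as a weighted sum of per-field scores:

# Conceptual sketch: fuse per-field scores with weights. Function and
# variable names are illustrative, not part of the Milvus API.
def fuse_scores(sparse_scores, dense_scores, sparse_weight=0.7, dense_weight=1.0):
    all_ids = set(sparse_scores) | set(dense_scores)
    return {
        doc_id: sparse_weight * sparse_scores.get(doc_id, 0.0)
        + dense_weight * dense_scores.get(doc_id, 0.0)
        for doc_id in all_ids
    }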
By integrating these methods, Milvus hybrid search balances semantic and lexical similarities, improving the overall relevance of search outcomes. This notebook walks through the process of setting up and using these search strategies, highlighting their effectiveness in various search scenarios.

Dependencies and Environment

    $ pip install --upgrade pymilvus "pymilvus[model]"
     

Download the Dataset

To demonstrate search, we need a corpus of documents. Let's use the Quora Duplicate Questions dataset and place it in a local directory.

    @@ -39,22 +39,21 @@ $ wget http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv

Load and Prepare the Data

Load the dataset and prepare a small corpus for search.

import pandas as pd

file_path = "quora_duplicate_questions.tsv"
df = pd.read_csv(file_path, sep="\t")
questions = set()
for _, row in df.iterrows():
    obj = row.to_dict()
    questions.add(obj["question1"][:512])
    questions.add(obj["question2"][:512])
    if len(questions) > 500:  # Skip this if you want to use the full dataset
        break

docs = list(questions)

# example question
print(docs[0])
    What is the strongest Kevlar cord?
     

Use the BGE-M3 Model for Embeddings

The BGE-M3 model can embed texts as both dense and sparse vectors.

@@ -66,7 +65,6 @@
dense_dim = ef.dim["dense"]
# Generate embeddings using BGE-M3 model
docs_embeddings = ef(docs)
    Fetching 30 files: 100%|██████████| 30/30 [00:00<00:00, 302473.85it/s]
     Inference Embeddings: 100%|██████████| 32/32 [01:59<00:00,  3.74s/it]
     
@@ -92,23 +90,23 @@
connections.connect(uri="./milvus.db")

# Specify the data schema for the new Collection
fields = [
    # Use auto generated id as primary key
    FieldSchema(
        name="pk", dtype=DataType.VARCHAR, is_primary=True, auto_id=True, max_length=100
    ),
    # Store the original text to retrieve based on semantic distance
    FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=512),
    # Milvus supports both sparse and dense vectors, so we can store each in
    # a separate field to conduct hybrid search on both vectors
    FieldSchema(name="sparse_vector", dtype=DataType.SPARSE_FLOAT_VECTOR),
    FieldSchema(name="dense_vector", dtype=DataType.FLOAT_VECTOR, dim=dense_dim),
]
schema = CollectionSchema(fields)

# Create collection (drop the old one if it exists)
col_name = "hybrid_demo"
if utility.has_collection(col_name):
    Collection(col_name).drop()
col = Collection(col_name, schema, consistency_level="Strong")

# To make vector search efficient, we need to create indices for the vector fields
sparse_index = {"index_type": "SPARSE_INVERTED_INDEX", "metric_type": "IP"}
col.create_index("sparse_vector", sparse_index)
dense_index = {"index_type": "AUTOINDEX", "metric_type": "IP"}
col.create_index("dense_vector", dense_index)
col.load()

Insert Data into the Milvus Collection

Insert the documents and their embeddings into the collection.

    # For efficiency, we insert 50 records in each small batch
     for i in range(0, len(docs), 50):
         batched_entities = [
@@ -140,7 +137,6 @@
query = input("Enter your search query: ")
print(query)
     query_embeddings = ef([query])
     # print(query_embeddings)
     
    How to start learning programming?
     

First, let's prepare some helpful functions for the searches:

@@ -154,61 +150,63 @@
from pymilvus import (
    AnnSearchRequest,
    WeightedRanker,
)


def dense_search(col, query_dense_embedding, limit=10):
    search_params = {"metric_type": "IP", "params": {}}
    res = col.search(
        [query_dense_embedding],
        anns_field="dense_vector",
        limit=limit,
        output_fields=["text"],
        param=search_params,
    )[0]
    return [hit.get("text") for hit in res]


def sparse_search(col, query_sparse_embedding, limit=10):
    search_params = {
        "metric_type": "IP",
        "params": {},
    }
    res = col.search(
        [query_sparse_embedding],
        anns_field="sparse_vector",
        limit=limit,
        output_fields=["text"],
        param=search_params,
    )[0]
    return [hit.get("text") for hit in res]


def hybrid_search(
    col,
    query_dense_embedding,
    query_sparse_embedding,
    sparse_weight=1.0,
    dense_weight=1.0,
    limit=10,
):
    dense_search_params = {"metric_type": "IP", "params": {}}
    dense_req = AnnSearchRequest(
        [query_dense_embedding], "dense_vector", dense_search_params, limit=limit
    )
    sparse_search_params = {"metric_type": "IP", "params": {}}
    sparse_req = AnnSearchRequest(
        [query_sparse_embedding], "sparse_vector", sparse_search_params, limit=limit
    )
    rerank = WeightedRanker(sparse_weight, dense_weight)
    res = col.hybrid_search(
        [sparse_req, dense_req], rerank=rerank, limit=limit, output_fields=["text"]
    )[0]
    return [hit.get("text") for hit in res]

Let's run the three different searches with the defined functions:

dense_results = dense_search(col, query_embeddings["dense"][0])
sparse_results = sparse_search(col, query_embeddings["sparse"]._getrow(0))
hybrid_results = hybrid_search(
    col,
    query_embeddings["dense"][0],
    query_embeddings["sparse"]._getrow(0),
    sparse_weight=0.7,
    dense_weight=1.0,
)
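A note on the _getrow(0) change above: the "sparse" output of BGEM3EmbeddingFunction is a SciPy sparse matrix, and this diff assumes that _getrow (SciPy's row accessor) returns the single-row form that col.search expects here, whereas plain [0] indexing can yield a 2-D (1, dim) slice:

# The "sparse" embeddings are a SciPy sparse matrix; take one query's row.
sparse_vecs = query_embeddings["sparse"]
query_sparse = sparse_vecs._getrow(0)  # single sparse row for the first query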
    @@ -250,9 +248,7 @@ hybrid_results = hybrid_search(
                 formatted_text += "</span>"
             formatted_texts.append(formatted_text)
         return formatted_texts

Then we can display the search results with the query's matched text highlighted:

    from IPython.display import Markdown, display
     
    @@ -260,21 +256,20 @@ hybrid_results = hybrid_search(
     display(Markdown("**Dense Search Results:**"))
     formatted_results = doc_text_formatting(ef, query, dense_results)
     for result in dense_results:
    -display(Markdown(result))
    +    display(Markdown(result))
     
     # Sparse search results
     display(Markdown("\n**Sparse Search Results:**"))
     formatted_results = doc_text_formatting(ef, query, sparse_results)
     for result in formatted_results:
    -display(Markdown(result))
    +    display(Markdown(result))
     
     # Hybrid search results
     display(Markdown("\n**Hybrid Search Results:**"))
     formatted_results = doc_text_formatting(ef, query, hybrid_results)
     for result in formatted_results:
    -display(Markdown(result))
    +    display(Markdown(result))
     
    -

Dense Search Results:

What is the best way to start learning robotics?

How do I learn a computer language like java?

@@ -292,27 +287,27 @@

What is the alternative to machine learning?

How to create a new terminal and a new shell in Linux using C programming?

How to create a new shell in a new terminal using C programming (Linux terminal)?

Which business is better to start in Hyderabad?

Which business is better to start in Hyderabad?

What is the best way to start robotics? Which is the best development board I can start working on?

What math does a complete newbie need to understand algorithms for computer programming? What books on algorithms are suitable for a complete beginner?

How do I make life suit me and stop being abused mentally and emotionally?

Hybrid Search Results:

What is the best way to start robotics? Which is the best development board I can start working on?

What is Java programming? How to learn the Java programming language?

What is the best way to learn robotics?

How do I prepare for UPSC?

How can I learn physics easily?

What is the best way to learn French?

How can I become fluent in English?

How do I learn computer security?

How do I learn information security?

How do I learn a computer language like java?

What is the alternative to machine learning?

How to create a new terminal and a new shell in Linux using C programming?

How to create a new shell in a new terminal using C programming (Linux terminal)?

Which business is better to start in Hyderabad?

Which business is better to start in Hyderabad?

What math does a complete newbie need to understand algorithms for computer programming? What books on algorithms are suitable for a complete beginner?

How do I make life suit me and stop being abused mentally and emotionally?

Quick Deploy

To learn how to start an online demo with this tutorial, please refer to the example application.

    diff --git a/localization/v2.4.x/site/ja/tutorials/image_similarity_search.json b/localization/v2.4.x/site/ja/tutorials/image_similarity_search.json index 9d95b845e..a9b1eecf8 100644 --- a/localization/v2.4.x/site/ja/tutorials/image_similarity_search.json +++ b/localization/v2.4.x/site/ja/tutorials/image_similarity_search.json @@ -1 +1 @@ -{"codeList":["!wget https://github.com/milvus-io/pymilvus-assets/releases/download/imagedata/reverse_image_search.zip\n!unzip -q -o reverse_image_search.zip\n","$ pip install pymilvus --upgrade\n$ pip install timm\n","import torch\nfrom PIL import Image\nimport timm\nfrom sklearn.preprocessing import normalize\nfrom timm.data import resolve_data_config\nfrom timm.data.transforms_factory import create_transform\n\n\nclass FeatureExtractor:\n def __init__(self, modelname):\n # Load the pre-trained model\n self.model = timm.create_model(\n modelname, pretrained=True, num_classes=0, global_pool=\"avg\"\n )\n self.model.eval()\n\n # Get the input size required by the model\n self.input_size = self.model.default_cfg[\"input_size\"]\n\n config = resolve_data_config({}, model=modelname)\n # Get the preprocessing function provided by TIMM for the model\n self.preprocess = create_transform(**config)\n\n def __call__(self, imagepath):\n # Preprocess the input image\n input_image = Image.open(imagepath).convert(\"RGB\") # Convert to RGB if needed\n input_image = self.preprocess(input_image)\n\n # Convert the image to a PyTorch tensor and add a batch dimension\n input_tensor = input_image.unsqueeze(0)\n\n # Perform inference\n with torch.no_grad():\n output = self.model(input_tensor)\n\n # Extract the feature vector\n feature_vector = output.squeeze().numpy()\n\n return normalize(feature_vector.reshape(1, -1), norm=\"l2\").flatten()\n","from pymilvus import MilvusClient\n\n# Set up a Milvus client\nclient = MilvusClient(uri=\"example.db\")\n# Create a collection in quick setup mode\nif client.has_collection(collection_name=\"image_embeddings\"):\n client.drop_collection(collection_name=\"image_embeddings\")\nclient.create_collection(\n collection_name=\"image_embeddings\",\n vector_field_name=\"vector\",\n dimension=512,\n auto_id=True,\n enable_dynamic_field=True,\n metric_type=\"COSINE\",\n)\n","import os\n\nextractor = FeatureExtractor(\"resnet34\")\n\nroot = \"./train\"\ninsert = True\nif insert is True:\n for dirpath, foldername, filenames in os.walk(root):\n for filename in filenames:\n if filename.endswith(\".JPEG\"):\n filepath = dirpath + \"/\" + filename\n image_embedding = extractor(filepath)\n client.insert(\n \"image_embeddings\",\n {\"vector\": image_embedding, \"filename\": filepath},\n )\n","from IPython.display import display\n\nquery_image = \"./test/Afghan_hound/n02088094_4261.JPEG\"\n\nresults = client.search(\n \"image_embeddings\",\n data=[extractor(query_image)],\n output_fields=[\"filename\"],\n search_params={\"metric_type\": \"COSINE\"},\n)\nimages = []\nfor result in results:\n for hit in result[:10]:\n filename = hit[\"entity\"][\"filename\"]\n img = Image.open(filename)\n img = img.resize((150, 150))\n images.append(img)\n\nwidth = 150 * 5\nheight = 150 * 2\nconcatenated_image = Image.new(\"RGB\", (width, height))\n\nfor idx, img in enumerate(images):\n x = idx % 5\n y = idx // 5\n concatenated_image.paste(img, (x * 150, y * 150))\ndisplay(\"query\")\ndisplay(Image.open(query_image).resize((150, 
150)))\ndisplay(\"results\")\ndisplay(concatenated_image)\n"],"headingContent":"","anchorList":[{"label":"Milvusによる画像検索","href":"Image-Search-with-Milvus","type":1,"isActive":false},{"label":"データセットの準備","href":"Dataset-Preparation","type":2,"isActive":false},{"label":"前提条件","href":"Prequisites","type":2,"isActive":false},{"label":"フィーチャー・エクストラクターの定義","href":"Define-the-Feature-Extractor","type":2,"isActive":false},{"label":"Milvusコレクションの作成","href":"Create-a-Milvus-Collection","type":2,"isActive":false},{"label":"エンベッディングをMilvusに挿入する","href":"Insert-the-Embeddings-to-Milvus","type":2,"isActive":false},{"label":"クイックデプロイ","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["!wget https://github.com/milvus-io/pymilvus-assets/releases/download/imagedata/reverse_image_search.zip\n!unzip -q -o reverse_image_search.zip\n","$ pip install pymilvus --upgrade\n$ pip install timm\n","import torch\nfrom PIL import Image\nimport timm\nfrom sklearn.preprocessing import normalize\nfrom timm.data import resolve_data_config\nfrom timm.data.transforms_factory import create_transform\n\n\nclass FeatureExtractor:\n def __init__(self, modelname):\n # Load the pre-trained model\n self.model = timm.create_model(\n modelname, pretrained=True, num_classes=0, global_pool=\"avg\"\n )\n self.model.eval()\n\n # Get the input size required by the model\n self.input_size = self.model.default_cfg[\"input_size\"]\n\n config = resolve_data_config({}, model=modelname)\n # Get the preprocessing function provided by TIMM for the model\n self.preprocess = create_transform(**config)\n\n def __call__(self, imagepath):\n # Preprocess the input image\n input_image = Image.open(imagepath).convert(\"RGB\") # Convert to RGB if needed\n input_image = self.preprocess(input_image)\n\n # Convert the image to a PyTorch tensor and add a batch dimension\n input_tensor = input_image.unsqueeze(0)\n\n # Perform inference\n with torch.no_grad():\n output = self.model(input_tensor)\n\n # Extract the feature vector\n feature_vector = output.squeeze().numpy()\n\n return normalize(feature_vector.reshape(1, -1), norm=\"l2\").flatten()\n","from pymilvus import MilvusClient\n\n# Set up a Milvus client\nclient = MilvusClient(uri=\"example.db\")\n# Create a collection in quick setup mode\nif client.has_collection(collection_name=\"image_embeddings\"):\n client.drop_collection(collection_name=\"image_embeddings\")\nclient.create_collection(\n collection_name=\"image_embeddings\",\n vector_field_name=\"vector\",\n dimension=512,\n auto_id=True,\n enable_dynamic_field=True,\n metric_type=\"COSINE\",\n)\n","import os\n\nextractor = FeatureExtractor(\"resnet34\")\n\nroot = \"./train\"\ninsert = True\nif insert is True:\n for dirpath, foldername, filenames in os.walk(root):\n for filename in filenames:\n if filename.endswith(\".JPEG\"):\n filepath = dirpath + \"/\" + filename\n image_embedding = extractor(filepath)\n client.insert(\n \"image_embeddings\",\n {\"vector\": image_embedding, \"filename\": filepath},\n )\n","from IPython.display import display\n\nquery_image = \"./test/Afghan_hound/n02088094_4261.JPEG\"\n\nresults = client.search(\n \"image_embeddings\",\n data=[extractor(query_image)],\n output_fields=[\"filename\"],\n search_params={\"metric_type\": \"COSINE\"},\n)\nimages = []\nfor result in results:\n for hit in result[:10]:\n filename = hit[\"entity\"][\"filename\"]\n img = Image.open(filename)\n img = img.resize((150, 150))\n images.append(img)\n\nwidth = 150 * 5\nheight = 150 * 2\nconcatenated_image = 
Image.new(\"RGB\", (width, height))\n\nfor idx, img in enumerate(images):\n x = idx % 5\n y = idx // 5\n concatenated_image.paste(img, (x * 150, y * 150))\ndisplay(\"query\")\ndisplay(Image.open(query_image).resize((150, 150)))\ndisplay(\"results\")\ndisplay(concatenated_image)\n"],"headingContent":"Image Search with Milvus","anchorList":[{"label":"Milvusを使った画像検索","href":"Image-Search-with-Milvus","type":1,"isActive":false},{"label":"データセットの準備","href":"Dataset-Preparation","type":2,"isActive":false},{"label":"前提条件","href":"Prequisites","type":2,"isActive":false},{"label":"フィーチャー・エクストラクターの定義","href":"Define-the-Feature-Extractor","type":2,"isActive":false},{"label":"Milvusコレクションの作成","href":"Create-a-Milvus-Collection","type":2,"isActive":false},{"label":"Milvusへのエンベッディングの挿入","href":"Insert-the-Embeddings-to-Milvus","type":2,"isActive":false},{"label":"クイックデプロイ","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/tutorials/image_similarity_search.md b/localization/v2.4.x/site/ja/tutorials/image_similarity_search.md index c3de79860..bc49b15e3 100644 --- a/localization/v2.4.x/site/ja/tutorials/image_similarity_search.md +++ b/localization/v2.4.x/site/ja/tutorials/image_similarity_search.md @@ -1,10 +1,9 @@ --- id: image_similarity_search.md summary: Milvusによる画像検索 -title: Milvusによる画像検索 +title: Milvusを使った画像検索 --- - -

Image Search with Milvus

Open In Colab | GitHub Repository

In this notebook, we show you how to use Milvus to search for similar images in a dataset. We use a subset of the ImageNet dataset and then search for an image of an Afghan hound as an example.

Dataset Preparation

Create a Milvus Collection

Create a Milvus collection to store the image embeddings.

from pymilvus import MilvusClient

# Set up a Milvus client
client = MilvusClient(uri="example.db")
# Create a collection in quick setup mode
if client.has_collection(collection_name="image_embeddings"):
    client.drop_collection(collection_name="image_embeddings")
client.create_collection(
    collection_name="image_embeddings",
    vector_field_name="vector",
    dimension=512,
    auto_id=True,
    enable_dynamic_field=True,
    metric_type="COSINE",
)
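Because enable_dynamic_field=True is set above, later inserts can carry fields that are not declared in the schema. A minimal illustration (the vector and filename values below are placeholders):

# "filename" is not in the schema; with the dynamic field enabled it is
# stored in the reserved JSON field and can be returned via output_fields.
client.insert(
    "image_embeddings",
    {"vector": [0.0] * 512, "filename": "example.JPEG"},  # illustrative values
)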
     

As for the argument of MilvusClient:

- Setting the uri as a local file, e.g. ./milvus.db, is the most convenient method, as it automatically utilizes Milvus Lite to store all data in this file.
- If you have a large scale of data, you can set up a more performant Milvus server on Docker or Kubernetes. In this setup, please use the server uri, e.g. http://localhost:19530, as your uri.
- If you want to use Zilliz Cloud, the fully managed cloud service for Milvus, adjust the uri and token, which correspond to the Public Endpoint and API key in Zilliz Cloud.
Insert the Embeddings into Milvus

We extract the embeddings of each image using the ResNet34 model and insert the images from the training set into Milvus.

import os

extractor = FeatureExtractor("resnet34")

root = "./train"
insert = True
if insert is True:
    for dirpath, foldername, filenames in os.walk(root):
        for filename in filenames:
            if filename.endswith(".JPEG"):
                filepath = dirpath + "/" + filename
                image_embedding = extractor(filepath)
                client.insert(
                    "image_embeddings",
                    {"vector": image_embedding, "filename": filepath},
                )
    from IPython.display import display
     
     query_image = "./test/Afghan_hound/n02088094_4261.JPEG"
    diff --git a/localization/v2.4.x/site/ja/tutorials/multimodal_rag_with_milvus.json b/localization/v2.4.x/site/ja/tutorials/multimodal_rag_with_milvus.json
    index 0fb175140..9adf426a9 100644
    --- a/localization/v2.4.x/site/ja/tutorials/multimodal_rag_with_milvus.json
    +++ b/localization/v2.4.x/site/ja/tutorials/multimodal_rag_with_milvus.json
    @@ -1 +1 @@
    -{"codeList":["$ pip install --upgrade pymilvus openai datasets opencv-python timm einops ftfy peft tqdm\n","$ git clone https://github.com/FlagOpen/FlagEmbedding.git\n$ pip install -e FlagEmbedding\n","$ wget https://github.com/milvus-io/bootcamp/releases/download/data/amazon_reviews_2023_subset.tar.gz\n$ tar -xzf amazon_reviews_2023_subset.tar.gz\n","$ wget https://huggingface.co/BAAI/bge-visualized/resolve/main/Visualized_base_en_v1.5.pth\n","import torch\nfrom FlagEmbedding.visual.modeling import Visualized_BGE\n\n\nclass Encoder:\n    def __init__(self, model_name: str, model_path: str):\n        self.model = Visualized_BGE(model_name_bge=model_name, model_weight=model_path)\n        self.model.eval()\n\n    def encode_query(self, image_path: str, text: str) -> list[float]:\n        with torch.no_grad():\n            query_emb = self.model.encode(image=image_path, text=text)\n        return query_emb.tolist()[0]\n\n    def encode_image(self, image_path: str) -> list[float]:\n        with torch.no_grad():\n            query_emb = self.model.encode(image=image_path)\n        return query_emb.tolist()[0]\n\n\nmodel_name = \"BAAI/bge-base-en-v1.5\"\nmodel_path = \"./Visualized_base_en_v1.5.pth\"  # Change to your own value if using a different model path\nencoder = Encoder(model_name, model_path)\n","import os\nfrom tqdm import tqdm\nfrom glob import glob\n\n\n# Generate embeddings for the image dataset\ndata_dir = (\n    \"./images_folder\"  # Change to your own value if using a different data directory\n)\nimage_list = glob(\n    os.path.join(data_dir, \"images\", \"*.jpg\")\n)  # We will only use images ending with \".jpg\"\nimage_dict = {}\nfor image_path in tqdm(image_list, desc=\"Generating image embeddings: \"):\n    try:\n        image_dict[image_path] = encoder.encode_image(image_path)\n    except Exception as e:\n        print(f\"Failed to generate embedding for {image_path}. 
Skipped.\")\n        continue\nprint(\"Number of encoded images:\", len(image_dict))\n","from pymilvus import MilvusClient\n\n\ndim = len(list(image_dict.values())[0])\ncollection_name = \"multimodal_rag_demo\"\n\n# Connect to Milvus client given URI\nmilvus_client = MilvusClient(uri=\"./milvus_demo.db\")\n\n# Create Milvus Collection\n# By default, vector field name is \"vector\"\nmilvus_client.create_collection(\n    collection_name=collection_name,\n    auto_id=True,\n    dimension=dim,\n    enable_dynamic_field=True,\n)\n\n# Insert data into collection\nmilvus_client.insert(\n    collection_name=collection_name,\n    data=[{\"image_path\": k, \"vector\": v} for k, v in image_dict.items()],\n)\n","query_image = os.path.join(\n    data_dir, \"leopard.jpg\"\n)  # Change to your own query image path\nquery_text = \"phone case with this image theme\"\n\n# Generate query embedding given image and text instructions\nquery_vec = encoder.encode_query(image_path=query_image, text=query_text)\n\nsearch_results = milvus_client.search(\n    collection_name=collection_name,\n    data=[query_vec],\n    output_fields=[\"image_path\"],\n    limit=9,  # Max number of search results to return\n    search_params={\"metric_type\": \"COSINE\", \"params\": {}},  # Search parameters\n)[0]\n\nretrieved_images = [hit.get(\"entity\").get(\"image_path\") for hit in search_results]\nprint(retrieved_images)\n","import numpy as np\nimport cv2\n\nimg_height = 300\nimg_width = 300\nrow_count = 3\n\n\ndef create_panoramic_view(query_image_path: str, retrieved_images: list) -> np.ndarray:\n    \"\"\"\n    creates a 5x5 panoramic view image from a list of images\n\n    args:\n        images: list of images to be combined\n\n    returns:\n        np.ndarray: the panoramic view image\n    \"\"\"\n    panoramic_width = img_width * row_count\n    panoramic_height = img_height * row_count\n    panoramic_image = np.full(\n        (panoramic_height, panoramic_width, 3), 255, dtype=np.uint8\n    )\n\n    # create and resize the query image with a blue border\n    query_image_null = np.full((panoramic_height, img_width, 3), 255, dtype=np.uint8)\n    query_image = Image.open(query_image_path).convert(\"RGB\")\n    query_array = np.array(query_image)[:, :, ::-1]\n    resized_image = cv2.resize(query_array, (img_width, img_height))\n\n    border_size = 10\n    blue = (255, 0, 0)  # blue color in BGR\n    bordered_query_image = cv2.copyMakeBorder(\n        resized_image,\n        border_size,\n        border_size,\n        border_size,\n        border_size,\n        cv2.BORDER_CONSTANT,\n        value=blue,\n    )\n\n    query_image_null[img_height * 2 : img_height * 3, 0:img_width] = cv2.resize(\n        bordered_query_image, (img_width, img_height)\n    )\n\n    # add text \"query\" below the query image\n    text = \"query\"\n    font_scale = 1\n    font_thickness = 2\n    text_org = (10, img_height * 3 + 30)\n    cv2.putText(\n        query_image_null,\n        text,\n        text_org,\n        cv2.FONT_HERSHEY_SIMPLEX,\n        font_scale,\n        blue,\n        font_thickness,\n        cv2.LINE_AA,\n    )\n\n    # combine the rest of the images into the panoramic view\n    retrieved_imgs = [\n        np.array(Image.open(img).convert(\"RGB\"))[:, :, ::-1] for img in retrieved_images\n    ]\n    for i, image in enumerate(retrieved_imgs):\n        image = cv2.resize(image, (img_width - 4, img_height - 4))\n        row = i // row_count\n        col = i % row_count\n        start_row = row * img_height\n        start_col = col * 
img_width\n\n        border_size = 2\n        bordered_image = cv2.copyMakeBorder(\n            image,\n            border_size,\n            border_size,\n            border_size,\n            border_size,\n            cv2.BORDER_CONSTANT,\n            value=(0, 0, 0),\n        )\n        panoramic_image[\n            start_row : start_row + img_height, start_col : start_col + img_width\n        ] = bordered_image\n\n        # add red index numbers to each image\n        text = str(i)\n        org = (start_col + 50, start_row + 30)\n        (font_width, font_height), baseline = cv2.getTextSize(\n            text, cv2.FONT_HERSHEY_SIMPLEX, 1, 2\n        )\n\n        top_left = (org[0] - 48, start_row + 2)\n        bottom_right = (org[0] - 48 + font_width + 5, org[1] + baseline + 5)\n\n        cv2.rectangle(\n            panoramic_image, top_left, bottom_right, (255, 255, 255), cv2.FILLED\n        )\n        cv2.putText(\n            panoramic_image,\n            text,\n            (start_col + 10, start_row + 30),\n            cv2.FONT_HERSHEY_SIMPLEX,\n            1,\n            (0, 0, 255),\n            2,\n            cv2.LINE_AA,\n        )\n\n    # combine the query image with the panoramic view\n    panoramic_image = np.hstack([query_image_null, panoramic_image])\n    return panoramic_image\n","from PIL import Image\n\ncombined_image_path = os.path.join(data_dir, \"combined_image.jpg\")\npanoramic_image = create_panoramic_view(query_image, retrieved_images)\ncv2.imwrite(combined_image_path, panoramic_image)\n\ncombined_image = Image.open(combined_image_path)\nshow_combined_image = combined_image.resize((300, 300))\nshow_combined_image.show()\n","import requests\nimport base64\n\nopenai_api_key = \"sk-***\"  # Change to your OpenAI API Key\n\n\ndef generate_ranking_explanation(\n    combined_image_path: str, caption: str, infos: dict = None\n) -> tuple[list[int], str]:\n    with open(combined_image_path, \"rb\") as image_file:\n        base64_image = base64.b64encode(image_file.read()).decode(\"utf-8\")\n\n    information = (\n        \"You are responsible for ranking results for a Composed Image Retrieval. \"\n        \"The user retrieves an image with an 'instruction' indicating their retrieval intent. \"\n        \"For example, if the user queries a red car with the instruction 'change this car to blue,' a similar type of car in blue would be ranked higher in the results. \"\n        \"Now you would receive instruction and query image with blue border. Every item has its red index number in its top left. Do not misunderstand it. \"\n        f\"User instruction: {caption} \\n\\n\"\n    )\n\n    # add additional information for each image\n    if infos:\n        for i, info in enumerate(infos[\"product\"]):\n            information += f\"{i}. {info}\\n\"\n\n    information += (\n        \"Provide a new ranked list of indices from most suitable to least suitable, followed by an explanation for the top 1 most suitable item only. 
\"\n        \"The format of the response has to be 'Ranked list: []' with the indices in brackets as integers, followed by 'Reasons:' plus the explanation why this most fit user's query intent.\"\n    )\n\n    headers = {\n        \"Content-Type\": \"application/json\",\n        \"Authorization\": f\"Bearer {openai_api_key}\",\n    }\n\n    payload = {\n        \"model\": \"gpt-4o\",\n        \"messages\": [\n            {\n                \"role\": \"user\",\n                \"content\": [\n                    {\"type\": \"text\", \"text\": information},\n                    {\n                        \"type\": \"image_url\",\n                        \"image_url\": {\"url\": f\"data:image/jpeg;base64,{base64_image}\"},\n                    },\n                ],\n            }\n        ],\n        \"max_tokens\": 300,\n    }\n\n    response = requests.post(\n        \"https://api.openai.com/v1/chat/completions\", headers=headers, json=payload\n    )\n    result = response.json()[\"choices\"][0][\"message\"][\"content\"]\n\n    # parse the ranked indices from the response\n    start_idx = result.find(\"[\")\n    end_idx = result.find(\"]\")\n    ranked_indices_str = result[start_idx + 1 : end_idx].split(\",\")\n    ranked_indices = [int(index.strip()) for index in ranked_indices_str]\n\n    # extract explanation\n    explanation = result[end_idx + 1 :].strip()\n\n    return ranked_indices, explanation\n","ranked_indices, explanation = generate_ranking_explanation(\n    combined_image_path, query_text\n)\n","print(explanation)\n\nbest_index = ranked_indices[0]\nbest_img = Image.open(retrieved_images[best_index])\nbest_img = best_img.resize((150, 150))\nbest_img.show()\n"],"headingContent":"","anchorList":[{"label":"MilvusによるマルチモーダルRAG","href":"Multimodal-RAG-with-Milvus","type":1,"isActive":false},{"label":"準備","href":"Preparation","type":2,"isActive":false},{"label":"データのロード","href":"Load-Data","type":2,"isActive":false},{"label":"生成リランカーによるマルチモーダル検索","href":"Multimodal-Search-with-Generative-Reranker","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ pip install --upgrade pymilvus openai datasets opencv-python timm einops ftfy peft tqdm\n","$ git clone https://github.com/FlagOpen/FlagEmbedding.git\n$ pip install -e FlagEmbedding\n","$ wget https://github.com/milvus-io/bootcamp/releases/download/data/amazon_reviews_2023_subset.tar.gz\n$ tar -xzf amazon_reviews_2023_subset.tar.gz\n","$ wget https://huggingface.co/BAAI/bge-visualized/resolve/main/Visualized_base_en_v1.5.pth\n","import torch\nfrom FlagEmbedding.visual.modeling import Visualized_BGE\n\n\nclass Encoder:\n    def __init__(self, model_name: str, model_path: str):\n        self.model = Visualized_BGE(model_name_bge=model_name, model_weight=model_path)\n        self.model.eval()\n\n    def encode_query(self, image_path: str, text: str) -> list[float]:\n        with torch.no_grad():\n            query_emb = self.model.encode(image=image_path, text=text)\n        return query_emb.tolist()[0]\n\n    def encode_image(self, image_path: str) -> list[float]:\n        with torch.no_grad():\n            query_emb = self.model.encode(image=image_path)\n        return query_emb.tolist()[0]\n\n\nmodel_name = \"BAAI/bge-base-en-v1.5\"\nmodel_path = \"./Visualized_base_en_v1.5.pth\"  # Change to your own value if using a different model path\nencoder = Encoder(model_name, model_path)\n","import os\nfrom tqdm import tqdm\nfrom glob import glob\n\n\n# Generate embeddings for the image dataset\ndata_dir = (\n    \"./images_folder\"  # Change to your own value if using a different data directory\n)\nimage_list = glob(\n    os.path.join(data_dir, \"images\", \"*.jpg\")\n)  # We will only use images ending with \".jpg\"\nimage_dict = {}\nfor image_path in tqdm(image_list, desc=\"Generating image embeddings: \"):\n    try:\n        image_dict[image_path] = encoder.encode_image(image_path)\n    except Exception as e:\n        print(f\"Failed to generate embedding for {image_path}. 
Skipped.\")\n        continue\nprint(\"Number of encoded images:\", len(image_dict))\n","from pymilvus import MilvusClient\n\n\ndim = len(list(image_dict.values())[0])\ncollection_name = \"multimodal_rag_demo\"\n\n# Connect to Milvus client given URI\nmilvus_client = MilvusClient(uri=\"./milvus_demo.db\")\n\n# Create Milvus Collection\n# By default, vector field name is \"vector\"\nmilvus_client.create_collection(\n    collection_name=collection_name,\n    auto_id=True,\n    dimension=dim,\n    enable_dynamic_field=True,\n)\n\n# Insert data into collection\nmilvus_client.insert(\n    collection_name=collection_name,\n    data=[{\"image_path\": k, \"vector\": v} for k, v in image_dict.items()],\n)\n","query_image = os.path.join(\n    data_dir, \"leopard.jpg\"\n)  # Change to your own query image path\nquery_text = \"phone case with this image theme\"\n\n# Generate query embedding given image and text instructions\nquery_vec = encoder.encode_query(image_path=query_image, text=query_text)\n\nsearch_results = milvus_client.search(\n    collection_name=collection_name,\n    data=[query_vec],\n    output_fields=[\"image_path\"],\n    limit=9,  # Max number of search results to return\n    search_params={\"metric_type\": \"COSINE\", \"params\": {}},  # Search parameters\n)[0]\n\nretrieved_images = [hit.get(\"entity\").get(\"image_path\") for hit in search_results]\nprint(retrieved_images)\n","import numpy as np\nimport cv2\n\nimg_height = 300\nimg_width = 300\nrow_count = 3\n\n\ndef create_panoramic_view(query_image_path: str, retrieved_images: list) -> np.ndarray:\n    \"\"\"\n    creates a 5x5 panoramic view image from a list of images\n\n    args:\n        images: list of images to be combined\n\n    returns:\n        np.ndarray: the panoramic view image\n    \"\"\"\n    panoramic_width = img_width * row_count\n    panoramic_height = img_height * row_count\n    panoramic_image = np.full(\n        (panoramic_height, panoramic_width, 3), 255, dtype=np.uint8\n    )\n\n    # create and resize the query image with a blue border\n    query_image_null = np.full((panoramic_height, img_width, 3), 255, dtype=np.uint8)\n    query_image = Image.open(query_image_path).convert(\"RGB\")\n    query_array = np.array(query_image)[:, :, ::-1]\n    resized_image = cv2.resize(query_array, (img_width, img_height))\n\n    border_size = 10\n    blue = (255, 0, 0)  # blue color in BGR\n    bordered_query_image = cv2.copyMakeBorder(\n        resized_image,\n        border_size,\n        border_size,\n        border_size,\n        border_size,\n        cv2.BORDER_CONSTANT,\n        value=blue,\n    )\n\n    query_image_null[img_height * 2 : img_height * 3, 0:img_width] = cv2.resize(\n        bordered_query_image, (img_width, img_height)\n    )\n\n    # add text \"query\" below the query image\n    text = \"query\"\n    font_scale = 1\n    font_thickness = 2\n    text_org = (10, img_height * 3 + 30)\n    cv2.putText(\n        query_image_null,\n        text,\n        text_org,\n        cv2.FONT_HERSHEY_SIMPLEX,\n        font_scale,\n        blue,\n        font_thickness,\n        cv2.LINE_AA,\n    )\n\n    # combine the rest of the images into the panoramic view\n    retrieved_imgs = [\n        np.array(Image.open(img).convert(\"RGB\"))[:, :, ::-1] for img in retrieved_images\n    ]\n    for i, image in enumerate(retrieved_imgs):\n        image = cv2.resize(image, (img_width - 4, img_height - 4))\n        row = i // row_count\n        col = i % row_count\n        start_row = row * img_height\n        start_col = col * 
img_width\n\n        border_size = 2\n        bordered_image = cv2.copyMakeBorder(\n            image,\n            border_size,\n            border_size,\n            border_size,\n            border_size,\n            cv2.BORDER_CONSTANT,\n            value=(0, 0, 0),\n        )\n        panoramic_image[\n            start_row : start_row + img_height, start_col : start_col + img_width\n        ] = bordered_image\n\n        # add red index numbers to each image\n        text = str(i)\n        org = (start_col + 50, start_row + 30)\n        (font_width, font_height), baseline = cv2.getTextSize(\n            text, cv2.FONT_HERSHEY_SIMPLEX, 1, 2\n        )\n\n        top_left = (org[0] - 48, start_row + 2)\n        bottom_right = (org[0] - 48 + font_width + 5, org[1] + baseline + 5)\n\n        cv2.rectangle(\n            panoramic_image, top_left, bottom_right, (255, 255, 255), cv2.FILLED\n        )\n        cv2.putText(\n            panoramic_image,\n            text,\n            (start_col + 10, start_row + 30),\n            cv2.FONT_HERSHEY_SIMPLEX,\n            1,\n            (0, 0, 255),\n            2,\n            cv2.LINE_AA,\n        )\n\n    # combine the query image with the panoramic view\n    panoramic_image = np.hstack([query_image_null, panoramic_image])\n    return panoramic_image\n","from PIL import Image\n\ncombined_image_path = os.path.join(data_dir, \"combined_image.jpg\")\npanoramic_image = create_panoramic_view(query_image, retrieved_images)\ncv2.imwrite(combined_image_path, panoramic_image)\n\ncombined_image = Image.open(combined_image_path)\nshow_combined_image = combined_image.resize((300, 300))\nshow_combined_image.show()\n","import requests\nimport base64\n\nopenai_api_key = \"sk-***\"  # Change to your OpenAI API Key\n\n\ndef generate_ranking_explanation(\n    combined_image_path: str, caption: str, infos: dict = None\n) -> tuple[list[int], str]:\n    with open(combined_image_path, \"rb\") as image_file:\n        base64_image = base64.b64encode(image_file.read()).decode(\"utf-8\")\n\n    information = (\n        \"You are responsible for ranking results for a Composed Image Retrieval. \"\n        \"The user retrieves an image with an 'instruction' indicating their retrieval intent. \"\n        \"For example, if the user queries a red car with the instruction 'change this car to blue,' a similar type of car in blue would be ranked higher in the results. \"\n        \"Now you would receive instruction and query image with blue border. Every item has its red index number in its top left. Do not misunderstand it. \"\n        f\"User instruction: {caption} \\n\\n\"\n    )\n\n    # add additional information for each image\n    if infos:\n        for i, info in enumerate(infos[\"product\"]):\n            information += f\"{i}. {info}\\n\"\n\n    information += (\n        \"Provide a new ranked list of indices from most suitable to least suitable, followed by an explanation for the top 1 most suitable item only. 
\"\n        \"The format of the response has to be 'Ranked list: []' with the indices in brackets as integers, followed by 'Reasons:' plus the explanation why this most fit user's query intent.\"\n    )\n\n    headers = {\n        \"Content-Type\": \"application/json\",\n        \"Authorization\": f\"Bearer {openai_api_key}\",\n    }\n\n    payload = {\n        \"model\": \"gpt-4o\",\n        \"messages\": [\n            {\n                \"role\": \"user\",\n                \"content\": [\n                    {\"type\": \"text\", \"text\": information},\n                    {\n                        \"type\": \"image_url\",\n                        \"image_url\": {\"url\": f\"data:image/jpeg;base64,{base64_image}\"},\n                    },\n                ],\n            }\n        ],\n        \"max_tokens\": 300,\n    }\n\n    response = requests.post(\n        \"https://api.openai.com/v1/chat/completions\", headers=headers, json=payload\n    )\n    result = response.json()[\"choices\"][0][\"message\"][\"content\"]\n\n    # parse the ranked indices from the response\n    start_idx = result.find(\"[\")\n    end_idx = result.find(\"]\")\n    ranked_indices_str = result[start_idx + 1 : end_idx].split(\",\")\n    ranked_indices = [int(index.strip()) for index in ranked_indices_str]\n\n    # extract explanation\n    explanation = result[end_idx + 1 :].strip()\n\n    return ranked_indices, explanation\n","ranked_indices, explanation = generate_ranking_explanation(\n    combined_image_path, query_text\n)\n","print(explanation)\n\nbest_index = ranked_indices[0]\nbest_img = Image.open(retrieved_images[best_index])\nbest_img = best_img.resize((150, 150))\nbest_img.show()\n"],"headingContent":"Multimodal RAG with Milvus","anchorList":[{"label":"MilvusによるマルチモーダルRAG","href":"Multimodal-RAG-with-Milvus","type":1,"isActive":false},{"label":"準備","href":"Preparation","type":2,"isActive":false},{"label":"データのロード","href":"Load-Data","type":2,"isActive":false},{"label":"生成リランカーによるマルチモーダル検索","href":"Multimodal-Search-with-Generative-Reranker","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/ja/tutorials/multimodal_rag_with_milvus.md b/localization/v2.4.x/site/ja/tutorials/multimodal_rag_with_milvus.md
    index 24c4122c2..2144f6da3 100644
    --- a/localization/v2.4.x/site/ja/tutorials/multimodal_rag_with_milvus.md
    +++ b/localization/v2.4.x/site/ja/tutorials/multimodal_rag_with_milvus.md
    @@ -1,9 +1,8 @@
     ---
     id: multimodal_rag_with_milvus.md
 summary: Multimodal RAG with Milvus
 title: Multimodal RAG with Milvus
     ---
    -
     

Multimodal RAG with Milvus

Open In Colab
GitHub Repository

This tutorial showcases multimodal RAG powered by Milvus, the Visualized BGE model, and GPT-4o. With this system, users can upload an image and edit text instructions, which are processed by BGE's composed retrieval model to search for candidate images. GPT-4o then acts as a reranker, selecting the most suitable image and providing the rationale behind the choice. This powerful combination enables a seamless and intuitive image search experience, leveraging Milvus for efficient retrieval, the BGE model for precise image processing and matching, and GPT-4o for advanced reranking.
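
Before the step-by-step walkthrough, here is a compact sketch of the retrieval half of this pipeline, written as a function over the encoder and MilvusClient objects that the tutorial constructs below; the GPT-4o reranking step then consumes the returned paths via generate_ranking_explanation.

def search_candidate_images(encoder, milvus_client, collection_name, query_image, query_text):
    # 1) Fuse the query image and the text instruction into one BGE embedding
    query_vec = encoder.encode_query(image_path=query_image, text=query_text)
    # 2) Retrieve the nearest product images from Milvus by cosine similarity
    hits = milvus_client.search(
        collection_name=collection_name,
        data=[query_vec],
        output_fields=["image_path"],
        limit=9,
        search_params={"metric_type": "COSINE", "params": {}},
    )[0]
    # 3) Return candidate image paths for the GPT-4o reranking step
    return [hit.get("entity").get("image_path") for hit in hits]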

Preparation


Load Data

    Generating image embeddings: 100%|██████████| 900/900 [00:20<00:00, 44.08it/s]
     
     Number of encoded images: 900
     

Insert into Milvus

Insert the images with their corresponding paths and embeddings into the Milvus collection.

As for the argument of MilvusClient:

• Setting the uri as a local file, e.g. ./milvus_demo.db, is the most convenient method, as it automatically utilizes Milvus Lite to store all data in this file.
• If you have a large scale of data, you can set up a more performant Milvus server on Docker or Kubernetes. In this setup, please use the server uri, e.g. http://localhost:19530, as your uri.
• If you want to use Zilliz Cloud, the fully managed cloud service for Milvus, adjust the uri and token, which correspond to the Public Endpoint and API key in Zilliz Cloud.
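
For reference, a minimal sketch of the three connection modes described above; the server address, endpoint, and token are placeholders, not values from this tutorial:

from pymilvus import MilvusClient

# Milvus Lite: store all data in a local file (the mode used below)
client = MilvusClient(uri="./milvus_demo.db")

# Self-hosted Milvus server on Docker/Kubernetes (placeholder address)
# client = MilvusClient(uri="http://localhost:19530")

# Zilliz Cloud (placeholder Public Endpoint and API key)
# client = MilvusClient(uri="https://<public-endpoint>", token="<api-key>")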
    from pymilvus import MilvusClient
     
    +
     dim = len(list(image_dict.values())[0])
     collection_name = "multimodal_rag_demo"
     
@@ -144,19 +146,18 @@ milvus_client = MilvusClient(uri="./milvus_demo.db")
     # Create Milvus Collection
     # By default, vector field name is "vector"
     milvus_client.create_collection(
    -collection_name=collection_name,
    -auto_id=True,
    -dimension=dim,
    -enable_dynamic_field=True,
    +    collection_name=collection_name,
    +    auto_id=True,
    +    dimension=dim,
    +    enable_dynamic_field=True,
     )
     
     # Insert data into collection
     milvus_client.insert(
    -collection_name=collection_name,
    -data=[{"image_path": k, "vector": v} for k, v in image_dict.items()],
    +    collection_name=collection_name,
    +    data=[{"image_path": k, "vector": v} for k, v in image_dict.items()],
     )
     
    -
    {'insert_count': 900,
      'ids': [451537887696781312, 451537887696781313, ..., 451537887696782211],
      'cost': 0}
@@ -187,17 +188,16 @@ query_text = "phone case with this image theme"
     query_vec = encoder.encode_query(image_path=query_image, text=query_text)
     
     search_results = milvus_client.search(
    -collection_name=collection_name,
    -data=[query_vec],
    -output_fields=["image_path"],
    -limit=9, # Max number of search results to return
    -search_params={"metric_type": "COSINE", "params": {}}, # Search parameters
    +    collection_name=collection_name,
    +    data=[query_vec],
    +    output_fields=["image_path"],
    +    limit=9,  # Max number of search results to return
    +    search_params={"metric_type": "COSINE", "params": {}},  # Search parameters
     )[0]
     
     retrieved_images = [hit.get("entity").get("image_path") for hit in search_results]
     print(retrieved_images)
     
    -
    ['./images_folder/images/518Gj1WQ-RL._AC_.jpg', './images_folder/images/41n00AOfWhL._AC_.jpg', './images_folder/images/51Wqge9HySL._AC_.jpg', './images_folder/images/51R2SZiywnL._AC_.jpg', './images_folder/images/516PebbMAcL._AC_.jpg', './images_folder/images/51RrgfYKUfL._AC_.jpg', './images_folder/images/515DzQVKKwL._AC_.jpg', './images_folder/images/51BsgVw6RhL._AC_.jpg', './images_folder/images/51INtcXu9FL._AC_.jpg']
     

Rerank with GPT-4o

Use an LLM to rank the images and generate an explanation for the best result based on the user query and the retrieval results.

@@ -209,9 +209,10 @@ img_height = 300
 img_width = 300
 row_count = 3
 
+
 def create_panoramic_view(query_image_path: str, retrieved_images: list) -> np.ndarray:
-"""
-creates a 5x5 panoramic view image from a list of images
+    """
+    creates a 5x5 panoramic view image from a list of images
 
     args:
         images: list of images to be combined
@@ -315,9 +316,7 @@ creates a 5x5 panoramic view image from a list of images
     # combine the query image with the panoramic view
     panoramic_image = np.hstack([query_image_null, panoramic_image])
     return panoramic_image
-
-

Combine the query image and the retrieved images with indices into a panoramic view.

    from PIL import Image
     
@@ -329,23 +328,23 @@ combined_image = Image.open(combined_image_path)
     show_combined_image = combined_image.resize((300, 300))
     show_combined_image.show()
     

Create a panoramic view

2. Rerank and explain


Send the combined image to a multimodal LLM service along with appropriate prompts to rank the retrieved results with explanations. To enable GPT-4o as the LLM, you need to prepare your OpenAI API key.

    import requests
     import base64
     
    -openai_api_key = "sk-\*\*\*" # Change to your OpenAI API Key
    +openai_api_key = "sk-***"  # Change to your OpenAI API Key
    +
     
     def generate_ranking_explanation(
    -combined_image_path: str, caption: str, infos: dict = None
    +    combined_image_path: str, caption: str, infos: dict = None
     ) -> tuple[list[int], str]:
    -with open(combined_image_path, "rb") as image_file:
    -base64_image = base64.b64encode(image_file.read()).decode("utf-8")
    +    with open(combined_image_path, "rb") as image_file:
    +        base64_image = base64.b64encode(image_file.read()).decode("utf-8")
     
         information = (
             "You are responsible for ranking results for a Composed Image Retrieval. "
@@ -402,9 +401,7 @@
     
         return ranked_indices, explanation
    -
     
    -

Get the ranked image indices and the reason for the best result:

    ranked_indices, explanation = generate_ranking_explanation(
         combined_image_path, query_text
@@ -418,7 +415,6 @@ best_img = Image.open(retrieved_images[best_index])
     best_img = best_img.resize((150, 150))
     best_img.show()
     
    -
    Reasons: The most suitable item for the user's query intent is index 6 because the instruction specifies a phone case with the theme of the image, which is a leopard. The phone case with index 6 has a thematic design resembling the leopard pattern, making it the closest match to the user's request for a phone case with the image theme.
     

diff --git a/localization/v2.4.x/site/ja/tutorials/tutorials-overview.json b/localization/v2.4.x/site/ja/tutorials/tutorials-overview.json
index a6d040ad8..56df23120 100644
--- a/localization/v2.4.x/site/ja/tutorials/tutorials-overview.json
+++ b/localization/v2.4.x/site/ja/tutorials/tutorials-overview.json
@@ -1 +1 @@
-{"codeList":[],"headingContent":"","anchorList":[{"label":"チュートリアル概要","href":"Tutorials-Overview","type":1,"isActive":false}]}
\ No newline at end of file
+{"codeList":[],"headingContent":"Tutorials Overview","anchorList":[{"label":"チュートリアル概要","href":"Tutorials-Overview","type":1,"isActive":false}]}
\ No newline at end of file
diff --git a/localization/v2.4.x/site/ja/tutorials/tutorials-overview.md b/localization/v2.4.x/site/ja/tutorials/tutorials-overview.md
index e23b73ff3..10890a900 100644
--- a/localization/v2.4.x/site/ja/tutorials/tutorials-overview.md
+++ b/localization/v2.4.x/site/ja/tutorials/tutorials-overview.md
@@ -25,11 +25,10 @@ title: Tutorials Overview
 Build RAG with Milvus | RAG | vector search
 Multimodal RAG with Milvus | RAG | vector search, dynamic field
 Image Search with Milvus | Semantic Search | vector search, dynamic field
 Hybrid Search with Milvus | Hybrid Search | hybrid search, multi vector, dense embedding, sparse embedding
 Multimodal Search using Multi Vectors | Semantic Search | multi vector, hybrid search
-
 Question Answering System | Question Answering | vector search
 Recommender System | Recommendation System | vector search
 Video Similarity Search | Semantic Search | vector search
@@ -38,5 +37,7 @@ title: Tutorials Overview
 Text Search Engine | Semantic Search | vector search
 Image Search by Text | Semantic Search | vector search
 Image Deduplication | Deduplication | vector search
+Graph RAG with Milvus | RAG | graph search
+Contextual Retrieval with Milvus | Quickstart | vector search
diff --git a/localization/v2.4.x/site/ja/userGuide/clustering-compaction.json b/localization/v2.4.x/site/ja/userGuide/clustering-compaction.json
index ce419ec3c..c4c7e92da 100644
--- a/localization/v2.4.x/site/ja/userGuide/clustering-compaction.json
+++ b/localization/v2.4.x/site/ja/userGuide/clustering-compaction.json
@@ -1 +1 @@
-{"codeList":["dataCoord:\n compaction:\n clustering:\n enable: true \n autoEnable: false \n triggerInterval: 600 \n 
minInterval: 3600 \n maxInterval: 259200 \n newDataSizeThreshold: 512m \n timeout: 7200\n \nqueryNode:\n enableSegmentPrune: true \n\ndatanode:\n clusteringCompaction:\n memoryBufferRatio: 0.1 \n workPoolSize: 8 \ncommon:\n usePartitionKeyAsClusteringKey: true \n","default_fields = [\n FieldSchema(name=\"id\", dtype=DataType.INT64, is_primary=True),\n FieldSchema(name=\"key\", dtype=DataType.INT64, is_clustering_key=True),\n FieldSchema(name=\"var\", dtype=DataType.VARCHAR, max_length=1000, is_primary=False),\n FieldSchema(name=\"embeddings\", dtype=DataType.FLOAT_VECTOR, dim=dim)\n]\n\ndefault_schema = CollectionSchema(\n fields=default_fields, \n description=\"test clustering-key collection\"\n)\n\ncoll1 = Collection(name=\"clustering_test\", schema=default_schema)\n","coll1.compact(is_clustering=True)\ncoll1.get_compaction_state(is_clustering=True)\ncoll1.wait_for_compaction_completed(is_clustering=True)\n"],"headingContent":"Clustering Compaction","anchorList":[{"label":"クラスタリング・コンパクション","href":"Clustering-Compaction","type":1,"isActive":false},{"label":"概要","href":"Overview","type":2,"isActive":false},{"label":"クラスタリング・コンパクションの使用","href":"Use-Clustering-Compaction","type":2,"isActive":false},{"label":"クラスタリング・コンパクションのトリガ","href":"Trigger-Clustering-Compaction","type":2,"isActive":false},{"label":"ベストプラクティス","href":"Best-practices","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/userGuide/clustering-compaction.md b/localization/v2.4.x/site/ja/userGuide/clustering-compaction.md index 1a3ca0749..bf70e8dea 100644 --- a/localization/v2.4.x/site/ja/userGuide/clustering-compaction.md +++ b/localization/v2.4.x/site/ja/userGuide/clustering-compaction.md @@ -36,13 +36,13 @@ summary: >- d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

Milvus stores incoming entities in segments within a collection and seals a segment when it is full. When that happens, a new segment is created to accommodate additional entities. As a result, entities are arbitrarily distributed across segments. This distribution requires Milvus to search multiple segments to find the nearest neighbors to a given query vector.

Without clustering compaction

If Milvus can distribute entities across segments based on the values of a specific field, the search scope can be restricted to one segment, thereby improving search performance.

Clustering compaction is a feature in Milvus that redistributes entities across segments within a collection based on the values of a scalar field. To enable this feature, you first need to select a scalar field as the clustering key. This lets Milvus redistribute entities into a segment when their clustering key values fall within a specific range. When you trigger a clustering compaction, Milvus generates/updates a global index called PartitionStats, which records the mapping between segments and clustering key values.
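
As a minimal sketch of how the clustering key is declared (the field name key matches the full example later on this page):

from pymilvus import FieldSchema, DataType

# Any scalar field can serve as the clustering key; here an INT64 field.
# During clustering compaction, Milvus groups entities into segments by
# ranges of this field's values and records the mapping in PartitionStats.
key_field = FieldSchema(name="key", dtype=DataType.INT64, is_clustering_key=True)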

With clustering compaction

@@ -63,8 +63,8 @@

The clustering compaction feature in Milvus is highly configurable. You can trigger it manually or set it to be triggered automatically at intervals by Milvus. To enable clustering compaction, do the following:

Global configuration

You need to modify your Milvus configuration file as follows.

    dataCoord:
       compaction:
         clustering:
    @@ -94,11 +94,11 @@ common:
     
     
Configuration Item | Description | Default Value
enable | Specifies whether to enable clustering compaction. Set this to true if you need to enable this feature for every collection that has a clustering key. | false
autoEnable | Specifies whether to enable automatically triggered compaction. Setting this to true makes Milvus compact collections that have a clustering key at the specified intervals. | false
triggerInterval | Specifies the interval in milliseconds at which Milvus starts clustering compaction. This parameter is valid only when autoEnable is set to true. | -
minInterval | Specifies the minimum interval in seconds. This parameter is valid only when autoEnable is set to true. Setting it to an integer greater than triggerInterval helps avoid repeated compactions within a short period. | -
maxInterval | Specifies the maximum interval in seconds. This parameter is valid only when autoEnable is set to true. Once Milvus detects that a collection has not been clustering-compacted for a duration longer than this value, it forces a clustering compaction. | -
newDataSizeThreshold | Specifies the upper threshold to trigger clustering compaction. This parameter is valid only when autoEnable is set to true. Once Milvus detects that the data volume in a collection exceeds this value, it initiates a clustering compaction process. | -
timeout | Specifies the timeout duration for clustering compaction. A clustering compaction fails if its execution time exceeds this value. | -

@@ -109,7 +109,7 @@ common:

Configuration Item | Description | Default Value
enableSegmentPrune | Specifies whether Milvus prunes data by referring to PartitionStats upon receiving search/query requests. Setting this to true enables Milvus to prune irrelevant data from segments during a search/query request. | false

@@ -119,7 +119,7 @@ common:

Configuration Item | Description | Default Value
memoryBufferRatio | Specifies the memory buffer ratio for clustering compaction tasks. Milvus flushes data when the data size exceeds the allocated buffer size calculated using this ratio. | -
workPoolSize | Specifies the worker pool size for a clustering compaction task. | -

@@ -136,22 +136,7 @@ common:

To apply the above changes to your Milvus cluster, follow the steps in Configure Milvus with Helm and Configure Milvus with Milvus Operators.


Collection configuration

For clustering compaction in a specific collection, select a scalar field from the collection as the clustering key.

    default_fields = [
         FieldSchema(name="id", dtype=DataType.INT64, is_primary=True),
         FieldSchema(name="key", dtype=DataType.INT64, is_clustering_key=True),
    @@ -190,7 +175,7 @@ coll1.get_compaction_state(is_clustering=True)
     coll1.wait_for_compaction_completed(is_clustering=True)
     

Benchmark test

The combination of data volume and query patterns determines the performance improvement that clustering compaction can bring. An internal benchmark test demonstrates that clustering compaction yields up to a 25-fold improvement in queries per second (QPS).

The benchmark test was conducted on a collection containing entities from a 20-million, 768-dimensional LAION dataset, with the key field designated as the clustering key. After clustering compaction was triggered in the collection, concurrent searches were sent until CPU usage reached a high water mark.

@@ -260,7 +245,7 @@ coll1.wait_for_compaction_completed(is_clustering=True)

As the search filter narrows the search scope, the prune ratio increases, meaning that more entities are skipped during the search process. Comparing the statistics in the first and last rows, you can see that searches without clustering compaction require scanning the entire collection, while searches with clustering compaction using a specific key achieve up to a 25-fold improvement.

Best practices

An entity in a Milvus collection is a singular, identifiable instance within the collection. It represents a distinct member of a particular class, such as a book in a library, a gene in a genome, or any other identifiable entity.

Entities within a collection share a common set of attributes, called the schema, which outlines the structure that each entity must adhere to, including field names, data types, and other constraints.

To successfully insert entities into a collection, the provided data must include all the schema-defined fields of the target collection. Additionally, you can include fields that are not defined in the schema only if the dynamic field is enabled. For details, refer to Enable Dynamic Field.
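
As a minimal sketch against the quick_setup collection created in the Preparation step below (the color value is illustrative and lands in the dynamic field):

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")

# "quick_setup" was created with dimension=5, so "id" and "vector" are the
# schema-defined fields; "color" is accepted only because quick setup mode
# enables the dynamic field.
res = client.insert(
    collection_name="quick_setup",
    data=[{"id": 0, "vector": [0.1, 0.2, 0.3, 0.4, 0.5], "color": "pink"}],
)
print(res)  # e.g. {'insert_count': 1, 'ids': [0]}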

Preparation

The following code snippet repurposes the existing code to establish a connection to a Milvus cluster and quickly set up a collection.

For the preparation, use MilvusClient to connect to the Milvus server and create_collection() to create a collection in quick setup mode.

For the preparation, use MilvusClientV2 to connect to the Milvus server and createCollection() to create a collection in quick setup mode.

For the preparation, use MilvusClient to connect to the Milvus server and createCollection() to create a collection in quick setup mode.

@@ -397,16 +397,15 @@ res = await client.
-

Upserting data is a combination of update and insert operations. In Milvus, an upsert operation performs a data-level action that either inserts or updates an entity based on whether its primary key already exists in the collection. Specifically:

• If the primary key of the entity already exists in the collection, the existing entity will be overwritten.

• If the primary key does not exist in the collection, a new entity will be inserted.

  • Upsert operations will not update primary keys.
  • Note that using upsert operations instead of insert for large-scale data ingestion (e.g., millions of vectors) can lead to high memory consumption on Milvus data nodes; see the sketch after this list.
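
A minimal sketch of an upsert with the Python client (collection name and values reuse the quick_setup example; the color field is illustrative):

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")

# Overwrites the entity whose primary key is 0 if it already exists in
# "quick_setup"; otherwise inserts it as a new entity.
res = client.upsert(
    collection_name="quick_setup",
    data=[{"id": 0, "vector": [0.5, 0.4, 0.3, 0.2, 0.1], "color": "red"}],
)
print(res)  # e.g. {'upsert_count': 1}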
    @@ -613,7 +612,7 @@ res = await client.

If an entity is no longer needed, you can remove it from the collection with delete().


Milvus offers two ways to identify the entities to delete:

• Delete entities by filter, for example:
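
A minimal sketch of a filter-based delete (the field name and values are illustrative):

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")

# Removes every entity whose dynamic "color" field matches the filter.
res = client.delete(
    collection_name="quick_setup",
    filter="color in ['red', 'pink']",
)
print(res)  # e.g. {'delete_count': 2}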

      diff --git a/localization/v2.4.x/site/ja/userGuide/manage-collections.json b/localization/v2.4.x/site/ja/userGuide/manage-collections.json index bc492d471..87279fbe6 100644 --- a/localization/v2.4.x/site/ja/userGuide/manage-collections.json +++ b/localization/v2.4.x/site/ja/userGuide/manage-collections.json @@ -1 +1 @@ -{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection in quick setup mode\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5\n)\n\nres = client.get_load_state(\n collection_name=\"quick_setup\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .build();\n\nclient.createCollection(quickSetupReq);\n\n// Thread.sleep(5000);\n\nGetLoadStateReq quickSetupLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nBoolean res = client.getLoadState(quickSetupLoadStateReq);\n\nSystem.out.println(res);\n\n// Output:\n// true\n","address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nlet res = await client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n}); \n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n","$ export MILVUS_URI=\"localhost:19530\"\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"quick_setup\",\n \"dimension\": 5\n}'\n\n# Output\n#\n# {\n# \"code\": 0,\n# \"data\": {},\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"quick_setup\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": {\n# \"loadProgress\": 100,\n# \"loadState\": \"LoadStateLoaded\"\n# }\n# }\n","# 3. Create a collection in customized setup mode\n\n# 3.1. Create schema\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n)\n\n# 3.2. Add fields to schema\nschema.add_field(field_name=\"my_id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"my_vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\n","import io.milvus.v2.common.DataType;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\n// 3. 
Create a collection in customized setup mode\n\n// 3.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 3.2 Add fields to schema\nschema.addField(AddFieldReq.builder()\n .fieldName(\"my_id\")\n .dataType(DataType.Int64)\n .isPrimaryKey(true)\n .autoID(false)\n .build());\n\nschema.addField(AddFieldReq.builder()\n .fieldName(\"my_vector\")\n .dataType(DataType.FloatVector)\n .dimension(5)\n .build());\n","// 3. Create a collection in customized setup mode\n// 3.1 Define fields\nconst fields = [\n {\n name: \"my_id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n auto_id: false\n },\n {\n name: \"my_vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n]\n","export fields='[{ \\\n \"fieldName\": \"my_id\", \\\n \"dataType\": \"Int64\", \\\n \"isPrimary\": true \\\n}, \\\n{ \\\n \"fieldName\": \"my_vector\", \\\n \"dataType\": \"FloatVector\", \\\n \"elementTypeParams\": { \\\n \"dim\": 5 \\\n } \\\n}]'\n","# 3.3. Prepare index parameters\nindex_params = client.prepare_index_params()\n\n# 3.4. Add indexes\nindex_params.add_index(\n field_name=\"my_id\",\n index_type=\"STL_SORT\"\n)\n\nindex_params.add_index(\n field_name=\"my_vector\", \n index_type=\"IVF_FLAT\",\n metric_type=\"IP\",\n params={ \"nlist\": 128 }\n)\n","import io.milvus.v2.common.IndexParam;\n\n// 3.3 Prepare index parameters\nIndexParam indexParamForIdField = IndexParam.builder()\n .fieldName(\"my_id\")\n .indexType(IndexParam.IndexType.STL_SORT)\n .build();\n\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"my_vector\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.L2)\n .extraParams(Map.of(\"nlist\", 1024))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForIdField);\nindexParams.add(indexParamForVectorField);\n","// 3.2 Prepare index parameters\nconst index_params = [{\n field_name: \"my_id\",\n index_type: \"STL_SORT\"\n},{\n field_name: \"my_vector\",\n index_type: \"IVF_FLAT\",\n metric_type: \"IP\",\n params: { nlist: 1024}\n}]\n","export indexParams='[{ \\\n \"fieldName\": \"my_id\", \\\n \"indexName\": \"my_id\", \\\n \"params\": { \\\n \"index_type\": \"SLT_SORT\" \\\n } \\\n}, { \\\n \"fieldName\": \"my_vector\", \\\n \"metricType\": \"COSINE\", \\\n \"indexName\": \"my_vector\", \\\n \"params\": { \\\n \"index_type\": \"IVF_FLAT\", \\\n \"nlist\": 1024 \\\n } \\\n}]'\n","# 3.5. 
Create a collection with the index loaded simultaneously\nclient.create_collection(\n collection_name=\"customized_setup_1\",\n schema=schema,\n index_params=index_params\n)\n\ntime.sleep(5)\n\nres = client.get_load_state(\n collection_name=\"customized_setup_1\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","import io.milvus.v2.service.collection.request.CreateCollectionReq;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\n\n// 3.4 Create a collection with schema and index parameters\nCreateCollectionReq customizedSetupReq1 = CreateCollectionReq.builder()\n .collectionName(\"customized_setup_1\")\n .collectionSchema(schema)\n .indexParams(indexParams)\n .build();\n\nclient.createCollection(customizedSetupReq1);\n\n// Thread.sleep(5000);\n\n// 3.5 Get load state of the collection\nGetLoadStateReq customSetupLoadStateReq1 = GetLoadStateReq.builder()\n .collectionName(\"customized_setup_1\")\n .build();\n\nres = client.getLoadState(customSetupLoadStateReq1);\n\nSystem.out.println(res);\n\n// Output:\n// true\n","// 3.3 Create a collection with fields and index parameters\nres = await client.createCollection({\n collection_name: \"customized_setup_1\",\n fields: fields,\n index_params: index_params,\n})\n\nconsole.log(res.error_code) \n\n// Output\n// \n// Success\n// \n\nres = await client.getLoadState({\n collection_name: \"customized_setup_1\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_1\",\n \"schema\": {\n \"autoId\": false,\n \"enabledDynamicField\": false,\n \"fields\": [\n {\n \"fieldName\": \"my_id\",\n \"dataType\": \"Int64\",\n \"isPrimary\": true\n },\n {\n \"fieldName\": \"my_vector\",\n \"dataType\": \"FloatVector\",\n \"elementTypeParams\": {\n \"dim\": \"5\"\n }\n }\n ]\n },\n \"indexParams\": [\n {\n \"fieldName\": \"my_vector\",\n \"metricType\": \"COSINE\",\n \"indexName\": \"my_vector\",\n \"params\": {\n \"index_type\": \"IVF_FLAT\",\n \"nlist\": \"1024\"\n }\n },\n {\n \"fieldName\": \"my_id\",\n \"indexName\": \"my_id\",\n \"params\": {\n \"index_type\": \"STL_SORT\"\n } \n }\n ]\n}'\n\n# Output\n#\n# {\n# \"code\": 0,\n# \"data\": {},\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_1\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": {\n# \"loadProgress\": 100,\n# \"loadState\": \"LoadStateLoaded\"\n# }\n# }\n","# 3.6. 
Create a collection and index it separately\nclient.create_collection(\n collection_name=\"customized_setup_2\",\n schema=schema,\n)\n\nres = client.get_load_state(\n collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","// 3.6 Create a collection and index it separately\nCreateCollectionReq customizedSetupReq2 = CreateCollectionReq.builder()\n .collectionName(\"customized_setup_2\")\n .collectionSchema(schema)\n .build();\n\nclient.createCollection(customizedSetupReq2);\n","// 3.4 Create a collection and index it seperately\nres = await client.createCollection({\n collection_name: \"customized_setup_2\",\n fields: fields,\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.getLoadState({\n collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\",\n \"schema\": {\n \"autoId\": false,\n \"enabledDynamicField\": false,\n \"fields\": [\n {\n \"fieldName\": \"my_id\",\n \"dataType\": \"Int64\",\n \"isPrimary\": true\n },\n {\n \"fieldName\": \"my_vector\",\n \"dataType\": \"FloatVector\",\n \"elementTypeParams\": {\n \"dim\": \"5\"\n }\n }\n ]\n \n }\n}'\n\n# Output\n#\n# {\n# \"code\": 0,\n# \"data\": {},\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": {\n# \"loadState\": \"LoadStateNotLoaded\"\n# }\n# }\n","# 3.6 Create index\nclient.create_index(\n collection_name=\"customized_setup_2\",\n index_params=index_params\n)\n\nres = client.get_load_state(\n collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","CreateIndexReq createIndexReq = CreateIndexReq.builder()\n .collectionName(\"customized_setup_2\")\n .indexParams(indexParams)\n .build();\n\nclient.createIndex(createIndexReq);\n\n// Thread.sleep(1000);\n\n// 3.7 Get load state of the collection\nGetLoadStateReq customSetupLoadStateReq2 = GetLoadStateReq.builder()\n .collectionName(\"customized_setup_2\")\n .build();\n\nres = client.getLoadState(customSetupLoadStateReq2);\n\nSystem.out.println(res);\n\n// Output:\n// false\n","// 3.5 Create index\nres = await client.createIndex({\n collection_name: \"customized_setup_2\",\n field_name: \"my_vector\",\n index_type: \"IVF_FLAT\",\n metric_type: \"IP\",\n params: { nlist: 1024}\n})\n\nres = await client.getLoadState({\n collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n//\n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/indexes/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\",\n \"indexParams\": [\n {\n \"metricType\": \"L2\",\n \"fieldName\": \"my_vector\",\n \"indexName\": \"my_vector\",\n \"indexConfig\": {\n \"index_type\": \"IVF_FLAT\",\n \"nlist\": \"1024\"\n }\n }\n ]\n}'\n\n# Output\n#\n# {\n# \"code\": 0,\n# \"data\": {},\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": {\n# \"loadState\": \"LoadStateNotLoaded\"\n# }\n# }\n","# 5. 
View Collections\nres = client.describe_collection(\n collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"collection_name\": \"customized_setup_2\",\n# \"auto_id\": false,\n# \"num_shards\": 1,\n# \"description\": \"\",\n# \"fields\": [\n# {\n# \"field_id\": 100,\n# \"name\": \"my_id\",\n# \"description\": \"\",\n# \"type\": 5,\n# \"params\": {},\n# \"element_type\": 0,\n# \"is_primary\": true\n# },\n# {\n# \"field_id\": 101,\n# \"name\": \"my_vector\",\n# \"description\": \"\",\n# \"type\": 101,\n# \"params\": {\n# \"dim\": 5\n# },\n# \"element_type\": 0\n# }\n# ],\n# \"aliases\": [],\n# \"collection_id\": 448143479230158446,\n# \"consistency_level\": 2,\n# \"properties\": {},\n# \"num_partitions\": 1,\n# \"enable_dynamic_field\": true\n# }\n\n","import io.milvus.v2.service.collection.request.DescribeCollectionReq;\nimport io.milvus.v2.service.collection.response.DescribeCollectionResp;\n\n// 4. View collections\nDescribeCollectionReq describeCollectionReq = DescribeCollectionReq.builder()\n .collectionName(\"customized_setup_2\")\n .build();\n\nDescribeCollectionResp describeCollectionRes = client.describeCollection(describeCollectionReq);\n\nSystem.out.println(JSONObject.toJSON(describeCollectionRes));\n\n// Output:\n// {\n// \"createTime\": 449005822816026627,\n// \"collectionSchema\": {\"fieldSchemaList\": [\n// {\n// \"autoID\": false,\n// \"dataType\": \"Int64\",\n// \"name\": \"my_id\",\n// \"description\": \"\",\n// \"isPrimaryKey\": true,\n// \"maxLength\": 65535,\n// \"isPartitionKey\": false\n// },\n// {\n// \"autoID\": false,\n// \"dataType\": \"FloatVector\",\n// \"name\": \"my_vector\",\n// \"description\": \"\",\n// \"isPrimaryKey\": false,\n// \"dimension\": 5,\n// \"maxLength\": 65535,\n// \"isPartitionKey\": false\n// }\n// ]},\n// \"vectorFieldName\": [\"my_vector\"],\n// \"autoID\": false,\n// \"fieldNames\": [\n// \"my_id\",\n// \"my_vector\"\n// ],\n// \"description\": \"\",\n// \"numOfPartitions\": 1,\n// \"primaryFieldName\": \"my_id\",\n// \"enableDynamicField\": true,\n// \"collectionName\": \"customized_setup_2\"\n// }\n","// 5. 
View Collections\nres = await client.describeCollection({\n collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// {\n// virtual_channel_names: [ 'by-dev-rootcoord-dml_13_449007919953017716v0' ],\n// physical_channel_names: [ 'by-dev-rootcoord-dml_13' ],\n// aliases: [],\n// start_positions: [],\n// properties: [],\n// status: {\n// extra_info: {},\n// error_code: 'Success',\n// reason: '',\n// code: 0,\n// retriable: false,\n// detail: ''\n// },\n// schema: {\n// fields: [ [Object], [Object] ],\n// properties: [],\n// name: 'customized_setup_2',\n// description: '',\n// autoID: false,\n// enable_dynamic_field: false\n// },\n// collectionID: '449007919953017716',\n// created_timestamp: '449024569603784707',\n// created_utc_timestamp: '1712892797866',\n// shards_num: 1,\n// consistency_level: 'Bounded',\n// collection_name: 'customized_setup_2',\n// db_name: 'default',\n// num_partitions: '1'\n// }\n// \n","curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/describe\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"dbName\": \"default\",\n \"collectionName\": \"test_collection\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": {\n# \"aliases\": [],\n# \"autoId\": false,\n# \"collectionID\": 448707763883002014,\n# \"collectionName\": \"test_collection\",\n# \"consistencyLevel\": \"Bounded\",\n# \"description\": \"\",\n# \"enableDynamicField\": true,\n# \"fields\": [\n# {\n# \"autoId\": false,\n# \"description\": \"\",\n# \"id\": 100,\n# \"name\": \"id\",\n# \"partitionKey\": false,\n# \"primaryKey\": true,\n# \"type\": \"Int64\"\n# },\n# {\n# \"autoId\": false,\n# \"description\": \"\",\n# \"id\": 101,\n# \"name\": \"vector\",\n# \"params\": [\n# {\n# \"key\": \"dim\",\n# \"value\": \"5\"\n# }\n# ],\n# \"partitionKey\": false,\n# \"primaryKey\": false,\n# \"type\": \"FloatVector\"\n# }\n# ],\n# \"indexes\": [\n# {\n# \"fieldName\": \"vector\",\n# \"indexName\": \"vector\",\n# \"metricType\": \"COSINE\"\n# }\n# ],\n# \"load\": \"LoadStateLoaded\",\n# \"partitionsNum\": 1,\n# \"properties\": [],\n# \"shardsNum\": 1\n# }\n# }\n","# 6. List all collection names\nres = client.list_collections()\n\nprint(res)\n\n# Output\n#\n# [\n# \"customized_setup_2\",\n# \"quick_setup\",\n# \"customized_setup_1\"\n# ]\n","import io.milvus.v2.service.collection.response.ListCollectionsResp;\n\n// 5. List all collection names\nListCollectionsResp listCollectionsRes = client.listCollections();\n\nSystem.out.println(listCollectionsRes.getCollectionNames());\n\n// Output:\n// [\n// \"customized_setup_2\",\n// \"quick_setup\",\n// \"customized_setup_1\"\n// ]\n","// 5. List all collection names\nListCollectionsResp listCollectionsRes = client.listCollections();\n\nSystem.out.println(listCollectionsRes.getCollectionNames());\n\n// Output:\n// [\n// \"customized_setup_1\",\n// \"quick_setup\",\n// \"customized_setup_2\"\n// ]\n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/list\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"dbName\": \"default\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": [\n# \"quick_setup\",\n# \"customized_setup_1\",\n# \"customized_setup_2\"\n# ]\n# }\n","# 7. Load the collection\nclient.load_collection(\n collection_name=\"customized_setup_2\",\n replica_number=1 # Number of replicas to create on query nodes. 
Max value is 1 for Milvus Standalone, and no greater than `queryNode.replicas` for Milvus Cluster.\n)\n\nres = client.get_load_state(\n collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","import io.milvus.v2.service.collection.request.LoadCollectionReq;\n\n// 6. Load the collection\nLoadCollectionReq loadCollectionReq = LoadCollectionReq.builder()\n .collectionName(\"customized_setup_2\")\n .build();\n\nclient.loadCollection(loadCollectionReq);\n\n// Thread.sleep(5000);\n\n// 7. Get load state of the collection\nGetLoadStateReq loadStateReq = GetLoadStateReq.builder()\n .collectionName(\"customized_setup_2\")\n .build();\n\nres = client.getLoadState(loadStateReq);\n\nSystem.out.println(res);\n\n// Output:\n// true\n","// 7. Load the collection\nres = await client.loadCollection({\n collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nawait sleep(3000)\n\nres = await client.getLoadState({\n collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/load\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\"\n}'\n\n# Output\n#\n# {\n# \"code\": 0,\n# \"data\": {},\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": {\n# \"loadProgress\": 100,\n# \"loadState\": \"LoadStateLoaded\"\n# }\n# }\n","# 8. Release the collection\nclient.release_collection(\n collection_name=\"customized_setup_2\"\n)\n\nres = client.get_load_state(\n collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","import io.milvus.v2.service.collection.request.ReleaseCollectionReq;\n\n// 8. Release the collection\nReleaseCollectionReq releaseCollectionReq = ReleaseCollectionReq.builder()\n .collectionName(\"customized_setup_2\")\n .build();\n\nclient.releaseCollection(releaseCollectionReq);\n\n// Thread.sleep(1000);\n\nres = client.getLoadState(loadStateReq);\n\nSystem.out.println(res);\n\n// Output:\n// false\n","// 8. Release the collection\nres = await client.releaseCollection({\n collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.getLoadState({\n collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/release\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\"\n}'\n\n# Output\n#\n# {\n# \"code\": 0,\n# \"data\": {},\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\"\n}'\n\n\n# {\n# \"code\": 0,\n# \"data\": {\n# \"loadState\": \"LoadStateNotLoad\"\n# }\n# }\n","# 9.1. Create aliases\nclient.create_alias(\n collection_name=\"customized_setup_2\",\n alias=\"bob\"\n)\n\nclient.create_alias(\n collection_name=\"customized_setup_2\",\n alias=\"alice\"\n)\n","import io.milvus.v2.service.utility.request.CreateAliasReq;\n\n// 9. 
Manage aliases\n\n// 9.1 Create alias\nCreateAliasReq createAliasReq = CreateAliasReq.builder()\n .collectionName(\"customized_setup_2\")\n .alias(\"bob\")\n .build();\n\nclient.createAlias(createAliasReq);\n\ncreateAliasReq = CreateAliasReq.builder()\n .collectionName(\"customized_setup_2\")\n .alias(\"alice\")\n .build();\n\nclient.createAlias(createAliasReq);\n","// 9. Manage aliases\n// 9.1 Create aliases\nres = await client.createAlias({\n collection_name: \"customized_setup_2\",\n alias: \"bob\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.createAlias({\n collection_name: \"customized_setup_2\",\n alias: \"alice\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\",\n \"aliasName\": \"bob\"\n}'\n\n# Output\n#\n# {\n# \"code\": 0,\n# \"data\": {}\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\",\n \"aliasName\": \"alice\"\n}'\n\n# Output\n#\n# {\n# \"code\": 0,\n# \"data\": {}\n# }\n","# 9.2. List aliases\nres = client.list_aliases(\n collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"aliases\": [\n# \"bob\",\n# \"alice\"\n# ],\n# \"collection_name\": \"customized_setup_2\",\n# \"db_name\": \"default\"\n# }\n","import io.milvus.v2.service.utility.request.ListAliasesReq;\nimport io.milvus.v2.service.utility.response.ListAliasResp;\n\n// 9.2 List alises\nListAliasesReq listAliasesReq = ListAliasesReq.builder()\n .collectionName(\"customized_setup_2\")\n .build();\n\nListAliasResp listAliasRes = client.listAliases(listAliasesReq);\n\nSystem.out.println(listAliasRes.getAlias());\n\n// Output:\n// [\n// \"bob\",\n// \"alice\"\n// ]\n","// 9.2 List aliases\nres = await client.listAliases({\n collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.aliases)\n\n// Output\n// \n// [ 'bob', 'alice' ]\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/list\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": [\n# \"bob\",\n# \"alice\"\n# ]\n# }\n","# 9.3. 
Describe aliases\nres = client.describe_alias(\n alias=\"bob\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"alias\": \"bob\",\n# \"collection_name\": \"customized_setup_2\",\n# \"db_name\": \"default\"\n# }\n","import io.milvus.v2.service.utility.request.DescribeAliasReq;\nimport io.milvus.v2.service.utility.response.DescribeAliasResp;\n\n// 9.3 Describe alias\nDescribeAliasReq describeAliasReq = DescribeAliasReq.builder()\n .alias(\"bob\")\n .build();\n\nDescribeAliasResp describeAliasRes = client.describeAlias(describeAliasReq);\n\nSystem.out.println(JSONObject.toJSON(describeAliasRes));\n\n// Output:\n// {\n// \"alias\": \"bob\",\n// \"collectionName\": \"customized_setup_2\"\n// }\n","// 9.3 Describe aliases\nres = await client.describeAlias({\n collection_name: \"customized_setup_2\",\n alias: \"bob\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// {\n// status: {\n// extra_info: {},\n// error_code: 'Success',\n// reason: '',\n// code: 0,\n// retriable: false,\n// detail: ''\n// },\n// db_name: 'default',\n// alias: 'bob',\n// collection: 'customized_setup_2'\n// }\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/describe\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"aliasName\": \"bob\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": {\n# \"aliasName\": \"bob\",\n# \"collectionName\": \"quick_setup\",\n# \"dbName\": \"default\"\n# }\n# }\n","# 9.4 Reassign aliases to other collections\nclient.alter_alias(\n collection_name=\"customized_setup_1\",\n alias=\"alice\"\n)\n\nres = client.list_aliases(\n collection_name=\"customized_setup_1\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"aliases\": [\n# \"alice\"\n# ],\n# \"collection_name\": \"customized_setup_1\",\n# \"db_name\": \"default\"\n# }\n\nres = client.list_aliases(\n collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"aliases\": [\n# \"bob\"\n# ],\n# \"collection_name\": \"customized_setup_2\",\n# \"db_name\": \"default\"\n# }\n","import io.milvus.v2.service.utility.request.AlterAliasReq;\n\n// 9.4 Reassign alias to other collections\nAlterAliasReq alterAliasReq = AlterAliasReq.builder()\n .collectionName(\"customized_setup_1\")\n .alias(\"alice\")\n .build();\n\nclient.alterAlias(alterAliasReq);\n\nlistAliasesReq = ListAliasesReq.builder()\n .collectionName(\"customized_setup_1\")\n .build();\n\nlistAliasRes = client.listAliases(listAliasesReq);\n\nSystem.out.println(listAliasRes.getAlias());\n\n// Output:\n// [\"alice\"]\n\nlistAliasesReq = ListAliasesReq.builder()\n .collectionName(\"customized_setup_2\")\n .build();\n\nlistAliasRes = client.listAliases(listAliasesReq);\n\nSystem.out.println(listAliasRes.getAlias());\n\n// Output:\n// [\"bob\"]\n","// 9.4 Reassign aliases to other collections\nres = await client.alterAlias({\n collection_name: \"customized_setup_1\",\n alias: \"alice\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.listAliases({\n collection_name: \"customized_setup_1\"\n})\n\nconsole.log(res.aliases)\n\n// Output\n// \n// [ 'alice' ]\n// \n\nres = await client.listAliases({\n collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.aliases)\n\n// Output\n// \n// [ 'bob' ]\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/alter\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_1\",\n \"aliasName\": \"alice\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": {}\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/list\" \\\n-H \"Content-Type: 
application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_1\"\n}'\n\n\n# {\n# \"code\": 0,\n# \"data\": [\n# \"alice\"\n# ]\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/list\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\"\n}'\n\n\n# {\n# \"code\": 0,\n# \"data\": [\n# \"bob\"\n# ]\n# }\n","# 9.5 Drop aliases\nclient.drop_alias(\n alias=\"bob\"\n)\n\nclient.drop_alias(\n alias=\"alice\"\n)\n","import io.milvus.v2.service.utility.request.DropAliasReq;\n\n// 9.5 Drop alias\nDropAliasReq dropAliasReq = DropAliasReq.builder()\n .alias(\"bob\")\n .build();\n\nclient.dropAlias(dropAliasReq);\n\ndropAliasReq = DropAliasReq.builder()\n .alias(\"alice\")\n .build();\n\nclient.dropAlias(dropAliasReq);\n","// 9.5 Drop aliases\nres = await client.dropAlias({\n alias: \"bob\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.dropAlias({\n alias: \"alice\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/drop\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"aliasName\": \"bob\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": {}\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/drop\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"aliasName\": \"alice\"\n}'\n\n\n# {\n# \"code\": 0,\n# \"data\": {}\n# }\n","from pymilvus import Collection, connections\n\n# Connect to Milvus server\nconnections.connect(host=\"localhost\", port=\"19530\") # Change to your Milvus server IP and port\n\n# Get existing collection\ncollection = Collection(\"quick_setup\")\n\n# Set the TTL for the data in the collection\ncollection.set_properties(\n properties={\n \"collection.ttl.seconds\": 60\n }\n)\n","from pymilvus import Collection, connections\n\n# Connect to Milvus server\nconnections.connect(host=\"localhost\", port=\"19530\") # Change to your Milvus server IP and port\n\n# Get existing collection\ncollection = Collection(\"quick_setup\")\n\n# Before setting memory mapping property, we need to release the collection first.\ncollection.release()\n\n# Set memory mapping property to True or Flase\ncollection.set_properties(\n properties={\n \"mmap.enabled\": True\n }\n)\n","# 10. Drop the collections\nclient.drop_collection(\n collection_name=\"quick_setup\"\n)\n\nclient.drop_collection(\n collection_name=\"customized_setup_1\"\n)\n\nclient.drop_collection(\n collection_name=\"customized_setup_2\"\n)\n","import io.milvus.v2.service.collection.request.DropCollectionReq;\n\n// 10. Drop collections\n\nDropCollectionReq dropQuickSetupParam = DropCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nclient.dropCollection(dropQuickSetupParam);\n\nDropCollectionReq dropCustomizedSetupParam = DropCollectionReq.builder()\n .collectionName(\"customized_setup_1\")\n .build();\n\nclient.dropCollection(dropCustomizedSetupParam);\n\ndropCustomizedSetupParam = DropCollectionReq.builder()\n .collectionName(\"customized_setup_2\")\n .build();\n\nclient.dropCollection(dropCustomizedSetupParam);\n","// 10. 
Drop the collection\nres = await client.dropCollection({\n collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.dropCollection({\n collection_name: \"customized_setup_1\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.dropCollection({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/drop\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"quick_setup\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": {}\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/drop\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_1\"\n}'\n\n\n# {\n# \"code\": 0,\n# \"data\": {}\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/drop\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\"\n}'\n\n\n# {\n# \"code\": 0,\n# \"data\": {}\n# }\n"],"headingContent":"","anchorList":[{"label":"コレクションの管理","href":"Manage-Collections","type":1,"isActive":false},{"label":"始める前に","href":"Before-you-start","type":2,"isActive":false},{"label":"概要","href":"Overview","type":2,"isActive":false},{"label":"コレクションの作成","href":"Create-Collection","type":2,"isActive":false},{"label":"コレクションの表示","href":"View-Collections","type":2,"isActive":false},{"label":"コレクションのロードとリリース","href":"Load--Release-Collection","type":2,"isActive":false},{"label":"エイリアスの設定","href":"Set-up-aliases","type":2,"isActive":false},{"label":"プロパティの設定","href":"Set-Properties","type":2,"isActive":false},{"label":"コレクションを放棄する","href":"Drop-a-Collection","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection in quick setup mode\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5\n)\n\nres = client.get_load_state(\n collection_name=\"quick_setup\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .build();\n\nclient.createCollection(quickSetupReq);\n\n// Thread.sleep(5000);\n\nGetLoadStateReq quickSetupLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nBoolean res = client.getLoadState(quickSetupLoadStateReq);\n\nSystem.out.println(res);\n\n// Output:\n// true\n","address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. 
Create a collection in quick setup mode\nlet res = await client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n}); \n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n","$ export MILVUS_URI=\"localhost:19530\"\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"quick_setup\",\n \"dimension\": 5\n}'\n\n# Output\n#\n# {\n# \"code\": 0,\n# \"data\": {},\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"quick_setup\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": {\n# \"loadProgress\": 100,\n# \"loadState\": \"LoadStateLoaded\"\n# }\n# }\n","# 3. Create a collection in customized setup mode\n\n# 3.1. Create schema\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n)\n\n# 3.2. Add fields to schema\nschema.add_field(field_name=\"my_id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"my_vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\n","import io.milvus.v2.common.DataType;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\n// 3. Create a collection in customized setup mode\n\n// 3.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 3.2 Add fields to schema\nschema.addField(AddFieldReq.builder()\n .fieldName(\"my_id\")\n .dataType(DataType.Int64)\n .isPrimaryKey(true)\n .autoID(false)\n .build());\n\nschema.addField(AddFieldReq.builder()\n .fieldName(\"my_vector\")\n .dataType(DataType.FloatVector)\n .dimension(5)\n .build());\n","// 3. Create a collection in customized setup mode\n// 3.1 Define fields\nconst fields = [\n {\n name: \"my_id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n auto_id: false\n },\n {\n name: \"my_vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n]\n","export fields='[{ \\\n \"fieldName\": \"my_id\", \\\n \"dataType\": \"Int64\", \\\n \"isPrimary\": true \\\n}, \\\n{ \\\n \"fieldName\": \"my_vector\", \\\n \"dataType\": \"FloatVector\", \\\n \"elementTypeParams\": { \\\n \"dim\": 5 \\\n } \\\n}]'\n","# 3.3. Prepare index parameters\nindex_params = client.prepare_index_params()\n\n# 3.4. 
Add indexes\nindex_params.add_index(\n field_name=\"my_id\",\n index_type=\"STL_SORT\"\n)\n\nindex_params.add_index(\n field_name=\"my_vector\", \n index_type=\"IVF_FLAT\",\n metric_type=\"IP\",\n params={ \"nlist\": 128 }\n)\n","import io.milvus.v2.common.IndexParam;\n\n// 3.3 Prepare index parameters\nIndexParam indexParamForIdField = IndexParam.builder()\n .fieldName(\"my_id\")\n .indexType(IndexParam.IndexType.STL_SORT)\n .build();\n\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"my_vector\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.L2)\n .extraParams(Map.of(\"nlist\", 1024))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForIdField);\nindexParams.add(indexParamForVectorField);\n","// 3.2 Prepare index parameters\nconst index_params = [{\n field_name: \"my_id\",\n index_type: \"STL_SORT\"\n},{\n field_name: \"my_vector\",\n index_type: \"IVF_FLAT\",\n metric_type: \"IP\",\n params: { nlist: 1024}\n}]\n","export indexParams='[{ \\\n \"fieldName\": \"my_id\", \\\n \"indexName\": \"my_id\", \\\n \"params\": { \\\n \"index_type\": \"SLT_SORT\" \\\n } \\\n}, { \\\n \"fieldName\": \"my_vector\", \\\n \"metricType\": \"COSINE\", \\\n \"indexName\": \"my_vector\", \\\n \"params\": { \\\n \"index_type\": \"IVF_FLAT\", \\\n \"nlist\": 1024 \\\n } \\\n}]'\n","# 3.5. Create a collection with the index loaded simultaneously\nclient.create_collection(\n collection_name=\"customized_setup_1\",\n schema=schema,\n index_params=index_params\n)\n\ntime.sleep(5)\n\nres = client.get_load_state(\n collection_name=\"customized_setup_1\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","import io.milvus.v2.service.collection.request.CreateCollectionReq;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\n\n// 3.4 Create a collection with schema and index parameters\nCreateCollectionReq customizedSetupReq1 = CreateCollectionReq.builder()\n .collectionName(\"customized_setup_1\")\n .collectionSchema(schema)\n .indexParams(indexParams)\n .build();\n\nclient.createCollection(customizedSetupReq1);\n\n// Thread.sleep(5000);\n\n// 3.5 Get load state of the collection\nGetLoadStateReq customSetupLoadStateReq1 = GetLoadStateReq.builder()\n .collectionName(\"customized_setup_1\")\n .build();\n\nres = client.getLoadState(customSetupLoadStateReq1);\n\nSystem.out.println(res);\n\n// Output:\n// true\n","// 3.3 Create a collection with fields and index parameters\nres = await client.createCollection({\n collection_name: \"customized_setup_1\",\n fields: fields,\n index_params: index_params,\n})\n\nconsole.log(res.error_code) \n\n// Output\n// \n// Success\n// \n\nres = await client.getLoadState({\n collection_name: \"customized_setup_1\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_1\",\n \"schema\": {\n \"autoId\": false,\n \"enabledDynamicField\": false,\n \"fields\": [\n {\n \"fieldName\": \"my_id\",\n \"dataType\": \"Int64\",\n \"isPrimary\": true\n },\n {\n \"fieldName\": \"my_vector\",\n \"dataType\": \"FloatVector\",\n \"elementTypeParams\": {\n \"dim\": \"5\"\n }\n }\n ]\n },\n \"indexParams\": [\n {\n \"fieldName\": \"my_vector\",\n \"metricType\": \"COSINE\",\n \"indexName\": \"my_vector\",\n \"params\": {\n \"index_type\": \"IVF_FLAT\",\n \"nlist\": \"1024\"\n }\n },\n {\n \"fieldName\": 
\"my_id\",\n \"indexName\": \"my_id\",\n \"params\": {\n \"index_type\": \"STL_SORT\"\n } \n }\n ]\n}'\n\n# Output\n#\n# {\n# \"code\": 0,\n# \"data\": {},\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_1\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": {\n# \"loadProgress\": 100,\n# \"loadState\": \"LoadStateLoaded\"\n# }\n# }\n","# 3.6. Create a collection and index it separately\nclient.create_collection(\n collection_name=\"customized_setup_2\",\n schema=schema,\n)\n\nres = client.get_load_state(\n collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","// 3.6 Create a collection and index it separately\nCreateCollectionReq customizedSetupReq2 = CreateCollectionReq.builder()\n .collectionName(\"customized_setup_2\")\n .collectionSchema(schema)\n .build();\n\nclient.createCollection(customizedSetupReq2);\n","// 3.4 Create a collection and index it seperately\nres = await client.createCollection({\n collection_name: \"customized_setup_2\",\n fields: fields,\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.getLoadState({\n collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\",\n \"schema\": {\n \"autoId\": false,\n \"enabledDynamicField\": false,\n \"fields\": [\n {\n \"fieldName\": \"my_id\",\n \"dataType\": \"Int64\",\n \"isPrimary\": true\n },\n {\n \"fieldName\": \"my_vector\",\n \"dataType\": \"FloatVector\",\n \"elementTypeParams\": {\n \"dim\": \"5\"\n }\n }\n ]\n \n }\n}'\n\n# Output\n#\n# {\n# \"code\": 0,\n# \"data\": {},\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": {\n# \"loadState\": \"LoadStateNotLoaded\"\n# }\n# }\n","# 3.6 Create index\nclient.create_index(\n collection_name=\"customized_setup_2\",\n index_params=index_params\n)\n\nres = client.get_load_state(\n collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","CreateIndexReq createIndexReq = CreateIndexReq.builder()\n .collectionName(\"customized_setup_2\")\n .indexParams(indexParams)\n .build();\n\nclient.createIndex(createIndexReq);\n\n// Thread.sleep(1000);\n\n// 3.7 Get load state of the collection\nGetLoadStateReq customSetupLoadStateReq2 = GetLoadStateReq.builder()\n .collectionName(\"customized_setup_2\")\n .build();\n\nres = client.getLoadState(customSetupLoadStateReq2);\n\nSystem.out.println(res);\n\n// Output:\n// false\n","// 3.5 Create index\nres = await client.createIndex({\n collection_name: \"customized_setup_2\",\n field_name: \"my_vector\",\n index_type: \"IVF_FLAT\",\n metric_type: \"IP\",\n params: { nlist: 1024}\n})\n\nres = await client.getLoadState({\n collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n//\n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/indexes/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\",\n \"indexParams\": [\n {\n \"metricType\": \"L2\",\n \"fieldName\": \"my_vector\",\n \"indexName\": 
\"my_vector\",\n \"indexConfig\": {\n \"index_type\": \"IVF_FLAT\",\n \"nlist\": \"1024\"\n }\n }\n ]\n}'\n\n# Output\n#\n# {\n# \"code\": 0,\n# \"data\": {},\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": {\n# \"loadState\": \"LoadStateNotLoaded\"\n# }\n# }\n","# 5. View Collections\nres = client.describe_collection(\n collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"collection_name\": \"customized_setup_2\",\n# \"auto_id\": false,\n# \"num_shards\": 1,\n# \"description\": \"\",\n# \"fields\": [\n# {\n# \"field_id\": 100,\n# \"name\": \"my_id\",\n# \"description\": \"\",\n# \"type\": 5,\n# \"params\": {},\n# \"element_type\": 0,\n# \"is_primary\": true\n# },\n# {\n# \"field_id\": 101,\n# \"name\": \"my_vector\",\n# \"description\": \"\",\n# \"type\": 101,\n# \"params\": {\n# \"dim\": 5\n# },\n# \"element_type\": 0\n# }\n# ],\n# \"aliases\": [],\n# \"collection_id\": 448143479230158446,\n# \"consistency_level\": 2,\n# \"properties\": {},\n# \"num_partitions\": 1,\n# \"enable_dynamic_field\": true\n# }\n\n","import io.milvus.v2.service.collection.request.DescribeCollectionReq;\nimport io.milvus.v2.service.collection.response.DescribeCollectionResp;\n\n// 4. View collections\nDescribeCollectionReq describeCollectionReq = DescribeCollectionReq.builder()\n .collectionName(\"customized_setup_2\")\n .build();\n\nDescribeCollectionResp describeCollectionRes = client.describeCollection(describeCollectionReq);\n\nSystem.out.println(JSONObject.toJSON(describeCollectionRes));\n\n// Output:\n// {\n// \"createTime\": 449005822816026627,\n// \"collectionSchema\": {\"fieldSchemaList\": [\n// {\n// \"autoID\": false,\n// \"dataType\": \"Int64\",\n// \"name\": \"my_id\",\n// \"description\": \"\",\n// \"isPrimaryKey\": true,\n// \"maxLength\": 65535,\n// \"isPartitionKey\": false\n// },\n// {\n// \"autoID\": false,\n// \"dataType\": \"FloatVector\",\n// \"name\": \"my_vector\",\n// \"description\": \"\",\n// \"isPrimaryKey\": false,\n// \"dimension\": 5,\n// \"maxLength\": 65535,\n// \"isPartitionKey\": false\n// }\n// ]},\n// \"vectorFieldName\": [\"my_vector\"],\n// \"autoID\": false,\n// \"fieldNames\": [\n// \"my_id\",\n// \"my_vector\"\n// ],\n// \"description\": \"\",\n// \"numOfPartitions\": 1,\n// \"primaryFieldName\": \"my_id\",\n// \"enableDynamicField\": true,\n// \"collectionName\": \"customized_setup_2\"\n// }\n","// 5. 
View Collections\nres = await client.describeCollection({\n collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// {\n// virtual_channel_names: [ 'by-dev-rootcoord-dml_13_449007919953017716v0' ],\n// physical_channel_names: [ 'by-dev-rootcoord-dml_13' ],\n// aliases: [],\n// start_positions: [],\n// properties: [],\n// status: {\n// extra_info: {},\n// error_code: 'Success',\n// reason: '',\n// code: 0,\n// retriable: false,\n// detail: ''\n// },\n// schema: {\n// fields: [ [Object], [Object] ],\n// properties: [],\n// name: 'customized_setup_2',\n// description: '',\n// autoID: false,\n// enable_dynamic_field: false\n// },\n// collectionID: '449007919953017716',\n// created_timestamp: '449024569603784707',\n// created_utc_timestamp: '1712892797866',\n// shards_num: 1,\n// consistency_level: 'Bounded',\n// collection_name: 'customized_setup_2',\n// db_name: 'default',\n// num_partitions: '1'\n// }\n// \n","curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/describe\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"dbName\": \"default\",\n \"collectionName\": \"test_collection\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": {\n# \"aliases\": [],\n# \"autoId\": false,\n# \"collectionID\": 448707763883002014,\n# \"collectionName\": \"test_collection\",\n# \"consistencyLevel\": \"Bounded\",\n# \"description\": \"\",\n# \"enableDynamicField\": true,\n# \"fields\": [\n# {\n# \"autoId\": false,\n# \"description\": \"\",\n# \"id\": 100,\n# \"name\": \"id\",\n# \"partitionKey\": false,\n# \"primaryKey\": true,\n# \"type\": \"Int64\"\n# },\n# {\n# \"autoId\": false,\n# \"description\": \"\",\n# \"id\": 101,\n# \"name\": \"vector\",\n# \"params\": [\n# {\n# \"key\": \"dim\",\n# \"value\": \"5\"\n# }\n# ],\n# \"partitionKey\": false,\n# \"primaryKey\": false,\n# \"type\": \"FloatVector\"\n# }\n# ],\n# \"indexes\": [\n# {\n# \"fieldName\": \"vector\",\n# \"indexName\": \"vector\",\n# \"metricType\": \"COSINE\"\n# }\n# ],\n# \"load\": \"LoadStateLoaded\",\n# \"partitionsNum\": 1,\n# \"properties\": [],\n# \"shardsNum\": 1\n# }\n# }\n","# 6. List all collection names\nres = client.list_collections()\n\nprint(res)\n\n# Output\n#\n# [\n# \"customized_setup_2\",\n# \"quick_setup\",\n# \"customized_setup_1\"\n# ]\n","import io.milvus.v2.service.collection.response.ListCollectionsResp;\n\n// 5. List all collection names\nListCollectionsResp listCollectionsRes = client.listCollections();\n\nSystem.out.println(listCollectionsRes.getCollectionNames());\n\n// Output:\n// [\n// \"customized_setup_2\",\n// \"quick_setup\",\n// \"customized_setup_1\"\n// ]\n","// 5. List all collection names\nListCollectionsResp listCollectionsRes = client.listCollections();\n\nSystem.out.println(listCollectionsRes.getCollectionNames());\n\n// Output:\n// [\n// \"customized_setup_1\",\n// \"quick_setup\",\n// \"customized_setup_2\"\n// ]\n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/list\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"dbName\": \"default\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": [\n# \"quick_setup\",\n# \"customized_setup_1\",\n# \"customized_setup_2\"\n# ]\n# }\n","# 7. Load the collection\nclient.load_collection(\n collection_name=\"customized_setup_2\",\n replica_number=1 # Number of replicas to create on query nodes. 
Max value is 1 for Milvus Standalone, and no greater than `queryNode.replicas` for Milvus Cluster.\n)\n\nres = client.get_load_state(\n collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","import io.milvus.v2.service.collection.request.LoadCollectionReq;\n\n// 6. Load the collection\nLoadCollectionReq loadCollectionReq = LoadCollectionReq.builder()\n .collectionName(\"customized_setup_2\")\n .build();\n\nclient.loadCollection(loadCollectionReq);\n\n// Thread.sleep(5000);\n\n// 7. Get load state of the collection\nGetLoadStateReq loadStateReq = GetLoadStateReq.builder()\n .collectionName(\"customized_setup_2\")\n .build();\n\nres = client.getLoadState(loadStateReq);\n\nSystem.out.println(res);\n\n// Output:\n// true\n","// 7. Load the collection\nres = await client.loadCollection({\n collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nawait sleep(3000)\n\nres = await client.getLoadState({\n collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/load\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\"\n}'\n\n# Output\n#\n# {\n# \"code\": 0,\n# \"data\": {},\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": {\n# \"loadProgress\": 100,\n# \"loadState\": \"LoadStateLoaded\"\n# }\n# }\n","# 7. Load the collection\nclient.load_collection(\n collection_name=\"customized_setup_2\",\n load_fields=[\"my_id\", \"my_vector\"] # Load only the specified fields\n skip_load_dynamic_field=True # Skip loading the dynamic field\n)\n\nres = client.get_load_state(\n collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","# 8. Release the collection\nclient.release_collection(\n collection_name=\"customized_setup_2\"\n)\n\nres = client.get_load_state(\n collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","import io.milvus.v2.service.collection.request.ReleaseCollectionReq;\n\n// 8. Release the collection\nReleaseCollectionReq releaseCollectionReq = ReleaseCollectionReq.builder()\n .collectionName(\"customized_setup_2\")\n .build();\n\nclient.releaseCollection(releaseCollectionReq);\n\n// Thread.sleep(1000);\n\nres = client.getLoadState(loadStateReq);\n\nSystem.out.println(res);\n\n// Output:\n// false\n","// 8. Release the collection\nres = await client.releaseCollection({\n collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.getLoadState({\n collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/release\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\"\n}'\n\n# Output\n#\n# {\n# \"code\": 0,\n# \"data\": {},\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\"\n}'\n\n\n# {\n# \"code\": 0,\n# \"data\": {\n# \"loadState\": \"LoadStateNotLoad\"\n# }\n# }\n","# 9.1. 
Create aliases\nclient.create_alias(\n collection_name=\"customized_setup_2\",\n alias=\"bob\"\n)\n\nclient.create_alias(\n collection_name=\"customized_setup_2\",\n alias=\"alice\"\n)\n","import io.milvus.v2.service.utility.request.CreateAliasReq;\n\n// 9. Manage aliases\n\n// 9.1 Create alias\nCreateAliasReq createAliasReq = CreateAliasReq.builder()\n .collectionName(\"customized_setup_2\")\n .alias(\"bob\")\n .build();\n\nclient.createAlias(createAliasReq);\n\ncreateAliasReq = CreateAliasReq.builder()\n .collectionName(\"customized_setup_2\")\n .alias(\"alice\")\n .build();\n\nclient.createAlias(createAliasReq);\n","// 9. Manage aliases\n// 9.1 Create aliases\nres = await client.createAlias({\n collection_name: \"customized_setup_2\",\n alias: \"bob\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.createAlias({\n collection_name: \"customized_setup_2\",\n alias: \"alice\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\",\n \"aliasName\": \"bob\"\n}'\n\n# Output\n#\n# {\n# \"code\": 0,\n# \"data\": {}\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\",\n \"aliasName\": \"alice\"\n}'\n\n# Output\n#\n# {\n# \"code\": 0,\n# \"data\": {}\n# }\n","# 9.2. List aliases\nres = client.list_aliases(\n collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"aliases\": [\n# \"bob\",\n# \"alice\"\n# ],\n# \"collection_name\": \"customized_setup_2\",\n# \"db_name\": \"default\"\n# }\n","import io.milvus.v2.service.utility.request.ListAliasesReq;\nimport io.milvus.v2.service.utility.response.ListAliasResp;\n\n// 9.2 List alises\nListAliasesReq listAliasesReq = ListAliasesReq.builder()\n .collectionName(\"customized_setup_2\")\n .build();\n\nListAliasResp listAliasRes = client.listAliases(listAliasesReq);\n\nSystem.out.println(listAliasRes.getAlias());\n\n// Output:\n// [\n// \"bob\",\n// \"alice\"\n// ]\n","// 9.2 List aliases\nres = await client.listAliases({\n collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.aliases)\n\n// Output\n// \n// [ 'bob', 'alice' ]\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/list\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": [\n# \"bob\",\n# \"alice\"\n# ]\n# }\n","# 9.3. 
Describe aliases\nres = client.describe_alias(\n alias=\"bob\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"alias\": \"bob\",\n# \"collection_name\": \"customized_setup_2\",\n# \"db_name\": \"default\"\n# }\n","import io.milvus.v2.service.utility.request.DescribeAliasReq;\nimport io.milvus.v2.service.utility.response.DescribeAliasResp;\n\n// 9.3 Describe alias\nDescribeAliasReq describeAliasReq = DescribeAliasReq.builder()\n .alias(\"bob\")\n .build();\n\nDescribeAliasResp describeAliasRes = client.describeAlias(describeAliasReq);\n\nSystem.out.println(JSONObject.toJSON(describeAliasRes));\n\n// Output:\n// {\n// \"alias\": \"bob\",\n// \"collectionName\": \"customized_setup_2\"\n// }\n","// 9.3 Describe aliases\nres = await client.describeAlias({\n collection_name: \"customized_setup_2\",\n alias: \"bob\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// {\n// status: {\n// extra_info: {},\n// error_code: 'Success',\n// reason: '',\n// code: 0,\n// retriable: false,\n// detail: ''\n// },\n// db_name: 'default',\n// alias: 'bob',\n// collection: 'customized_setup_2'\n// }\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/describe\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"aliasName\": \"bob\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": {\n# \"aliasName\": \"bob\",\n# \"collectionName\": \"quick_setup\",\n# \"dbName\": \"default\"\n# }\n# }\n","# 9.4 Reassign aliases to other collections\nclient.alter_alias(\n collection_name=\"customized_setup_1\",\n alias=\"alice\"\n)\n\nres = client.list_aliases(\n collection_name=\"customized_setup_1\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"aliases\": [\n# \"alice\"\n# ],\n# \"collection_name\": \"customized_setup_1\",\n# \"db_name\": \"default\"\n# }\n\nres = client.list_aliases(\n collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"aliases\": [\n# \"bob\"\n# ],\n# \"collection_name\": \"customized_setup_2\",\n# \"db_name\": \"default\"\n# }\n","import io.milvus.v2.service.utility.request.AlterAliasReq;\n\n// 9.4 Reassign alias to other collections\nAlterAliasReq alterAliasReq = AlterAliasReq.builder()\n .collectionName(\"customized_setup_1\")\n .alias(\"alice\")\n .build();\n\nclient.alterAlias(alterAliasReq);\n\nlistAliasesReq = ListAliasesReq.builder()\n .collectionName(\"customized_setup_1\")\n .build();\n\nlistAliasRes = client.listAliases(listAliasesReq);\n\nSystem.out.println(listAliasRes.getAlias());\n\n// Output:\n// [\"alice\"]\n\nlistAliasesReq = ListAliasesReq.builder()\n .collectionName(\"customized_setup_2\")\n .build();\n\nlistAliasRes = client.listAliases(listAliasesReq);\n\nSystem.out.println(listAliasRes.getAlias());\n\n// Output:\n// [\"bob\"]\n","// 9.4 Reassign aliases to other collections\nres = await client.alterAlias({\n collection_name: \"customized_setup_1\",\n alias: \"alice\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.listAliases({\n collection_name: \"customized_setup_1\"\n})\n\nconsole.log(res.aliases)\n\n// Output\n// \n// [ 'alice' ]\n// \n\nres = await client.listAliases({\n collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.aliases)\n\n// Output\n// \n// [ 'bob' ]\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/alter\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_1\",\n \"aliasName\": \"alice\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": {}\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/list\" \\\n-H \"Content-Type: 
application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_1\"\n}'\n\n\n# {\n# \"code\": 0,\n# \"data\": [\n# \"alice\"\n# ]\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/list\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\"\n}'\n\n\n# {\n# \"code\": 0,\n# \"data\": [\n# \"bob\"\n# ]\n# }\n","# 9.5 Drop aliases\nclient.drop_alias(\n alias=\"bob\"\n)\n\nclient.drop_alias(\n alias=\"alice\"\n)\n","import io.milvus.v2.service.utility.request.DropAliasReq;\n\n// 9.5 Drop alias\nDropAliasReq dropAliasReq = DropAliasReq.builder()\n .alias(\"bob\")\n .build();\n\nclient.dropAlias(dropAliasReq);\n\ndropAliasReq = DropAliasReq.builder()\n .alias(\"alice\")\n .build();\n\nclient.dropAlias(dropAliasReq);\n","// 9.5 Drop aliases\nres = await client.dropAlias({\n alias: \"bob\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.dropAlias({\n alias: \"alice\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/drop\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"aliasName\": \"bob\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": {}\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/drop\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"aliasName\": \"alice\"\n}'\n\n\n# {\n# \"code\": 0,\n# \"data\": {}\n# }\n","from pymilvus import Collection, connections\n\n# Connect to Milvus server\nconnections.connect(host=\"localhost\", port=\"19530\") # Change to your Milvus server IP and port\n\n# Get existing collection\ncollection = Collection(\"quick_setup\")\n\n# Set the TTL for the data in the collection\ncollection.set_properties(\n properties={\n \"collection.ttl.seconds\": 60\n }\n)\n","from pymilvus import Collection, connections\n\n# Connect to Milvus server\nconnections.connect(host=\"localhost\", port=\"19530\") # Change to your Milvus server IP and port\n\n# Get existing collection\ncollection = Collection(\"quick_setup\")\n\n# Before setting memory mapping property, we need to release the collection first.\ncollection.release()\n\n# Set memory mapping property to True or Flase\ncollection.set_properties(\n properties={\n \"mmap.enabled\": True\n }\n)\n","# 10. Drop the collections\nclient.drop_collection(\n collection_name=\"quick_setup\"\n)\n\nclient.drop_collection(\n collection_name=\"customized_setup_1\"\n)\n\nclient.drop_collection(\n collection_name=\"customized_setup_2\"\n)\n","import io.milvus.v2.service.collection.request.DropCollectionReq;\n\n// 10. Drop collections\n\nDropCollectionReq dropQuickSetupParam = DropCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nclient.dropCollection(dropQuickSetupParam);\n\nDropCollectionReq dropCustomizedSetupParam = DropCollectionReq.builder()\n .collectionName(\"customized_setup_1\")\n .build();\n\nclient.dropCollection(dropCustomizedSetupParam);\n\ndropCustomizedSetupParam = DropCollectionReq.builder()\n .collectionName(\"customized_setup_2\")\n .build();\n\nclient.dropCollection(dropCustomizedSetupParam);\n","// 10. 
Drop the collection\nres = await client.dropCollection({\n collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.dropCollection({\n collection_name: \"customized_setup_1\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.dropCollection({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/drop\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"quick_setup\"\n}'\n\n# {\n# \"code\": 0,\n# \"data\": {}\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/drop\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_1\"\n}'\n\n\n# {\n# \"code\": 0,\n# \"data\": {}\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/drop\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n \"collectionName\": \"customized_setup_2\"\n}'\n\n\n# {\n# \"code\": 0,\n# \"data\": {}\n# }\n"],"headingContent":"Manage Collections","anchorList":[{"label":"コレクションの管理","href":"Manage-Collections","type":1,"isActive":false},{"label":"始める前に","href":"Before-you-start","type":2,"isActive":false},{"label":"概要","href":"Overview","type":2,"isActive":false},{"label":"コレクションの作成","href":"Create-Collection","type":2,"isActive":false},{"label":"コレクションの表示","href":"View-Collections","type":2,"isActive":false},{"label":"コレクションのロードとリリース","href":"Load--Release-Collection","type":2,"isActive":false},{"label":"エイリアスの設定","href":"Set-up-aliases","type":2,"isActive":false},{"label":"プロパティの設定","href":"Set-Properties","type":2,"isActive":false},{"label":"コレクションを放棄する","href":"Drop-a-Collection","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/userGuide/manage-collections.md b/localization/v2.4.x/site/ja/userGuide/manage-collections.md index 85d873e3f..62d569c04 100644 --- a/localization/v2.4.x/site/ja/userGuide/manage-collections.md +++ b/localization/v2.4.x/site/ja/userGuide/manage-collections.md @@ -1,6 +1,6 @@ --- id: manage-collections.md -title: コレクション管理 +title: コレクションの管理 ---

      Manage Collections

    • Customized setup

      -

      Instead of leaving almost everything about the collection up to Milvus, you can decide the collection's schema and index parameters yourself. For details, refer to Customized setup.

    • +

      Instead of leaving almost everything about the collection to Milvus, you can decide the collection's schema and index parameters on your own. For details, refer to Customized setup.

    -

    Quick setup

    Against the backdrop of great leaps in the AI industry, most developers need a simple yet dynamic collection. Milvus allows you to quickly set up such a collection with just three arguments:

    +

    Quick setup

    Against the backdrop of great leaps in the AI industry, most developers need a simple yet dynamic collection. Milvus allows you to quickly set up such a collection by specifying just three arguments:

    • The name of the collection to create,

    • The dimension of the vector embeddings to insert,

    • @@ -225,7 +225,7 @@ $ curl -X POST "http://id
      and vector are treated as dynamic fields. These extra fields are saved as key-value pairs within a special field named $meta. This feature allows you to include extra fields during data insertion.

    The collection created with the provided code is automatically indexed and loaded, and is immediately ready for data insertion.
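    As a reference, a minimal Python sketch of the quick setup, mirroring the snippet in the code list above (the URI and collection name are illustrative):

```python
from pymilvus import MilvusClient

# 1. Set up a Milvus client (adjust the URI to your deployment)
client = MilvusClient(uri="http://localhost:19530")

# 2. Create a collection in quick setup mode: only the collection name
#    and the vector dimension are required; Milvus indexes and loads the
#    collection automatically.
client.create_collection(
    collection_name="quick_setup",
    dimension=5
)

# Confirm that the collection is loaded
res = client.get_load_state(collection_name="quick_setup")
print(res)
```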

    -

    Customized setup

    Instead of letting Milvus decide almost everything for your collection, you can determine the collection's schema and index parameters on your own.

    +

    Customized setup

    Instead of letting Milvus decide nearly everything for the collection, you can determine the collection's schema and index parameters yourself.

    Step 1: Set up the schema

    A schema defines the structure of a collection. Within a schema, you have the option to enable or disable enable_dynamic_field, add predefined fields, and set attributes for each field. For details on the concept and the available data types, refer to Schema Explained.

    To set up the schema, use create_schema() to create a schema object and add_field() to add fields to the schema.
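    A minimal Python sketch, taken from the code list above; the field names my_id and my_vector are the examples used throughout this page:

```python
from pymilvus import MilvusClient, DataType

# 3.1. Create a schema object
schema = MilvusClient.create_schema(
    auto_id=False,
    enable_dynamic_field=True,
)

# 3.2. Add fields to the schema: an Int64 primary key and a
#      5-dimensional float vector field
schema.add_field(field_name="my_id", datatype=DataType.INT64, is_primary=True)
schema.add_field(field_name="my_vector", datatype=DataType.FLOAT_VECTOR, dim=5)
```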

    @@ -360,7 +360,7 @@ schema.addField(AddFieldReq.builder() autoID -
    Setting this to true makes the primary field increment automatically. In this case, to avoid errors, the primary field should not be included in the data to insert. +
    Setting this to true makes the primary field increment automatically. In this case, the primary field should not be included in the data to insert, so as to avoid errors. dimension @@ -523,7 +523,7 @@ indexParams.add(indexParamForVectorField); metric_type - The algorithm used to measure similarity between vectors. Possible values are IP, L2, COSINE, JACCARD, and HAMMING. This applies only when the specified field is a vector field. For details, refer to Indexes supported in Milvus. + The algorithm used to measure similarity between vectors. Possible values are IP, L2, COSINE, JACCARD, and HAMMING. This applies only when the specified field is a vector field. For more information, refer to Indexes supported in Milvus. params @@ -571,11 +571,11 @@ indexParams.add(indexParamForVectorField); index_type - The algorithm name used to arrange data in the specific field. For applicable algorithms, refer to In-memory Index and On-disk Index. + The name of the algorithm used to arrange data in the specific field. For applicable algorithms, refer to In-memory Index and On-disk Index. metric_type - The algorithm used to measure similarity between vectors. Possible values are IP, L2, COSINE, JACCARD, and HAMMING. This applies only when the specified field is a vector field. For details, refer to Indexes supported in Milvus. + The algorithm used to measure similarity between vectors. Possible values are IP, L2, COSINE, JACCARD, and HAMMING. This applies only when the specified field is a vector field. For more information, refer to Indexes supported in Milvus. params @@ -601,7 +601,7 @@ indexParams.add(indexParamForVectorField); metricType - The algorithm used to measure the similarity between vectors. Possible values are IP, L2, COSINE, JACCARD, and HAMMING. This applies only when the specified field is a vector field. For details, refer to Indexes supported in Milvus. + The algorithm used to measure the similarity between vectors. Possible values are IP, L2, COSINE, JACCARD, and HAMMING. This applies only when the specified field is a vector field. For more information, refer to Indexes supported in Milvus. params @@ -881,7 +881,7 @@ $ curl -X POST "http://schema -The schema of this collection.
    Setting this to None means this collection will be created with default settings.
    To set up a collection with a customized schema, you need to create a CollectionSchema object and reference it here. In this case, Milvus ignores all other schema-related settings carried in the request. +The schema of this collection.
    Setting this to None means this collection will be created with default settings.
    To set up a collection with a customized schema, you need to create a CollectionSchema object and reference it here. In this case, Milvus ignores all other schema-related settings carried in the request. index_params @@ -903,11 +903,11 @@ $ curl -X POST "http://collectionSchema
    -
    Leaving this empty means this collection will be created with default settings. To set up a collection with a customized schema, you need to create a CollectionSchema object and reference it here. +
    Leaving this empty means this collection will be created with default settings. To set up a collection with a customized schema, you need to create a CollectionSchema object and reference it here. indexParams -The parameters for building the index on the vector field in this collection. To set up a collection with a customized schema and automatically load the collection to memory, create an IndexParams object with a list of IndexParam objects and reference it here. +The parameters for building the index on the vector field in this collection. To set up a collection with a customized schema and automatically load the collection to memory, create an IndexParams object with a list of IndexParam objects and reference it here.

    @@ -951,7 +951,7 @@ $ curl -X POST "http://schema.autoID -Whether to allow the primary field to increment automatically. Setting this to True makes the primary field increment automatically. In this case, to avoid errors, the primary field should not be included in the data to insert. Set this parameter on the field with is_primary set to True. +Whether to allow the primary field to increment automatically. Setting this to True makes the primary field increment automatically. In this case, to avoid errors, the primary field should not be included in the data to insert. Set this parameter on the field with is_primary set to True. schema.enableDynamicField @@ -1142,7 +1142,7 @@ $ curl -X POST "http://metric_type
    - The algorithm used to measure similarity between vectors. Possible values are IP, L2, COSINE, JACCARD, and HAMMING. This applies only when the specified field is a vector field. For details, refer to Indexes supported in Milvus. + The algorithm used to measure similarity between vectors. Possible values are IP, L2, COSINE, JACCARD, and HAMMING. This applies only when the specified field is a vector field. For more information, refer to Indexes supported in Milvus. params @@ -1180,7 +1180,7 @@ $ curl -X POST "http://indexParams.indexConfig.index_type - The kind of index to create. + The type of index to create. indexParams.indexConfig.nlist @@ -1481,12 +1481,12 @@ System.out.println(listCollectionsRes.getCollectionNames()); -

    During the collection loading process, Milvus loads the collection's index files into memory. Conversely, when releasing a collection, Milvus unloads the index files from memory. Before conducting searches in a collection, make sure the collection is loaded.

    +

    When loading a collection, Milvus loads the collection's index files into memory. Conversely, when releasing a collection, Milvus unloads the index files from memory. Before conducting searches in a collection, make sure the collection is loaded.

    Load the collection

    To load a collection, use the load_collection() method, specifying the collection name. You can also set replica_number to determine how many in-memory replicas of data segments to create on query nodes when the collection is loaded; a minimal sketch follows the list below.

      -
    • Milvus Standalone: the maximum allowed value for replica_number is 1.
    • -
    • Milvus cluster: the maximum value should not exceed the queryNode.replicas set in your Milvus configuration. For details, refer to Query Node-related Configurations.
    • +
    • For Milvus Standalone: the maximum allowed value for replica_number is 1.
    • +
    • Milvus Cluster: the maximum value should not exceed the queryNode.replicas set in your Milvus configuration. For details, refer to Query Node-related Configurations.
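    A minimal Python sketch, mirroring the snippet in the code list above:

```python
# 7. Load the collection with one in-memory replica
client.load_collection(
    collection_name="customized_setup_2",
    replica_number=1
)

# Verify that loading has completed
res = client.get_load_state(collection_name="customized_setup_2")
print(res)

# Expected output: {"state": "<LoadState: Loaded>"}
```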
    @@ -1593,7 +1593,34 @@ $ curl -X POST "http://# } # }
    -

    Release the collection

    +

    Load a collection partially (Public Preview)

    +

    This feature is currently in public preview. The API and functionality may change in the future.

    +
    +

    Upon receiving a load request, Milvus loads all vector field indexes and all scalar field data into memory. If some fields are not involved in searches or queries, you can exclude them from loading to reduce memory usage and improve search performance.

    +
    +
    # 7. Load the collection
    +client.load_collection(
    +    collection_name="customized_setup_2",
    +    load_fields=["my_id", "my_vector"], # Load only the specified fields
    +    skip_load_dynamic_field=True # Skip loading the dynamic field
    +)
    +
    +res = client.get_load_state(
    +    collection_name="customized_setup_2"
    +)
    +
    +print(res)
    +
    +# Output
    +#
    +# {
    +#     "state": "<LoadState: Loaded>"
    +# }
    +
    +

    Note that only the fields listed in load_fields can be used as filtering conditions and output fields in searches and queries. The list must always include the primary key. Field names excluded from loading cannot be used for filtering or output.

    +

    With skip_load_dynamic_field=True, you can skip loading the dynamic field. Milvus treats the dynamic field as a single field, so all keys within the dynamic field are included or excluded together.

    +
    +

    Release the collection

    To release a collection, use the release_collection() method, as sketched below.
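    A minimal Python sketch, mirroring the snippet in the code list above:

```python
# 8. Release the collection
client.release_collection(collection_name="customized_setup_2")

# The load state should now indicate that the collection is not loaded
res = client.get_load_state(collection_name="customized_setup_2")
print(res)
```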

    @@ -2323,7 +2350,7 @@ collection.set_properties( } )
    -

    Set MMAP

    This determines whether to map data into memory to improve query performance. For details, refer to Configure memory mapping

    +

    Set MMAP

    This determines whether to map data into memory to improve query performance. For details, refer to Configure memory mapping.

    Release the collection before setting the MMAP property; otherwise, an error will occur.
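    A minimal Python sketch of the release-then-set sequence, taken from the code list above (host, port, and collection name are illustrative):

```python
from pymilvus import Collection, connections

# Connect to the Milvus server (change to your server's IP and port)
connections.connect(host="localhost", port="19530")

# Get the existing collection
collection = Collection("quick_setup")

# Release the collection first; setting mmap on a loaded collection
# raises an error.
collection.release()

# Enable memory mapping for the collection
collection.set_properties(
    properties={
        "mmap.enabled": True
    }
)
```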

    diff --git a/localization/v2.4.x/site/ja/userGuide/manage-indexes/index-vector-fields.json b/localization/v2.4.x/site/ja/userGuide/manage-indexes/index-vector-fields.json index c578ffe79..79b563660 100644 --- a/localization/v2.4.x/site/ja/userGuide/manage-indexes/index-vector-fields.json +++ b/localization/v2.4.x/site/ja/userGuide/manage-indexes/index-vector-fields.json @@ -1 +1 @@ -{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create schema\n# 2.1. Create schema\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n)\n\n# 2.2. Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\n\n# 3. Create collection\nclient.create_collection(\n collection_name=\"customized_setup\", \n schema=schema, \n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder().fieldName(\"id\").dataType(DataType.Int64).isPrimaryKey(true).autoID(false).build());\nschema.addField(AddFieldReq.builder().fieldName(\"vector\").dataType(DataType.FloatVector).dimension(5).build());\n\n// 3 Create a collection without schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n.collectionName(\"customized_setup\")\n.collectionSchema(schema)\n.build();\n\nclient.createCollection(customizedSetupReq);\n","// 1. Set up a Milvus Client\nclient = new MilvusClient({address, token});\n\n// 2. Define fields for the collection\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n autoID: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n]\n\n// 3. Create a collection\nres = await client.createCollection({\n collection_name: \"customized_setup\",\n fields: fields,\n})\n\nconsole.log(res.error_code) \n\n// Output\n// \n// Success\n// \n","# 4.1. Set up the index parameters\nindex_params = MilvusClient.prepare_index_params()\n\n# 4.2. Add an index on the vector field.\nindex_params.add_index(\n field_name=\"vector\",\n metric_type=\"COSINE\",\n index_type=\"IVF_FLAT\",\n index_name=\"vector_index\",\n params={ \"nlist\": 128 }\n)\n\n# 4.3. 
Create an index file\nclient.create_index(\n collection_name=\"customized_setup\",\n index_params=index_params\n)\n","import io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.index.request.CreateIndexReq;\n\n// 4 Prepare index parameters\n\n// 4.2 Add an index for the vector field \"vector\"\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexName(\"vector_index\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.COSINE)\n .extraParams(Map.of(\"nlist\", 128))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n\n// 4.3 Crate an index file\nCreateIndexReq createIndexReq = CreateIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexParams(indexParams)\n .build();\n\nclient.createIndex(createIndexReq);\n","// 4. Set up index for the collection\n// 4.1. Set up the index parameters\nres = await client.createIndex({\n collection_name: \"customized_setup\",\n field_name: \"vector\",\n index_type: \"AUTOINDEX\",\n metric_type: \"COSINE\", \n index_name: \"vector_index\",\n params: { \"nlist\": 128 }\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","# 5. Describe index\nres = client.list_indexes(\n collection_name=\"customized_setup\"\n)\n\nprint(res)\n\n# Output\n#\n# [\n# \"vector_index\",\n# ]\n\nres = client.describe_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"index_type\": ,\n# \"metric_type\": \"COSINE\",\n# \"field_name\": \"vector\",\n# \"index_name\": \"vector_index\"\n# }\n","import io.milvus.v2.service.index.request.DescribeIndexReq;\nimport io.milvus.v2.service.index.response.DescribeIndexResp;\n\n// 5. Describe index\n// 5.1 List the index names\nListIndexesReq listIndexesReq = ListIndexesReq.builder()\n .collectionName(\"customized_setup\")\n .build();\n\nList indexNames = client.listIndexes(listIndexesReq);\n\nSystem.out.println(indexNames);\n\n// Output:\n// [\n// \"vector_index\"\n// ]\n\n// 5.2 Describe an index\nDescribeIndexReq describeIndexReq = DescribeIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nDescribeIndexResp describeIndexResp = client.describeIndex(describeIndexReq);\n\nSystem.out.println(JSONObject.toJSON(describeIndexResp));\n\n// Output:\n// {\n// \"metricType\": \"COSINE\",\n// \"indexType\": \"AUTOINDEX\",\n// \"fieldName\": \"vector\",\n// \"indexName\": \"vector_index\"\n// }\n","// 5. Describe the index\nres = await client.describeIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(JSON.stringify(res.index_descriptions, null, 2))\n\n// Output\n// \n// [\n// {\n// \"params\": [\n// {\n// \"key\": \"index_type\",\n// \"value\": \"AUTOINDEX\"\n// },\n// {\n// \"key\": \"metric_type\",\n// \"value\": \"COSINE\"\n// }\n// ],\n// \"index_name\": \"vector_index\",\n// \"indexID\": \"449007919953063141\",\n// \"field_name\": \"vector\",\n// \"indexed_rows\": \"0\",\n// \"total_rows\": \"0\",\n// \"state\": \"Finished\",\n// \"index_state_fail_reason\": \"\",\n// \"pending_index_rows\": \"0\"\n// }\n// ]\n// \n","# 6. Drop index\nclient.drop_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n","// 6. Drop index\n\nDropIndexReq dropIndexReq = DropIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nclient.dropIndex(dropIndexReq);\n","// 6. 
Drop the index\nres = await client.dropIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n"],"headingContent":"","anchorList":[{"label":"ベクトルフィールドのインデックス","href":"Index-Vector-Fields","type":1,"isActive":false},{"label":"概要","href":"Overview","type":2,"isActive":false},{"label":"準備","href":"Preparations","type":2,"isActive":false},{"label":"コレクションのインデックス","href":"Index-a-Collection","type":2,"isActive":false},{"label":"インデックスの詳細の確認","href":"Check-Index-Details","type":2,"isActive":false},{"label":"インデックスの削除","href":"Drop-an-Index","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create schema\n# 2.1. Create schema\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n)\n\n# 2.2. Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\n\n# 3. Create collection\nclient.create_collection(\n collection_name=\"customized_setup\", \n schema=schema, \n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder().fieldName(\"id\").dataType(DataType.Int64).isPrimaryKey(true).autoID(false).build());\nschema.addField(AddFieldReq.builder().fieldName(\"vector\").dataType(DataType.FloatVector).dimension(5).build());\n\n// 3 Create a collection without schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n.collectionName(\"customized_setup\")\n.collectionSchema(schema)\n.build();\n\nclient.createCollection(customizedSetupReq);\n","// 1. Set up a Milvus Client\nclient = new MilvusClient({address, token});\n\n// 2. Define fields for the collection\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n autoID: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n]\n\n// 3. Create a collection\nres = await client.createCollection({\n collection_name: \"customized_setup\",\n fields: fields,\n})\n\nconsole.log(res.error_code) \n\n// Output\n// \n// Success\n// \n","# 4.1. Set up the index parameters\nindex_params = MilvusClient.prepare_index_params()\n\n# 4.2. Add an index on the vector field.\nindex_params.add_index(\n field_name=\"vector\",\n metric_type=\"COSINE\",\n index_type=\"IVF_FLAT\",\n index_name=\"vector_index\",\n params={ \"nlist\": 128 }\n)\n\n# 4.3. Create an index file\nclient.create_index(\n collection_name=\"customized_setup\",\n index_params=index_params,\n sync=False # Whether to wait for index creation to complete before returning. 
Defaults to True.\n)\n","import io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.index.request.CreateIndexReq;\n\n// 4 Prepare index parameters\n\n// 4.2 Add an index for the vector field \"vector\"\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexName(\"vector_index\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.COSINE)\n .extraParams(Map.of(\"nlist\", 128))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n\n// 4.3 Crate an index file\nCreateIndexReq createIndexReq = CreateIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexParams(indexParams)\n .build();\n\nclient.createIndex(createIndexReq);\n","// 4. Set up index for the collection\n// 4.1. Set up the index parameters\nres = await client.createIndex({\n collection_name: \"customized_setup\",\n field_name: \"vector\",\n index_type: \"AUTOINDEX\",\n metric_type: \"COSINE\", \n index_name: \"vector_index\",\n params: { \"nlist\": 128 }\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","# 5. Describe index\nres = client.list_indexes(\n collection_name=\"customized_setup\"\n)\n\nprint(res)\n\n# Output\n#\n# [\n# \"vector_index\",\n# ]\n\nres = client.describe_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"index_type\": ,\n# \"metric_type\": \"COSINE\",\n# \"field_name\": \"vector\",\n# \"index_name\": \"vector_index\"\n# }\n","import io.milvus.v2.service.index.request.DescribeIndexReq;\nimport io.milvus.v2.service.index.response.DescribeIndexResp;\n\n// 5. Describe index\n// 5.1 List the index names\nListIndexesReq listIndexesReq = ListIndexesReq.builder()\n .collectionName(\"customized_setup\")\n .build();\n\nList indexNames = client.listIndexes(listIndexesReq);\n\nSystem.out.println(indexNames);\n\n// Output:\n// [\n// \"vector_index\"\n// ]\n\n// 5.2 Describe an index\nDescribeIndexReq describeIndexReq = DescribeIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nDescribeIndexResp describeIndexResp = client.describeIndex(describeIndexReq);\n\nSystem.out.println(JSONObject.toJSON(describeIndexResp));\n\n// Output:\n// {\n// \"metricType\": \"COSINE\",\n// \"indexType\": \"AUTOINDEX\",\n// \"fieldName\": \"vector\",\n// \"indexName\": \"vector_index\"\n// }\n","// 5. Describe the index\nres = await client.describeIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(JSON.stringify(res.index_descriptions, null, 2))\n\n// Output\n// \n// [\n// {\n// \"params\": [\n// {\n// \"key\": \"index_type\",\n// \"value\": \"AUTOINDEX\"\n// },\n// {\n// \"key\": \"metric_type\",\n// \"value\": \"COSINE\"\n// }\n// ],\n// \"index_name\": \"vector_index\",\n// \"indexID\": \"449007919953063141\",\n// \"field_name\": \"vector\",\n// \"indexed_rows\": \"0\",\n// \"total_rows\": \"0\",\n// \"state\": \"Finished\",\n// \"index_state_fail_reason\": \"\",\n// \"pending_index_rows\": \"0\"\n// }\n// ]\n// \n","# 6. Drop index\nclient.drop_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n","// 6. Drop index\n\nDropIndexReq dropIndexReq = DropIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nclient.dropIndex(dropIndexReq);\n","// 6. 
Drop the index\nres = await client.dropIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n"],"headingContent":"Index Vector Fields","anchorList":[{"label":"ベクトルフィールドのインデックス","href":"Index-Vector-Fields","type":1,"isActive":false},{"label":"概要","href":"Overview","type":2,"isActive":false},{"label":"準備","href":"Preparations","type":2,"isActive":false},{"label":"コレクションのインデックス","href":"Index-a-Collection","type":2,"isActive":false},{"label":"インデックスの詳細の確認","href":"Check-Index-Details","type":2,"isActive":false},{"label":"インデックスの削除","href":"Drop-an-Index","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/userGuide/manage-indexes/index-vector-fields.md b/localization/v2.4.x/site/ja/userGuide/manage-indexes/index-vector-fields.md index a37d4843e..d324ea5f6 100644 --- a/localization/v2.4.x/site/ja/userGuide/manage-indexes/index-vector-fields.md +++ b/localization/v2.4.x/site/ja/userGuide/manage-indexes/index-vector-fields.md @@ -2,7 +2,7 @@ id: index-vector-fields.md order: 1 summary: このガイドでは、コレクション内のベクトル・フィールドに対するインデックスの作成と管理に関する基本的な操作について説明します。 -title: インデックス・ベクトル・フィールド +title: ベクトルフィールドのインデックス ---

Index Vector Fields

By leveraging the metadata stored in index files, Milvus organizes data into a specialized structure, making it easy to rapidly retrieve the requested information during searches or queries.

+

Milvus leverages the metadata stored in index files to organize data into a specialized structure, so that the requested information can be retrieved quickly during searches or queries.

For efficient similarity search, Milvus offers several index types and metrics for sorting field values. The table below lists the index types and metrics supported for different vector field types. For details, refer to In-memory Index and Similarity Metrics.
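For instance, a hedged sketch of selecting a particular index type and metric when preparing index parameters; the HNSW/L2 combination and its parameter values here are illustrative choices, not recommendations:

```python
from pymilvus import MilvusClient

index_params = MilvusClient.prepare_index_params()

# Pick an index type and metric suited to your vectors; HNSW with L2
# is just one combination from the table referenced above.
index_params.add_index(
    field_name="vector",
    index_type="HNSW",
    metric_type="L2",
    params={"M": 16, "efConstruction": 64},
)
```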

@@ -103,20 +103,20 @@ title: インデックス・ベクトル・フィールド -

As explained in Manage Collections, Milvus automatically generates an index and loads it into memory when creating a collection if either of the following conditions is specified in the collection creation request:

+

As explained in Manage Collections, Milvus automatically generates an index and loads it into memory at collection creation if the creation request specifies either of the following (a minimal sketch follows the list):

• The dimension and metric type of the vector field, or

• The schema and index parameters.
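As a minimal sketch of the first condition, assuming a local Milvus instance (the collection name is hypothetical): in quick-setup mode, passing a dimension and metric type is enough for Milvus to build a default index and load the collection automatically.

```python
from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")

# Quick setup: specifying the vector dimension (and metric type) makes
# Milvus create a default index and load the collection automatically.
client.create_collection(
    collection_name="auto_indexed_setup",  # hypothetical name
    dimension=5,
    metric_type="COSINE",
)

print(client.get_load_state(collection_name="auto_indexed_setup"))
```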

    -

The following code snippet reuses the existing code to establish a connection to a Milvus instance and create a collection without specifying its index parameters. In this case, the collection lacks an index and remains unloaded.

+

The following code snippet reuses the existing code to establish a connection to the Milvus instance and create a collection without specifying its index parameters. In this case, the collection lacks an index and remains unloaded.

To prepare for index creation, use MilvusClient to connect to the Milvus server and set up a collection with create_schema(), add_field(), and create_collection().

-

To prepare for index creation, use MilvusClientV2 to connect to the Milvus server and set up a collection with createSchema(), addField(), and createCollection().

+

To prepare for index creation, use MilvusClientV2 to connect to the Milvus server and set up a collection with createSchema(), addField(), and createCollection().

-

To prepare for index creation, use MilvusClient to connect to the Milvus server and set up a collection with createCollection().

+

To prepare for index creation, use MilvusClient to connect to the Milvus server and set up a collection with createCollection().

@@ -247,7 +247,8 @@ index_params.add_index(
 # 4.3. Create an index file
 client.create_index(
     collection_name="customized_setup",
-    index_params=index_params
+    index_params=index_params,
+    sync=False # Whether to wait for index creation to complete before returning. Defaults to True.
 )
    import io.milvus.v2.common.IndexParam;
    @@ -329,6 +330,10 @@ res = await client.index_params
An IndexParams object containing a list of IndexParam objects.
         
    +    
    +      sync
+      Controls how the index is built with respect to the client's request. Valid values:
• True (default): The client waits until the index is fully built before returning. This means you will not receive a response until the process completes.
• False: The client returns immediately after the request is received, and the index is built in the background. To check whether index creation is complete, use the describe_index() method; a short polling sketch follows.
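A short polling sketch for sync=False, reusing the collection and index_params from the snippet above. The "state" key checked here is an assumption based on the SDK outputs shown elsewhere in this guide; adjust it to whatever your pymilvus version actually returns from describe_index().

```python
import time

client.create_index(
    collection_name="customized_setup",
    index_params=index_params,
    sync=False,  # return immediately; the index builds in the background
)

while True:
    res = client.describe_index(
        collection_name="customized_setup",
        index_name="vector_index",
    )
    # Assumed field name; some client versions expose "state": "Finished".
    if res.get("state") == "Finished":
        break
    time.sleep(1)
```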
    + @@ -341,11 +346,11 @@ res = await client.fieldName - + - + diff --git a/localization/v2.4.x/site/ja/userGuide/manage-partitions.json b/localization/v2.4.x/site/ja/userGuide/manage-partitions.json index 081f3c40c..92c21d965 100644 --- a/localization/v2.4.x/site/ja/userGuide/manage-partitions.json +++ b/localization/v2.4.x/site/ja/userGuide/manage-partitions.json @@ -1 +1 @@ -{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n)\n\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .build();\n\nclient.createCollection(quickSetupReq);\n","const address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n}); \n","# 3. List partitions\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\"]\n","import io.milvus.v2.service.partition.request.ListPartitionsReq;\n\n// 3. List all partitions in the collection\nListPartitionsReq listPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nList partitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\"_default\"]\n","// 3. List partitions\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default' ]\n// \n","# 4. Create more partitions\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\", \"partitionB\"]\n","import io.milvus.v2.service.partition.request.CreatePartitionReq;\n\n// 4. Create more partitions\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\ncreatePartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\nlistPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\npartitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\n// \"_default\",\n// \"partitionA\",\n// \"partitionB\"\n// ]\n","// 4. 
Create more partitions\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default', 'partitionA', 'partitionB' ]\n// \n","# 5. Check whether a partition exists\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\nprint(res)\n\n# Output\n#\n# True\n\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionC\"\n)\nprint(res)\n\n# Output\n#\n# False\n","import io.milvus.v2.service.partition.request.HasPartitionReq;\n\n// 5. Check whether a partition exists\nHasPartitionReq hasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nboolean exists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// true\n\nhasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionC\")\n .build();\n\nexists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// false\n","// 5. Check whether a partition exists\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// true\n// \n\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionC\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// false\n// \n","# Release the collection\nclient.release_collection(collection_name=\"quick_setup\")\n\n# Check the load status\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionB\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.collection.request.ReleaseCollectionReq;\nimport io.milvus.v2.service.partition.request.LoadPartitionsReq;\nimport io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 6. 
Load a partition independantly\n// 6.1 Release the collection\nReleaseCollectionReq releaseCollectionReq = ReleaseCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nclient.releaseCollection(releaseCollectionReq);\n\n// 6.2 Load partitionA\nLoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\nThread.sleep(3000);\n\n// 6.3 Check the load status of the collection and its partitions\nGetLoadStateReq getLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 6. Load a partition indenpendantly\nawait client.releaseCollection({\n collection_name: \"quick_setup\"\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n\nawait client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nawait sleep(3000)\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n//\n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n 
.collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\", \"partitionB\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n","# 7. Release a partition\nclient.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 7. Release a partition\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 7. Release a partition\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n","client.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"_default\", \"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","# 8. 
Drop a partition\nclient.drop_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\"]\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"_default\", \"partitionA\", \"partitionB\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"_default\", \"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// {\n// status: {\n// error_code: 'Success',\n// reason: '',\n// code: 0,\n// retriable: false,\n// detail: ''\n// },\n// state: 'LoadStateNotLoad'\n// }\n// \n"],"headingContent":"","anchorList":[{"label":"パーティションの管理","href":"Manage-Partitions","type":1,"isActive":false},{"label":"概要","href":"Overview","type":2,"isActive":false},{"label":"準備","href":"Preparations","type":2,"isActive":false},{"label":"パーティションのリスト","href":"List-Partitions","type":2,"isActive":false},{"label":"パーティションの作成","href":"Create-Partitions","type":2,"isActive":false},{"label":"特定のパーティションのチェック","href":"Check-for-a-Specific-Partition","type":2,"isActive":false},{"label":"パーティションのロードと解放","href":"Load--Release-Partitions","type":2,"isActive":false},{"label":"パーティションの削除","href":"Drop-Partitions","type":2,"isActive":false},{"label":"よくある質問","href":"FAQ","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n)\n\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .build();\n\nclient.createCollection(quickSetupReq);\n","const address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n}); \n","# 3. List partitions\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\"]\n","import io.milvus.v2.service.partition.request.ListPartitionsReq;\n\n// 3. 
List all partitions in the collection\nListPartitionsReq listPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nList partitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\"_default\"]\n","// 3. List partitions\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default' ]\n// \n","# 4. Create more partitions\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\", \"partitionB\"]\n","import io.milvus.v2.service.partition.request.CreatePartitionReq;\n\n// 4. Create more partitions\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\ncreatePartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\nlistPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\npartitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\n// \"_default\",\n// \"partitionA\",\n// \"partitionB\"\n// ]\n","// 4. Create more partitions\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default', 'partitionA', 'partitionB' ]\n// \n","# 5. Check whether a partition exists\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\nprint(res)\n\n# Output\n#\n# True\n\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionC\"\n)\nprint(res)\n\n# Output\n#\n# False\n","import io.milvus.v2.service.partition.request.HasPartitionReq;\n\n// 5. Check whether a partition exists\nHasPartitionReq hasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nboolean exists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// true\n\nhasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionC\")\n .build();\n\nexists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// false\n","// 5. 
Check whether a partition exists\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// true\n// \n\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionC\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// false\n// \n","# Release the collection\nclient.release_collection(collection_name=\"quick_setup\")\n\n# Check the load status\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionB\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.collection.request.ReleaseCollectionReq;\nimport io.milvus.v2.service.partition.request.LoadPartitionsReq;\nimport io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 6. Load a partition independantly\n// 6.1 Release the collection\nReleaseCollectionReq releaseCollectionReq = ReleaseCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nclient.releaseCollection(releaseCollectionReq);\n\n// 6.2 Load partitionA\nLoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\nThread.sleep(3000);\n\n// 6.3 Check the load status of the collection and its partitions\nGetLoadStateReq getLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 6. 
Load a partition indenpendantly\nawait client.releaseCollection({\n collection_name: \"quick_setup\"\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n\nawait client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nawait sleep(3000)\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n//\n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\", \"partitionB\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"],\n load_fields=[\"id\", \"vector\"],\n 
skip_load_dynamic_field=True\n)\n","# 7. Release a partition\nclient.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 7. Release a partition\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 7. Release a partition\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n","client.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"_default\", \"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","# 8. Drop a partition\nclient.drop_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\"]\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"_default\", \"partitionA\", \"partitionB\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"_default\", \"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// {\n// status: {\n// error_code: 'Success',\n// reason: '',\n// code: 0,\n// retriable: false,\n// detail: ''\n// },\n// state: 'LoadStateNotLoad'\n// }\n// \n"],"headingContent":"Manage Partitions","anchorList":[{"label":"パーティションの管理","href":"Manage-Partitions","type":1,"isActive":false},{"label":"概要","href":"Overview","type":2,"isActive":false},{"label":"準備","href":"Preparations","type":2,"isActive":false},{"label":"パーティションのリスト","href":"List-Partitions","type":2,"isActive":false},{"label":"パーティションの作成","href":"Create-Partitions","type":2,"isActive":false},{"label":"特定のパーティションのチェック","href":"Check-for-a-Specific-Partition","type":2,"isActive":false},{"label":"パーティションのロードと解放","href":"Load--Release-Partitions","type":2,"isActive":false},{"label":"パーティションの削除","href":"Drop-Partitions","type":2,"isActive":false},{"label":"よくある質問","href":"FAQ","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/userGuide/manage-partitions.md b/localization/v2.4.x/site/ja/userGuide/manage-partitions.md index e166336ba..c589cb19a 100644 --- 
a/localization/v2.4.x/site/ja/userGuide/manage-partitions.md +++ b/localization/v2.4.x/site/ja/userGuide/manage-partitions.md @@ -1,7 +1,6 @@ --- id: manage-partitions.md title: パーティションの管理 -summary: '' ---

Manage Partitions

The following code snippet reuses the existing code to establish a connection to Milvus and create a collection in quick setup mode.

+

The following code snippet reuses the existing code to establish a connection to Milvus and create a collection in quick setup mode.

    -

To prepare, use MilvusClient to connect to Milvus and create_collection() to create a collection in quick setup mode.

+

To prepare, use MilvusClient to connect to Milvus and create_collection() to create a collection in quick setup mode.

-

To prepare, use MilvusClientV2 to connect to Milvus and createCollection() to create a collection in quick setup mode.

+

To prepare, use MilvusClientV2 to connect to Milvus and createCollection() to create a collection in quick setup mode.

To prepare, use MilvusClient to connect to Milvus and createCollection() to create a collection in quick setup mode.

    @@ -182,7 +181,7 @@ res = await client.

Notes

    -

If you set a field as the partition key in a collection, Milvus creates at least 64 partitions along with the collection. When listing partitions, the results may differ from the output of the code snippets above.

+

If you have set a field as the partition key in a collection, Milvus creates at least 64 partitions along with the collection. When listing partitions, the results may differ from the output of the code snippets above.

For details, refer to Use Partition Key.
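As a hedged sketch of this behavior (all names are illustrative), marking a scalar field as the partition key pre-creates the partitions, which you can confirm by listing them:

```python
from pymilvus import MilvusClient, DataType

client = MilvusClient(uri="http://localhost:19530")

schema = MilvusClient.create_schema(auto_id=False)
schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
schema.add_field(field_name="vector", datatype=DataType.FLOAT_VECTOR, dim=5)
# The partition-key field: Milvus pre-creates partitions (64 by default)
# and routes entities by the hash of this field's value.
schema.add_field(
    field_name="color",
    datatype=DataType.VARCHAR,
    max_length=64,
    is_partition_key=True,
)

client.create_collection(collection_name="partition_key_setup", schema=schema)

# Expect many auto-created partitions rather than just "_default".
print(len(client.list_partitions(collection_name="partition_key_setup")))
```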

Create Partitions -

The code snippets above check whether partitions named partitionA and partitionC exist in the collection.

+

The code snippets above check whether partitions named partitionA and partitionC exist in the collection.

Load & Release Partitions

You can load and release specific partitions to make them available or unavailable for searches and queries.

+

You can load and release specific partitions to make them available or unavailable for searches and queries.

Get Load Status

To check the load status of a collection and its partitions, use get_load_state().

@@ -585,7 +584,7 @@ res = await client.
Load Partitions
    -

To load all partitions of a collection, simply call load_collection(). To load specific partitions of a collection, call load_partitions().

+

To load all partitions of a collection, simply call load_collection(). To load specific partitions of the collection, call load_partitions().

To load all partitions of a collection, simply call loadCollection(). To load specific partitions of the collection, call loadPartitions().

    @@ -735,6 +734,16 @@ res = await client.// LoadStateLoaded // +

To load specified fields in one or more partitions, do the following:

    +
    client.load_partitions(
    +    collection_name="quick_setup",
    +    partition_names=["partitionA"],
    +    load_fields=["id", "vector"],
    +    skip_load_dynamic_field=True
    +)
    +
    +

Note that only the fields listed in load_fields can be used as filtering conditions or output fields in searches and queries. Always include the primary key in the list. Field names excluded from loading will not be available for filtering or output.

+

You can use skip_load_dynamic_field=True to skip loading the dynamic field. Milvus treats the dynamic field as a single field, so all keys within the dynamic field are loaded or excluded together.
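To illustrate the restriction, a minimal sketch assuming partitionA was loaded with load_fields=["id", "vector"] as above; the query vector is arbitrary:

```python
# Only fields listed in load_fields may appear in filters or outputs.
res = client.search(
    collection_name="quick_setup",
    data=[[0.1, 0.2, 0.3, 0.4, 0.5]],  # arbitrary 5-dimensional query
    limit=3,
    filter="id > 0",         # "id" was loaded, so filtering on it works
    output_fields=["id"],    # requesting an unloaded field would fail
    partition_names=["partitionA"],
)
print(res)
```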

Release Partitions

To release all partitions of a collection, call release_collection(). To release specific partitions of the collection, call release_partitions().

    @@ -805,7 +814,7 @@ res = await client.// LoadStateNotLoad // -

To release multiple partitions at a time, do the following:

+

To release multiple partitions at a time, do the following:

    client.release_partitions(
         collection_name="quick_setup",
         partition_names=["_default", "partitionA", "partitionB"]
@@ -929,11 +938,11 @@ res = await client.rootCoord.maxPartitionNum . For details, refer to System Configurations.

    +
• What is the maximum number of partitions that can be created?

+

By default, Milvus allows up to 1,024 partitions to be created. The maximum number of partitions can be adjusted via rootCoord.maxPartitionNum. For details, refer to System Configurations.

• How can I tell the difference between a partition and a partition key?

A partition is a physical storage unit, whereas a partition key is a logical concept that automatically assigns data to specific partitions based on a designated column.

    -

For example, in Milvus, if you have a collection whose partition key is defined as the color field, the system automatically assigns data to partitions based on the hash value of each entity's color field. This automated process relieves the user of the responsibility to manually specify partitions when inserting or searching data.

+

For example, in Milvus, if you have a collection whose partition key is defined as the color field, the system automatically assigns data to partitions based on the hash value of each entity's color field. This automated process relieves the user of the responsibility to manually specify partitions when inserting or searching data.

On the other hand, when you create partitions manually, you need to assign data to each partition based on the partition-key criteria yourself. If you have a collection with a color field, you would manually assign entities with a color value of red to partition A and entities with a color value of blue to partition B. This manual management requires more effort.

    -

In summary, both partitions and partition keys can be used to optimize data computation and improve query efficiency. Enabling a partition key means surrendering manual control over the insertion and loading of partition data.

+

In summary, both partitions and partition keys can be used to optimize data computation and improve query efficiency. Enabling a partition key means giving up manual control over the insertion and loading of partition data.
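The contrast can be sketched in a few lines of Python; collection names are illustrative, and "partition_key_setup" refers to the partition-key schema sketched earlier in this guide:

```python
from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")

# Manual partitions: you create the partition and pick it at insert time.
client.create_collection(collection_name="manual_demo", dimension=5)
client.create_partition(collection_name="manual_demo", partition_name="red")
client.insert(
    collection_name="manual_demo",
    data=[{"id": 0, "vector": [0.1] * 5, "color": "red"}],
    partition_name="red",  # routing is your responsibility
)

# Partition key: omit partition_name; Milvus routes the entity by the
# hash of its "color" value.
client.insert(
    collection_name="partition_key_setup",
    data=[{"id": 1, "vector": [0.2] * 5, "color": "red"}],
)
```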

    diff --git a/localization/v2.4.x/site/ja/userGuide/search-query-get/single-vector-search.json b/localization/v2.4.x/site/ja/userGuide/search-query-get/single-vector-search.json index 16bf0cd01..7295bd689 100644 --- a/localization/v2.4.x/site/ja/userGuide/search-query-get/single-vector-search.json +++ b/localization/v2.4.x/site/ja/userGuide/search-query-get/single-vector-search.json @@ -1 +1 @@ -{"codeList":["# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=CLUSTER_ENDPOINT,\n token=TOKEN \n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n metric_type=\"IP\"\n)\n\n# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"color_tag\": f\"{current_color}_{str(random.randint(1000, 9999))}\"\n })\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n\n# 6.1 Create partitions \nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"red\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"blue\"\n)\n\n# 6.1 Insert data into partitions\nred_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"red\", \"color_tag\": f\"red_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\nblue_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"blue\", \"color_tag\": f\"blue_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=red_data,\n partition_name=\"red\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=blue_data,\n partition_name=\"blue\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Random;\n\nimport com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp; \n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig); \n\n// 2. 
Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .metricType(\"IP\")\n .build();\n\nclient.createCollection(quickSetupReq);\n\nGetLoadStateReq loadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(loadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\n// 3. Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n\n// 6.1. Create a partition\nCreatePartitionReq partitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"red\")\n .build();\n\nclient.createPartition(partitionReq);\n\npartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"blue\")\n .build();\n\nclient.createPartition(partitionReq);\n\n// 6.2 Insert data into the partition\ndata = new ArrayList<>();\n\nfor (int i=1000; i<1500; i++) {\n Random rand = new Random();\n String current_color = \"red\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n} \n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"red\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n\ndata = new ArrayList<>();\n\nfor (int i=1500; i<2000; i++) {\n Random rand = new Random();\n String current_color = \"blue\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"blue\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. 
Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n metric_type: \"IP\"\n}); \n\n// 3. Insert randomly generated vectors\nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor (let i = 0; i < 1000; i++) {\n current_color = colors[Math.floor(Math.random() * colors.length)]\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n color_tag: `${current_color}_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nvar res = await client.insert({\n collection_name: \"quick_setup\",\n data: data\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"red\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"blue\"\n})\n\n// 6.1 Insert data into partitions\nvar red_data = []\nvar blue_data = []\n\nfor (let i = 1000; i < 1500; i++) {\n red_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"red\",\n color_tag: `red_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nfor (let i = 1500; i < 2000; i++) {\n blue_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"blue\",\n color_tag: `blue_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: red_data,\n partition_name: \"red\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: blue_data,\n partition_name: \"blue\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n","# Single vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n # Replace with your query vector\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\n# Convert the output to a formatted JSON string\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 4. Single vector search\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(3) // The number of results to return\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 4. 
Single vector search\nvar query_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 3, // The number of results to return\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {}\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {}\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {}\n },\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {}\n },\n {\n \"id\": 2,\n \"distance\": 0.5928734540939331,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [[\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\n \"vector\": [\n 0.87928146,\n 0.05324632,\n 0.6312755,\n 0.28005534,\n 0.9542448\n ],\n \"id\": 455\n }\n }\n]]}\n","[\n { score: 1.7463608980178833, id: '854' },\n { score: 1.744946002960205, id: '425' },\n { score: 1.7258622646331787, id: '718' }\n]\n","# Bulk-vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104],\n [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345]\n ], # Replace with your query vectors\n limit=2, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 5. Batch vector search\nquery_vectors = Arrays.asList(\n Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f),\n Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f)\n);\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(2)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 5. 
Batch vector search\nvar query_vectors = [\n [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104]\n]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: query_vectors,\n limit: 2,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 1,\n \"distance\": 1.3017789125442505,\n \"entity\": {}\n },\n {\n \"id\": 7,\n \"distance\": 1.2419954538345337,\n \"entity\": {}\n }\n ], # Result set 1\n [\n {\n \"id\": 3,\n \"distance\": 2.3358664512634277,\n \"entity\": {}\n },\n {\n \"id\": 8,\n \"distance\": 0.5642921924591064,\n \"entity\": {}\n }\n ] # Result set 2\n]\n","// Two sets of vectors are returned as expected\n\n{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n }\n ],\n [\n {\n \"score\": 1.8654699,\n \"fields\": {\n \"vector\": [\n 0.4671427,\n 0.8378432,\n 0.98844475,\n 0.82763994,\n 0.9729997\n ],\n \"id\": 638\n }\n },\n {\n \"score\": 1.8581753,\n \"fields\": {\n \"vector\": [\n 0.735541,\n 0.60140246,\n 0.86730254,\n 0.93152493,\n 0.98603314\n ],\n \"id\": 855\n }\n }\n ]\n]}\n","[\n [\n { score: 2.3590476512908936, id: '854' },\n { score: 2.2896690368652344, id: '59' }\n [\n { score: 2.664059638977051, id: '59' },\n { score: 2.59483003616333, id: '854' }\n ]\n]\n","# 6.2 Search within a partition\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"red\"]\n)\n\nprint(res)\n","// 6.3 Search within partitions\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"red\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 6.2 Search within partitions\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"red\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 16,\n \"distance\": 0.9200337529182434,\n \"entity\": {}\n },\n {\n \"id\": 14,\n \"distance\": 0.4505271911621094,\n \"entity\": {}\n },\n {\n \"id\": 15,\n \"distance\": 0.19924677908420563,\n \"entity\": {}\n },\n {\n \"id\": 17,\n \"distance\": 0.0075093843042850494,\n \"entity\": {}\n },\n {\n \"id\": 13,\n \"distance\": -0.14609718322753906,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1677284,\n \"fields\": {\n \"vector\": [\n 0.9986977,\n 0.17964739,\n 0.49086612,\n 0.23155272,\n 0.98438674\n ],\n \"id\": 1435\n }\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\n \"vector\": [\n 0.6952647,\n 0.13417172,\n 0.91045254,\n 0.119336545,\n 0.9338931\n ],\n \"id\": 1291\n }\n },\n {\n \"score\": 
1.0969629,\n \"fields\": {\n \"vector\": [\n 0.3363194,\n 0.028906643,\n 0.6675426,\n 0.030419827,\n 0.9735209\n ],\n \"id\": 1168\n }\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\n \"vector\": [\n 0.9980543,\n 0.36063594,\n 0.66427994,\n 0.17359233,\n 0.94954175\n ],\n \"id\": 1164\n }\n },\n {\n \"score\": 1.0584627,\n \"fields\": {\n \"vector\": [\n 0.7187005,\n 0.12674773,\n 0.987718,\n 0.3110777,\n 0.86093885\n ],\n \"id\": 1085\n }\n }\n ],\n [\n {\n \"score\": 1.8030131,\n \"fields\": {\n \"vector\": [\n 0.59726167,\n 0.7054632,\n 0.9573117,\n 0.94529945,\n 0.8664103\n ],\n \"id\": 1203\n }\n },\n {\n \"score\": 1.7728865,\n \"fields\": {\n \"vector\": [\n 0.6672442,\n 0.60448086,\n 0.9325822,\n 0.80272985,\n 0.8861626\n ],\n \"id\": 1448\n }\n },\n {\n \"score\": 1.7536311,\n \"fields\": {\n \"vector\": [\n 0.59663296,\n 0.77831805,\n 0.8578314,\n 0.88818026,\n 0.9030075\n ],\n \"id\": 1010\n }\n },\n {\n \"score\": 1.7520742,\n \"fields\": {\n \"vector\": [\n 0.854198,\n 0.72294194,\n 0.9245805,\n 0.86126596,\n 0.7969224\n ],\n \"id\": 1219\n }\n },\n {\n \"score\": 1.7452049,\n \"fields\": {\n \"vector\": [\n 0.96419,\n 0.943535,\n 0.87611496,\n 0.8268136,\n 0.79786557\n ],\n \"id\": 1149\n }\n }\n ]\n]}\n","[\n { score: 3.0258803367614746, id: '1201' },\n { score: 3.004319190979004, id: '1458' },\n { score: 2.880324363708496, id: '1187' },\n { score: 2.8246407508850098, id: '1347' },\n { score: 2.797295093536377, id: '1406' }\n]\n","res = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"blue\"]\n)\n\nprint(res)\n","searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"blue\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","res = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"blue\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 20,\n \"distance\": 2.363696813583374,\n \"entity\": {}\n },\n {\n \"id\": 26,\n \"distance\": 1.0665391683578491,\n \"entity\": {}\n },\n {\n \"id\": 23,\n \"distance\": 1.066049575805664,\n \"entity\": {}\n },\n {\n \"id\": 29,\n \"distance\": 0.8353596925735474,\n \"entity\": {}\n },\n {\n \"id\": 28,\n \"distance\": 0.7484277486801147,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1628494,\n \"fields\": {\n \"vector\": [\n 0.7442872,\n 0.046407282,\n 0.71031404,\n 0.3544345,\n 0.9819991\n ],\n \"id\": 1992\n }\n },\n {\n \"score\": 1.1470042,\n \"fields\": {\n \"vector\": [\n 0.5505825,\n 0.04367262,\n 0.9985836,\n 0.18922359,\n 0.93255126\n ],\n \"id\": 1977\n }\n },\n {\n \"score\": 1.1450152,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.1439825,\n \"fields\": {\n \"vector\": [\n 0.9253267,\n 0.15890503,\n 0.7999555,\n 0.19126713,\n 0.898583\n ],\n \"id\": 1552\n }\n },\n {\n \"score\": 1.1029172,\n \"fields\": {\n \"vector\": [\n 0.95661926,\n 0.18777144,\n 0.38115507,\n 0.14323527,\n 0.93137646\n ],\n \"id\": 1823\n }\n }\n ],\n [\n {\n \"score\": 1.8005109,\n \"fields\": {\n \"vector\": [\n 0.5953582,\n 0.7794224,\n 0.9388869,\n 0.79825854,\n 0.9197286\n ],\n \"id\": 1888\n }\n },\n {\n \"score\": 1.7714822,\n \"fields\": {\n \"vector\": [\n 0.56805456,\n 
0.89422905,\n 0.88187534,\n 0.914824,\n 0.8944365\n ],\n \"id\": 1648\n }\n },\n {\n \"score\": 1.7561421,\n \"fields\": {\n \"vector\": [\n 0.83421993,\n 0.39865613,\n 0.92319834,\n 0.42695504,\n 0.96633124\n ],\n \"id\": 1688\n }\n },\n {\n \"score\": 1.7553532,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.7543385,\n \"fields\": {\n \"vector\": [\n 0.16542226,\n 0.38248396,\n 0.9888778,\n 0.80913955,\n 0.9501492\n ],\n \"id\": 1544\n }\n }\n ]\n]}\n","[\n { score: 2.8421106338500977, id: '1745' },\n { score: 2.838560104370117, id: '1782' },\n { score: 2.8134000301361084, id: '1511' },\n { score: 2.718268871307373, id: '1679' },\n { score: 2.7014894485473633, id: '1597' }\n]\n","# Search with output fields\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"] # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 7. Search with output fields\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 7. Search with output fields\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n output_fields: [\"color\"],\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {\n \"color\": \"pink_8682\"\n }\n },\n {\n \"id\": 16,\n \"distance\": 1.0159327983856201,\n \"entity\": {\n \"color\": \"yellow_1496\"\n }\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {}\n }\n ]\n]}\n","\n[\n { score: 3.036271572113037, id: '59', color: 'orange' },\n { score: 3.0267879962921143, id: '1745', color: 'blue' },\n { score: 3.0069446563720703, id: '854', color: 'black' },\n { score: 2.984386682510376, id: '718', color: 'black' },\n { score: 2.916019916534424, id: '425', color: 'purple' }\n]\n","# Search with filter\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. 
number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"red%\"'\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color_tag like \\\"red%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"red%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n },\n {\n \"id\": 6,\n \"distance\": -0.4113418459892273,\n \"entity\": {\n \"color\": \"red_9392\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1677284,\n \"fields\": {\"color_tag\": \"red_9030\"}\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\"color_tag\": \"red_3744\"}\n },\n {\n \"score\": 1.0969629,\n \"fields\": {\"color_tag\": \"red_4168\"}\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\"color_tag\": \"red_9678\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'red_8904' },\n { score: 2.491129159927368, id: '425', color_tag: 'purple_8212' },\n { score: 2.4889798164367676, id: '1458', color_tag: 'red_6891' },\n { score: 2.42964243888855, id: '724', color_tag: 'black_9885' },\n { score: 2.4004223346710205, id: '854', color_tag: 'black_5990' }\n]\n","# Infix match on color field\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"%ll%\"' # Filter on color field, infix match on \"ll\"\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color like \\\"%ll%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. 
Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"%ll%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {\n \"color\": \"yellow_4222\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"yellow_4222\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'yellow_4222' }\n]\n","# Conduct a range search\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\n \"radius\": 0.8, # Radius of the search circle\n \"range_filter\": 1.0 # Range filter to filter out vectors that are not within the search circle\n }\n}\n\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=3, # Max. number of search results to return\n search_params=search_params, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 9. Range search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .searchParams(Map.of(\"radius\", 0.1, \"range\", 1.0))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 9. 
Range search\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n params: {\n radius: 0.1,\n range: 1.0\n },\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\"color_tag\": \"green_2052\"}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\"color_tag\": \"purple_3709\"}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {\"color_tag\": \"black_1646\"}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {\"color_tag\": \"green_4853\"}\n }\n ]\n]}\n","[\n { score: 2.3387961387634277, id: '718', color_tag: 'black_7154' },\n { score: 2.3352415561676025, id: '1745', color_tag: 'blue_8741' },\n { score: 2.290485382080078, id: '1408', color_tag: 'red_2324' },\n { score: 2.285870313644409, id: '854', color_tag: 'black_5990' },\n { score: 2.2593345642089844, id: '1309', color_tag: 'red_8458' }\n]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Group search results\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=[[0.14529211512077012, 0.9147257273453546, 0.7965055218724449, 0.7009258593102812, 0.5605206522382088]], # Query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=10, # Max. number of search results to return\n group_by_field=\"doc_id\", # Group results by document ID\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\n\nprint(doc_ids)\n","[5, 10, 1, 7, 9, 6, 3, 4, 8, 2]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Search without `group_by_field`\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=query_passage_vector, # Replace with your query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=10, # Max. 
number of search results to return\n # group_by_field=\"doc_id\", # Group results by document ID\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\n\nprint(doc_ids)\n","[1, 10, 3, 10, 1, 9, 4, 4, 8, 6]\n","# In normal cases, you do not need to set search parameters manually\n# Except for range searches.\nsearch_parameters = {\n 'metric_type': 'L2',\n 'params': {\n 'nprobe': 10,\n 'level': 1,\n 'radius': 1.0\n 'range_filter': 0.8\n }\n}\n"],"headingContent":"","anchorList":[{"label":"単一ベクトル検索","href":"Single-Vector-Search","type":1,"isActive":false},{"label":"概要","href":"Overview","type":2,"isActive":false},{"label":"準備","href":"Preparations","type":2,"isActive":false},{"label":"基本的な検索","href":"Basic-search","type":2,"isActive":false},{"label":"フィルター検索","href":"Filtered-search","type":2,"isActive":false},{"label":"範囲検索","href":"Range-search","type":2,"isActive":false},{"label":"グループ化検索","href":"Grouping-search","type":2,"isActive":false},{"label":"検索パラメータ","href":"Search-parameters","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=CLUSTER_ENDPOINT,\n token=TOKEN \n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n metric_type=\"IP\"\n)\n\n# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"color_tag\": f\"{current_color}_{str(random.randint(1000, 9999))}\"\n })\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n\n# 6.1 Create partitions \nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"red\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"blue\"\n)\n\n# 6.1 Insert data into partitions\nred_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"red\", \"color_tag\": f\"red_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\nblue_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"blue\", \"color_tag\": f\"blue_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=red_data,\n partition_name=\"red\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=blue_data,\n partition_name=\"blue\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Random;\n\nimport com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport 
io.milvus.v2.service.collection.request.CreateCollectionReq;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp; \n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig); \n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .metricType(\"IP\")\n .build();\n\nclient.createCollection(quickSetupReq);\n\nGetLoadStateReq loadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(loadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\n// 3. Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n\n// 6.1. 
Create a partition\nCreatePartitionReq partitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"red\")\n .build();\n\nclient.createPartition(partitionReq);\n\npartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"blue\")\n .build();\n\nclient.createPartition(partitionReq);\n\n// 6.2 Insert data into the partition\ndata = new ArrayList<>();\n\nfor (int i=1000; i<1500; i++) {\n Random rand = new Random();\n String current_color = \"red\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n} \n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"red\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n\ndata = new ArrayList<>();\n\nfor (int i=1500; i<2000; i++) {\n Random rand = new Random();\n String current_color = \"blue\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"blue\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n metric_type: \"IP\"\n}); \n\n// 3. 
Insert randomly generated vectors\nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor (let i = 0; i < 1000; i++) {\n current_color = colors[Math.floor(Math.random() * colors.length)]\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n color_tag: `${current_color}_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nvar res = await client.insert({\n collection_name: \"quick_setup\",\n data: data\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"red\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"blue\"\n})\n\n// 6.1 Insert data into partitions\nvar red_data = []\nvar blue_data = []\n\nfor (let i = 1000; i < 1500; i++) {\n red_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"red\",\n color_tag: `red_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nfor (let i = 1500; i < 2000; i++) {\n blue_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"blue\",\n color_tag: `blue_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: red_data,\n partition_name: \"red\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: blue_data,\n partition_name: \"blue\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n","# Single vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n # Replace with your query vector\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\n# Convert the output to a formatted JSON string\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 4. Single vector search\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(3) // The number of results to return\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 4. 
Single vector search\nvar query_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 3, // The number of results to return\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {}\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {}\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {}\n },\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {}\n },\n {\n \"id\": 2,\n \"distance\": 0.5928734540939331,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [[\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\n \"vector\": [\n 0.87928146,\n 0.05324632,\n 0.6312755,\n 0.28005534,\n 0.9542448\n ],\n \"id\": 455\n }\n }\n]]}\n","[\n { score: 1.7463608980178833, id: '854' },\n { score: 1.744946002960205, id: '425' },\n { score: 1.7258622646331787, id: '718' }\n]\n","# Bulk-vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104],\n [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345]\n ], # Replace with your query vectors\n limit=2, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 5. Batch vector search\nquery_vectors = Arrays.asList(\n Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f),\n Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f)\n);\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(2)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 5. 
Batch vector search\nvar query_vectors = [\n [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104]\n]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: query_vectors,\n limit: 2,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 1,\n \"distance\": 1.3017789125442505,\n \"entity\": {}\n },\n {\n \"id\": 7,\n \"distance\": 1.2419954538345337,\n \"entity\": {}\n }\n ], # Result set 1\n [\n {\n \"id\": 3,\n \"distance\": 2.3358664512634277,\n \"entity\": {}\n },\n {\n \"id\": 8,\n \"distance\": 0.5642921924591064,\n \"entity\": {}\n }\n ] # Result set 2\n]\n","// Two sets of vectors are returned as expected\n\n{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n }\n ],\n [\n {\n \"score\": 1.8654699,\n \"fields\": {\n \"vector\": [\n 0.4671427,\n 0.8378432,\n 0.98844475,\n 0.82763994,\n 0.9729997\n ],\n \"id\": 638\n }\n },\n {\n \"score\": 1.8581753,\n \"fields\": {\n \"vector\": [\n 0.735541,\n 0.60140246,\n 0.86730254,\n 0.93152493,\n 0.98603314\n ],\n \"id\": 855\n }\n }\n ]\n]}\n","[\n [\n { score: 2.3590476512908936, id: '854' },\n { score: 2.2896690368652344, id: '59' }\n [\n { score: 2.664059638977051, id: '59' },\n { score: 2.59483003616333, id: '854' }\n ]\n]\n","# 6.2 Search within a partition\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"red\"]\n)\n\nprint(res)\n","// 6.3 Search within partitions\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"red\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 6.2 Search within partitions\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"red\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 16,\n \"distance\": 0.9200337529182434,\n \"entity\": {}\n },\n {\n \"id\": 14,\n \"distance\": 0.4505271911621094,\n \"entity\": {}\n },\n {\n \"id\": 15,\n \"distance\": 0.19924677908420563,\n \"entity\": {}\n },\n {\n \"id\": 17,\n \"distance\": 0.0075093843042850494,\n \"entity\": {}\n },\n {\n \"id\": 13,\n \"distance\": -0.14609718322753906,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1677284,\n \"fields\": {\n \"vector\": [\n 0.9986977,\n 0.17964739,\n 0.49086612,\n 0.23155272,\n 0.98438674\n ],\n \"id\": 1435\n }\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\n \"vector\": [\n 0.6952647,\n 0.13417172,\n 0.91045254,\n 0.119336545,\n 0.9338931\n ],\n \"id\": 1291\n }\n },\n {\n \"score\": 
1.0969629,\n \"fields\": {\n \"vector\": [\n 0.3363194,\n 0.028906643,\n 0.6675426,\n 0.030419827,\n 0.9735209\n ],\n \"id\": 1168\n }\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\n \"vector\": [\n 0.9980543,\n 0.36063594,\n 0.66427994,\n 0.17359233,\n 0.94954175\n ],\n \"id\": 1164\n }\n },\n {\n \"score\": 1.0584627,\n \"fields\": {\n \"vector\": [\n 0.7187005,\n 0.12674773,\n 0.987718,\n 0.3110777,\n 0.86093885\n ],\n \"id\": 1085\n }\n }\n ],\n [\n {\n \"score\": 1.8030131,\n \"fields\": {\n \"vector\": [\n 0.59726167,\n 0.7054632,\n 0.9573117,\n 0.94529945,\n 0.8664103\n ],\n \"id\": 1203\n }\n },\n {\n \"score\": 1.7728865,\n \"fields\": {\n \"vector\": [\n 0.6672442,\n 0.60448086,\n 0.9325822,\n 0.80272985,\n 0.8861626\n ],\n \"id\": 1448\n }\n },\n {\n \"score\": 1.7536311,\n \"fields\": {\n \"vector\": [\n 0.59663296,\n 0.77831805,\n 0.8578314,\n 0.88818026,\n 0.9030075\n ],\n \"id\": 1010\n }\n },\n {\n \"score\": 1.7520742,\n \"fields\": {\n \"vector\": [\n 0.854198,\n 0.72294194,\n 0.9245805,\n 0.86126596,\n 0.7969224\n ],\n \"id\": 1219\n }\n },\n {\n \"score\": 1.7452049,\n \"fields\": {\n \"vector\": [\n 0.96419,\n 0.943535,\n 0.87611496,\n 0.8268136,\n 0.79786557\n ],\n \"id\": 1149\n }\n }\n ]\n]}\n","[\n { score: 3.0258803367614746, id: '1201' },\n { score: 3.004319190979004, id: '1458' },\n { score: 2.880324363708496, id: '1187' },\n { score: 2.8246407508850098, id: '1347' },\n { score: 2.797295093536377, id: '1406' }\n]\n","res = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"blue\"]\n)\n\nprint(res)\n","searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"blue\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","res = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"blue\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 20,\n \"distance\": 2.363696813583374,\n \"entity\": {}\n },\n {\n \"id\": 26,\n \"distance\": 1.0665391683578491,\n \"entity\": {}\n },\n {\n \"id\": 23,\n \"distance\": 1.066049575805664,\n \"entity\": {}\n },\n {\n \"id\": 29,\n \"distance\": 0.8353596925735474,\n \"entity\": {}\n },\n {\n \"id\": 28,\n \"distance\": 0.7484277486801147,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1628494,\n \"fields\": {\n \"vector\": [\n 0.7442872,\n 0.046407282,\n 0.71031404,\n 0.3544345,\n 0.9819991\n ],\n \"id\": 1992\n }\n },\n {\n \"score\": 1.1470042,\n \"fields\": {\n \"vector\": [\n 0.5505825,\n 0.04367262,\n 0.9985836,\n 0.18922359,\n 0.93255126\n ],\n \"id\": 1977\n }\n },\n {\n \"score\": 1.1450152,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.1439825,\n \"fields\": {\n \"vector\": [\n 0.9253267,\n 0.15890503,\n 0.7999555,\n 0.19126713,\n 0.898583\n ],\n \"id\": 1552\n }\n },\n {\n \"score\": 1.1029172,\n \"fields\": {\n \"vector\": [\n 0.95661926,\n 0.18777144,\n 0.38115507,\n 0.14323527,\n 0.93137646\n ],\n \"id\": 1823\n }\n }\n ],\n [\n {\n \"score\": 1.8005109,\n \"fields\": {\n \"vector\": [\n 0.5953582,\n 0.7794224,\n 0.9388869,\n 0.79825854,\n 0.9197286\n ],\n \"id\": 1888\n }\n },\n {\n \"score\": 1.7714822,\n \"fields\": {\n \"vector\": [\n 0.56805456,\n 
0.89422905,\n 0.88187534,\n 0.914824,\n 0.8944365\n ],\n \"id\": 1648\n }\n },\n {\n \"score\": 1.7561421,\n \"fields\": {\n \"vector\": [\n 0.83421993,\n 0.39865613,\n 0.92319834,\n 0.42695504,\n 0.96633124\n ],\n \"id\": 1688\n }\n },\n {\n \"score\": 1.7553532,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.7543385,\n \"fields\": {\n \"vector\": [\n 0.16542226,\n 0.38248396,\n 0.9888778,\n 0.80913955,\n 0.9501492\n ],\n \"id\": 1544\n }\n }\n ]\n]}\n","[\n { score: 2.8421106338500977, id: '1745' },\n { score: 2.838560104370117, id: '1782' },\n { score: 2.8134000301361084, id: '1511' },\n { score: 2.718268871307373, id: '1679' },\n { score: 2.7014894485473633, id: '1597' }\n]\n","# Search with output fields\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"] # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 7. Search with output fields\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 7. Search with output fields\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n output_fields: [\"color\"],\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {\n \"color\": \"pink_8682\"\n }\n },\n {\n \"id\": 16,\n \"distance\": 1.0159327983856201,\n \"entity\": {\n \"color\": \"yellow_1496\"\n }\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {}\n }\n ]\n]}\n","\n[\n { score: 3.036271572113037, id: '59', color: 'orange' },\n { score: 3.0267879962921143, id: '1745', color: 'blue' },\n { score: 3.0069446563720703, id: '854', color: 'black' },\n { score: 2.984386682510376, id: '718', color: 'black' },\n { score: 2.916019916534424, id: '425', color: 'purple' }\n]\n","# Search with filter\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. 
number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"red%\"'\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color_tag like \\\"red%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"red%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n },\n {\n \"id\": 6,\n \"distance\": -0.4113418459892273,\n \"entity\": {\n \"color\": \"red_9392\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1677284,\n \"fields\": {\"color_tag\": \"red_9030\"}\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\"color_tag\": \"red_3744\"}\n },\n {\n \"score\": 1.0969629,\n \"fields\": {\"color_tag\": \"red_4168\"}\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\"color_tag\": \"red_9678\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'red_8904' },\n { score: 2.491129159927368, id: '425', color_tag: 'purple_8212' },\n { score: 2.4889798164367676, id: '1458', color_tag: 'red_6891' },\n { score: 2.42964243888855, id: '724', color_tag: 'black_9885' },\n { score: 2.4004223346710205, id: '854', color_tag: 'black_5990' }\n]\n","# Infix match on color field\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"%ll%\"' # Filter on color field, infix match on \"ll\"\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color like \\\"%ll%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. 
Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"%ll%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {\n \"color\": \"yellow_4222\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"yellow_4222\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'yellow_4222' }\n]\n","# Conduct a range search\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\n \"radius\": 0.8, # Radius of the search circle\n \"range_filter\": 1.0 # Range filter to filter out vectors that are not within the search circle\n }\n}\n\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=3, # Max. number of search results to return\n search_params=search_params, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 9. Range search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .searchParams(Map.of(\"radius\", 0.1, \"range\", 1.0))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 9. 
Range search\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n params: {\n radius: 0.1,\n range: 1.0\n },\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\"color_tag\": \"green_2052\"}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\"color_tag\": \"purple_3709\"}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {\"color_tag\": \"black_1646\"}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {\"color_tag\": \"green_4853\"}\n }\n ]\n]}\n","[\n { score: 2.3387961387634277, id: '718', color_tag: 'black_7154' },\n { score: 2.3352415561676025, id: '1745', color_tag: 'blue_8741' },\n { score: 2.290485382080078, id: '1408', color_tag: 'red_2324' },\n { score: 2.285870313644409, id: '854', color_tag: 'black_5990' },\n { score: 2.2593345642089844, id: '1309', color_tag: 'red_8458' }\n]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Group search results\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=[[0.14529211512077012, 0.9147257273453546, 0.7965055218724449, 0.7009258593102812, 0.5605206522382088]], # Query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=5, # Max. number of groups to return\n group_by_field=\"doc_id\", # Group results by document ID\n group_size=2, # returned at most 2 passages per document, the default value is 1\n group_strict_size=True, # ensure every group contains exactly 3 passages\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\npassage_ids = [result['entity']['passage_id'] for result in res[0]]\n\nprint(doc_ids)\nprint(passage_ids)\n","[\"doc_11\", \"doc_11\", \"doc_7\", \"doc_7\", \"doc_3\", \"doc_3\", \"doc_2\", \"doc_2\", \"doc_8\", \"doc_8\"]\n[5, 10, 11, 10, 9, 6, 5, 4, 9, 2]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Search without `group_by_field`\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=query_passage_vector, # Replace with your query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=5, # Max. 
number of search results to return\n # group_by_field=\"doc_id\", # Group results by document ID\n # group_size=2, \n # group_strict_size=True,\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\npassage_ids = [result['entity']['passage_id'] for result in res[0]]\n\nprint(doc_ids)\nprint(passage_ids)\n","[\"doc_11\", \"doc_11\", \"doc_11\", \"doc_11\", \"doc_11\"]\n[1, 10, 3, 12, 9]\n","# In normal cases, you do not need to set search parameters manually\n# Except for range searches.\nsearch_parameters = {\n 'metric_type': 'L2',\n 'params': {\n 'nprobe': 10,\n 'level': 1,\n 'radius': 1.0\n 'range_filter': 0.8\n }\n}\n"],"headingContent":"Single-Vector Search","anchorList":[{"label":"単一ベクトル検索","href":"Single-Vector-Search","type":1,"isActive":false},{"label":"概要","href":"Overview","type":2,"isActive":false},{"label":"準備","href":"Preparations","type":2,"isActive":false},{"label":"基本的な検索","href":"Basic-search","type":2,"isActive":false},{"label":"フィルター検索","href":"Filtered-search","type":2,"isActive":false},{"label":"範囲検索","href":"Range-search","type":2,"isActive":false},{"label":"グループ化検索","href":"Grouping-search","type":2,"isActive":false},{"label":"検索パラメータ","href":"Search-parameters","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/userGuide/search-query-get/single-vector-search.md b/localization/v2.4.x/site/ja/userGuide/search-query-get/single-vector-search.md index aa216bfaf..406a15cbb 100644 --- a/localization/v2.4.x/site/ja/userGuide/search-query-get/single-vector-search.md +++ b/localization/v2.4.x/site/ja/userGuide/search-query-get/single-vector-search.md @@ -1,7 +1,7 @@ --- id: single-vector-search.md order: 1 -summary: この記事では、1つのクエリベクターを使用してMilvusコレクション内のベクターを検索する方法について説明します。 +summary: この記事では、Milvusコレクション内のベクターを単一のクエリーベクターを使って検索する方法について説明します。 title: 単一ベクトル検索 ---

Single-Vector Search

@@ -544,11 +544,11 @@ res = await client.
data
The name of the target field to which this IndexParam object applies.
indexName
The name of the index file generated once this object has been applied.
indexType
Milvus searches for the vector embeddings most similar to the specified vector embeddings.
limit
Milvus searches for the vector embeddings most similar to the specified vector embeddings.
limit
This parameter, combined with offset in param, enables pagination (see the sketch after this table).
The sum of this value and offset in param must be less than 16,384.
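
As a quick illustration of the limit/offset pagination described in the table above, here is a minimal Python sketch. It assumes the quick_setup collection from the preparation code; note that, depending on your client version, offset may instead belong inside the nested params dict.

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")

query_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]

# Page 2: skip the first 100 hits and return the next 100.
# The sum of limit and offset must stay below 16,384.
res = client.search(
    collection_name="quick_setup",
    data=[query_vector],
    limit=100,  # page size
    search_params={"metric_type": "IP", "offset": 100, "params": {}},
)
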
    @@ -634,9 +634,9 @@ res = await client.

The output displays the top 5 nearest neighbors to your query vector, along with their unique IDs and the computed distances.
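
If you want to post-process these hits programmatically, a minimal Python sketch (assuming res is the list of result sets returned by client.search() above) could look like this:

# res holds one result set per query vector; each hit is a dict
# with "id", "distance", and "entity" keys, as shown above.
for hit in res[0]:
    print(f"id={hit['id']}, distance={hit['distance']:.4f}")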

Bulk-vector search extends the concept of single-vector search by allowing multiple query vectors to be searched in a single request. This type of search is ideal for scenarios where you need to find similar vectors for a set of query vectors, significantly reducing the time and computational resources required.

In a bulk-vector search, you can include multiple query vectors in the data field. The system processes these vectors in parallel and returns a separate result set for each query vector, with each set containing the closest matches found within the collection.

Below is an example of searching for two distinct sets of the most similar entities based on two query vectors:

    # Bulk-vector search
    @@ -1715,9 +1715,10 @@ res = await client.
           
-
In Milvus, grouping the search by a specific field can avoid redundancy of the same field's items in the results, letting you obtain a varied set of results for that field.

-
Each document is split into various passages, and each passage is represented by one vector embedding and belongs to one document. To find relevant documents instead of similar passages, you can include the group_by_field argument in the search() operation to group results by document ID. This returns the most relevant and unique documents, rather than separate passages from the same document.

-
Here is the example code to group search results by field:

+
In Milvus, grouping search is designed to improve the comprehensiveness and accuracy of search results.

+
Consider a RAG scenario, where loaded documents are split into various passages and each passage is represented by one vector embedding. Users want to find the most relevant passages to prompt the LLM accurately. Milvus' ordinary search function can meet this requirement, but it may yield highly skewed results: most of the passages may come from only a few documents, so the comprehensiveness of the search results is very poor. This can seriously impair the accuracy, or even the correctness, of the answers the LLM gives, and negatively affect LLM users' experience.

+
Grouping search can solve this problem effectively. By passing group_by_field and group_size, Milvus users can split the search results into several groups and ensure that the number of entities from each group does not exceed the given group_size. This feature significantly improves the comprehensiveness and fairness of search results, noticeably improving the quality of LLM output.

+
Here is the example code to group search results by field:

    # Connect to Milvus
     client = MilvusClient(uri='http://localhost:19530') # Milvus server address
     
    @@ -1732,21 +1733,26 @@ res = client.search(
         "metric_type": "L2",
         "params": {"nprobe": 10},
         }, # Search parameters
    -    limit=10, # Max. number of search results to return
    +    limit=5, # Max. number of groups to return
         group_by_field="doc_id", # Group results by document ID
+    group_size=2, # return at most 2 passages per document; the default value is 1
+    group_strict_size=True, # ensure every group contains exactly 2 passages
         output_fields=["doc_id", "passage_id"]
     )
     
     # Retrieve the values in the `doc_id` column
     doc_ids = [result['entity']['doc_id'] for result in res[0]]
    +passage_ids = [result['entity']['passage_id'] for result in res[0]]
     
     print(doc_ids)
    +print(passage_ids)
     

The output is similar to the following:

    -
    [5, 10, 1, 7, 9, 6, 3, 4, 8, 2]
    +
    ["doc_11", "doc_11", "doc_7", "doc_7", "doc_3", "doc_3", "doc_2", "doc_2", "doc_8", "doc_8"]
    +[5, 10, 11, 10, 9, 6, 5, 4, 9, 2]
     
-
In the given output, it can be observed that the returned entities contain no duplicate doc_id values.

-
For comparison, let's comment out group_by_field and conduct a regular search:

+
In the given output, it can be observed that exactly two passages are retrieved for each document, and a total of 5 documents collectively make up the results.

+
For comparison, let's comment out the group-related parameters and conduct a regular search:

    # Connect to Milvus
     client = MilvusClient(uri='http://localhost:19530') # Milvus server address
     
    @@ -1761,27 +1767,33 @@ res = client.search(
         "metric_type": "L2",
         "params": {"nprobe": 10},
         }, # Search parameters
    -    limit=10, # Max. number of search results to return
    +    limit=5, # Max. number of search results to return
         # group_by_field="doc_id", # Group results by document ID
    +    # group_size=2, 
    +    # group_strict_size=True,
         output_fields=["doc_id", "passage_id"]
     )
     
     # Retrieve the values in the `doc_id` column
     doc_ids = [result['entity']['doc_id'] for result in res[0]]
    +passage_ids = [result['entity']['passage_id'] for result in res[0]]
     
     print(doc_ids)
    +print(passage_ids)
     

The output is similar to the following:

    -
    [1, 10, 3, 10, 1, 9, 4, 4, 8, 6]
    +
    ["doc_11", "doc_11", "doc_11", "doc_11", "doc_11"]
    +[1, 10, 3, 12, 9]
     
-
In the given output, it can be observed that the returned entities contain duplicate doc_id values.

+
In the given output, it can be observed that "doc_11" completely dominates the search results, overshadowing the high-quality paragraphs from other documents.

+
One more point worth noting: by default, grouping search returns results immediately once it has enough groups, which may mean that the number of results in each group falls short of group_size. If you care about the number of results per group, set group_strict_size=True as shown in the code above. This makes Milvus strive to obtain enough results for each group, at a slight cost in performance.

Limitations

-
• Indexing: This grouping feature works only for collections indexed with the HNSW and IVF_FLAT, or FLAT types. For more information, refer to In-memory Index.

+
• Indexing: This grouping feature works only for collections indexed with the HNSW, IVF_FLAT, or FLAT types. For more information, refer to In-memory Index.

• Vector: Currently, grouping search does not support a vector field of the BINARY_VECTOR type. For more information on data types, refer to Supported data types.

• Field: Currently, grouping search allows only a single column. You cannot specify multiple field names in the group_by_field config. Additionally, grouping search is incompatible with field data types of JSON, FLOAT, DOUBLE, ARRAY, or vector.

• Performance impact: Be aware that performance degrades as the number of query vectors increases. Taking a cluster with 2 CPU cores and 8 GB of memory as an example, the execution time of grouping search increases proportionally with the number of input query vectors.

-
• Functionality: Currently, grouping search is not supported by range search, search iterators, or hybrid search.

+
• Functionality: Currently, grouping search is not supported by range search or search iterators.

Search parameters

Milvus provides search and query iterators for iterating through the results of a large volume of entities. Since Milvus limits TopK to 16,384, users can use iterators to return large numbers of, or even all, the entities in a collection in batch mode.

+
Milvus provides search and query iterators for iterating through a large volume of entities. Since Milvus limits TopK to 16,384, users can use iterators to return large numbers of, or even all, the entities in a collection in batch mode.

Overview

Iterators are powerful tools that help you iterate through a large volume of data, or all the data in a collection, using primary key values and boolean expressions. This can significantly improve the way you retrieve data. Unlike the traditional use of the offset and limit parameters, which may become less efficient over time, iterators offer a more scalable solution.

-
Benefits of using iterators

-
• Simplicity: Eliminates complex offset and limit settings.

+
Iterators are efficient tools for scanning an entire collection or iterating through a large volume of entities by specifying primary key values or a filter expression. Compared with a search or query call that uses the offset and limit parameters, iterators are more efficient and scalable; a minimal sketch follows the list below.

+
Benefits of using iterators

+
• Simplicity: Eliminates complex offset and limit settings.

• Efficiency: Provides scalable data retrieval by fetching only the data needed.

• Consistency: Ensures a consistent dataset size with boolean filters.
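
To make the batch mode concrete, here is a minimal Python sketch of a search iterator. It assumes the quick_setup collection prepared below and the pymilvus ORM Collection API used in the code diff that follows; treat it as a sketch rather than a definitive implementation.

from pymilvus import connections, Collection

connections.connect(uri="http://localhost:19530")
collection = Collection("quick_setup")

search_params = {"metric_type": "IP", "params": {}}

# Retrieve up to 300 entities in pages of 10 instead of one huge TopK call.
iterator = collection.search_iterator(
    data=[[0.35, -0.60, 0.18, -0.26, 0.90]],  # one query vector
    anns_field="vector",
    batch_size=10,      # entities fetched per page
    param=search_params,
    output_fields=["color_tag"],
    limit=300           # total number of entities to return
)

results = []
while True:
    page = iterator.next()
    if len(page) == 0:
        iterator.close()  # release server-side resources when done
        break
    results.extend(page)

print(len(results))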

@@ -62,12 +62,12 @@ title: イテレータ

-

The following steps repurpose the code to connect to Milvus, quickly set up a collection, and insert over 10,000 randomly generated entities into the collection.

+
The following preparation steps connect to Milvus and insert randomly generated entities into a collection.

Step 1: Create a collection

-
To create a collection, use MilvusClient to connect to the Milvus server and create_collection() to create a collection.

+
In the following steps, use MilvusClient to connect to the Milvus server and create_collection() to create a collection.

-
To use MilvusClientV2, connect to the Milvus server and call createCollection() to create a collection.

+
To create a collection, use MilvusClientV2 to connect to the Milvus server and createCollection() to create a collection.
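
For reference, a minimal Python sketch of this step might look like the following; the collection name and dimension mirror the quick-setup examples elsewhere in this guide.

from pymilvus import MilvusClient

# 1. Connect to the Milvus server
client = MilvusClient(uri="http://localhost:19530")

# 2. Create a collection in quick-setup mode; Milvus generates a default
#    schema with an "id" primary key and a 5-dimensional "vector" field.
client.create_collection(
    collection_name="quick_setup",
    dimension=5,
    metric_type="IP",
)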

@@ -264,8 +264,9 @@ iterator = collection.search_iterator(
     batch_size=10,
     param=search_params,
     output_fields=["color_tag"],
-    limit=3
+    limit=300
 )
+# search 300 entities totally with 10 entities per page

 results = []

@@ -356,7 +357,7 @@ System.out.println(results.size());

data
Milvus will search for the vector embeddings most similar to the specified ones. +
Milvus searches for the vector embeddings most similar to the specified ones. anns_field @@ -376,7 +377,7 @@ System.out.println(results.size()); limit -
The default value is -1, and all matching entities are returned. +
This value defaults to -1, and all matching entities are returned. @@ -547,7 +548,7 @@ R<QueryIterator> queryIteratRes = c expr -
The default value is None, indicating that scalar filtering is ignored. To build scalar filtering conditions, refer to "Boolean Expression Rules". +
This value defaults to None, indicating that scalar filtering is ignored. To build scalar filtering conditions, refer to "Boolean Expression Rules". output_fields @@ -555,7 +556,7 @@ R<QueryIterator> queryIteratRes = c limit -
The default of this value is -1, and all matching entities are returned. +
      値の既定値は-1 で、一致するすべてのエンティティが返されることを示します。 diff --git a/localization/v2.4.x/site/ja/userGuide/tools/cli_commands.json b/localization/v2.4.x/site/ja/userGuide/tools/cli_commands.json index e27ede1f4..203b1fddf 100644 --- a/localization/v2.4.x/site/ja/userGuide/tools/cli_commands.json +++ b/localization/v2.4.x/site/ja/userGuide/tools/cli_commands.json @@ -1 +1 @@ -{"codeList":["clear\n","connect [-uri (text)] [-t (text)]\n","milvus_cli > connect -uri http://127.0.0.1:19530 \n","create database -db (text) \n","milvus_cli > create database -db testdb\n","use database -db (text) \n","milvus_cli > use database -db testdb\n","list databases \n","milvus_cli > list databases\n","delete database -db (text) \n","milvus_cli > delete database -db testdb\n","create user -u (text) -p (text)\n","milvus_cli > create user -u zilliz -p zilliz\n","create alias -c (text) -a (text) [-A] \n","milvus_cli > create alias -c car -a carAlias1\n","milvus_cli > create alias -c car2 -A -a carAlias1\n","create collection -c (text) -f (text) -p (text) [-a] [-d (text)]\n","## For array field: --schema-field support :::(:if Varchar)\n\nmilvus_cli > create collection -c car -f id:INT64:primary_field -f vector:FLOAT_VECTOR:128 -f color:INT64:color -f brand:ARRAY:64:VARCHAR:128 -p id -A -d 'car_collection'\n","create partition -c (text) -p (text) [-d (text)]\n","milvus_cli > create partition -c car -p new_partition -d test_add_partition\n","create index\n","milvus_cli > create index\n\nCollection name (car, car2): car2\n\nThe name of the field to create an index for (vector): vector\n\nIndex name: vectorIndex\n\n# Default is ''\nIndex type FLAT, IVF_FLAT, IVF_SQ8, IVF_PQ, RNSG, HNSW, ANNOY, AUTOINDEX, DISKANN, GPU_IVF_FLAT, GPU_IVF_PQ, SPARSE_INVERTED_INDEX, SPARSE_WAND, SCANN, STL_SORT, Trie, INVERTED, ) []: IVF_FLAT \n\n# Default is ''\nIndex metric type (L2, IP, HAMMING, TANIMOTO, COSINE, ) []: \n\nTimeout []:\n","delete user -u (text)\n","milvus_cli > delete user -u zilliz\n","delete alias -a (text) \n","delete collection -c (text) \n","milvus_cli > delete collection -c car\n","delete entities -c (text) -p (text) \n","milvus_cli > delete entities -c car\n\nThe expression to specify entities to be deleted, such as \"film_id in [ 0, 1 ]\": film_id in [ 0, 1 ]\n\nYou are trying to delete the entities of collection. This action cannot be undone!\n\nDo you want to continue? [y/N]: y\n","delete partition -c (text) -p (text)\n","milvus_cli > delete partition -c car -p new_partition\n","delete index -c (text) -in (text)\n","milvus_cli > delete index -c car -in indexName\n","show collection -c (text)\n","milvus_cli > show collection -c test_collection_insert\n","show partition -c (text) -p (text)\n","milvus_cli > show partition -c test_collection_insert -p _default\n","show index -c (text) -in (text)\n","milvus_cli > show index -c test_collection -in index_name\n","exit\n","help \n","import -c (text)[-p (text)] \n","milvus_cli > import -c car 'examples/import_csv/vectors.csv'\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","milvus_cli > import -c car 'https://raw.githubusercontent.com/milvus-\nio/milvus_cli/main/examples/import_csv/vectors.csv'\n\nReading file from remote URL.\n\nReading csv file... 
[####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","list users\n","list collections\n","list indexes -c (text)\n","list partitions -c (text)\n","load -c (text) [-p (text)]\n","query\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id in [ 428960801420883491, 428960801420883492,\n428960801420883493 ]\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: color, brand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id > 428960801420883491\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: id, color,\nbrand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","release -c (text) [-p (text)]\n","search\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file\nout headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. 
You can also import a csv file without headers):\n [[0.71, 0.76, 0.17, 0.13, 0.42, 0.07, 0.15, 0.67, 0.58, 0.02, 0.39, 0.47, 0.58, 0.88, 0.73, 0.31, 0.23, 0.57, 0.33, 0.2, 0.03, 0.43, 0.78, 0.49, 0.17, 0.56, 0.76, 0.54, 0.45, 0.46, 0.05, 0.1, 0.43, 0.63, 0.29, 0.44, 0.65, 0.01, 0.35, 0.46, 0.66, 0.7, 0.88, 0.07, 0.49, 0.92, 0.57, 0.5, 0.16, 0.77, 0.98, 0.1, 0.44, 0.88, 0.82, 0.16, 0.67, 0.63, 0.57, 0.55, 0.95, 0.13, 0.64, 0.43, 0.71, 0.81, 0.43, 0.65, 0.76, 0.7, 0.05, 0.24, 0.03, 0.9, 0.46, 0.28, 0.92, 0.25, 0.97, 0.79, 0.73, 0.97, 0.49, 0.28, 0.64, 0.19, 0.23, 0.51, 0.09, 0.1, 0.53, 0.03, 0.23, 0.94, 0.87, 0.14, 0.42, 0.82, 0.91, 0.11, 0.91, 0.37, 0.26, 0.6, 0.89, 0.6, 0.32, 0.11, 0.98, 0.67, 0.12, 0.66, 0.47, 0.02, 0.15, 0.6, 0.64, 0.57, 0.14, 0.81, 0.75, 0.11, 0.49, 0.78, 0.16, 0.63, 0.57, 0.18]]\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, car2): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []:\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []:\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. 
If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","list connections \n","show index_progress -c (text) [-i (text)]\n","show loading_progress -c (text) [-p (text)]\n","version\n","$ milvus_cli --version\nMilvus_CLI v0.4.0\n"],"headingContent":"","anchorList":[{"label":"Milvus_CLI コマンドリファレンス","href":"MilvusCLI-Command-Reference","type":1,"isActive":false},{"label":"クリア","href":"clear","type":2,"isActive":false},{"label":"接続","href":"connect","type":2,"isActive":false},{"label":"データベースの作成","href":"create-Database","type":2,"isActive":false},{"label":"データベース使用","href":"use-Database","type":2,"isActive":false},{"label":"データベースのリスト","href":"List-Databases","type":2,"isActive":false},{"label":"データベースの削除","href":"Delete-Database","type":2,"isActive":false},{"label":"ユーザ作成","href":"create-user","type":2,"isActive":false},{"label":"エイリアスの作成","href":"create-alias","type":2,"isActive":false},{"label":"コレクションの作成","href":"create-collection","type":2,"isActive":false},{"label":"パーティション作成","href":"create-partition","type":2,"isActive":false},{"label":"インデックスの作成","href":"create-index","type":2,"isActive":false},{"label":"delete user","href":"delete-user","type":2,"isActive":false},{"label":"エイリアス削除","href":"delete-alias","type":2,"isActive":false},{"label":"コレクション削除","href":"delete-collection","type":2,"isActive":false},{"label":"エンティティを削除する","href":"delete-entities","type":2,"isActive":false},{"label":"パーティション削除","href":"delete-partition","type":2,"isActive":false},{"label":"インデックスの削除","href":"delete-index","type":2,"isActive":false},{"label":"ショーコレクション","href":"show-collection","type":2,"isActive":false},{"label":"show partition","href":"show-partition","type":2,"isActive":false},{"label":"ショー・インデックス","href":"show-index","type":2,"isActive":false},{"label":"終了","href":"exit","type":2,"isActive":false},{"label":"ヘルプ","href":"help","type":2,"isActive":false},{"label":"インポート","href":"import","type":2,"isActive":false},{"label":"ユーザ一覧","href":"list-users","type":2,"isActive":false},{"label":"コレクションをリストする","href":"list-collections","type":2,"isActive":false},{"label":"list indexes","href":"list-indexes","type":2,"isActive":false},{"label":"list partitions","href":"list-partitions","type":2,"isActive":false},{"label":"ロード","href":"load","type":2,"isActive":false},{"label":"クエリ","href":"query","type":2,"isActive":false},{"label":"リリース","href":"release","type":2,"isActive":false},{"label":"検索","href":"search","type":2,"isActive":false},{"label":"接続のリスト","href":"List-connection","type":2,"isActive":false},{"label":"show index_progress","href":"show-indexprogress","type":2,"isActive":false},{"label":"show loading_progress","href":"show-loadingprogress","type":2,"isActive":false},{"label":"バージョン","href":"version","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["clear\n","connect [-uri (text)] [-t (text)]\n","milvus_cli > connect -uri http://127.0.0.1:19530 \n","create database -db (text) \n","milvus_cli > create database -db testdb\n","use database -db (text) \n","milvus_cli > use database -db testdb\n","list databases \n","milvus_cli > list databases\n","delete database -db (text) \n","milvus_cli > delete database -db testdb\n","create user -u (text) -p (text)\n","milvus_cli > create user -u zilliz -p zilliz\n","create alias -c (text) -a (text) [-A] \n","milvus_cli > create alias -c car -a carAlias1\n","milvus_cli > create alias -c car2 -A -a carAlias1\n","create collection -c (text) -f (text) -p (text) [-a] [-d (text)]\n","## For array 
field: --schema-field support :::(:if Varchar)\n\nmilvus_cli > create collection -c car -f id:INT64:primary_field -f vector:FLOAT_VECTOR:128 -f color:INT64:color -f brand:ARRAY:64:VARCHAR:128 -p id -A -d 'car_collection'\n","create partition -c (text) -p (text) [-d (text)]\n","milvus_cli > create partition -c car -p new_partition -d test_add_partition\n","create index\n","milvus_cli > create index\n\nCollection name (car, car2): car2\n\nThe name of the field to create an index for (vector): vector\n\nIndex name: vectorIndex\n\n# Default is ''\nIndex type FLAT, IVF_FLAT, IVF_SQ8, IVF_PQ, RNSG, HNSW, ANNOY, AUTOINDEX, DISKANN, GPU_IVF_FLAT, GPU_IVF_PQ, SPARSE_INVERTED_INDEX, SPARSE_WAND, SCANN, STL_SORT, Trie, INVERTED, ) []: IVF_FLAT \n\n# Default is ''\nIndex metric type (L2, IP, HAMMING, TANIMOTO, COSINE, ) []: \n\nTimeout []:\n","delete user -u (text)\n","milvus_cli > delete user -u zilliz\n","delete alias -a (text) \n","delete collection -c (text) \n","milvus_cli > delete collection -c car\n","delete entities -c (text) -p (text) \n","milvus_cli > delete entities -c car\n\nThe expression to specify entities to be deleted, such as \"film_id in [ 0, 1 ]\": film_id in [ 0, 1 ]\n\nYou are trying to delete the entities of collection. This action cannot be undone!\n\nDo you want to continue? [y/N]: y\n","delete partition -c (text) -p (text)\n","milvus_cli > delete partition -c car -p new_partition\n","delete index -c (text) -in (text)\n","milvus_cli > delete index -c car -in indexName\n","show collection -c (text)\n","milvus_cli > show collection -c test_collection_insert\n","show partition -c (text) -p (text)\n","milvus_cli > show partition -c test_collection_insert -p _default\n","show index -c (text) -in (text)\n","milvus_cli > show index -c test_collection -in index_name\n","exit\n","help \n","import -c (text)[-p (text)] \n","milvus_cli > import -c car 'examples/import_csv/vectors.csv'\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","milvus_cli > import -c car 'https://raw.githubusercontent.com/milvus-\nio/milvus_cli/main/examples/import_csv/vectors.csv'\n\nReading file from remote URL.\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","list users\n","list collections\n","list indexes -c (text)\n","list partitions -c (text)\n","load -c (text) [-p (text)]\n","query\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id in [ 428960801420883491, 428960801420883492,\n428960801420883493 ]\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: color, brand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. 
If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id > 428960801420883491\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: id, color,\nbrand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","release -c (text) [-p (text)]\n","search\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file\nout headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers):\n [[0.71, 0.76, 0.17, 0.13, 0.42, 0.07, 0.15, 0.67, 0.58, 0.02, 0.39, 0.47, 0.58, 0.88, 0.73, 0.31, 0.23, 0.57, 0.33, 0.2, 0.03, 0.43, 0.78, 0.49, 0.17, 0.56, 0.76, 0.54, 0.45, 0.46, 0.05, 0.1, 0.43, 0.63, 0.29, 0.44, 0.65, 0.01, 0.35, 0.46, 0.66, 0.7, 0.88, 0.07, 0.49, 0.92, 0.57, 0.5, 0.16, 0.77, 0.98, 0.1, 0.44, 0.88, 0.82, 0.16, 0.67, 0.63, 0.57, 0.55, 0.95, 0.13, 0.64, 0.43, 0.71, 0.81, 0.43, 0.65, 0.76, 0.7, 0.05, 0.24, 0.03, 0.9, 0.46, 0.28, 0.92, 0.25, 0.97, 0.79, 0.73, 0.97, 0.49, 0.28, 0.64, 0.19, 0.23, 0.51, 0.09, 0.1, 0.53, 0.03, 0.23, 0.94, 0.87, 0.14, 0.42, 0.82, 0.91, 0.11, 0.91, 0.37, 0.26, 0.6, 0.89, 0.6, 0.32, 0.11, 0.98, 0.67, 0.12, 0.66, 0.47, 0.02, 0.15, 0.6, 0.64, 0.57, 0.14, 0.81, 0.75, 0.11, 0.49, 0.78, 0.16, 0.63, 0.57, 0.18]]\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. 
If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, car2): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []:\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []:\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","list connections \n","show index_progress -c (text) [-i (text)]\n","show loading_progress -c (text) [-p (text)]\n","version\n","$ milvus_cli --version\nMilvus_CLI v0.4.0\n"],"headingContent":"Milvus_CLI Command Reference","anchorList":[{"label":"Milvus_CLI コマンドリファレンス","href":"MilvusCLI-Command-Reference","type":1,"isActive":false},{"label":"クリア","href":"clear","type":2,"isActive":false},{"label":"接続","href":"connect","type":2,"isActive":false},{"label":"データベースの作成","href":"create-Database","type":2,"isActive":false},{"label":"データベース使用","href":"use-Database","type":2,"isActive":false},{"label":"リスト データベース","href":"list-Databases","type":2,"isActive":false},{"label":"データベース削除","href":"delete-Database","type":2,"isActive":false},{"label":"ユーザ作成","href":"create-user","type":2,"isActive":false},{"label":"エイリアスの作成","href":"create-alias","type":2,"isActive":false},{"label":"コレクションの作成","href":"create-collection","type":2,"isActive":false},{"label":"パーティション作成","href":"create-partition","type":2,"isActive":false},{"label":"インデックスの作成","href":"create-index","type":2,"isActive":false},{"label":"delete user","href":"delete-user","type":2,"isActive":false},{"label":"エイリアス削除","href":"delete-alias","type":2,"isActive":false},{"label":"コレクション削除","href":"delete-collection","type":2,"isActive":false},{"label":"エンティティを削除する","href":"delete-entities","type":2,"isActive":false},{"label":"パーティション削除","href":"delete-partition","type":2,"isActive":false},{"label":"インデックスの削除","href":"delete-index","type":2,"isActive":false},{"label":"ショーコレクション","href":"show-collection","type":2,"isActive":false},{"label":"show partition","href":"show-partition","type":2,"isActive":false},{"label":"ショー・インデックス","href":"show-index","type":2,"isActive":false},{"label":"終了","href":"exit","type":2,"isActive":false},{"label":"ヘルプ","href":"help","type":2,"isActive":false},{"label":"インポート","href":"import","type":2,"isActive":false},{"label":"ユーザ一覧","href":"list-users","type":2,"isActive":false},{"label":"コレクションをリストする","href":"list-collections","type":2,"isActive":false},{"label":"list indexes","href":"list-indexes","type":2,"isActive":false},{"label":"list partitions","href":"list-partitions","type":2,"isActive":false},{"label":"ロード","href":"load","type":2,"isActive":false},{"label":"クエリ","href":"query","type":2,"isActive":false},{"label":"リリース","href":"release","type":2,"isActive":false},{"label":"検索","href":"search","type":2,"isActive":false},{"label":"リスト接続","href":"list-connection","type":2,"isActive":false},{"label":"show 
index_progress","href":"show-indexprogress","type":2,"isActive":false},{"label":"show loading_progress","href":"show-loadingprogress","type":2,"isActive":false},{"label":"バージョン","href":"version","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/userGuide/tools/cli_commands.md b/localization/v2.4.x/site/ja/userGuide/tools/cli_commands.md index 9eaaab00a..e4b780e78 100644 --- a/localization/v2.4.x/site/ja/userGuide/tools/cli_commands.md +++ b/localization/v2.4.x/site/ja/userGuide/tools/cli_commands.md @@ -3,7 +3,6 @@ id: cli_commands.md summary: コマンドを使ってMilvusと対話する。 title: Milvus_CLI コマンドリファレンス --- -

Milvus_CLI Command Reference

Create a database in Milvus

      +

Creates a database in Milvus.

Syntax

      create database -db (text) 
       
@@ -110,7 +109,7 @@ title: Milvus_CLI Command Reference -help n/a Displays help for using the command. -

In the following example, the database testdb is created in milvus.

      +

The following example creates the database testdb in milvus.

      milvus_cli > create database -db testdb
       

Use Database

Use a database in Milvus

      +

Uses a database in milvus.

Syntax

      use database -db (text) 
       
@@ -144,7 +143,7 @@ title: Milvus_CLI Command Reference

The following example uses the database testdb in milvus.

      milvus_cli > use database -db testdb
       
      -

List Databases

List the databases in Milvus

      +

Lists the databases in milvus.

Syntax

      list databases 
       
      -

Example 1

In the following example, the databases in milvus are listed.

      +

Example 1

The following example lists the databases in milvus.

      milvus_cli > list databases
       
      -

Delete Database

Delete a database in Milvus

      +

Deletes a database in milvus.

Syntax

      delete database -db (text) 
       
@@ -194,7 +193,7 @@ title: Milvus_CLI Command Reference -help n/a Displays help for using the command. -

In the following example, the database testdb is deleted in milvus.

      +

The following example deletes the database testdb in milvus.

      milvus_cli > delete database -db testdb
       

Create User

Create a user in Milvus.

      +

Creates a user in milvus.

Syntax

      create user -u (text) -p (text)
       
@@ -221,12 +220,12 @@ title: Milvus_CLI Command Reference Option Full name Description --p -password The user password in milvus. Default is "None". --u -username The username in milvus. Default is "None". +-p -password The user password in milvus. Defaults to "None". +-u -username The username in milvus. Defaults to "None". -help n/a Displays help for using the command. -

In the following example, the user zilliz with the password zilliz is created in milvus.

      +

The following example creates the user zilliz with the password zilliz in milvus.

      milvus_cli > create user -u zilliz -p zilliz
       

Create Alias

    -

Create Partition

    -

    delete user

    -

Delete Partition

    -

Example 2

The following example imports a remote CSV file.

    milvus_cli > import -c car 'https://raw.githubusercontent.com/milvus-
    @@ -823,7 +814,7 @@ io/milvus_cli/main/examples/import_csv/vectors.csv'
     
     Reading file from remote URL.
     
    -Reading csv file... [####################################] 100%
    +Reading csv file...  [####################################]  100%
     
     Column names are ['vector', 'color', 'brand']
     
    @@ -833,16 +824,12 @@ Inserting ...
     
     Insert successfully.
     
    ----
    -
    -Total insert entities: 50000
    -Total collection entities: 150000
    -Milvus timestamp: 428849214449254403
    -
    ----
    -
    +--------------------------  ------------------
    +Total insert entities:                   50000
    +Total collection entities:              150000
    +Milvus timestamp:           428849214449254403
    +--------------------------  ------------------
     
    -

List Users

Lists all partitions of a collection.

    +

List all partitions of a collection.

Syntax

    list partitions -c (text)
     
    @@ -1027,7 +1014,6 @@ timeout []: Guarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]: Graceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:
    -

Example 2

To perform a query and be prompted for the required input:

    milvus_cli > query
    @@ -1047,7 +1033,6 @@ timeout []:
     Guarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:
     Graceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:
     
    -

Release

    -

To perform a search on an indexed collection and be prompted for the required input:

    milvus_cli > search
    @@ -1139,7 +1123,7 @@ Guarantee Timestamp(It
     Collection name (car, test_collection): car
     
     The vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers):
    -[[0.71, 0.76, 0.17, 0.13, 0.42, 0.07, 0.15, 0.67, 0.58, 0.02, 0.39, 0.47, 0.58, 0.88, 0.73, 0.31, 0.23, 0.57, 0.33, 0.2, 0.03, 0.43, 0.78, 0.49, 0.17, 0.56, 0.76, 0.54, 0.45, 0.46, 0.05, 0.1, 0.43, 0.63, 0.29, 0.44, 0.65, 0.01, 0.35, 0.46, 0.66, 0.7, 0.88, 0.07, 0.49, 0.92, 0.57, 0.5, 0.16, 0.77, 0.98, 0.1, 0.44, 0.88, 0.82, 0.16, 0.67, 0.63, 0.57, 0.55, 0.95, 0.13, 0.64, 0.43, 0.71, 0.81, 0.43, 0.65, 0.76, 0.7, 0.05, 0.24, 0.03, 0.9, 0.46, 0.28, 0.92, 0.25, 0.97, 0.79, 0.73, 0.97, 0.49, 0.28, 0.64, 0.19, 0.23, 0.51, 0.09, 0.1, 0.53, 0.03, 0.23, 0.94, 0.87, 0.14, 0.42, 0.82, 0.91, 0.11, 0.91, 0.37, 0.26, 0.6, 0.89, 0.6, 0.32, 0.11, 0.98, 0.67, 0.12, 0.66, 0.47, 0.02, 0.15, 0.6, 0.64, 0.57, 0.14, 0.81, 0.75, 0.11, 0.49, 0.78, 0.16, 0.63, 0.57, 0.18]]
    +    [[0.71, 0.76, 0.17, 0.13, 0.42, 0.07, 0.15, 0.67, 0.58, 0.02, 0.39, 0.47, 0.58, 0.88, 0.73, 0.31, 0.23, 0.57, 0.33, 0.2, 0.03, 0.43, 0.78, 0.49, 0.17, 0.56, 0.76, 0.54, 0.45, 0.46, 0.05, 0.1, 0.43, 0.63, 0.29, 0.44, 0.65, 0.01, 0.35, 0.46, 0.66, 0.7, 0.88, 0.07, 0.49, 0.92, 0.57, 0.5, 0.16, 0.77, 0.98, 0.1, 0.44, 0.88, 0.82, 0.16, 0.67, 0.63, 0.57, 0.55, 0.95, 0.13, 0.64, 0.43, 0.71, 0.81, 0.43, 0.65, 0.76, 0.7, 0.05, 0.24, 0.03, 0.9, 0.46, 0.28, 0.92, 0.25, 0.97, 0.79, 0.73, 0.97, 0.49, 0.28, 0.64, 0.19, 0.23, 0.51, 0.09, 0.1, 0.53, 0.03, 0.23, 0.94, 0.87, 0.14, 0.42, 0.82, 0.91, 0.11, 0.91, 0.37, 0.26, 0.6, 0.89, 0.6, 0.32, 0.11, 0.98, 0.67, 0.12, 0.66, 0.47, 0.02, 0.15, 0.6, 0.64, 0.57, 0.14, 0.81, 0.75, 0.11, 0.49, 0.78, 0.16, 0.63, 0.57, 0.18]]
     
     The vector field used to search of collection (vector): vector
     
    @@ -1151,16 +1135,15 @@ The max number of returned record, also known
     
     The boolean expression used to filter attribute []: id > 0
     
    -The names of partitions to search (split by "," if multiple) ['_default'] []: \_default
    +The names of partitions to search (split by "," if multiple) ['_default'] []: _default
     
     timeout []:
     
     Guarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:
     
     
    - -

    -

When performing a search on a non-indexed collection and being prompted for the required input:

    +

    +

To perform a search on a non-indexed collection and be prompted for the required input:

    milvus_cli > search
     
     Collection name (car, car2): car
    @@ -1182,8 +1165,7 @@ timeout []:
     Guarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:
     
     
    - -

List Connections

Shows the loading progress of entities.

    +

Shows the loading progress of a collection.

Syntax

    show loading_progress -c (text) [-p (text)]
     
    diff --git a/localization/v2.4.x/site/ja/userGuide/tools/milvus_backup_overview.json b/localization/v2.4.x/site/ja/userGuide/tools/milvus_backup_overview.json index f79e1e673..a9c1aa606 100644 --- a/localization/v2.4.x/site/ja/userGuide/tools/milvus_backup_overview.json +++ b/localization/v2.4.x/site/ja/userGuide/tools/milvus_backup_overview.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"","anchorList":[{"label":"Milvusバックアップ","href":"Milvus-Backup","type":1,"isActive":false},{"label":"前提条件","href":"Prerequisites","type":2,"isActive":false},{"label":"アーキテクチャ","href":"Architecture","type":2,"isActive":false},{"label":"最新リリース","href":"Latest-release","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Milvus Backup","anchorList":[{"label":"Milvusバックアップ","href":"Milvus-Backup","type":1,"isActive":false},{"label":"前提条件","href":"Prerequisites","type":2,"isActive":false},{"label":"アーキテクチャ","href":"Architecture","type":2,"isActive":false},{"label":"最新リリース","href":"Latest-release","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/userGuide/tools/milvus_backup_overview.md b/localization/v2.4.x/site/ja/userGuide/tools/milvus_backup_overview.md index 375524d21..544246734 100644 --- a/localization/v2.4.x/site/ja/userGuide/tools/milvus_backup_overview.md +++ b/localization/v2.4.x/site/ja/userGuide/tools/milvus_backup_overview.md @@ -1,9 +1,8 @@ --- id: milvus_backup_overview.md -summary: Milvus-Backupは、Milvusのデータのバックアップと復元を可能にするツールです。 -title: Milvusのバックアップ +summary: Milvus-BackupはMilvusデータのバックアップとリストアを可能にするツールです。 +title: Milvusバックアップ --- -

Milvus Backup

    diff --git a/localization/v2.4.x/site/ja/userGuide/use-partition-key.json b/localization/v2.4.x/site/ja/userGuide/use-partition-key.json index 2d2156bce..dcd63a24d 100644 --- a/localization/v2.4.x/site/ja/userGuide/use-partition-key.json +++ b/localization/v2.4.x/site/ja/userGuide/use-partition-key.json @@ -1 +1 @@ -{"codeList":["import random, time\nfrom pymilvus import connections, MilvusClient, DataType\n\nSERVER_ADDR = \"http://localhost:19530\"\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=SERVER_ADDR\n)\n\n# 2. Create a collection\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n partition_key_field=\"color\",\n num_partitions=16 # Number of partitions. Defaults to 16.\n)\n\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\nschema.add_field(field_name=\"color\", datatype=DataType.VARCHAR, max_length=512)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.collection.request.AddFieldReq;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in customized setup mode\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder()\n .fieldName(\"id\")\n .dataType(DataType.Int64)\n .isPrimaryKey(true)\n .autoID(false)\n .build());\n\nschema.addField(AddFieldReq.builder()\n .fieldName(\"vector\")\n .dataType(DataType.FloatVector)\n .dimension(5)\n .build());\n \nschema.addField(AddFieldReq.builder()\n .fieldName(\"color\")\n .dataType(DataType.VarChar)\n .maxLength(512)\n .isPartitionKey(true)\n .build());\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\nasync function main() {\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address}); \n\n// 2. 
Create a collection\n// 2.1 Define fields\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n auto_id: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n {\n name: \"color\",\n data_type: DataType.VarChar,\n max_length: 512,\n is_partition_key: true\n }\n]\n","index_params = MilvusClient.prepare_index_params()\n\nindex_params.add_index(\n field_name=\"id\",\n index_type=\"STL_SORT\"\n)\n\nindex_params.add_index(\n field_name=\"color\",\n index_type=\"Trie\"\n)\n\nindex_params.add_index(\n field_name=\"vector\",\n index_type=\"IVF_FLAT\",\n metric_type=\"L2\",\n params={\"nlist\": 1024}\n)\n","// 2.3 Prepare index parameters\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.IP)\n .extraParams(Map.of(\"nlist\", 1024))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n","// 2.2 Prepare index parameters\nconst index_params = [{\n field_name: \"color\",\n index_type: \"Trie\"\n},{\n field_name: \"id\",\n index_type: \"STL_SORT\"\n},{\n field_name: \"vector\",\n index_type: \"IVF_FLAT\",\n metric_type: \"IP\",\n params: { nlist: 1024}\n}]\n","client.create_collection(\n collection_name=\"test_collection\",\n schema=schema,\n index_params=index_params\n)\n","// 2.4 Create a collection with schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n .collectionName(\"test_collection\")\n .collectionSchema(schema)\n .indexParams(indexParams) \n .build();\n\nclient.createCollection(customizedSetupReq);\n","// 2.3 Create a collection with fields and index parameters\nres = await client.createCollection({\n collection_name: \"test_collection\",\n fields: fields, \n index_params: index_params,\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n//\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n","// 3. Insert randomly generated vectors\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n int current_tag = rand.nextInt(8999) + 1000;\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"tag\", current_tag);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nSystem.out.println(JSONObject.toJSON(data.get(0))); \n","// 3. 
Insert randomly generated vectors \nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\nvar data = []\n\nfor (let i = 0; i < 1000; i++) {\n const current_color = colors[Math.floor(Math.random() * colors.length)]\n const current_tag = Math.floor(Math.random() * 8999 + 1000)\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n tag: current_tag,\n color_tag: `${current_color}_${current_tag}`\n })\n}\n\nconsole.log(data[0])\n","{\n id: 0,\n vector: [\n 0.1275656405044483,\n 0.47417858592773277,\n 0.13858264437643286,\n 0.2390904907020377,\n 0.8447862593689635\n ],\n color: 'blue',\n tag: 2064,\n color_tag: 'blue_2064'\n}\n","res = client.insert(\n collection_name=\"test_collection\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n","// 3.1 Insert data into the collection\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"test_collection\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n","res = await client.insert({\n collection_name: \"test_collection\",\n data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n","# 4. Search with partition key\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\n\nres = client.search(\n collection_name=\"test_collection\",\n data=query_vectors,\n filter=\"color == 'green'\",\n search_params={\"metric_type\": \"L2\", \"params\": {\"nprobe\": 10}},\n output_fields=[\"id\", \"color_tag\"],\n limit=3\n)\n\nprint(res)\n\n# Output\n#\n# [\n# [\n# {\n# \"id\": 970,\n# \"distance\": 0.5770174264907837,\n# \"entity\": {\n# \"id\": 970,\n# \"color_tag\": \"green_9828\"\n# }\n# },\n# {\n# \"id\": 115,\n# \"distance\": 0.6898155808448792,\n# \"entity\": {\n# \"id\": 115,\n# \"color_tag\": \"green_4073\"\n# }\n# },\n# {\n# \"id\": 899,\n# \"distance\": 0.7028976678848267,\n# \"entity\": {\n# \"id\": 899,\n# \"color_tag\": \"green_9897\"\n# }\n# }\n# ]\n# ]\n","// 4. Search with partition key\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"test_collection\")\n .data(query_vectors)\n .filter(\"color == \\\"green\\\"\")\n .topK(3)\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp)); \n\n// Output:\n// {\"searchResults\": [[\n// {\n// \"distance\": 1.0586997,\n// \"id\": 414,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.981384,\n// \"id\": 293,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.9548756,\n// \"id\": 325,\n// \"entity\": {}\n// }\n// ]]}\n","// 4. 
Search with partition key\nconst query_vectors = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"test_collection\",\n data: query_vectors,\n filter: \"color == 'green'\",\n output_fields: [\"color_tag\"],\n limit: 3\n})\n\nconsole.log(res.results)\n\n// Output\n// \n// [\n// { score: 2.402090549468994, id: '135', color_tag: 'green_2694' },\n// { score: 2.3938629627227783, id: '326', color_tag: 'green_7104' },\n// { score: 2.3235254287719727, id: '801', color_tag: 'green_3162' }\n// ]\n// \n"],"headingContent":"","anchorList":[{"label":"パーティション・キーの使用","href":"Use-Partition-Key","type":1,"isActive":false},{"label":"概要","href":"Overview","type":2,"isActive":false},{"label":"パーティション・キーの有効化","href":"Enable-partition-key","type":2,"isActive":false},{"label":"パーティションのリスト","href":"List-partitions","type":2,"isActive":false},{"label":"データの挿入","href":"Insert-data","type":2,"isActive":false},{"label":"パーティション・キーの使用","href":"Use-partition-key","type":2,"isActive":false},{"label":"典型的な使用例","href":"Typical-use-cases","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["import random, time\nfrom pymilvus import connections, MilvusClient, DataType\n\nSERVER_ADDR = \"http://localhost:19530\"\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=SERVER_ADDR\n)\n\n# 2. Create a collection\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n partition_key_field=\"color\",\n num_partitions=64 # Number of partitions. Defaults to 64.\n)\n\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\nschema.add_field(field_name=\"color\", datatype=DataType.VARCHAR, max_length=512)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.collection.request.AddFieldReq;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in customized setup mode\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder()\n .fieldName(\"id\")\n .dataType(DataType.Int64)\n .isPrimaryKey(true)\n .autoID(false)\n .build());\n\nschema.addField(AddFieldReq.builder()\n .fieldName(\"vector\")\n .dataType(DataType.FloatVector)\n .dimension(5)\n .build());\n \nschema.addField(AddFieldReq.builder()\n .fieldName(\"color\")\n .dataType(DataType.VarChar)\n .maxLength(512)\n .isPartitionKey(true)\n .build());\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\nasync function main() {\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address}); \n\n// 2. 
Create a collection\n// 2.1 Define fields\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n auto_id: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n {\n name: \"color\",\n data_type: DataType.VarChar,\n max_length: 512,\n is_partition_key: true\n }\n]\n","index_params = MilvusClient.prepare_index_params()\n\nindex_params.add_index(\n field_name=\"id\",\n index_type=\"STL_SORT\"\n)\n\nindex_params.add_index(\n field_name=\"color\",\n index_type=\"Trie\"\n)\n\nindex_params.add_index(\n field_name=\"vector\",\n index_type=\"IVF_FLAT\",\n metric_type=\"L2\",\n params={\"nlist\": 1024}\n)\n","// 2.3 Prepare index parameters\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.IP)\n .extraParams(Map.of(\"nlist\", 1024))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n","// 2.2 Prepare index parameters\nconst index_params = [{\n field_name: \"color\",\n index_type: \"Trie\"\n},{\n field_name: \"id\",\n index_type: \"STL_SORT\"\n},{\n field_name: \"vector\",\n index_type: \"IVF_FLAT\",\n metric_type: \"IP\",\n params: { nlist: 1024}\n}]\n","client.create_collection(\n collection_name=\"test_collection\",\n schema=schema,\n index_params=index_params\n)\n","// 2.4 Create a collection with schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n .collectionName(\"test_collection\")\n .collectionSchema(schema)\n .indexParams(indexParams) \n .build();\n\nclient.createCollection(customizedSetupReq);\n","// 2.3 Create a collection with fields and index parameters\nres = await client.createCollection({\n collection_name: \"test_collection\",\n fields: fields, \n index_params: index_params,\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n//\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n","// 3. Insert randomly generated vectors\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n int current_tag = rand.nextInt(8999) + 1000;\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"tag\", current_tag);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nSystem.out.println(JSONObject.toJSON(data.get(0))); \n","// 3. 
Insert randomly generated vectors \nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\nvar data = []\n\nfor (let i = 0; i < 1000; i++) {\n const current_color = colors[Math.floor(Math.random() * colors.length)]\n const current_tag = Math.floor(Math.random() * 8999 + 1000)\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n tag: current_tag,\n color_tag: `${current_color}_${current_tag}`\n })\n}\n\nconsole.log(data[0])\n","{\n id: 0,\n vector: [\n 0.1275656405044483,\n 0.47417858592773277,\n 0.13858264437643286,\n 0.2390904907020377,\n 0.8447862593689635\n ],\n color: 'blue',\n tag: 2064,\n color_tag: 'blue_2064'\n}\n","res = client.insert(\n collection_name=\"test_collection\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n","// 3.1 Insert data into the collection\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"test_collection\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n","res = await client.insert({\n collection_name: \"test_collection\",\n data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n","# 4. Search with partition key\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\n\nres = client.search(\n collection_name=\"test_collection\",\n data=query_vectors,\n filter=\"color == 'green'\",\n search_params={\"metric_type\": \"L2\", \"params\": {\"nprobe\": 10}},\n output_fields=[\"id\", \"color_tag\"],\n limit=3\n)\n\nprint(res)\n\n# Output\n#\n# [\n# [\n# {\n# \"id\": 970,\n# \"distance\": 0.5770174264907837,\n# \"entity\": {\n# \"id\": 970,\n# \"color_tag\": \"green_9828\"\n# }\n# },\n# {\n# \"id\": 115,\n# \"distance\": 0.6898155808448792,\n# \"entity\": {\n# \"id\": 115,\n# \"color_tag\": \"green_4073\"\n# }\n# },\n# {\n# \"id\": 899,\n# \"distance\": 0.7028976678848267,\n# \"entity\": {\n# \"id\": 899,\n# \"color_tag\": \"green_9897\"\n# }\n# }\n# ]\n# ]\n","// 4. Search with partition key\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"test_collection\")\n .data(query_vectors)\n .filter(\"color == \\\"green\\\"\")\n .topK(3)\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp)); \n\n// Output:\n// {\"searchResults\": [[\n// {\n// \"distance\": 1.0586997,\n// \"id\": 414,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.981384,\n// \"id\": 293,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.9548756,\n// \"id\": 325,\n// \"entity\": {}\n// }\n// ]]}\n","// 4. 
Search with partition key\nconst query_vectors = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"test_collection\",\n data: query_vectors,\n filter: \"color == 'green'\",\n output_fields: [\"color_tag\"],\n limit: 3\n})\n\nconsole.log(res.results)\n\n// Output\n// \n// [\n// { score: 2.402090549468994, id: '135', color_tag: 'green_2694' },\n// { score: 2.3938629627227783, id: '326', color_tag: 'green_7104' },\n// { score: 2.3235254287719727, id: '801', color_tag: 'green_3162' }\n// ]\n// \n"],"headingContent":"Use Partition Key","anchorList":[{"label":"パーティション・キーの使用","href":"Use-Partition-Key","type":1,"isActive":false},{"label":"概要","href":"Overview","type":2,"isActive":false},{"label":"パーティション・キーの有効化","href":"Enable-partition-key","type":2,"isActive":false},{"label":"パーティションのリスト","href":"List-partitions","type":2,"isActive":false},{"label":"データの挿入","href":"Insert-data","type":2,"isActive":false},{"label":"パーティション・キーの使用","href":"Use-partition-key","type":2,"isActive":false},{"label":"典型的な使用例","href":"Typical-use-cases","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ja/userGuide/use-partition-key.md b/localization/v2.4.x/site/ja/userGuide/use-partition-key.md index 908777c5d..9e6c1332b 100644 --- a/localization/v2.4.x/site/ja/userGuide/use-partition-key.md +++ b/localization/v2.4.x/site/ja/userGuide/use-partition-key.md @@ -1,7 +1,6 @@ --- id: use-partition-key.md title: パーティション・キーの使用 -summary: '' ---

Use Partition Key

If you set a specific field in a collection as the partition key, Milvus distributes incoming entities into different partitions according to their respective partition values in this field. This keeps entities with the same key value together in a partition, and improves search performance by sparing the need to scan irrelevant partitions when filtering by the key field. Compared with traditional filtering methods, the partition key can greatly improve query performance.

    +

By setting a specific field in a collection as the partition key, Milvus can distribute incoming entities into different partitions according to their respective partition values in this field. This keeps entities with the same key value together in a partition, improving search performance by avoiding the need to scan irrelevant partitions when filtering by the key field. Compared with traditional filtering methods, the partition key can greatly improve query performance.

You can use partition keys to implement multi-tenancy. For details on multi-tenancy, read Multi-tenancy.

Enable partition key

To set a field as the partition key, specify partition_key_field when creating the collection schema.

    -

In the code example below, num_partitions determines the number of partitions that will be created. By default, it is set to 16. We recommend retaining the default value.

    +

In the code example below, num_partitions determines the number of partitions that will be created. By default, it is set to 64. We recommend retaining the default value.

For more information on the parameters, refer to MilvusClient, create_schema(), and add_field(). A self-contained version of this schema setup follows the hunk below.

    @@ -79,7 +78,7 @@ schema = MilvusClient.create_schema( auto_id=False, enable_dynamic_field=True, partition_key_field="color", - num_partitions=16 # Number of partitions. Defaults to 16. + num_partitions=64 # Number of partitions. Defaults to 64. ) schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True) @@ -345,7 +344,7 @@ data = []
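For reference, here is a self-contained version of the schema setup shown in the hunk above, mirroring the Python snippet in this page's code list; only the server URI is an assumption for a local deployment.

```python
from pymilvus import MilvusClient, DataType

client = MilvusClient(uri="http://localhost:19530")  # assumed local deployment

# "color" is designated as the partition key; num_partitions defaults to 64.
schema = MilvusClient.create_schema(
    auto_id=False,
    enable_dynamic_field=True,
    partition_key_field="color",
    num_partitions=64,
)
schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
schema.add_field(field_name="vector", datatype=DataType.FLOAT_VECTOR, dim=5)
schema.add_field(field_name="color", datatype=DataType.VARCHAR, max_length=512)
```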

To insert data, use the insert() method to insert the data into the collection.

    -

To insert data into the collection, use the insert() method to insert the data into the collection.

    +

To insert the data into the collection, use the insert() method.

To insert data into the collection, use the insert() method.

    @@ -554,4 +553,4 @@ res = await client. -

By leveraging the partition key feature, you can achieve better search performance and enable multi-tenancy. This is done by assigning a tenant-specific value as the partition key field of each entity. When searching or querying the collection, you can filter entities by the tenant-specific value by including the partition key field in the boolean expression. This approach ensures data isolation by tenant and avoids scanning unnecessary partitions.

    +

By leveraging the partition key feature, you can achieve better search performance and enable multi-tenancy. This is done by assigning a tenant-specific value as the partition key field of each entity. When you search or query the collection, you can filter entities by the tenant-specific value by including the partition key field in the boolean expression. This approach ensures data isolation by tenant and avoids scanning unnecessary partitions.
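As a sketch of the multi-tenancy pattern just described, the search below reuses the collection from this page's example, where color is the partition key; in a real multi-tenant setup the field would hold a tenant ID instead. Including the key in the filter confines the scan to the partitions holding that tenant's data.

```python
from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")  # assumed local deployment

# "color" is the partition key in this page's example; substitute a tenant ID
# field (e.g. tenant_id == 'tenant_A') in a multi-tenant deployment.
res = client.search(
    collection_name="test_collection",
    data=[[0.35, -0.60, 0.18, -0.26, 0.90]],
    filter="color == 'green'",               # tenant-specific key value
    search_params={"metric_type": "L2", "params": {"nprobe": 10}},
    output_fields=["id", "color_tag"],
    limit=3,
)
print(res)
```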

    diff --git a/localization/v2.4.x/site/ko/adminGuide/clouds/aws/s3.json b/localization/v2.4.x/site/ko/adminGuide/clouds/aws/s3.json index 2c3f0b69d..39f2273ea 100644 --- a/localization/v2.4.x/site/ko/adminGuide/clouds/aws/s3.json +++ b/localization/v2.4.x/site/ko/adminGuide/clouds/aws/s3.json @@ -1 +1 @@ -{"codeList":["milvus_bucket_name=\"milvus-bucket-$(openssl rand -hex 12)\"\n\naws s3api create-bucket --bucket \"$milvus_bucket_name\" --region 'us-east-2' --acl private --object-ownership ObjectWriter --create-bucket-configuration LocationConstraint='us-east-2'\n\n\n# Output\n#\n# \"Location\": \"http://milvus-bucket-039dd013c0712f085d60e21f.s3.amazonaws.com/\"\n","echo '{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n}' > milvus-s3-policy.json\n\naws iam create-policy --policy-name MilvusS3ReadWrite --policy-document file://milvus-s3-policy.json\n\n\n# Get the ARN from the command output as follows:\n# {\n# \"Policy\": {\n# \"PolicyName\": \"MilvusS3ReadWrite\",\n# \"PolicyId\": \"AN5QQVVPM1BVTFlBNkdZT\",\n# \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n# \"Path\": \"/\",\n# \"DefaultVersionId\": \"v1\",\n# \"AttachmentCount\": 0,\n# \"PermissionsBoundaryUsageCount\": 0,\n# \"IsAttachable\": true,\n# \"CreateDate\": \"2023-11-16T06:00:01+00:00\",\n# \"UpdateDate\": \"2023-11-16T06:00:01+00:00\"\n# }\n# } \n","eksctl create iamserviceaccount --name milvus-s3-access-sa --namespace milvus --cluster milvus-eks-cluster --role-name milvus-s3-access-sa \\\n --attach-policy-arn arn:aws:iam:::policy/MilvusS3ReadWrite --approve\n","aws iam get-role --role-name milvus-s3-access-sa --query Role.AssumeRolePolicyDocument\n# An example output is as follows\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Federated\": \"arn:aws:iam::111122223333:oidc-provider/oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE\"\n },\n \"Action\": \"sts:AssumeRoleWithWebIdentity\",\n \"Condition\": {\n \"StringEquals\": {\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:sub\": \"system:serviceaccount:default:my-service-account\",\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:aud\": \"sts.amazonaws.com\"\n }\n }\n }\n ]\n}\n","aws iam list-attached-role-policies --role-name milvus-s3-access-sa --query 'AttachedPolicies[].PolicyArn' --output text\n# An example output is as follows\narn:aws:iam::12345678901:policy/MilvusS3ReadWrite\n","export policy_arn='arn:aws:iam::12345678901:policy/MilvusS3ReadWrite'\naws iam get-policy --policy-arn $policy_arn\n# An example output is as follows\n{\n \"Policy\": {\n \"PolicyName\": \"MilvusS3ReadWrite\",\n \"PolicyId\": \"EXAMPLEBIOWGLDEXAMPLE\",\n \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n \"Path\": \"/\",\n \"DefaultVersionId\": \"v2\",\n [...]\n }\n}\n","aws iam get-policy-version --policy-arn $policy_arn --version-id v2\n# An example output is as follows\n{\n \"PolicyVersion\": {\n \"Document\": {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n },\n [...]\n }\n}\n","kubectl describe 
serviceaccount milvus-s3-access-sa -n milvus\n# An example output is as follows\nName: milvus-s3-access-sa\nNamespace: milvus\nLabels: app.kubernetes.io/managed-by=eksctl\nAnnotations: eks.amazonaws.com/role-arn: arn:aws:iam::12345678901:role/milvus-s3-access-sa\n[...]\n","helm repo add milvus https://zilliztech.github.io/milvus-helm/\nhelm repo update\n","cluster:\n enabled: true\n\nservice:\n type: LoadBalancer\n port: 19530\n annotations: \n service.beta.kubernetes.io/aws-load-balancer-type: external\n service.beta.kubernetes.io/aws-load-balancer-name: milvus-service\n service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing\n service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip\n\nserviceAccount:\n create: false\n name: milvus-s3-access-sa\n\nminio:\n enabled: false\n\nexternalS3:\n enabled: true\n host: \"s3.us-east-2.amazonaws.com\"\n port: \"443\"\n useSSL: true\n bucketName: \"\"\n useIAM: true\n cloudProvider: \"aws\"\n iamEndpoint: \"\"\n\nrootCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: 1\n memory: 2Gi\n\nindexCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nqueryCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\ndataCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nproxy:\n replicas: 2\n resources: \n limits:\n cpu: 1\n memory: 2Gi \n","helm upgrade --install milvus-demo milvus/milvus -n milvus -f milvus.yaml\n"],"headingContent":"","anchorList":[{"label":"IAM 역할별로 S3 액세스 구성하기","href":"Configure-S3-Access-by-IAM-Role","type":1,"isActive":false},{"label":"시작하기 전에","href":"Before-you-start","type":2,"isActive":false},{"label":"IAM 역할을 Kubernetes 서비스 계정에 연결하기","href":"Associate-an-IAM-role-with-a-Kubernetes-service-account","type":2,"isActive":false},{"label":"역할 및 서비스 계정 설정 확인","href":"Verify-the-role-and-service-account-setup","type":2,"isActive":false},{"label":"Milvus 배포","href":"Deploy-Milvus","type":2,"isActive":false},{"label":"설치 확인","href":"Verify-the-installation","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["milvus_bucket_name=\"milvus-bucket-$(openssl rand -hex 12)\"\n\naws s3api create-bucket --bucket \"$milvus_bucket_name\" --region 'us-east-2' --acl private --object-ownership ObjectWriter --create-bucket-configuration LocationConstraint='us-east-2'\n\n\n# Output\n#\n# \"Location\": \"http://milvus-bucket-039dd013c0712f085d60e21f.s3.amazonaws.com/\"\n","echo '{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:ListBucket\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\"\n ]\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:DeleteObject\",\n \"s3:GetObject\",\n \"s3:PutObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n}' > milvus-s3-policy.json\n\naws iam create-policy --policy-name MilvusS3ReadWrite --policy-document file://milvus-s3-policy.json\n\n\n# Get the ARN from the command output as follows:\n# {\n# \"Policy\": {\n# \"PolicyName\": \"MilvusS3ReadWrite\",\n# \"PolicyId\": \"AN5QQVVPM1BVTFlBNkdZT\",\n# \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n# \"Path\": \"/\",\n# \"DefaultVersionId\": \"v1\",\n# \"AttachmentCount\": 0,\n# \"PermissionsBoundaryUsageCount\": 0,\n# \"IsAttachable\": true,\n# \"CreateDate\": \"2023-11-16T06:00:01+00:00\",\n# \"UpdateDate\": 
\"2023-11-16T06:00:01+00:00\"\n# }\n# } \n","eksctl create iamserviceaccount --name milvus-s3-access-sa --namespace milvus --cluster milvus-eks-cluster --role-name milvus-s3-access-sa \\\n --attach-policy-arn arn:aws:iam:::policy/MilvusS3ReadWrite --approve\n","aws iam get-role --role-name milvus-s3-access-sa --query Role.AssumeRolePolicyDocument\n# An example output is as follows\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Federated\": \"arn:aws:iam::111122223333:oidc-provider/oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE\"\n },\n \"Action\": \"sts:AssumeRoleWithWebIdentity\",\n \"Condition\": {\n \"StringEquals\": {\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:sub\": \"system:serviceaccount:default:my-service-account\",\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:aud\": \"sts.amazonaws.com\"\n }\n }\n }\n ]\n}\n","aws iam list-attached-role-policies --role-name milvus-s3-access-sa --query 'AttachedPolicies[].PolicyArn' --output text\n# An example output is as follows\narn:aws:iam::12345678901:policy/MilvusS3ReadWrite\n","export policy_arn='arn:aws:iam::12345678901:policy/MilvusS3ReadWrite'\naws iam get-policy --policy-arn $policy_arn\n# An example output is as follows\n{\n \"Policy\": {\n \"PolicyName\": \"MilvusS3ReadWrite\",\n \"PolicyId\": \"EXAMPLEBIOWGLDEXAMPLE\",\n \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n \"Path\": \"/\",\n \"DefaultVersionId\": \"v2\",\n [...]\n }\n}\n","aws iam get-policy-version --policy-arn $policy_arn --version-id v2\n# An example output is as follows\n{\n \"PolicyVersion\": {\n \"Document\": {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n },\n [...]\n }\n}\n","kubectl describe serviceaccount milvus-s3-access-sa -n milvus\n# An example output is as follows\nName: milvus-s3-access-sa\nNamespace: milvus\nLabels: app.kubernetes.io/managed-by=eksctl\nAnnotations: eks.amazonaws.com/role-arn: arn:aws:iam::12345678901:role/milvus-s3-access-sa\n[...]\n","helm repo add milvus https://zilliztech.github.io/milvus-helm/\nhelm repo update\n","cluster:\n enabled: true\n\nservice:\n type: LoadBalancer\n port: 19530\n annotations: \n service.beta.kubernetes.io/aws-load-balancer-type: external\n service.beta.kubernetes.io/aws-load-balancer-name: milvus-service\n service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing\n service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip\n\nserviceAccount:\n create: false\n name: milvus-s3-access-sa\n\nminio:\n enabled: false\n\nexternalS3:\n enabled: true\n host: \"s3.us-east-2.amazonaws.com\"\n port: \"443\"\n useSSL: true\n bucketName: \"\"\n useIAM: true\n cloudProvider: \"aws\"\n iamEndpoint: \"\"\n\nrootCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: 1\n memory: 2Gi\n\nindexCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nqueryCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\ndataCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nproxy:\n replicas: 2\n resources: \n limits:\n cpu: 1\n memory: 
2Gi \n","helm upgrade --install milvus-demo milvus/milvus -n milvus -f milvus.yaml\n"],"headingContent":"Configure S3 Access by IAM Role","anchorList":[{"label":"IAM 역할별로 S3 액세스 구성하기","href":"Configure-S3-Access-by-IAM-Role","type":1,"isActive":false},{"label":"시작하기 전에","href":"Before-you-start","type":2,"isActive":false},{"label":"IAM 역할을 Kubernetes 서비스 계정에 연결하기","href":"Associate-an-IAM-role-with-a-Kubernetes-service-account","type":2,"isActive":false},{"label":"역할 및 서비스 계정 설정 확인","href":"Verify-the-role-and-service-account-setup","type":2,"isActive":false},{"label":"Milvus 배포","href":"Deploy-Milvus","type":2,"isActive":false},{"label":"설치 확인","href":"Verify-the-installation","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/adminGuide/clouds/aws/s3.md b/localization/v2.4.x/site/ko/adminGuide/clouds/aws/s3.md index c764618e8..39b0f05b5 100644 --- a/localization/v2.4.x/site/ko/adminGuide/clouds/aws/s3.md +++ b/localization/v2.4.x/site/ko/adminGuide/clouds/aws/s3.md @@ -1,6 +1,6 @@ --- id: s3.md -title: IAM 역할별 S3 액세스 구성 +title: IAM 역할별로 S3 액세스 구성하기 related_key: 's3, storage, iam' summary: IAM 역할로 s3를 구성하는 방법을 알아보세요. --- @@ -56,7 +56,7 @@ summary: IAM 역할로 s3를 구성하는 방법을 알아보세요.
    • Create an AWS S3 bucket.

      -

      Read the bucket naming rules and follow them when naming your AWS S3 bucket.

      +

      Read the bucket naming rules and follow them when naming your AWS S3 bucket.

      milvus_bucket_name="milvus-bucket-$(openssl rand -hex 12)"
       
       aws s3api create-bucket --bucket "$milvus_bucket_name" --region 'us-east-2' --acl private  --object-ownership ObjectWriter --create-bucket-configuration LocationConstraint='us-east-2'
      @@ -73,13 +73,20 @@ aws s3api create-bucket --bucket "
    • Apply the file:

      kubectl apply -f milvus-operator-certificate.yaml
      diff --git a/localization/v2.4.x/site/ko/adminGuide/configure-docker.json b/localization/v2.4.x/site/ko/adminGuide/configure-docker.json
      index 5c73b3f91..125e4902c 100644
      --- a/localization/v2.4.x/site/ko/adminGuide/configure-docker.json
      +++ b/localization/v2.4.x/site/ko/adminGuide/configure-docker.json
      @@ -1 +1 @@
      -{"codeList":["$ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.9/configs/milvus.yaml\n","# For Milvus standalone\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml\n","...\n  standalone:\n    container_name: milvus-standalone\n    image: milvusdb/milvus:v2.2.13\n    command: [\"milvus\", \"run\", \"standalone\"]\n    environment:\n      ETCD_ENDPOINTS: etcd:2379\n      MINIO_ADDRESS: minio:9000\n    volumes:\n      - /local/path/to/your/milvus.yaml:/milvus/configs/milvus.yaml   # Map the local path to the container path\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n    ports:\n      - \"19530:19530\"\n      - \"9091:9091\"\n    depends_on:\n      - \"etcd\"\n      - \"minio\"\n...\n","$ sudo docker compose up -d\n"],"headingContent":"","anchorList":[{"label":"Docker Compose로 Milvus 구성하기","href":"Configure-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"구성 파일 다운로드","href":"Download-a-configuration-file","type":2,"isActive":false},{"label":"구성 파일 수정하기","href":"Modify-the-configuration-file","type":2,"isActive":false},{"label":"설치 파일 다운로드","href":"Download-an-installation-file","type":2,"isActive":false},{"label":"설치 파일 수정하기","href":"Modify-the-installation-file","type":2,"isActive":false},{"label":"Milvus 시작","href":"Start-Milvus","type":2,"isActive":false},{"label":"다음 단계","href":"Whats-next","type":2,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":["$ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.13-hotfix/configs/milvus.yaml\n","# For Milvus standalone\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml\n","...\n  standalone:\n    container_name: milvus-standalone\n    image: milvusdb/milvus:v2.2.13\n    command: [\"milvus\", \"run\", \"standalone\"]\n    environment:\n      ETCD_ENDPOINTS: etcd:2379\n      MINIO_ADDRESS: minio:9000\n    volumes:\n      - /local/path/to/your/milvus.yaml:/milvus/configs/milvus.yaml   # Map the local path to the container path\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n    ports:\n      - \"19530:19530\"\n      - \"9091:9091\"\n    depends_on:\n      - \"etcd\"\n      - \"minio\"\n...\n","$ sudo docker compose up -d\n"],"headingContent":"Configure Milvus with Docker Compose","anchorList":[{"label":"Docker Compose로 Milvus 구성하기","href":"Configure-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"구성 파일 다운로드","href":"Download-a-configuration-file","type":2,"isActive":false},{"label":"구성 파일 수정하기","href":"Modify-the-configuration-file","type":2,"isActive":false},{"label":"설치 파일 다운로드","href":"Download-an-installation-file","type":2,"isActive":false},{"label":"설치 파일 수정하기","href":"Modify-the-installation-file","type":2,"isActive":false},{"label":"Milvus 시작","href":"Start-Milvus","type":2,"isActive":false},{"label":"다음 단계","href":"Whats-next","type":2,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/ko/adminGuide/configure-docker.md b/localization/v2.4.x/site/ko/adminGuide/configure-docker.md
      index e645798f2..efe208cda 100644
      --- a/localization/v2.4.x/site/ko/adminGuide/configure-docker.md
      +++ b/localization/v2.4.x/site/ko/adminGuide/configure-docker.md
      @@ -38,8 +38,8 @@ title: Docker Compose로 Milvus 구성하기
      -    

      Download milvus.yaml directly, or use the following command to download it.

      -
      $ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.9/configs/milvus.yaml
      +    

      Download milvus.yaml directly, or use the following command to download it.

      +
      $ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.13-hotfix/configs/milvus.yaml
       

      Modify the configuration file

    @@ -211,10 +211,10 @@ title: Docker Compose로 Milvus 구성하기
    -

    Download the installation file for Milvus standalone and save it as docker-compose.yml.

    +

    Download the installation file for Milvus standalone and save it as docker-compose.yml.

    Alternatively, you can simply run the following command.

    # For Milvus standalone
    -$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml
    +$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml
     

    Modify the installation file

    If the verification results are satisfactory, you can deploy Milvus Distributed by following these steps:

    -

    Tips for deploying Milvus with Helm

    By default, QueryNode pods use NVMe disks as EmptyDir volumes. To ensure optimal performance, it is advisable to mount the NVMe disks to /var/lib/milvus/data inside the QueryNode pods.

    +

    Tips for deploying Milvus Distributed with Helm

    By default, QueryNode pods use NVMe disks as EmptyDir volumes. To ensure optimal performance, it is advisable to mount the NVMe disks to /var/lib/milvus/data inside the QueryNode pods.

    For details on how to deploy Milvus Distributed with Helm, refer to Run Milvus in Kubernetes with Helm.

    Tips for deploying Milvus Distributed with Milvus Operator

    Milvus Operator automatically configures QueryNode pods to use NVMe disks as EmptyDir volumes. It is advisable to add the following configuration to the MilvusCluster custom resource:

    ...
    diff --git a/localization/v2.4.x/site/ko/adminGuide/limit_collection_counts.json b/localization/v2.4.x/site/ko/adminGuide/limit_collection_counts.json
    index 2d19f9b01..f59a9f3d6 100644
    --- a/localization/v2.4.x/site/ko/adminGuide/limit_collection_counts.json
    +++ b/localization/v2.4.x/site/ko/adminGuide/limit_collection_counts.json
    @@ -1 +1 @@
    -{"codeList":["rootCoord:\n    maxGeneralCapacity: 1024\n","60 (collections) x 2 (shards) x 4 (partitions) + 40 (collections) x 1 (shard) x 12 (partitions) = 960\n","failed checking constraint: sum_collections(parition*shard) exceeding the max general capacity:\n"],"headingContent":"","anchorList":[{"label":"수집 횟수 제한","href":"Limit-Collection-Counts","type":1,"isActive":false},{"label":"구성 옵션","href":"Configuration-options","type":2,"isActive":false},{"label":"컬렉션 수 계산하기","href":"Calculating-the-number-of-collections","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["rootCoord:\n    maxGeneralCapacity: 65536\n","60 (collections) x 2 (shards) x 4 (partitions) + 40 (collections) x 1 (shard) x 12 (partitions) = 960\n","failed checking constraint: sum_collections(parition*shard) exceeding the max general capacity:\n"],"headingContent":"Limit Collection Counts","anchorList":[{"label":"수집 횟수 제한","href":"Limit-Collection-Counts","type":1,"isActive":false},{"label":"구성 옵션","href":"Configuration-options","type":2,"isActive":false},{"label":"컬렉션 수 계산하기","href":"Calculating-the-number-of-collections","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/ko/adminGuide/limit_collection_counts.md b/localization/v2.4.x/site/ko/adminGuide/limit_collection_counts.md
    index 8f9896aa3..63c6dd526 100644
    --- a/localization/v2.4.x/site/ko/adminGuide/limit_collection_counts.md
    +++ b/localization/v2.4.x/site/ko/adminGuide/limit_collection_counts.md
    @@ -1,7 +1,6 @@
     ---
     id: limit_collection_counts.md
     title: 수거 횟수 제한 설정
    -summary: ''
     ---
     

    Limit Collection Counts

    A Milvus instance allows up to 65,536 collections. However, too many collections may result in performance issues. Hence, it is recommended to limit the number of collections created in a Milvus instance.

    -

    This guide provides instructions on how to limit the number of collections in a Milvus instance.

    +

    This guide provides instructions on how to limit the number of collections in a Milvus instance.

    The configuration varies with the way you install your Milvus instance.

    • For Milvus instances installed using Helm Charts

      @@ -45,9 +44,9 @@ summary: ''
      rootCoord:
      -    maxGeneralCapacity: 1024
      +    maxGeneralCapacity: 65536
       
      -

      The maxGeneralCapacity parameter sets the maximum number of collections that the current Milvus instance can hold. The default value is 1024.

      +

      The maxGeneralCapacity parameter sets the maximum number of collections that the current Milvus instance can hold. The default value is 65536.

      Calculating the number of collections
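
      The codeList payload above carries the worked example for this heading: 60 (collections) x 2 (shards) x 4 (partitions) + 40 (collections) x 1 (shard) x 12 (partitions) = 960. Below is a small sketch of that check, assuming capacity usage is the sum of shards x partitions across all collections; the list literal is illustrative.

      # General-capacity usage, per the example formula in the payload above.
      groups = [
          {"collections": 60, "shards": 2, "partitions": 4},
          {"collections": 40, "shards": 1, "partitions": 12},
      ]
      usage = sum(g["collections"] * g["shards"] * g["partitions"] for g in groups)
      print(usage)  # 960, well under maxGeneralCapacity (65536)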

      The following example demonstrates how to grant the role named roleA the privilege to search all collections.

      -

      object_type specifies the object type, which can also be understood as the resource type. Currently, valid values include Collection/User/Global, etc., where Global means there is no specific resource type. object_name is the resource name. If object_type is Collection, the object name can refer to a specific collection name, or you can use * to specify all collections. If object_type is Global, the object name can only be specified as *. See Users and Roles for other types of privileges you can grant.

      +

      object_type specifies the object type, which can also be understood as the resource type. Currently, valid values include Collection/User/Global, etc., where Global means there is no specific resource type. object_name is the resource name. If object_type is Collection, the object name can refer to a specific collection name, or you can use * to specify all collections. If object_type is Global, the object name can only be specified as *. See Users and Roles for other types of privileges you can grant.

      Before managing role privileges, make sure you have enabled user authentication. Otherwise, an error may occur. For information on how to enable user authentication, refer to Authenticate User Access.

      # grant privilege to a role
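      # Hedged sketch, not part of this changeset: grant roleA the Search
      # privilege on all collections, matching the object_type/object_name
      # description above. The URI and the root:Milvus token are assumptions;
      # user authentication must be enabled (see the note above).
      from pymilvus import connections, Role

      connections.connect(uri="http://localhost:19530", token="root:Milvus")
      role = Role("roleA")
      role.grant("Collection", "*", "Search")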
       
      diff --git a/localization/v2.4.x/site/ko/adminGuide/resource_group.json b/localization/v2.4.x/site/ko/adminGuide/resource_group.json
      index 1e9d0f99b..b74c9955c 100644
      --- a/localization/v2.4.x/site/ko/adminGuide/resource_group.json
      +++ b/localization/v2.4.x/site/ko/adminGuide/resource_group.json
      @@ -1 +1 @@
      -{"codeList":["{\n    \"requests\": { \"nodeNum\": 1 },\n    \"limits\": { \"nodeNum\": 1 },\n    \"transfer_from\": [{ \"resource_group\": \"rg1\" }],\n    \"transfer_to\": [{ \"resource_group\": \"rg2\" }]\n}\n","import pymilvus\n\n# A resource group name should be a string of 1 to 255 characters, starting with a letter or an underscore (_) and containing only numbers, letters, and underscores (_).\nname = \"rg\"\nnode_num = 0\n\n# create a resource group that exactly hold no query node.\ntry:\n    utility.create_resource_group(name, config=utility.ResourceGroupConfig(\n        requests={\"node_num\": node_num},\n        limits={\"node_num\": node_num},\n    ), using='default')\n    print(f\"Succeeded in creating resource group {name}.\")\nexcept Exception:\n    print(\"Failed to create the resource group.\")\n","rgs = utility.list_resource_groups(using='default')\nprint(f\"Resource group list: {rgs}\")\n\n# Resource group list: ['__default_resource_group', 'rg']\n","info = utility.describe_resource_group(name, using=\"default\")\nprint(f\"Resource group description: {info}\")\n\n# Resource group description: \n#        ,           // string, rg name\n#        ,            // int, num_node which has been transfer to this rg\n#        ,  // int, available node_num, some node may shutdown\n#        , // map[string]int, from collection_name to loaded replica of each collecion in this rg\n#        ,  // map[string]int, from collection_name to outgoging accessed node num by replica loaded in this rg \n#        .  // map[string]int, from collection_name to incoming accessed node num by replica loaded in other rg\n","source = '__default_resource_group'\ntarget = 'rg'\nexpected_num_nodes_in_default = 0\nexpected_num_nodes_in_rg = 1\n\ntry:\n    utility.update_resource_groups({\n        source: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_default},\n            limits={\"node_num\": expected_num_nodes_in_default},\n        ),\n        target: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_rg},\n            limits={\"node_num\": expected_num_nodes_in_rg},\n        )\n    }, using=\"default\")\n    print(f\"Succeeded in move 1 node(s) from {source} to {target}.\")\nexcept Exception:\n    print(\"Something went wrong while moving nodes.\")\n\n# After a while, succeeded in moving 1 node(s) from __default_resource_group to rg.\n","from pymilvus import Collection\n\ncollection = Collection('demo')\n\n# Milvus loads the collection to the default resource group.\ncollection.load(replica_number=2)\n\n# Or, you can ask Milvus load the collection to the desired resource group.\n# make sure that query nodes num should be greater or equal to replica_number\nresource_groups = ['rg']\ncollection.load(replica_number=2, _resource_groups=resource_groups) \n","collection = Collection(\"Books\")\n\n# Use the load method of a collection to load one of its partition\ncollection.load([\"Novels\"], replica_number=2, _resource_groups=resource_groups)\n\n# Or, you can use the load method of a partition directly\npartition = Partition(collection, \"Novels\")\npartition.load(replica_number=2, _resource_groups=resource_groups)\n","source = '__default_resource_group'\ntarget = 'rg'\ncollection_name = 'c'\nnum_replicas = 1\n\ntry:\n    utility.transfer_replica(source, target, collection_name, num_replicas, using=\"default\")\n    print(f\"Succeeded in moving {num_node} replica(s) of {collection_name} from {source} to {target}.\")\nexcept Exception:\n    
print(\"Something went wrong while moving replicas.\")\n\n# Succeeded in moving 1 replica(s) of c from __default_resource_group to rg.\n","try:\n    utility.update_resource_groups({\n        \"rg\": utility.ResourceGroupConfig(\n            requests={\"node_num\": 0},\n            limits={\"node_num\": 0},\n        ),\n    }, using=\"default\")\n    utility.drop_resource_group(\"rg\", using=\"default\")\n    print(f\"Succeeded in dropping {source}.\")\nexcept Exception:\n    print(f\"Something went wrong while dropping {source}.\")\n","from pymilvus import utility\nfrom pymilvus.client.types import ResourceGroupConfig\n\n_PENDING_NODES_RESOURCE_GROUP=\"__pending_nodes\"\n\ndef init_cluster(node_num: int):\n    print(f\"Init cluster with {node_num} nodes, all nodes will be put in default resource group\")\n    # create a pending resource group, which can used to hold the pending nodes that do not hold any data.\n    utility.create_resource_group(name=_PENDING_NODES_RESOURCE_GROUP, config=ResourceGroupConfig(\n        requests={\"node_num\": 0}, # this resource group can hold 0 nodes, no data will be load on it.\n        limits={\"node_num\": 10000}, # this resource group can hold at most 10000 nodes \n    ))\n\n    # update default resource group, which can used to hold the nodes that all initial node in it.\n    utility.update_resource_groups({\n        \"__default_resource_group\": ResourceGroupConfig(\n            requests={\"node_num\": node_num},\n            limits={\"node_num\": node_num},\n            transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover missing node from pending resource group at high priority.\n            transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover redundant node to pending resource group at low priority.\n        )})\n    utility.create_resource_group(name=\"rg1\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n    utility.create_resource_group(name=\"rg2\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n\ninit_cluster(1)\n","\ndef scale_to(node_num: int):\n    # scale the querynode number in Milvus into node_num.\n    pass\n","# scale rg1 into 3 nodes, rg2 into 1 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 3},\n        limits={\"node_num\": 3},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n    \"rg2\": ResourceGroupConfig(\n        requests={\"node_num\": 1},\n        limits={\"node_num\": 1},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\nscale_to(5)\n# rg1 has 3 nodes, rg2 has 1 node, __default_resource_group has 1 node.\n","# scale rg1 from 3 nodes into 2 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 2},\n        limits={\"node_num\": 2},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    
    transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\n\n# rg1 has 2 nodes, rg2 has 1 node, __default_resource_group has 1 node, __pending_nodes has 1 node.\nscale_to(4)\n# scale the node in __pending_nodes\n"],"headingContent":"","anchorList":[{"label":"리소스 그룹 관리","href":"Manage-Resource-Groups","type":1,"isActive":false},{"label":"리소스 그룹이란?","href":"What-is-a-resource-group","type":2,"isActive":false},{"label":"리소스 그룹의 개념","href":"Concepts-of-resource-group","type":2,"isActive":false},{"label":"선언적 API를 사용하여 리소스 그룹 관리하기","href":"Use-declarative-api-to-manage-resource-group","type":2,"isActive":false},{"label":"클러스터 확장을 관리하는 좋은 방법","href":"A-good-practice-to-manage-cluster-scaling","type":2,"isActive":false},{"label":"리소스 그룹이 여러 복제본과 상호 작용하는 방식","href":"How-resource-groups-interacts-with-multiple-replicas","type":2,"isActive":false},{"label":"다음 단계","href":"Whats-next","type":1,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":["{\n    \"requests\": { \"nodeNum\": 1 },\n    \"limits\": { \"nodeNum\": 1 },\n    \"transfer_from\": [{ \"resource_group\": \"rg1\" }],\n    \"transfer_to\": [{ \"resource_group\": \"rg2\" }]\n}\n","import pymilvus\n\n# A resource group name should be a string of 1 to 255 characters, starting with a letter or an underscore (_) and containing only numbers, letters, and underscores (_).\nname = \"rg\"\nnode_num = 0\n\n# create a resource group that exactly hold no query node.\ntry:\n    utility.create_resource_group(name, config=utility.ResourceGroupConfig(\n        requests={\"node_num\": node_num},\n        limits={\"node_num\": node_num},\n    ), using='default')\n    print(f\"Succeeded in creating resource group {name}.\")\nexcept Exception:\n    print(\"Failed to create the resource group.\")\n","rgs = utility.list_resource_groups(using='default')\nprint(f\"Resource group list: {rgs}\")\n\n# Resource group list: ['__default_resource_group', 'rg']\n","info = utility.describe_resource_group(name, using=\"default\")\nprint(f\"Resource group description: {info}\")\n\n# Resource group description: \n#        ,           // string, rg name\n#        ,            // int, num_node which has been transfer to this rg\n#        ,  // int, available node_num, some node may shutdown\n#        , // map[string]int, from collection_name to loaded replica of each collecion in this rg\n#        ,  // map[string]int, from collection_name to outgoging accessed node num by replica loaded in this rg \n#        .  // map[string]int, from collection_name to incoming accessed node num by replica loaded in other rg\n","source = '__default_resource_group'\ntarget = 'rg'\nexpected_num_nodes_in_default = 0\nexpected_num_nodes_in_rg = 1\n\ntry:\n    utility.update_resource_groups({\n        source: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_default},\n            limits={\"node_num\": expected_num_nodes_in_default},\n        ),\n        target: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_rg},\n            limits={\"node_num\": expected_num_nodes_in_rg},\n        )\n    }, using=\"default\")\n    print(f\"Succeeded in move 1 node(s) from {source} to {target}.\")\nexcept Exception:\n    print(\"Something went wrong while moving nodes.\")\n\n# After a while, succeeded in moving 1 node(s) from __default_resource_group to rg.\n","from pymilvus import Collection\n\ncollection = Collection('demo')\n\n# Milvus loads the collection to the default resource group.\ncollection.load(replica_number=2)\n\n# Or, you can ask Milvus load the collection to the desired resource group.\n# make sure that query nodes num should be greater or equal to replica_number\nresource_groups = ['rg']\ncollection.load(replica_number=2, _resource_groups=resource_groups) \n","collection = Collection(\"Books\")\n\n# Use the load method of a collection to load one of its partition\ncollection.load([\"Novels\"], replica_number=2, _resource_groups=resource_groups)\n\n# Or, you can use the load method of a partition directly\npartition = Partition(collection, \"Novels\")\npartition.load(replica_number=2, _resource_groups=resource_groups)\n","source = '__default_resource_group'\ntarget = 'rg'\ncollection_name = 'c'\nnum_replicas = 1\n\ntry:\n    utility.transfer_replica(source, target, collection_name, num_replicas, using=\"default\")\n    print(f\"Succeeded in moving {num_node} replica(s) of {collection_name} from {source} to {target}.\")\nexcept Exception:\n    
print(\"Something went wrong while moving replicas.\")\n\n# Succeeded in moving 1 replica(s) of c from __default_resource_group to rg.\n","try:\n    utility.update_resource_groups({\n        \"rg\": utility.ResourceGroupConfig(\n            requests={\"node_num\": 0},\n            limits={\"node_num\": 0},\n        ),\n    }, using=\"default\")\n    utility.drop_resource_group(\"rg\", using=\"default\")\n    print(f\"Succeeded in dropping {source}.\")\nexcept Exception:\n    print(f\"Something went wrong while dropping {source}.\")\n","from pymilvus import utility\nfrom pymilvus.client.types import ResourceGroupConfig\n\n_PENDING_NODES_RESOURCE_GROUP=\"__pending_nodes\"\n\ndef init_cluster(node_num: int):\n    print(f\"Init cluster with {node_num} nodes, all nodes will be put in default resource group\")\n    # create a pending resource group, which can used to hold the pending nodes that do not hold any data.\n    utility.create_resource_group(name=_PENDING_NODES_RESOURCE_GROUP, config=ResourceGroupConfig(\n        requests={\"node_num\": 0}, # this resource group can hold 0 nodes, no data will be load on it.\n        limits={\"node_num\": 10000}, # this resource group can hold at most 10000 nodes \n    ))\n\n    # update default resource group, which can used to hold the nodes that all initial node in it.\n    utility.update_resource_groups({\n        \"__default_resource_group\": ResourceGroupConfig(\n            requests={\"node_num\": node_num},\n            limits={\"node_num\": node_num},\n            transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover missing node from pending resource group at high priority.\n            transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover redundant node to pending resource group at low priority.\n        )})\n    utility.create_resource_group(name=\"rg1\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n    utility.create_resource_group(name=\"rg2\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n\ninit_cluster(1)\n","\ndef scale_to(node_num: int):\n    # scale the querynode number in Milvus into node_num.\n    pass\n","# scale rg1 into 3 nodes, rg2 into 1 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 3},\n        limits={\"node_num\": 3},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n    \"rg2\": ResourceGroupConfig(\n        requests={\"node_num\": 1},\n        limits={\"node_num\": 1},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\nscale_to(5)\n# rg1 has 3 nodes, rg2 has 1 node, __default_resource_group has 1 node.\n","# scale rg1 from 3 nodes into 2 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 2},\n        limits={\"node_num\": 2},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    
    transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\n\n# rg1 has 2 nodes, rg2 has 1 node, __default_resource_group has 1 node, __pending_nodes has 1 node.\nscale_to(4)\n# scale the node in __pending_nodes\n"],"headingContent":"Manage Resource Groups","anchorList":[{"label":"리소스 그룹 관리","href":"Manage-Resource-Groups","type":1,"isActive":false},{"label":"리소스 그룹이란?","href":"What-is-a-resource-group","type":2,"isActive":false},{"label":"리소스 그룹의 개념","href":"Concepts-of-resource-group","type":2,"isActive":false},{"label":"선언적 API를 사용하여 리소스 그룹 관리하기","href":"Use-declarative-api-to-manage-resource-group","type":2,"isActive":false},{"label":"클러스터 확장을 관리하는 좋은 방법","href":"A-good-practice-to-manage-cluster-scaling","type":2,"isActive":false},{"label":"리소스 그룹이 여러 복제본과 상호 작용하는 방식","href":"How-resource-groups-interacts-with-multiple-replicas","type":2,"isActive":false},{"label":"다음 단계","href":"Whats-next","type":1,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/ko/adminGuide/resource_group.md b/localization/v2.4.x/site/ko/adminGuide/resource_group.md
      index f8a6fd201..8f0b1061d 100644
      --- a/localization/v2.4.x/site/ko/adminGuide/resource_group.md
      +++ b/localization/v2.4.x/site/ko/adminGuide/resource_group.md
      @@ -90,7 +90,7 @@ title: 리소스 그룹 관리
      -

      All code samples on this page use PyMilvus 2.4.5. Upgrade your PyMilvus installation before running them.

      +

      All code samples on this page use PyMilvus 2.4.8. Upgrade your PyMilvus installation before running them.

      1. Create a resource group, as sketched below.
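
        The create step itself appears verbatim in the resource_group.json payload above; here it is reproduced as a runnable sketch (the connection URI is an assumption).

        from pymilvus import connections, utility
        from pymilvus.client.types import ResourceGroupConfig

        connections.connect(uri="http://localhost:19530")  # assumed local instance

        # Create a resource group that holds exactly zero query nodes.
        name = "rg"
        node_num = 0
        try:
            utility.create_resource_group(name, config=ResourceGroupConfig(
                requests={"node_num": node_num},
                limits={"node_num": node_num},
            ), using="default")
            print(f"Succeeded in creating resource group {name}.")
        except Exception:
            print("Failed to create the resource group.")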

        diff --git a/localization/v2.4.x/site/ko/adminGuide/tls.json b/localization/v2.4.x/site/ko/adminGuide/tls.json index 119f31c9f..97e1a579b 100644 --- a/localization/v2.4.x/site/ko/adminGuide/tls.json +++ b/localization/v2.4.x/site/ko/adminGuide/tls.json @@ -1 +1 @@ -{"codeList":["openssl version\n","sudo apt install openssl\n","mkdir cert && cd cert\ntouch openssl.cnf gen.sh\n","#\n# OpenSSL example configuration file.\n# This is mostly being used for generation of certificate requests.\n#\n\n# This definition stops the following lines choking if HOME isn't\n# defined.\nHOME = .\nRANDFILE = $ENV::HOME/.rnd\n\n# Extra OBJECT IDENTIFIER info:\n#oid_file = $ENV::HOME/.oid\noid_section = new_oids\n\n# To use this configuration file with the \"-extfile\" option of the\n# \"openssl x509\" utility, name here the section containing the\n# X.509v3 extensions to use:\n# extensions = \n# (Alternatively, use a configuration file that has only\n# X.509v3 extensions in its main [= default] section.)\n\n[ new_oids ]\n\n# We can add new OIDs in here for use by 'ca', 'req' and 'ts'.\n# Add a simple OID like this:\n# testoid1=1.2.3.4\n# Or use config file substitution like this:\n# testoid2=${testoid1}.5.6\n\n# Policies used by the TSA examples.\ntsa_policy1 = 1.2.3.4.1\ntsa_policy2 = 1.2.3.4.5.6\ntsa_policy3 = 1.2.3.4.5.7\n\n####################################################################\n[ ca ]\ndefault_ca = CA_default # The default ca section\n\n####################################################################\n[ CA_default ]\n\ndir = ./demoCA # Where everything is kept\ncerts = $dir/certs # Where the issued certs are kept\ncrl_dir = $dir/crl # Where the issued crl are kept\ndatabase = $dir/index.txt # database index file.\n#unique_subject = no # Set to 'no' to allow creation of\n # several ctificates with same subject.\nnew_certs_dir = $dir/newcerts # default place for new certs.\n\ncertificate = $dir/cacert.pem # The CA certificate\nserial = $dir/serial # The current serial number\ncrlnumber = $dir/crlnumber # the current crl number\n # must be commented out to leave a V1 CRL\ncrl = $dir/crl.pem # The current CRL\nprivate_key = $dir/private/cakey.pem# The private key\nRANDFILE = $dir/private/.rand # private random number file\n\nx509_extensions = usr_cert # The extentions to add to the cert\n\n# Comment out the following two lines for the \"traditional\"\n# (and highly broken) format.\nname_opt = ca_default # Subject Name options\ncert_opt = ca_default # Certificate field options\n\n# Extension copying option: use with caution.\ncopy_extensions = copy\n\n# Extensions to add to a CRL. 
Note: Netscape communicator chokes on V2 CRLs\n# so this is commented out by default to leave a V1 CRL.\n# crlnumber must also be commented out to leave a V1 CRL.\n# crl_extensions = crl_ext\n\ndefault_days = 365 # how long to certify for\ndefault_crl_days= 30 # how long before next CRL\ndefault_md = default # use public key default MD\npreserve = no # keep passed DN ordering\n\n# A few difference way of specifying how similar the request should look\n# For type CA, the listed attributes must be the same, and the optional\n# and supplied fields are just that :-)\npolicy = policy_match\n\n# For the CA policy\n[ policy_match ]\ncountryName = match\nstateOrProvinceName = match\norganizationName = match\norganizationalUnitName = optional\ncommonName = supplied\nemailAddress = optional\n\n# For the 'anything' policy\n# At this point in time, you must list all acceptable 'object'\n# types.\n[ policy_anything ]\ncountryName = optional\nstateOrProvinceName = optional\nlocalityName = optional\norganizationName = optional\norganizationalUnitName = optional\ncommonName = supplied\nemailAddress = optional\n\n####################################################################\n[ req ]\ndefault_bits = 2048\ndefault_keyfile = privkey.pem\ndistinguished_name = req_distinguished_name\nattributes = req_attributes\nx509_extensions = v3_ca # The extentions to add to the self signed cert\n\n# Passwords for private keys if not present they will be prompted for\n# input_password = secret\n# output_password = secret\n\n# This sets a mask for permitted string types. There are several options. \n# default: PrintableString, T61String, BMPString.\n# pkix : PrintableString, BMPString (PKIX recommendation before 2004)\n# utf8only: only UTF8Strings (PKIX recommendation after 2004).\n# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).\n# MASK:XXXX a literal mask value.\n# WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings.\nstring_mask = utf8only\n\nreq_extensions = v3_req # The extensions to add to a certificate request\n\n[ req_distinguished_name ]\ncountryName = Country Name (2 letter code)\ncountryName_default = AU\ncountryName_min = 2\ncountryName_max = 2\n\nstateOrProvinceName = State or Province Name (full name)\nstateOrProvinceName_default = Some-State\n\nlocalityName = Locality Name (eg, city)\n\n0.organizationName = Organization Name (eg, company)\n0.organizationName_default = Internet Widgits Pty Ltd\n\n# we can do this but it is not needed normally :-)\n#1.organizationName = Second Organization Name (eg, company)\n#1.organizationName_default = World Wide Web Pty Ltd\n\norganizationalUnitName = Organizational Unit Name (eg, section)\n#organizationalUnitName_default =\n\ncommonName = Common Name (e.g. server FQDN or YOUR name)\ncommonName_max = 64\n\nemailAddress = Email Address\nemailAddress_max = 64\n\n# SET-ex3 = SET extension number 3\n\n[ req_attributes ]\nchallengePassword = A challenge password\nchallengePassword_min = 4\nchallengePassword_max = 20\n\nunstructuredName = An optional company name\n\n[ usr_cert ]\n\n# These extensions are added when 'ca' signs a request.\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. 
If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This is required for TSA certificates.\n# extendedKeyUsage = critical,timeStamping\n\n[ v3_req ]\n\n# Extensions to add to a certificate request\n\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n\n[ v3_ca ]\n\n\n# Extensions for a typical CA\n\n\n# PKIX recommendation.\n\nsubjectKeyIdentifier=hash\n\nauthorityKeyIdentifier=keyid:always,issuer\n\n# This is what PKIX recommends but some broken software chokes on critical\n# extensions.\n#basicConstraints = critical,CA:true\n# So we do this instead.\nbasicConstraints = CA:true\n\n# Key usage: this is typical for a CA certificate. However since it will\n# prevent it being used as an test self-signed certificate it is best\n# left out by default.\n# keyUsage = cRLSign, keyCertSign\n\n# Some might want this also\n# nsCertType = sslCA, emailCA\n\n# Include email address in subject alt name: another PKIX recommendation\n# subjectAltName=email:copy\n# Copy issuer details\n# issuerAltName=issuer:copy\n\n# DER hex encoding of an extension: beware experts only!\n# obj=DER:02:03\n# Where 'obj' is a standard or added object\n# You can even override a supported extension:\n# basicConstraints= critical, DER:30:03:01:01:FF\n\n[ crl_ext ]\n\n# CRL extensions.\n# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.\n\n# issuerAltName=issuer:copy\nauthorityKeyIdentifier=keyid:always\n\n[ proxy_cert_ext ]\n# These extensions should be added when creating a proxy certificate\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. 
If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This really needs to be in place for it to be a proxy certificate.\nproxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo\n\n####################################################################\n[ tsa ]\n\ndefault_tsa = tsa_config1 # the default TSA section\n\n[ tsa_config1 ]\n\n# These are used by the TSA reply generation only.\ndir = ./demoCA # TSA root directory\nserial = $dir/tsaserial # The current serial number (mandatory)\ncrypto_device = builtin # OpenSSL engine to use for signing\nsigner_cert = $dir/tsacert.pem # The TSA signing certificate\n # (optional)\ncerts = $dir/cacert.pem # Certificate chain to include in reply\n # (optional)\nsigner_key = $dir/private/tsakey.pem # The TSA private key (optional)\n\ndefault_policy = tsa_policy1 # Policy if request did not specify it\n # (optional)\nother_policies = tsa_policy2, tsa_policy3 # acceptable policies (optional)\ndigests = md5, sha1 # Acceptable message digests (mandatory)\naccuracy = secs:1, millisecs:500, microsecs:100 # (optional)\nclock_precision_digits = 0 # number of digits after dot. 
(optional)\nordering = yes # Is ordering defined for timestamps?\n # (optional, default: no)\ntsa_name = yes # Must the TSA name be included in the reply?\n # (optional, default: no)\ness_cert_id_chain = no # Must the ESS cert id chain be included?\n # (optional, default: no)\n","#!/usr/bin/env sh\n# your variables\nCountry=\"CN\"\nState=\"Shanghai\"\nLocation=\"Shanghai\"\nOrganization=\"milvus\"\nOrganizational=\"milvus\"\nCommonName=\"localhost\"\n\necho \"generate ca.key\"\nopenssl genrsa -out ca.key 2048\n\necho \"generate ca.pem\"\nopenssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n\necho \"generate server SAN certificate\"\nopenssl genpkey -algorithm RSA -out server.key\nopenssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\necho \"generate client SAN certificate\"\nopenssl genpkey -algorithm RSA -out client.key\nopenssl req -new -nodes -key client.key -out client.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in client.csr -out client.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\n","chmod +x gen.sh\n./gen.sh\n","openssl genpkey -algorithm RSA -out ca.key\n","openssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n","openssl genpkey -algorithm RSA -out server.key\n","openssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\n","openssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n","tls:\n serverPemPath: /milvus/tls/server.pem\n serverKeyPath: /milvus/tls/server.key\n caPemPath: /milvus/tls/ca.pem\n\ncommon:\n security:\n tlsMode: 1\n","├── docker-compose.yml\n├── milvus.yaml\n└── tls\n ├── server.pem\n ├── server.key\n └── ca.pem\n"," standalone:\n container_name: milvus-standalone\n image: milvusdb/milvus:latest\n command: [\"milvus\", \"run\", \"standalone\"]\n security_opt:\n - seccomp:unconfined\n environment:\n ETCD_ENDPOINTS: etcd:2379\n MINIO_ADDRESS: minio:9000\n volumes:\n - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n - ${DOCKER_VOLUME_DIRECTORY:-.}/tls:/milvus/tls\n - ${DOCKER_VOLUME_DIRECTORY:-.}/milvus.yaml:/milvus/configs/milvus.yaml\n","sudo docker compose up -d\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri=\"http://localhost:19530\",\n secure=True,\n server_pem_path=\"path_to/server.pem\",\n server_name=\"localhost\"\n)\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri=\"http://localhost:19530\",\n secure=True,\n client_pem_path=\"path_to/client.pem\",\n client_key_path=\"path_to/client.key\",\n ca_pem_path=\"path_to/ca.pem\",\n server_name=\"localhost\"\n)\n"],"headingContent":"","anchorList":[{"label":"전송 중 암호화","href":"Encryption-in-Transit","type":1,"isActive":false},{"label":"나만의 인증서 만들기","href":"Create-your-own-certificate","type":2,"isActive":false},{"label":"TLS로 Milvus 서버 
설정하기","href":"Set-up-a-Milvus-server-with-TLS","type":2,"isActive":false},{"label":"TLS로 Milvus 서버에 연결하기","href":"Connect-to-the-Milvus-server-with-TLS","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["openssl version\n","sudo apt install openssl\n","mkdir cert && cd cert\ntouch openssl.cnf gen.sh\n","#\n# OpenSSL example configuration file.\n# This is mostly being used for generation of certificate requests.\n#\n\n# This definition stops the following lines choking if HOME isn't\n# defined.\nHOME = .\nRANDFILE = $ENV::HOME/.rnd\n\n# Extra OBJECT IDENTIFIER info:\n#oid_file = $ENV::HOME/.oid\noid_section = new_oids\n\n# To use this configuration file with the \"-extfile\" option of the\n# \"openssl x509\" utility, name here the section containing the\n# X.509v3 extensions to use:\n# extensions = \n# (Alternatively, use a configuration file that has only\n# X.509v3 extensions in its main [= default] section.)\n\n[ new_oids ]\n\n# We can add new OIDs in here for use by 'ca', 'req' and 'ts'.\n# Add a simple OID like this:\n# testoid1=1.2.3.4\n# Or use config file substitution like this:\n# testoid2=${testoid1}.5.6\n\n# Policies used by the TSA examples.\ntsa_policy1 = 1.2.3.4.1\ntsa_policy2 = 1.2.3.4.5.6\ntsa_policy3 = 1.2.3.4.5.7\n\n####################################################################\n[ ca ]\ndefault_ca = CA_default # The default ca section\n\n####################################################################\n[ CA_default ]\n\ndir = ./demoCA # Where everything is kept\ncerts = $dir/certs # Where the issued certs are kept\ncrl_dir = $dir/crl # Where the issued crl are kept\ndatabase = $dir/index.txt # database index file.\n#unique_subject = no # Set to 'no' to allow creation of\n # several ctificates with same subject.\nnew_certs_dir = $dir/newcerts # default place for new certs.\n\ncertificate = $dir/cacert.pem # The CA certificate\nserial = $dir/serial # The current serial number\ncrlnumber = $dir/crlnumber # the current crl number\n # must be commented out to leave a V1 CRL\ncrl = $dir/crl.pem # The current CRL\nprivate_key = $dir/private/cakey.pem# The private key\nRANDFILE = $dir/private/.rand # private random number file\n\nx509_extensions = usr_cert # The extentions to add to the cert\n\n# Comment out the following two lines for the \"traditional\"\n# (and highly broken) format.\nname_opt = ca_default # Subject Name options\ncert_opt = ca_default # Certificate field options\n\n# Extension copying option: use with caution.\ncopy_extensions = copy\n\n# Extensions to add to a CRL. 
Note: Netscape communicator chokes on V2 CRLs\n# so this is commented out by default to leave a V1 CRL.\n# crlnumber must also be commented out to leave a V1 CRL.\n# crl_extensions = crl_ext\n\ndefault_days = 365 # how long to certify for\ndefault_crl_days= 30 # how long before next CRL\ndefault_md = default # use public key default MD\npreserve = no # keep passed DN ordering\n\n# A few difference way of specifying how similar the request should look\n# For type CA, the listed attributes must be the same, and the optional\n# and supplied fields are just that :-)\npolicy = policy_match\n\n# For the CA policy\n[ policy_match ]\ncountryName = match\nstateOrProvinceName = match\norganizationName = match\norganizationalUnitName = optional\ncommonName = supplied\nemailAddress = optional\n\n# For the 'anything' policy\n# At this point in time, you must list all acceptable 'object'\n# types.\n[ policy_anything ]\ncountryName = optional\nstateOrProvinceName = optional\nlocalityName = optional\norganizationName = optional\norganizationalUnitName = optional\ncommonName = supplied\nemailAddress = optional\n\n####################################################################\n[ req ]\ndefault_bits = 2048\ndefault_keyfile = privkey.pem\ndistinguished_name = req_distinguished_name\nattributes = req_attributes\nx509_extensions = v3_ca # The extentions to add to the self signed cert\n\n# Passwords for private keys if not present they will be prompted for\n# input_password = secret\n# output_password = secret\n\n# This sets a mask for permitted string types. There are several options. \n# default: PrintableString, T61String, BMPString.\n# pkix : PrintableString, BMPString (PKIX recommendation before 2004)\n# utf8only: only UTF8Strings (PKIX recommendation after 2004).\n# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).\n# MASK:XXXX a literal mask value.\n# WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings.\nstring_mask = utf8only\n\nreq_extensions = v3_req # The extensions to add to a certificate request\n\n[ req_distinguished_name ]\ncountryName = Country Name (2 letter code)\ncountryName_default = AU\ncountryName_min = 2\ncountryName_max = 2\n\nstateOrProvinceName = State or Province Name (full name)\nstateOrProvinceName_default = Some-State\n\nlocalityName = Locality Name (eg, city)\n\n0.organizationName = Organization Name (eg, company)\n0.organizationName_default = Internet Widgits Pty Ltd\n\n# we can do this but it is not needed normally :-)\n#1.organizationName = Second Organization Name (eg, company)\n#1.organizationName_default = World Wide Web Pty Ltd\n\norganizationalUnitName = Organizational Unit Name (eg, section)\n#organizationalUnitName_default =\n\ncommonName = Common Name (e.g. server FQDN or YOUR name)\ncommonName_max = 64\n\nemailAddress = Email Address\nemailAddress_max = 64\n\n# SET-ex3 = SET extension number 3\n\n[ req_attributes ]\nchallengePassword = A challenge password\nchallengePassword_min = 4\nchallengePassword_max = 20\n\nunstructuredName = An optional company name\n\n[ usr_cert ]\n\n# These extensions are added when 'ca' signs a request.\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. 
If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This is required for TSA certificates.\n# extendedKeyUsage = critical,timeStamping\n\n[ v3_req ]\n\n# Extensions to add to a certificate request\n\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n\n[ v3_ca ]\n\n\n# Extensions for a typical CA\n\n\n# PKIX recommendation.\n\nsubjectKeyIdentifier=hash\n\nauthorityKeyIdentifier=keyid:always,issuer\n\n# This is what PKIX recommends but some broken software chokes on critical\n# extensions.\n#basicConstraints = critical,CA:true\n# So we do this instead.\nbasicConstraints = CA:true\n\n# Key usage: this is typical for a CA certificate. However since it will\n# prevent it being used as an test self-signed certificate it is best\n# left out by default.\n# keyUsage = cRLSign, keyCertSign\n\n# Some might want this also\n# nsCertType = sslCA, emailCA\n\n# Include email address in subject alt name: another PKIX recommendation\n# subjectAltName=email:copy\n# Copy issuer details\n# issuerAltName=issuer:copy\n\n# DER hex encoding of an extension: beware experts only!\n# obj=DER:02:03\n# Where 'obj' is a standard or added object\n# You can even override a supported extension:\n# basicConstraints= critical, DER:30:03:01:01:FF\n\n[ crl_ext ]\n\n# CRL extensions.\n# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.\n\n# issuerAltName=issuer:copy\nauthorityKeyIdentifier=keyid:always\n\n[ proxy_cert_ext ]\n# These extensions should be added when creating a proxy certificate\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. 
If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This really needs to be in place for it to be a proxy certificate.\nproxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo\n\n####################################################################\n[ tsa ]\n\ndefault_tsa = tsa_config1 # the default TSA section\n\n[ tsa_config1 ]\n\n# These are used by the TSA reply generation only.\ndir = ./demoCA # TSA root directory\nserial = $dir/tsaserial # The current serial number (mandatory)\ncrypto_device = builtin # OpenSSL engine to use for signing\nsigner_cert = $dir/tsacert.pem # The TSA signing certificate\n # (optional)\ncerts = $dir/cacert.pem # Certificate chain to include in reply\n # (optional)\nsigner_key = $dir/private/tsakey.pem # The TSA private key (optional)\n\ndefault_policy = tsa_policy1 # Policy if request did not specify it\n # (optional)\nother_policies = tsa_policy2, tsa_policy3 # acceptable policies (optional)\ndigests = md5, sha1 # Acceptable message digests (mandatory)\naccuracy = secs:1, millisecs:500, microsecs:100 # (optional)\nclock_precision_digits = 0 # number of digits after dot. 
(optional)\nordering = yes # Is ordering defined for timestamps?\n # (optional, default: no)\ntsa_name = yes # Must the TSA name be included in the reply?\n # (optional, default: no)\ness_cert_id_chain = no # Must the ESS cert id chain be included?\n # (optional, default: no)\n","#!/usr/bin/env sh\n# your variables\nCountry=\"CN\"\nState=\"Shanghai\"\nLocation=\"Shanghai\"\nOrganization=\"milvus\"\nOrganizational=\"milvus\"\nCommonName=\"localhost\"\n\necho \"generate ca.key\"\nopenssl genrsa -out ca.key 2048\n\necho \"generate ca.pem\"\nopenssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n\necho \"generate server SAN certificate\"\nopenssl genpkey -algorithm RSA -out server.key\nopenssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\necho \"generate client SAN certificate\"\nopenssl genpkey -algorithm RSA -out client.key\nopenssl req -new -nodes -key client.key -out client.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in client.csr -out client.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\n","chmod +x gen.sh\n./gen.sh\n","openssl genpkey -algorithm RSA -out ca.key\n","openssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n","openssl genpkey -algorithm RSA -out server.key\n","openssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\n","openssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n","tls:\n serverPemPath: /milvus/tls/server.pem\n serverKeyPath: /milvus/tls/server.key\n caPemPath: /milvus/tls/ca.pem\n\ncommon:\n security:\n tlsMode: 1\n","├── docker-compose.yml\n├── milvus.yaml\n└── tls\n ├── server.pem\n ├── server.key\n └── ca.pem\n"," standalone:\n container_name: milvus-standalone\n image: milvusdb/milvus:latest\n command: [\"milvus\", \"run\", \"standalone\"]\n security_opt:\n - seccomp:unconfined\n environment:\n ETCD_ENDPOINTS: etcd:2379\n MINIO_ADDRESS: minio:9000\n volumes:\n - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n - ${DOCKER_VOLUME_DIRECTORY:-.}/tls:/milvus/tls\n - ${DOCKER_VOLUME_DIRECTORY:-.}/milvus.yaml:/milvus/configs/milvus.yaml\n","sudo docker compose up -d\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri=\"https://localhost:19530\",\n secure=True,\n server_pem_path=\"path_to/server.pem\",\n server_name=\"localhost\"\n)\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri=\"https://localhost:19530\",\n secure=True,\n client_pem_path=\"path_to/client.pem\",\n client_key_path=\"path_to/client.key\",\n ca_pem_path=\"path_to/ca.pem\",\n server_name=\"localhost\"\n)\n","curl --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list\n","curl --cert path_to/client.pem --key path_to/client.key --cacert path_to/ca.pem 
https://localhost:19530/v2/vectordb/collections/list\n"],"headingContent":"Encryption in Transit","anchorList":[{"label":"전송 중 암호화","href":"Encryption-in-Transit","type":1,"isActive":false},{"label":"나만의 인증서 만들기","href":"Create-your-own-certificate","type":2,"isActive":false},{"label":"TLS로 Milvus 서버 설정하기","href":"Set-up-a-Milvus-server-with-TLS","type":2,"isActive":false},{"label":"TLS로 Milvus 서버에 연결하기","href":"Connect-to-the-Milvus-server-with-TLS","type":2,"isActive":false},{"label":"TLS를 사용하여 Milvus RESTful 서버에 연결하기","href":"Connect-to-the-Milvus-RESTful-server-with-TLS","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/adminGuide/tls.md b/localization/v2.4.x/site/ko/adminGuide/tls.md index 2c250cf87..2d710abc1 100644 --- a/localization/v2.4.x/site/ko/adminGuide/tls.md +++ b/localization/v2.4.x/site/ko/adminGuide/tls.md @@ -19,9 +19,9 @@ summary: Milvus에서 TLS 프록시를 활성화하는 방법을 알아보세요 >

        TLS (Transport Layer Security) is an encryption protocol that secures communications. The Milvus proxy supports both one-way and two-way (mutual) TLS authentication.

        -   This topic describes how to enable TLS proxy in Milvus.

        +   This topic describes how to enable TLS for both gRPC and RESTful traffic on the Milvus proxy.
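        For orientation, TLS on the proxy is switched through common.security.tlsMode in milvus.yaml. A minimal sketch, assuming the certificates are mounted at /milvus/tls as in the Docker Compose example later in this guide (0 disables TLS, 1 enables one-way TLS, 2 enables two-way TLS):

        # Minimal sketch: append the TLS settings to milvus.yaml.
        # The /milvus/tls paths are an assumption matching the compose volume mount below.
        cat >> milvus.yaml <<'EOF'
        tls:
          serverPemPath: /milvus/tls/server.pem
          serverKeyPath: /milvus/tls/server.key
          caPemPath: /milvus/tls/ca.pem

        common:
          security:
            tlsMode: 1   # 0: TLS off, 1: one-way TLS, 2: two-way (mutual) TLS
        EOF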

        -   TLS and user authentication are two distinct security approaches. If you have activated both user authentication and TLS in your Milvus system, you need to provide a username, a password, and certificate file paths. For information on how to enable user authentication, refer to user access authentication.

        +   TLS and user authentication are two distinct security approaches. If you have enabled both user authentication and TLS in your Milvus system, you need to provide a username, a password, and certificate file paths. For information on how to enable user authentication, refer to user access authentication.
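        As an example of combining the two, a RESTful call can carry the CA certificate for TLS plus an Authorization header for authentication; a hedged sketch, assuming the default root:Milvus credential pair:

        # Sketch: one-way TLS plus user authentication over the RESTful API.
        # "root:Milvus" is the default credential pair and an assumption here.
        curl --cacert path_to/ca.pem \
             --header "Authorization: Bearer root:Milvus" \
             https://localhost:19530/v2/vectordb/collections/list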

        Create your own certificate

      For more information, see example_tls1.py and example_tls2.py.

      +   Connect to the Milvus RESTful server with TLS

      For RESTful APIs, you can verify TLS using the curl command.

      +   One-way TLS connection

      curl --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list
      +   Two-way TLS connection

      curl --cert path_to/client.pem --key path_to/client.key --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list
      +
      diff --git a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-docker.json b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-docker.json index 7c6b353f2..c296e8740 100644 --- a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-docker.json +++ b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-docker.json @@ -1 +1 @@ -{"codeList":["...\nrootcoord:\n container_name: milvus-rootcoord\n image: milvusdb/milvus:v2.4.9\n...\nproxy:\n container_name: milvus-proxy\n image: milvusdb/milvus:v2.4.9\n...\nquerycoord:\n container_name: milvus-querycoord\n image: milvusdb/milvus:v2.4.9 \n...\nquerynode:\n container_name: milvus-querynode\n image: milvusdb/milvus:v2.4.9\n...\nindexcoord:\n container_name: milvus-indexcoord\n image: milvusdb/milvus:v2.4.9\n...\nindexnode:\n container_name: milvus-indexnode\n image: milvusdb/milvus:v2.4.9 \n...\ndatacoord:\n container_name: milvus-datacoord\n image: milvusdb/milvus:v2.4.9 \n...\ndatanode:\n container_name: milvus-datanode\n image: milvusdb/milvus:v2.4.9\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n # Option: run/backup/rollback\n type: run\n runWithBackup: true\nconfig:\n sourceVersion: 2.1.4 # Specify your milvus version\n targetVersion: 2.4.9\n backupFilePath: /tmp/migration.bak\nmetastore:\n type: etcd\netcd:\n endpoints:\n - milvus-etcd:2379 # Use the etcd container name\n rootPath: by-dev # The root path where data is stored in etcd\n metaSubPath: meta\n kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvus/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","Update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"","anchorList":[{"label":"도커 컴포즈로 밀버스 클러스터 업그레이드하기","href":"Upgrade-Milvus-Cluster-with-Docker-Compose","type":1,"isActive":false},{"label":"이미지를 변경하여 Milvus 업그레이드하기","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"메타데이터 마이그레이션","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"다음 단계","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["...\nrootcoord:\n container_name: milvus-rootcoord\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nproxy:\n container_name: milvus-proxy\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nquerycoord:\n container_name: milvus-querycoord\n image: milvusdb/milvus:v2.4.13-hotfix \n...\nquerynode:\n container_name: milvus-querynode\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nindexcoord:\n container_name: milvus-indexcoord\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nindexnode:\n container_name: milvus-indexnode\n image: milvusdb/milvus:v2.4.13-hotfix \n...\ndatacoord:\n container_name: milvus-datacoord\n image: milvusdb/milvus:v2.4.13-hotfix \n...\ndatanode:\n container_name: milvus-datanode\n image: milvusdb/milvus:v2.4.13-hotfix\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n # Option: run/backup/rollback\n type: run\n runWithBackup: true\nconfig:\n sourceVersion: 2.1.4 # Specify your milvus version\n targetVersion: 2.4.13-hotfix\n backupFilePath: /tmp/migration.bak\nmetastore:\n type: etcd\netcd:\n endpoints:\n - milvus-etcd:2379 # Use the etcd container name\n rootPath: 
by-dev # The root path where data is stored in etcd\n metaSubPath: meta\n kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvus/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","Update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"Upgrade Milvus Cluster with Docker Compose","anchorList":[{"label":"도커 컴포즈로 밀버스 클러스터 업그레이드하기","href":"Upgrade-Milvus-Cluster-with-Docker-Compose","type":1,"isActive":false},{"label":"이미지를 변경하여 Milvus 업그레이드하기","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"메타데이터 마이그레이션","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"다음 단계","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-docker.md b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-docker.md index 1a24c6f9d..8c3c67167 100644 --- a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-docker.md +++ b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-docker.md @@ -1,7 +1,7 @@ --- id: upgrade_milvus_cluster-docker.md summary: Docker Compose로 Milvus 클러스터를 업그레이드하는 방법을 알아보세요. -title: Docker Compose로 Milvus 클러스터 업그레이드하기 +title: 도커 컴포즈로 밀버스 클러스터 업그레이드하기 ---

      Upgrade Milvus Cluster with Docker Compose

      This topic describes how to upgrade your Milvus cluster using Docker Compose.

      -   In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before any upgrade from v2.1.x to v2.4.9.

      +   In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before any upgrade from v2.1.x to v2.4.13-hotfix.

      Upgrade Milvus by changing its image

  • Run the following commands to perform the upgrade.

    docker compose down
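    Note that the image tags in docker-compose.yaml must point at the new version before these commands run; one hedged way to bump every component tag at once (the old tag v2.4.9 and the file location are assumptions, adjust them to your file):

    # Sketch: rewrite the image tag for all Milvus components in docker-compose.yaml.
    sed -i 's|milvusdb/milvus:v2\.4\.9|milvusdb/milvus:v2.4.13-hotfix|g' docker-compose.yaml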
    @@ -105,7 +105,7 @@ cmd:
       runWithBackup: true
     config:
       sourceVersion: 2.1.4   # Specify your milvus version
    -  targetVersion: 2.4.9
    +  targetVersion: 2.4.13-hotfix
       backupFilePath: /tmp/migration.bak
     metastore:
       type: etcd
    diff --git a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-helm.json b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-helm.json
    index 5ee12a7ba..06d3a3984 100644
    --- a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-helm.json
    +++ b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-helm.json
    @@ -1 +1 @@
    -{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME                    CHART VERSION   APP VERSION             DESCRIPTION                                       \nzilliztech/milvus       4.1.34          2.4.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.33          2.4.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.32          2.4.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.31          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.30          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.29          2.4.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.24          2.3.11                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.23          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.22          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.21          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.20          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.18          2.3.10                  Milvus is an open-source vector database built ... \nzilliztech/milvus       4.1.18          2.3.9                   Milvus is an open-source vector database built ...                                       
\nzilliztech/milvus       4.1.17          2.3.8                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.16          2.3.7                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.15          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.14          2.3.6                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.13          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.12          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.11          2.3.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.10          2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.9           2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.8           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.7           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.6           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.5           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.4           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.3           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.2           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.1           2.3.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'\n","helm repo update\nhelm upgrade my-release zilliztech/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME                NAMESPACE   REVISION    UPDATED                                 STATUS      CHART           APP VERSION    \nnew-release         default     1           2022-11-21 15:41:25.51539 +0800 CST     deployed    milvus-3.2.18   2.1.4 \n","NAME                                             READY   STATUS      RESTARTS   AGE\nmy-release-etcd-0                               1/1     Running     0          21m\nmy-release-etcd-1                               1/1     Running     0          21m\nmy-release-etcd-2                               1/1     Running     0          21m\nmy-release-milvus-datacoord-664c58798d-fl75s    1/1     Running     0          21m\nmy-release-milvus-datanode-5f75686c55-xfg2r     1/1     Running     0          21m\nmy-release-milvus-indexcoord-5f98b97589-2l48r   1/1     Running     0          21m\nmy-release-milvus-indexnode-857b4ddf98-vmd75    1/1     Running     0          21m\nmy-release-milvus-proxy-6c548f787f-scspp        1/1     Running     0          21m\nmy-release-milvus-querycoord-c454f44cd-dwmwq    1/1     Running     0          21m\nmy-release-milvus-querynode-76bb4946d-lbrz6     1/1     Running     0          21m\nmy-release-milvus-rootcoord-7764c5b686-62msm    1/1     
Running     0          21m\nmy-release-minio-0                              1/1     Running     0          21m\nmy-release-minio-1                              1/1     Running     0          21m\nmy-release-minio-2                              1/1     Running     0          21m\nmy-release-minio-3                              1/1     Running     0          21m\nmy-release-pulsar-bookie-0                      1/1     Running     0          21m\nmy-release-pulsar-bookie-1                      1/1     Running     0          21m\nmy-release-pulsar-bookie-2                      1/1     Running     0          21m\nmy-release-pulsar-bookie-init-tjxpj             0/1     Completed   0          21m\nmy-release-pulsar-broker-0                      1/1     Running     0          21m\nmy-release-pulsar-proxy-0                       1/1     Running     0          21m\nmy-release-pulsar-pulsar-init-c8vvc             0/1     Completed   0          21m\nmy-release-pulsar-recovery-0                    1/1     Running     0          21m\nmy-release-pulsar-zookeeper-0                   1/1     Running     0          21m\nmy-release-pulsar-zookeeper-1                   1/1     Running     0          20m\nmy-release-pulsar-zookeeper-2                   1/1     Running     0          20m\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9\n"],"headingContent":"","anchorList":[{"label":"헬름 차트로 Milvus 클러스터 업그레이드하기","href":"Upgrade-Milvus-Cluster-with-Helm-Chart","type":1,"isActive":false},{"label":"밀버스 헬름 차트 확인","href":"Check-Milvus-Helm-Chart","type":2,"isActive":false},{"label":"롤링 업그레이드 수행","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"헬름을 사용하여 밀버스 업그레이드","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"메타데이터 마이그레이션","href":"Migrate-the-metadata","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME                    CHART VERSION   APP VERSION             DESCRIPTION                                       \nzilliztech/milvus       4.1.34          2.4.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.33          2.4.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.32          2.4.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.31          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.30          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.29          2.4.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.24          2.3.11                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.23          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.22          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.21          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.20          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.18          2.3.10                  Milvus is an open-source vector database built ... \nzilliztech/milvus       4.1.18          2.3.9                   Milvus is an open-source vector database built ...                                       
\nzilliztech/milvus       4.1.17          2.3.8                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.16          2.3.7                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.15          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.14          2.3.6                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.13          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.12          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.11          2.3.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.10          2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.9           2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.8           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.7           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.6           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.5           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.4           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.3           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.2           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.1           2.3.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'\n","helm repo update\nhelm upgrade my-release zilliztech/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME                NAMESPACE   REVISION    UPDATED                                 STATUS      CHART           APP VERSION    \nnew-release         default     1           2022-11-21 15:41:25.51539 +0800 CST     deployed    milvus-3.2.18   2.1.4 \n","NAME                                             READY   STATUS      RESTARTS   AGE\nmy-release-etcd-0                               1/1     Running     0          21m\nmy-release-etcd-1                               1/1     Running     0          21m\nmy-release-etcd-2                               1/1     Running     0          21m\nmy-release-milvus-datacoord-664c58798d-fl75s    1/1     Running     0          21m\nmy-release-milvus-datanode-5f75686c55-xfg2r     1/1     Running     0          21m\nmy-release-milvus-indexcoord-5f98b97589-2l48r   1/1     Running     0          21m\nmy-release-milvus-indexnode-857b4ddf98-vmd75    1/1     Running     0          21m\nmy-release-milvus-proxy-6c548f787f-scspp        1/1     Running     0          21m\nmy-release-milvus-querycoord-c454f44cd-dwmwq    1/1     Running     0          21m\nmy-release-milvus-querynode-76bb4946d-lbrz6     1/1     Running     0          
21m\nmy-release-milvus-rootcoord-7764c5b686-62msm    1/1     Running     0          21m\nmy-release-minio-0                              1/1     Running     0          21m\nmy-release-minio-1                              1/1     Running     0          21m\nmy-release-minio-2                              1/1     Running     0          21m\nmy-release-minio-3                              1/1     Running     0          21m\nmy-release-pulsar-bookie-0                      1/1     Running     0          21m\nmy-release-pulsar-bookie-1                      1/1     Running     0          21m\nmy-release-pulsar-bookie-2                      1/1     Running     0          21m\nmy-release-pulsar-bookie-init-tjxpj             0/1     Completed   0          21m\nmy-release-pulsar-broker-0                      1/1     Running     0          21m\nmy-release-pulsar-proxy-0                       1/1     Running     0          21m\nmy-release-pulsar-pulsar-init-c8vvc             0/1     Completed   0          21m\nmy-release-pulsar-recovery-0                    1/1     Running     0          21m\nmy-release-pulsar-zookeeper-0                   1/1     Running     0          21m\nmy-release-pulsar-zookeeper-1                   1/1     Running     0          20m\nmy-release-pulsar-zookeeper-2                   1/1     Running     0          20m\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix\n"],"headingContent":"Upgrade Milvus Cluster with Helm Chart","anchorList":[{"label":"헬름 차트로 Milvus 클러스터 업그레이드하기","href":"Upgrade-Milvus-Cluster-with-Helm-Chart","type":1,"isActive":false},{"label":"밀버스 헬름 차트 확인","href":"Check-Milvus-Helm-Chart","type":2,"isActive":false},{"label":"롤링 업그레이드 수행","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"헬름을 사용하여 밀버스 업그레이드","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"메타데이터 마이그레이션","href":"Migrate-the-metadata","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-helm.md b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-helm.md
    index 0992bdc1c..b0f849d81 100644
    --- a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-helm.md
    +++ b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-helm.md
    @@ -86,10 +86,10 @@ zilliztech/milvus       4.1.1           2.3.0                   Milvus is an ope
     zilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...
     

    You can choose the upgrade path for your Milvus as follows:

    -   - Conduct a [rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.9.
    +   - Conduct a [rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.13-hotfix.

    Conduct a rolling upgrade

  • o    operation    update    False
    -   Ensure that all deployments in your Milvus instance are in their normal status. Then you can run the following command to upgrade your Milvus instance to 2.4.9.

    -   sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'

    +   Ensure that all deployments in your Milvus instance are in their normal status. Then you can run the following command to upgrade your Milvus instance to 2.4.13-hotfix.

    +   sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'
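    While rollingUpdate.sh runs, it helps to watch the pods roll over; a small sketch, assuming the default namespace used above:

    # Watch the rolling upgrade progress; each coordinator pod should be replaced in turn.
    kubectl get pods -n default -w
    # Once settled, spot-check the image a pod actually runs (pod name is illustrative):
    kubectl get pod my-release-milvus-proxy-6c548f787f-scspp \
      -o=jsonpath='{.spec.containers[0].image}'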
     
      @@ -235,25 +235,25 @@ my-release-pulsar-zookeeper-2
    1. Migrate the Milvus metadata.
    2. Start the Milvus components with the new image.
    -   2. Upgrade Milvus from v2.1.x to 2.4.9.

    -   The following commands assume an upgrade from Milvus v2.1.4 to 2.4.9. Change the versions to those that fit your needs.

    +   2. Upgrade Milvus from v2.1.x to 2.4.13-hotfix.

    +   The following commands assume an upgrade from Milvus v2.1.4 to 2.4.13-hotfix. Change the versions to those that fit your needs.
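    If you do not have migrate.sh yet, it ships with the Milvus repository; a hedged sketch for fetching it (the raw URL and path are assumptions based on the upstream repo layout):

    # Sketch: fetch the metadata migration helper script and make it executable.
    wget https://raw.githubusercontent.com/milvus-io/milvus/master/deployments/migrate-meta/migrate.sh
    chmod +x migrate.sh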

    1. Specify your Milvus instance name, source Milvus version, and target Milvus version.

      -   ./migrate.sh -i my-release -s 2.1.4 -t 2.4.9
      +   ./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix
       
    2. If Milvus is not installed in the default K8s namespace, specify the namespace with -n.

      -   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9
      +   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix
       
    3. If Milvus was installed with a custom rootpath, specify the root path with -r.

      -   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev
      +   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev
       
    4. If Milvus was installed with a custom image, specify the image tag with -w.

      -   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9
      +   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix
       
    5. Set -d true if you want to automatically remove the migration pod after the migration is completed.

      -   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true
      +   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true
       
    6. If the migration fails, roll back and migrate again.

      -   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
      -   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9
      +   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
      +   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix
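      After a successful migration, a quick sanity check is to confirm the release came back healthy on the target image; a sketch under the same assumptions as the commands above (the Helm instance label is an assumption based on standard chart conventions):

      # Sketch: confirm every Milvus pod is Running and carries the target image.
      kubectl get pods -n milvus
      kubectl get pods -n milvus -l app.kubernetes.io/instance=my-release \
        -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[0].image}{"\n"}{end}'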
       
    diff --git a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-operator.json b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-operator.json index c9ff53671..5b4032555 100644 --- a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-operator.json +++ b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-operator.json @@ -1 +1 @@ -{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.9\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.9\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.9\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://github.com/zilliztech/milvus-operator/blob/main/config/samples/beta/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"","anchorList":[{"label":"밀버스 오퍼레이터로 밀버스 클러스터 업그레이드하기","href":"Upgrade-Milvus-Cluster-with-Milvus-Operator","type":1,"isActive":false},{"label":"Milvus 오퍼레이터 업그레이드","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"롤링 업그레이드 수행","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"이미지를 변경하여 Milvus 업그레이드하기","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"메타데이터 마이그레이션","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: 
my-release\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.13-hotfix\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.13-hotfix\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.13-hotfix\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://github.com/zilliztech/milvus-operator/blob/main/config/samples/beta/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"Upgrade Milvus Cluster with Milvus Operator","anchorList":[{"label":"밀버스 오퍼레이터로 밀버스 클러스터 업그레이드하기","href":"Upgrade-Milvus-Cluster-with-Milvus-Operator","type":1,"isActive":false},{"label":"Milvus 오퍼레이터 업그레이드","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"롤링 업그레이드 수행","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"이미지를 변경하여 Milvus 업그레이드하기","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"메타데이터 마이그레이션","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-operator.md b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-operator.md index 68ef9a668..187e4e52d 100644 --- a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-operator.md +++ b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_cluster-operator.md @@ -5,7 +5,7 @@ order: 0 group: upgrade_milvus_cluster-operator.md related_key: upgrade Milvus Cluster summary: Milvus 운영자를 사용하여 Milvus 클러스터를 업그레이드하는 방법을 알아보세요. -title: Milvus 운영자를 통한 Milvus 클러스터 업그레이드 +title: 밀버스 오퍼레이터로 밀버스 클러스터 업그레이드하기 ---

    Upgrade Milvus Cluster with Milvus Operator

    Once you have upgraded your Milvus operator to the latest version, you have the following choices:

    Conduct a rolling upgrade

    In the configuration file above, set spec.components.enableRollingUpdate to true and set spec.components.image to your desired Milvus version.

    By default, Milvus conducts a rolling upgrade for coordinators in an ordered way, replacing the coordinator pod images one after another. To reduce the upgrade time, consider setting spec.components.imageUpdateMode to all so that Milvus replaces all pod images at the same time.
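    If you prefer not to edit the manifest by hand, the same fields can be set with a merge patch against the Milvus custom resource; a hedged sketch using the release name from this guide:

    # Sketch: switch to simultaneous image replacement via a merge patch on the CR.
    kubectl patch milvus my-release --type='merge' -p \
      '{"spec":{"components":{"imageUpdateMode":"all","image":"milvusdb/milvus:v2.4.13-hotfix"}}}'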

    @@ -88,7 +88,7 @@ spec:
       components:
         enableRollingUpdate: true
         imageUpdateMode: all
    -    image: milvusdb/milvus:v2.4.9
    +    image: milvusdb/milvus:v2.4.13-hotfix

    You can set spec.components.imageUpdateMode to rollingDowngrade to have Milvus replace its coordinator pod images with a lower version.

    apiVersion: milvus.io/v1beta1
    @@ -128,7 +128,7 @@ metadata:
     spec:
       # Omit other fields ...
       components:
    -   image: milvusdb/milvus:v2.4.9
    +   image: milvusdb/milvus:v2.4.13-hotfix
     

    Then run the following to perform the upgrade:

    kubectl apply -f milvusupgrade.yaml
    @@ -148,8 +148,8 @@ spec:
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -   From Milvus 2.2.0 on, the metadata is incompatible with that in previous releases. The following example snippets assume an upgrade from Milvus 2.1.4 to Milvus 2.4.9.

    -   1. Create a .yaml file for metadata migration

    -   Create a metadata migration file. The following is an example. You need to specify name, sourceVersion, and targetVersion in the configuration file. The following example sets name to my-release-upgrade, sourceVersion to v2.1.4, and targetVersion to v2.4.9. This means that the Milvus cluster will be upgraded from v2.1.4 to v2.4.9.

    +   From Milvus 2.2.0 on, the metadata is incompatible with that in previous releases. The following example snippets assume an upgrade from Milvus 2.1.4 to Milvus 2.4.13-hotfix.

    +   1. Create a .yaml file for metadata migration

    +   Create a metadata migration file. The following is an example. You need to specify name, sourceVersion, and targetVersion in the configuration file. The following example sets name to my-release-upgrade, sourceVersion to v2.1.4, and targetVersion to v2.4.13-hotfix. This means that the Milvus cluster will be upgraded from v2.1.4 to v2.4.13-hotfix.

    apiVersion: milvus.io/v1beta1
     kind: MilvusUpgrade
     metadata:
    @@ -159,9 +159,9 @@ spec:
         namespace: default
         name: my-release
       sourceVersion: "v2.1.4"
    -  targetVersion: "v2.4.9"
    +  targetVersion: "v2.4.13-hotfix"
       # below are some omit default values:
    -  # targetImage: "milvusdb/milvus:v2.4.9"
    +  # targetImage: "milvusdb/milvus:v2.4.13-hotfix"
       # toolImage: "milvusdb/meta-migration:v2.2.0"
       # operation: upgrade
       # rollbackIfFailed: true
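    Once the MilvusUpgrade resource is saved (say as milvusupgrade.yaml; the file name is an assumption), applying it and checking progress looks roughly like this, using the status command shown later in this guide:

    # Sketch: apply the metadata-migration resource and inspect the upgrade status.
    kubectl apply -f milvusupgrade.yaml
    kubectl describe milvus my-release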
    diff --git a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-docker.json b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-docker.json
    index 42cc2b0d6..71fc2534e 100644
    --- a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-docker.json
    +++ b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-docker.json
    @@ -1 +1 @@
    -{"codeList":["...\nstandalone:\n  container_name: milvus-standalone\n  image: milvusdb/milvus:v2.4.9\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n  # Option: run/backup/rollback\n  type: run\n  runWithBackup: true\nconfig:\n  sourceVersion: 2.1.4   # Specify your milvus version\n  targetVersion: 2.4.9\n  backupFilePath: /tmp/migration.bak\nmetastore:\n  type: etcd\netcd:\n  endpoints:\n    - milvus-etcd:2379  # Use the etcd container name\n  rootPath: by-dev # The root path where data is stored in etcd\n  metaSubPath: meta\n  kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvusdb/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","// Run the following only after update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"","anchorList":[{"label":"Docker Compose로 Milvus 독립형 업그레이드하기","href":"Upgrade-Milvus-Standalone-with-Docker-Compose","type":1,"isActive":false},{"label":"이미지를 변경하여 Milvus 업그레이드","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"메타데이터 마이그레이션","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"다음 단계","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["...\nstandalone:\n  container_name: milvus-standalone\n  image: milvusdb/milvus:v2.4.13-hotfix\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n  # Option: run/backup/rollback\n  type: run\n  runWithBackup: true\nconfig:\n  sourceVersion: 2.1.4   # Specify your milvus version\n  targetVersion: 2.4.13-hotfix\n  backupFilePath: /tmp/migration.bak\nmetastore:\n  type: etcd\netcd:\n  endpoints:\n    - milvus-etcd:2379  # Use the etcd container name\n  rootPath: by-dev # The root path where data is stored in etcd\n  metaSubPath: meta\n  kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvusdb/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","// Run the following only after update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"Upgrade Milvus Standalone with Docker Compose","anchorList":[{"label":"Docker Compose로 Milvus 독립형 업그레이드하기","href":"Upgrade-Milvus-Standalone-with-Docker-Compose","type":1,"isActive":false},{"label":"이미지를 변경하여 Milvus 업그레이드","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"메타데이터 마이그레이션","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"다음 단계","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-docker.md b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-docker.md
    index f74e913a0..67b5a7e23 100644
    --- a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-docker.md
    +++ b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-docker.md
    @@ -5,7 +5,7 @@ order: 1
     group: upgrade_milvus_standalone-operator.md
     related_key: upgrade Milvus Standalone
     summary: Docker Compose를 사용하여 Milvus를 스탠드얼론으로 업그레이드하는 방법을 알아보세요.
    -title: 도커 컴포즈로 Milvus 스탠드얼론 업그레이드하기
    +title: Docker Compose로 Milvus 독립형 업그레이드하기
     ---
     
     

    Upgrade Milvus Standalone with Docker Compose

    This topic describes how to upgrade your Milvus using Docker Compose.

    -   In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before any upgrade from v2.1.x to v2.4.9.

    +   In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before any upgrade from v2.1.x to v2.4.13-hotfix.

    For security reasons, Milvus upgrades its MinIO to RELEASE.2023-03-20T20-16-18Z with the release of v2.2.5. Before any upgrades from previous Milvus Standalone releases installed with Docker Compose, you should create a Single-Node Single-Drive MinIO deployment and migrate the existing MinIO settings and content to the new deployment. For details, refer to this guide.
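    The MinIO content migration itself is typically done with the mc client by mirroring the old bucket into the new Single-Node Single-Drive deployment; a rough sketch, with all endpoints, bucket names, and credentials hypothetical:

    # Hypothetical sketch of the MinIO content migration with the mc client.
    mc alias set old http://old-minio:9000 minioadmin minioadmin
    mc alias set new http://new-minio:9000 minioadmin minioadmin
    mc mirror --preserve old/a-bucket new/a-bucket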

    @@ -49,7 +49,7 @@ title: 도커 컴포즈로 Milvus 스탠드얼론 업그레이드하기
    ...
     standalone:
       container_name: milvus-standalone
    -  image: milvusdb/milvus:v2.4.9
    +  image: milvusdb/milvus:v2.4.13-hotfix
     
  • Run the following commands to perform the upgrade.

    docker compose down
    @@ -83,7 +83,7 @@ cmd:
       runWithBackup: true
     config:
       sourceVersion: 2.1.4   # Specify your milvus version
    -  targetVersion: 2.4.9
    +  targetVersion: 2.4.13-hotfix
       backupFilePath: /tmp/migration.bak
     metastore:
       type: etcd
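    With migration.yaml updated, the migration runs in the meta-migration container; the invocation below is taken from the code listing earlier in this file and assumes your compose stack uses the default milvus network:

    # Run the metadata migration; the container must reach etcd over the compose network.
    docker run --rm -it --network milvus \
      -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml \
      milvusdb/meta-migration:v2.2.0 \
      /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml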
    diff --git a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-helm.json b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-helm.json
    index 64ae2a464..1860caa18 100644
    --- a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-helm.json
    +++ b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-helm.json
    @@ -1 +1 @@
    -{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME                    CHART VERSION   APP VERSION             DESCRIPTION                                       \nzilliztech/milvus       4.1.34          2.4.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.33          2.4.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.32          2.4.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.31          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.30          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.29          2.4.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.24          2.3.11                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.23          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.22          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.21          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.20          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.18          2.3.10                  Milvus is an open-source vector database built ... \nzilliztech/milvus       4.1.18          2.3.9                   Milvus is an open-source vector database built ...                                       
\nzilliztech/milvus       4.1.17          2.3.8                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.16          2.3.7                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.15          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.14          2.3.6                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.13          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.12          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.11          2.3.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.10          2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.9           2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.8           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.7           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.6           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.5           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.4           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.3           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.2           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.1           2.3.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'\n","helm repo update\nhelm upgrade my-release milvus/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME                NAMESPACE   REVISION    UPDATED                                 STATUS      CHART           APP VERSION     \nmy-release          default     1           2022-11-21 15:41:25.51539 +0800 CST     deployed    milvus-3.2.18   2.1.4\n","NAME                                            READY   STATUS    RESTARTS   AGE\nmy-release-etcd-0                               1/1     Running   0          84s\nmy-release-milvus-standalone-75c599fffc-6rwlj   1/1     Running   0          84s\nmy-release-minio-744dd9586f-qngzv               1/1     Running   0          84s\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 
-r by-dev -o migrate -w milvusdb/milvus:v2.4.9\n"],"headingContent":"","anchorList":[{"label":"헬름 차트로 Milvus 스탠드얼론 업그레이드하기","href":"Upgrade-Milvus-Standalone-with-Helm-Chart","type":1,"isActive":false},{"label":"Milvus 버전 확인","href":"Check-the-Milvus-version","type":2,"isActive":false},{"label":"롤링 업그레이드 수행","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"헬름을 사용하여 밀버스 업그레이드","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"메타데이터 마이그레이션","href":"Migrate-the-metadata","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME                    CHART VERSION   APP VERSION             DESCRIPTION                                       \nzilliztech/milvus       4.1.34          2.4.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.33          2.4.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.32          2.4.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.31          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.30          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.29          2.4.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.24          2.3.11                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.23          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.22          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.21          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.20          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.18          2.3.10                  Milvus is an open-source vector database built ... \nzilliztech/milvus       4.1.18          2.3.9                   Milvus is an open-source vector database built ...                                       
\nzilliztech/milvus       4.1.17          2.3.8                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.16          2.3.7                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.15          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.14          2.3.6                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.13          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.12          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.11          2.3.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.10          2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.9           2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.8           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.7           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.6           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.5           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.4           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.3           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.2           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.1           2.3.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'\n","helm repo update\nhelm upgrade my-release milvus/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME                NAMESPACE   REVISION    UPDATED                                 STATUS      CHART           APP VERSION     \nmy-release          default     1           2022-11-21 15:41:25.51539 +0800 CST     deployed    milvus-3.2.18   2.1.4\n","NAME                                            READY   STATUS    RESTARTS   AGE\nmy-release-etcd-0                               1/1     Running   0          84s\nmy-release-milvus-standalone-75c599fffc-6rwlj   1/1     Running   0          84s\nmy-release-minio-744dd9586f-qngzv               1/1     Running   0          84s\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback 
-w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix\n"],"headingContent":"Upgrade Milvus Standalone with Helm Chart","anchorList":[{"label":"헬름 차트로 Milvus 스탠드얼론 업그레이드하기","href":"Upgrade-Milvus-Standalone-with-Helm-Chart","type":1,"isActive":false},{"label":"Milvus 버전 확인","href":"Check-the-Milvus-version","type":2,"isActive":false},{"label":"롤링 업그레이드 수행","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"헬름을 사용하여 밀버스 업그레이드","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"메타데이터 마이그레이션","href":"Migrate-the-metadata","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-helm.md b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-helm.md
    index aac516ef3..8784f9b10 100644
    --- a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-helm.md
    +++ b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-helm.md
    @@ -86,10 +86,10 @@ zilliztech/milvus       4.1.1           2.3.0                   Milvus is an ope
     zilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...
     

You can choose the upgrade path for your Milvus as follows:

-
- [Conduct a rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.9.
+
- [Conduct a rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.13-hotfix.

Conduct a rolling upgrade

@@ -213,25 +213,25 @@ my-release-minio-744dd9586f-qngzv 1/1 Running 0 84s
1. Stops the Milvus components.
2. Migrates the Milvus metadata.
3. Starts the Milvus components with the new image.
-

2. Upgrade Milvus from v2.1.x to 2.4.9.

The following commands assume an upgrade of Milvus from v2.1.4 to 2.4.9. Change the versions to suit your needs.

+

2. Upgrade Milvus from v2.1.x to 2.4.13-hotfix

The following commands assume an upgrade of Milvus from v2.1.4 to 2.4.13-hotfix. Change the versions to suit your needs.

1. Specify your Milvus instance name, the source Milvus version, and the target Milvus version.

      -
      ./migrate.sh -i my-release -s 2.1.4 -t 2.4.9
      +
      ./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix
       
2. Specify the namespace with -n if your Milvus is not installed in the default K8s namespace.

      -
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9
      +
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix
       
3. Specify the root path with -r if your Milvus is installed with a custom rootpath.

      -
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev
      +
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev
       
4. Specify the image tag with -w if your Milvus is installed with a custom image.

      -
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9
      +
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix
       
5. Set -d true if you want to automatically remove the migration pod after the migration is completed.

      -
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true
      +
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true
       
6. Roll back and migrate again if the migration fails.

      -
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
      -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9
      +
      ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
      +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix
       
    diff --git a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-operator.json b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-operator.json index 1a1559b3d..cbcf7d2aa 100644 --- a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-operator.json +++ b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-operator.json @@ -1 +1 @@ -{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nlabels:\n app: milvus\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.9\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.9\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.9\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://raw.githubusercontent.com/zilliztech/milvus-operator/main/config/samples/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"","anchorList":[{"label":"Milvus 오퍼레이터로 Milvus 스탠드얼론 업그레이드하기","href":"Upgrade-Milvus-Standalone-with-Milvus-Operator","type":1,"isActive":false},{"label":"Milvus 오퍼레이터 업그레이드","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"롤링 업그레이드 수행","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"이미지를 변경하여 Milvus 업그레이드하기","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"메타데이터 마이그레이션","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: 
milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nlabels:\n app: milvus\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.13-hotfix\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.13-hotfix\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.13-hotfix\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://raw.githubusercontent.com/zilliztech/milvus-operator/main/config/samples/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"Upgrade Milvus Standalone with Milvus Operator","anchorList":[{"label":"Milvus 오퍼레이터로 Milvus 스탠드얼론 업그레이드하기","href":"Upgrade-Milvus-Standalone-with-Milvus-Operator","type":1,"isActive":false},{"label":"Milvus 오퍼레이터 업그레이드","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"롤링 업그레이드 수행","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"이미지를 변경하여 Milvus 업그레이드하기","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"메타데이터 마이그레이션","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-operator.md b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-operator.md index 9e6ff6681..e264962f7 100644 --- a/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-operator.md +++ b/localization/v2.4.x/site/ko/adminGuide/upgrade_milvus_standalone-operator.md @@ -5,7 +5,7 @@ order: 0 group: upgrade_milvus_standalone-operator.md related_key: upgrade Milvus Standalone summary: Milvus 운영자를 통해 Milvus 스탠드얼론 업그레이드 방법을 알아보세요. -title: 밀버스 오퍼레이터로 밀버스 스탠드얼론 업그레이드하기 +title: Milvus 오퍼레이터로 Milvus 스탠드얼론 업그레이드하기 ---

Upgrade Milvus Standalone with Milvus Operator

Once you have upgraded your Milvus operator to the latest version, you have the following choices:

Conduct a rolling upgrade

In the configuration file above, set spec.components.enableRollingUpdate to true and set spec.components.image to your desired Milvus version.

By default, Milvus performs a rolling upgrade for coordinators in an ordered way, replacing the coordinator pod images one after another. To reduce the upgrade time, set spec.components.imageUpdateMode to all so that Milvus replaces all pod images at the same time.

@@ -88,7 +88,7 @@ spec:
   components:
     enableRollingUpdate: true
     imageUpdateMode: all
-    image: milvusdb/milvus:v2.4.9
+    image: milvusdb/milvus:v2.4.13-hotfix

You can set spec.components.imageUpdateMode to rollingDowngrade to have Milvus replace the coordinator pod images with a lower version.

    apiVersion: milvus.io/v1beta1
    @@ -130,7 +130,7 @@ labels:
     spec:
       # Omit other fields ...
       components:
    -   image: milvusdb/milvus:v2.4.9
    +   image: milvusdb/milvus:v2.4.13-hotfix
     

Then run the following to perform the upgrade:

    kubectl apply -f milvusupgrade.yaml
    @@ -150,8 +150,8 @@ spec:
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
-

Since Milvus 2.2.0, the metadata is incompatible with that in previous releases. The following example snippets assume an upgrade from Milvus 2.1.4 to Milvus v2.4.9.

-

1. Create a .yaml file for metadata migration

Create a metadata migration file. The following is an example. You need to specify name, sourceVersion, and targetVersion in the configuration file. The following example sets name to my-release-upgrade, sourceVersion to v2.1.4, and targetVersion to v2.4.9, which means that your Milvus instance will be upgraded from v2.1.4 to v2.4.9.

+

Since Milvus 2.2.0, the metadata is incompatible with that in previous releases. The following example snippets assume an upgrade from Milvus 2.1.4 to Milvus v2.4.13-hotfix.

+

1. Create a .yaml file for metadata migration

Create a metadata migration file. The following is an example. You need to specify name, sourceVersion, and targetVersion in the configuration file. The following example sets name to my-release-upgrade, sourceVersion to v2.1.4, and targetVersion to v2.4.13-hotfix, which means that your Milvus instance will be upgraded from v2.1.4 to v2.4.13-hotfix.

    apiVersion: milvus.io/v1beta1
     kind: MilvusUpgrade
     metadata:
    @@ -161,9 +161,9 @@ spec:
         namespace: default
         name: my-release
       sourceVersion: "v2.1.4"
    -  targetVersion: "v2.4.9"
    +  targetVersion: "v2.4.13-hotfix"
       # below are some omit default values:
    -  # targetImage: "milvusdb/milvus:v2.4.9"
    +  # targetImage: "milvusdb/milvus:v2.4.13-hotfix"
       # toolImage: "milvusdb/meta-migration:v2.2.0"
       # operation: upgrade
       # rollbackIfFailed: true
    diff --git a/localization/v2.4.x/site/ko/embeddings/embed-with-cohere.json b/localization/v2.4.x/site/ko/embeddings/embed-with-cohere.json
    index 40401b602..3ce35a476 100644
    --- a/localization/v2.4.x/site/ko/embeddings/embed-with-cohere.json
    +++ b/localization/v2.4.x/site/ko/embeddings/embed-with-cohere.json
    @@ -1 +1 @@
    -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","cohere_ef = CohereEmbeddingFunction(\n    model_name=\"embed-english-light-v3.0\",\n    api_key=\"YOUR_COHERE_API_KEY\",\n    input_type=\"search_document\",\n    embedding_types=[\"float\"]\n)\n","docs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = cohere_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", cohere_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 3.43322754e-02,  1.16252899e-03, -5.25207520e-02,  1.32846832e-03,\n       -6.80541992e-02,  6.10961914e-02, -7.06176758e-02,  1.48925781e-01,\n        1.54174805e-01,  1.98516846e-02,  2.43835449e-02,  3.55224609e-02,\n        1.82952881e-02,  7.57446289e-02, -2.40783691e-02,  4.40063477e-02,\n...\n        0.06359863, -0.01971436, -0.02253723,  0.00354195,  0.00222015,\n        0.00184727,  0.03408813, -0.00777817,  0.04919434,  0.01519775,\n       -0.02862549,  0.04760742, -0.07891846,  0.0124054 ], dtype=float32)]\nDim: 384 (384,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = cohere_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", cohere_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-1.33361816e-02,  9.79423523e-04, -7.28759766e-02, -1.93786621e-02,\n       -9.71679688e-02,  4.34875488e-02, -9.81445312e-02,  1.16882324e-01,\n        5.89904785e-02, -4.19921875e-02,  4.95910645e-02,  5.83496094e-02,\n        3.47595215e-02, -5.87463379e-03, -7.30514526e-03,  2.92816162e-02,\n...\n        0.00749969, -0.01192474,  0.02719116,  0.03347778,  0.07696533,\n        0.01409149,  0.00964355, -0.01681519, -0.0073204 ,  0.00043154,\n       -0.04577637,  0.03591919, -0.02807617, -0.04812622], dtype=float32)]\nDim 384 (384,)\n"],"headingContent":"","anchorList":[{"label":"Cohere","href":"Cohere","type":1,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import CohereEmbeddingFunction\n\ncohere_ef = CohereEmbeddingFunction(\n    model_name=\"embed-english-light-v3.0\",\n    api_key=\"YOUR_COHERE_API_KEY\",\n    input_type=\"search_document\",\n    embedding_types=[\"float\"]\n)\n","docs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = cohere_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", cohere_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 3.43322754e-02,  1.16252899e-03, -5.25207520e-02,  1.32846832e-03,\n       -6.80541992e-02,  6.10961914e-02, -7.06176758e-02,  1.48925781e-01,\n        1.54174805e-01,  1.98516846e-02,  2.43835449e-02,  3.55224609e-02,\n        1.82952881e-02,  7.57446289e-02, -2.40783691e-02,  4.40063477e-02,\n...\n        0.06359863, -0.01971436, -0.02253723,  0.00354195,  0.00222015,\n        0.00184727,  0.03408813, -0.00777817,  0.04919434,  0.01519775,\n       -0.02862549,  0.04760742, -0.07891846,  0.0124054 ], dtype=float32)]\nDim: 384 (384,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = cohere_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", cohere_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-1.33361816e-02,  9.79423523e-04, -7.28759766e-02, -1.93786621e-02,\n       -9.71679688e-02,  4.34875488e-02, -9.81445312e-02,  1.16882324e-01,\n        5.89904785e-02, -4.19921875e-02,  4.95910645e-02,  5.83496094e-02,\n        3.47595215e-02, -5.87463379e-03, -7.30514526e-03,  2.92816162e-02,\n...\n        0.00749969, -0.01192474,  0.02719116,  0.03347778,  0.07696533,\n        0.01409149,  0.00964355, -0.01681519, -0.0073204 ,  0.00043154,\n       -0.04577637,  0.03591919, -0.02807617, -0.04812622], dtype=float32)]\nDim 384 (384,)\n"],"headingContent":"Cohere","anchorList":[{"label":"Cohere","href":"Cohere","type":1,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/ko/embeddings/embed-with-cohere.md b/localization/v2.4.x/site/ko/embeddings/embed-with-cohere.md
    index e8203990d..8b4f3588b 100644
    --- a/localization/v2.4.x/site/ko/embeddings/embed-with-cohere.md
    +++ b/localization/v2.4.x/site/ko/embeddings/embed-with-cohere.md
    @@ -26,7 +26,9 @@ title: Cohere 포함
     pip install "pymilvus[model]"
     

Then, instantiate the CohereEmbeddingFunction:

    -
    cohere_ef = CohereEmbeddingFunction(
    +
    from pymilvus.model.dense import CohereEmbeddingFunction
    +
    +cohere_ef = CohereEmbeddingFunction(
         model_name="embed-english-light-v3.0",
         api_key="YOUR_COHERE_API_KEY",
         input_type="search_document",
    diff --git a/localization/v2.4.x/site/ko/embeddings/embed-with-jina.json b/localization/v2.4.x/site/ko/embeddings/embed-with-jina.json
    index 3ba04b590..4cee744ff 100644
    --- a/localization/v2.4.x/site/ko/embeddings/embed-with-jina.json
    +++ b/localization/v2.4.x/site/ko/embeddings/embed-with-jina.json
    @@ -1 +1 @@
    -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n    model_name=\"jina-embeddings-v2-base-en\", # Defaults to `jina-embeddings-v2-base-en`\n    api_key=JINAAI_API_KEY # Provide your Jina AI API key\n)\n","docs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = jina_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", jina_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([-4.88487840e-01, -4.28095880e-01,  4.90086500e-01, -1.63274320e-01,\n        3.43437800e-01,  3.21476880e-01,  2.83173790e-02, -3.10403670e-01,\n        4.76985040e-01, -1.77410420e-01, -3.84803180e-01, -2.19224200e-01,\n       -2.52898000e-01,  6.62411900e-02, -8.58173100e-01,  1.05221800e+00,\n...\n       -2.04462400e-01,  7.14229800e-01, -1.66823000e-01,  8.72551440e-01,\n        5.53560140e-01,  8.92506300e-01, -2.39408610e-01, -4.22413560e-01,\n       -3.19551350e-01,  5.59153850e-01,  2.44338100e-01, -8.60452100e-01])]\nDim: 768 (768,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = jina_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", jina_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-5.99164660e-01, -3.49827350e-01,  8.22405160e-01, -1.18632730e-01,\n        5.78107540e-01,  1.09789170e-01,  2.91604200e-01, -3.29306450e-01,\n        2.93779640e-01, -2.17880800e-01, -6.84535440e-01, -3.79752000e-01,\n       -3.47541800e-01,  9.20846100e-02, -6.13804400e-01,  6.31312800e-01,\n...\n       -1.84993740e-02,  9.38629150e-01,  2.74858470e-02,  1.09396360e+00,\n        3.96270750e-01,  7.44445800e-01, -1.95404050e-01, -6.08383200e-01,\n       -3.75076300e-01,  3.87512200e-01,  8.11889650e-01, -3.76407620e-01])]\nDim 768 (768,)\n"],"headingContent":"","anchorList":[{"label":"Jina AI","href":"Jina-AI","type":1,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n    model_name=\"jina-embeddings-v3\", # Defaults to `jina-embeddings-v3`\n    api_key=JINAAI_API_KEY, # Provide your Jina AI API key\n    task=\"retrieval.passage\", # Specify the task\n    dimensions=1024, # Defaults to 1024\n)\n","\n```python\ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = jina_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", jina_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([9.80641991e-02, -8.51697400e-02,  7.36531913e-02,  1.42558888e-02,\n       -2.23589484e-02,  1.68494112e-03, -3.50753777e-02, -3.11530549e-02,\n       -3.26012149e-02,  5.04568312e-03,  3.69836427e-02,  3.48948985e-02,\n        8.19722563e-03,  5.88679723e-02, -6.71099266e-03, -1.82369724e-02,\n...\n        2.48654783e-02,  3.43279652e-02, -1.66154150e-02, -9.90478322e-03,\n       -2.96043139e-03, -8.57473817e-03, -7.39028037e-04,  6.25024503e-03,\n       -1.08831357e-02, -4.00776342e-02,  3.25369164e-02, -1.42691191e-03])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = jina_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", jina_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([8.79201014e-03,  1.47551354e-02,  4.02722731e-02, -2.52991207e-02,\n        1.12719582e-02,  3.75947170e-02,  3.97946090e-02, -7.36681819e-02,\n       -2.17952449e-02, -1.16298944e-02, -6.83426252e-03, -5.12507409e-02,\n        5.26071340e-02,  6.75181448e-02,  3.92445624e-02, -1.40817231e-02,\n...\n        8.81703943e-03,  4.24629413e-02, -2.32944116e-02, -2.05193572e-02,\n       -3.22035812e-02,  2.81896023e-03,  3.85326855e-02,  3.64372656e-02,\n       -1.65050142e-02, -4.26847413e-02,  2.02664156e-02, -1.72684863e-02])]\nDim 1024 (1024,)\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n    model_name=\"jina-embeddings-v3\", # Defaults to `jina-embeddings-v3`\n    api_key=JINA_API_KEY, # Provide your Jina AI API key\n    task=\"text-matching\",\n    dimensions=1024, # Defaults to 1024\n)\n\ntexts = [\n    \"Follow the white rabbit.\",  # English\n    \"Sigue al conejo blanco.\",  # Spanish\n    \"Suis le lapin blanc.\",  # French\n    \"跟着白兔走。\",  # Chinese\n    \"اتبع الأرنب الأبيض.\",  # Arabic\n    \"Folge dem weißen Kaninchen.\",  # German\n]\n\nembeddings = jina_ef(texts)\n\n# Compute similarities\nprint(embeddings[0] @ embeddings[1].T)\n"],"headingContent":"Jina AI","anchorList":[{"label":"Jina AI","href":"Jina-AI","type":1,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/ko/embeddings/embed-with-jina.md b/localization/v2.4.x/site/ko/embeddings/embed-with-jina.md
    index 8ba2757e1..adc3c4ebf 100644
    --- a/localization/v2.4.x/site/ko/embeddings/embed-with-jina.md
    +++ b/localization/v2.4.x/site/ko/embeddings/embed-with-jina.md
    @@ -31,19 +31,36 @@ pip install "pymilvus[model]"
     
    from pymilvus.model.dense import JinaEmbeddingFunction
     
     jina_ef = JinaEmbeddingFunction(
    -    model_name="jina-embeddings-v2-base-en", # Defaults to `jina-embeddings-v2-base-en`
    -    api_key=JINAAI_API_KEY # Provide your Jina AI API key
    +    model_name="jina-embeddings-v3", # Defaults to `jina-embeddings-v3`
    +    api_key=JINAAI_API_KEY, # Provide your Jina AI API key
    +    task="retrieval.passage", # Specify the task
    +    dimensions=1024, # Defaults to 1024
     )
     

Parameters

• model_name (string)

  -

  The name of the Jina AI embedding model to use for encoding. You can specify any of the available Jina AI embedding model names, for example, jina-embeddings-v2-base-en, jina-embeddings-v2-small-en, etc. If you leave this parameter unspecified, jina-embeddings-v2-base-en will be used. For a list of available models, refer to Jina Embeddings.

• +

  The name of the Jina AI embedding model to use for encoding. You can specify any of the available Jina AI embedding model names, for example, jina-embeddings-v3, jina-embeddings-v2-base-en, etc. If you leave this parameter unspecified, jina-embeddings-v3 will be used. For a list of available models, refer to Jina Embeddings.

• api_key (string)

  The API key for accessing the Jina AI API.

• +
• task (string)

  +

  The type of input passed to the model. Required for embedding models v3 and higher.

  +
    +
  • "retrieval.passage": Used to encode large documents in retrieval tasks at indexing time.
  • +
  • "retrieval.query": Used to encode user queries or questions in retrieval tasks.
  • +
  • "classification": Used to encode text for text classification tasks.
  • +
  • "text-matching": Used to encode text for similarity matching, such as measuring the similarity between two sentences.
  • +
  • "clustering": Used for clustering or reranking tasks.
  • +
• +
• dimensions (int)

  +

  The number of dimensions the resulting output embeddings should have. Defaults to 1024. Only supported for embedding models v3 and higher.

• +
• late_chunking (bool)

  +

  This parameter controls whether to use the new chunking method Jina AI introduced last month for encoding a batch of sentences. Defaults to False. When set to True, the Jina AI API concatenates all sentences in the input field and feeds them to the model as a single string. Internally, the model embeds this long concatenated string and then performs late chunking, returning a list of embeddings that matches the size of the input list; a hedged usage sketch follows below.
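A minimal sketch of enabling late chunking, assuming the late_chunking keyword is accepted by JinaEmbeddingFunction as described above; the input sentences are illustrative only:

```python
from pymilvus.model.dense import JinaEmbeddingFunction

# Assumption: late_chunking is forwarded to the Jina AI API as documented above.
jina_ef = JinaEmbeddingFunction(
    model_name="jina-embeddings-v3",
    api_key=JINAAI_API_KEY,      # your Jina AI API key
    task="retrieval.passage",
    dimensions=1024,
    late_chunking=True,          # concatenate inputs, embed once, then late-chunk
)

# Sentences from a single document: with late chunking, each returned
# embedding is computed with the surrounding sentences as context.
chunks = [
    "Milvus is an open-source vector database.",
    "It stores embeddings for similarity search.",
]

embeddings = jina_ef(chunks)     # one embedding per input sentence
print(len(embeddings), embeddings[0].shape)
```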

    -

To create embeddings for documents, use the encode_documents() method:

    -
    docs = [
    +

To create embeddings for documents, use the encode_documents() method. This method is designed for document embeddings in asymmetric retrieval tasks, such as indexing documents for search or recommendation tasks. It uses retrieval.passage as the task.

    +
    
    +```python
    +docs = [
         "Artificial intelligence was founded as an academic discipline in 1956.",
         "Alan Turing was the first person to conduct substantial research in AI.",
         "Born in Maida Vale, London, Turing was raised in southern England.",
    @@ -56,18 +73,18 @@ docs_embeddings = jina_ef.encode_documents(docs)
     # Print dimension and shape of embeddings
     print("Dim:", jina_ef.dim, docs_embeddings[0].shape)
     
    -

The expected output is similar to the following:

    -
    Embeddings: [array([-4.88487840e-01, -4.28095880e-01,  4.90086500e-01, -1.63274320e-01,
    -        3.43437800e-01,  3.21476880e-01,  2.83173790e-02, -3.10403670e-01,
    -        4.76985040e-01, -1.77410420e-01, -3.84803180e-01, -2.19224200e-01,
    -       -2.52898000e-01,  6.62411900e-02, -8.58173100e-01,  1.05221800e+00,
    +

The expected output is similar to the following:

    +
    Embeddings: [array([9.80641991e-02, -8.51697400e-02,  7.36531913e-02,  1.42558888e-02,
    +       -2.23589484e-02,  1.68494112e-03, -3.50753777e-02, -3.11530549e-02,
    +       -3.26012149e-02,  5.04568312e-03,  3.69836427e-02,  3.48948985e-02,
    +        8.19722563e-03,  5.88679723e-02, -6.71099266e-03, -1.82369724e-02,
     ...
    -       -2.04462400e-01,  7.14229800e-01, -1.66823000e-01,  8.72551440e-01,
    -        5.53560140e-01,  8.92506300e-01, -2.39408610e-01, -4.22413560e-01,
    -       -3.19551350e-01,  5.59153850e-01,  2.44338100e-01, -8.60452100e-01])]
    -Dim: 768 (768,)
    +        2.48654783e-02,  3.43279652e-02, -1.66154150e-02, -9.90478322e-03,
    +       -2.96043139e-03, -8.57473817e-03, -7.39028037e-04,  6.25024503e-03,
    +       -1.08831357e-02, -4.00776342e-02,  3.25369164e-02, -1.42691191e-03])]
    +Dim: 1024 (1024,)
     
    -

To create embeddings for queries, use the encode_queries() method:

    +

To create embeddings for queries, use the encode_queries() method. This method is designed for query embeddings in asymmetric retrieval tasks, such as search queries or questions. It uses retrieval.query as the task.

    queries = ["When was artificial intelligence founded", 
                "Where was Alan Turing born?"]
     
    @@ -77,13 +94,37 @@ query_embeddings = jina_ef.encode_queries(queries)
     print("Dim", jina_ef.dim, query_embeddings[0].shape)
     

The expected output is similar to the following:

    -
    Embeddings: [array([-5.99164660e-01, -3.49827350e-01,  8.22405160e-01, -1.18632730e-01,
    -        5.78107540e-01,  1.09789170e-01,  2.91604200e-01, -3.29306450e-01,
    -        2.93779640e-01, -2.17880800e-01, -6.84535440e-01, -3.79752000e-01,
    -       -3.47541800e-01,  9.20846100e-02, -6.13804400e-01,  6.31312800e-01,
    +
    Embeddings: [array([8.79201014e-03,  1.47551354e-02,  4.02722731e-02, -2.52991207e-02,
    +        1.12719582e-02,  3.75947170e-02,  3.97946090e-02, -7.36681819e-02,
    +       -2.17952449e-02, -1.16298944e-02, -6.83426252e-03, -5.12507409e-02,
    +        5.26071340e-02,  6.75181448e-02,  3.92445624e-02, -1.40817231e-02,
     ...
    -       -1.84993740e-02,  9.38629150e-01,  2.74858470e-02,  1.09396360e+00,
    -        3.96270750e-01,  7.44445800e-01, -1.95404050e-01, -6.08383200e-01,
    -       -3.75076300e-01,  3.87512200e-01,  8.11889650e-01, -3.76407620e-01])]
    -Dim 768 (768,)
    +        8.81703943e-03,  4.24629413e-02, -2.32944116e-02, -2.05193572e-02,
    +       -3.22035812e-02,  2.81896023e-03,  3.85326855e-02,  3.64372656e-02,
    +       -1.65050142e-02, -4.26847413e-02,  2.02664156e-02, -1.72684863e-02])]
    +Dim 1024 (1024,)
    +
    +

To create embeddings of inputs for similarity matching (such as STS or symmetric retrieval tasks), text classification, clustering, or reranking tasks, use the appropriate task parameter value when instantiating the JinaEmbeddingFunction class.

    +
    from pymilvus.model.dense import JinaEmbeddingFunction
    +
    +jina_ef = JinaEmbeddingFunction(
    +    model_name="jina-embeddings-v3", # Defaults to `jina-embeddings-v3`
    +    api_key=JINA_API_KEY, # Provide your Jina AI API key
    +    task="text-matching",
    +    dimensions=1024, # Defaults to 1024
    +)
    +
    +texts = [
    +    "Follow the white rabbit.",  # English
    +    "Sigue al conejo blanco.",  # Spanish
    +    "Suis le lapin blanc.",  # French
    +    "跟着白兔走。",  # Chinese
    +    "اتبع الأرنب الأبيض.",  # Arabic
    +    "Folge dem weißen Kaninchen.",  # German
    +]
    +
    +embeddings = jina_ef(texts)
    +
    +# Compute similarities
    +print(embeddings[0] @ embeddings[1].T)
     
    diff --git a/localization/v2.4.x/site/ko/embeddings/embed-with-voyage.json b/localization/v2.4.x/site/ko/embeddings/embed-with-voyage.json index c5fe18415..ba7547304 100644 --- a/localization/v2.4.x/site/ko/embeddings/embed-with-voyage.json +++ b/localization/v2.4.x/site/ko/embeddings/embed-with-voyage.json @@ -1 +1 @@ -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import VoyageEmbeddingFunction\n\nvoyage_ef = VoyageEmbeddingFunction(\n model_name=\"voyage-lite-02-instruct\", # Defaults to `voyage-2`\n api_key=VOYAGE_API_KEY # Provide your Voyage API key\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = voyage_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", voyage_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 0.02582654, -0.00907086, -0.04604037, ..., -0.01227521,\n 0.04420955, -0.00038829]), array([ 0.03844212, -0.01597065, -0.03728884, ..., -0.02118733,\n 0.03349845, 0.0065346 ]), array([ 0.05143557, -0.01096631, -0.02690451, ..., -0.02416254,\n 0.07658645, 0.03064499])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = voyage_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", voyage_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([ 0.01733501, -0.0230672 , -0.05208827, ..., -0.00957995,\n 0.04493361, 0.01485138]), array([ 0.05937521, -0.00729363, -0.02184347, ..., -0.02107683,\n 0.05706626, 0.0263358 ])]\nDim 1024 (1024,)\n"],"headingContent":"","anchorList":[{"label":"Voyage","href":"Voyage","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import VoyageEmbeddingFunction\n\nvoyage_ef = VoyageEmbeddingFunction(\n model_name=\"voyage-3\", # Defaults to `voyage-3`\n api_key=VOYAGE_API_KEY # Provide your Voyage API key\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = voyage_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", voyage_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 0.02582654, -0.00907086, -0.04604037, ..., -0.01227521,\n 0.04420955, -0.00038829]), array([ 0.03844212, -0.01597065, -0.03728884, ..., -0.02118733,\n 0.03349845, 0.0065346 ]), array([ 0.05143557, -0.01096631, -0.02690451, ..., -0.02416254,\n 0.07658645, 0.03064499])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = voyage_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", voyage_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([ 0.01733501, -0.0230672 , -0.05208827, ..., -0.00957995,\n 0.04493361, 0.01485138]), array([ 0.05937521, -0.00729363, -0.02184347, ..., -0.02107683,\n 0.05706626, 0.0263358 ])]\nDim 1024 
(1024,)\n"],"headingContent":"Voyage","anchorList":[{"label":"Voyage","href":"Voyage","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/embeddings/embed-with-voyage.md b/localization/v2.4.x/site/ko/embeddings/embed-with-voyage.md index 75e92c631..8a9475f37 100644 --- a/localization/v2.4.x/site/ko/embeddings/embed-with-voyage.md +++ b/localization/v2.4.x/site/ko/embeddings/embed-with-voyage.md @@ -30,13 +30,13 @@ pip install "pymilvus[model]"
    from pymilvus.model.dense import VoyageEmbeddingFunction
     
     voyage_ef = VoyageEmbeddingFunction(
    -    model_name="voyage-lite-02-instruct", # Defaults to `voyage-2`
    +    model_name="voyage-3", # Defaults to `voyage-3`
         api_key=VOYAGE_API_KEY # Provide your Voyage API key
     )
     

Parameters

  -
• model_name (string) The name of the Voyage model to use for encoding. You can specify any of the available Voyage model names, for example, voyage-law-2, voyage-code-2, etc. If you leave this parameter unspecified, voyage-2 will be used. For a list of available models, refer to the Voyage official documentation.
• +
• model_name (string) The name of the Voyage model to use for encoding. You can specify any of the available Voyage model names, for example, voyage-3-lite, voyage-finance-2, etc. If you leave this parameter unspecified, voyage-3 will be used. For a list of available models, refer to the Voyage official documentation.
• api_key (string) The API key for accessing the Voyage API. For information on how to create an API key, refer to API Key and Python Client.

To create embeddings for documents, use the encode_documents() method, as in the sketch below:
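A short example mirroring the codeList above; it assumes the voyage_ef instance created in the previous snippet:

```python
docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Alan Turing was the first person to conduct substantial research in AI.",
    "Born in Maida Vale, London, Turing was raised in southern England.",
]

docs_embeddings = voyage_ef.encode_documents(docs)

# Print dimension and shape of embeddings
print("Dim:", voyage_ef.dim, docs_embeddings[0].shape)
```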

    diff --git a/localization/v2.4.x/site/ko/embeddings/embeddings.json b/localization/v2.4.x/site/ko/embeddings/embeddings.json index 3d959f541..7de2bcf7c 100644 --- a/localization/v2.4.x/site/ko/embeddings/embeddings.json +++ b/localization/v2.4.x/site/ko/embeddings/embeddings.json @@ -1 +1 @@ -{"codeList":["pip install \"pymilvus[model]\"\n","from pymilvus import model\n\n# This will download \"all-MiniLM-L6-v2\", a light weight model.\nef = model.DefaultEmbeddingFunction()\n\n# Data from which embeddings are to be generated \ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nembeddings = ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", ef.dim, embeddings[0].shape)\n","Embeddings: [array([-3.09392996e-02, -1.80662833e-02, 1.34775648e-02, 2.77156215e-02,\n -4.86349640e-03, -3.12581174e-02, -3.55921760e-02, 5.76934684e-03,\n 2.80773244e-03, 1.35783911e-01, 3.59678417e-02, 6.17732145e-02,\n...\n -4.61330153e-02, -4.85207550e-02, 3.13997865e-02, 7.82178566e-02,\n -4.75336798e-02, 5.21207601e-02, 9.04406682e-02, -5.36676683e-02],\n dtype=float32)]\nDim: 384 (384,)\n","from pymilvus.model.hybrid import BGEM3EmbeddingFunction\nfrom pymilvus import (\n utility,\n FieldSchema, CollectionSchema, DataType,\n Collection, AnnSearchRequest, RRFRanker, connections,\n)\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Who started AI research?\"\n\n# BGE-M3 model can embed texts as dense and sparse vectors.\n# It is included in the optional `model` module in pymilvus, to install it,\n# simply run \"pip install pymilvus[model]\".\n\nbge_m3_ef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\n\ndocs_embeddings = bge_m3_ef(docs)\nquery_embeddings = bge_m3_ef([query])\n","from pymilvus.model.sparse import BM25EmbeddingFunction\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Where was Turing born?\"\nbm25_ef = BM25EmbeddingFunction()\n\n# 2. fit the corpus to get BM25 model parameters on your documents.\nbm25_ef.fit(docs)\n\n# 3. store the fitted parameters to disk to expedite future processing.\nbm25_ef.save(\"bm25_params.json\")\n\n# 4. 
load the saved params\nnew_bm25_ef = BM25EmbeddingFunction()\nnew_bm25_ef.load(\"bm25_params.json\")\n\ndocs_embeddings = new_bm25_ef.encode_documents(docs)\nquery_embeddings = new_bm25_ef.encode_queries([query])\nprint(\"Dim:\", new_bm25_ef.dim, list(docs_embeddings)[0].shape)\n","Dim: 21 (1, 21)\n"],"headingContent":"","anchorList":[{"label":"임베딩 개요","href":"Embedding-Overview","type":1,"isActive":false},{"label":"예제 1: 기본 임베딩 함수를 사용하여 밀도 벡터 생성하기","href":"Example-1-Use-default-embedding-function-to-generate-dense-vectors","type":2,"isActive":false},{"label":"예 2: BGE M3 모델을 사용하여 한 번의 호출로 고밀도 및 스파스 벡터 생성하기","href":"Example-2-Generate-dense-and-sparse-vectors-in-one-call-with-BGE-M3-model","type":2,"isActive":false},{"label":"예 3: BM25 모델을 사용하여 스파스 벡터 생성하기","href":"Example-3-Generate--sparse-vectors-using-BM25-model","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["pip install \"pymilvus[model]\"\n","from pymilvus import model\n\n# This will download \"all-MiniLM-L6-v2\", a light weight model.\nef = model.DefaultEmbeddingFunction()\n\n# Data from which embeddings are to be generated \ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nembeddings = ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", ef.dim, embeddings[0].shape)\n","Embeddings: [array([-3.09392996e-02, -1.80662833e-02, 1.34775648e-02, 2.77156215e-02,\n -4.86349640e-03, -3.12581174e-02, -3.55921760e-02, 5.76934684e-03,\n 2.80773244e-03, 1.35783911e-01, 3.59678417e-02, 6.17732145e-02,\n...\n -4.61330153e-02, -4.85207550e-02, 3.13997865e-02, 7.82178566e-02,\n -4.75336798e-02, 5.21207601e-02, 9.04406682e-02, -5.36676683e-02],\n dtype=float32)]\nDim: 384 (384,)\n","from pymilvus.model.hybrid import BGEM3EmbeddingFunction\nfrom pymilvus import (\n utility,\n FieldSchema, CollectionSchema, DataType,\n Collection, AnnSearchRequest, RRFRanker, connections,\n)\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Who started AI research?\"\n\n# BGE-M3 model can embed texts as dense and sparse vectors.\n# It is included in the optional `model` module in pymilvus, to install it,\n# simply run \"pip install pymilvus[model]\".\n\nbge_m3_ef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\n\ndocs_embeddings = bge_m3_ef(docs)\nquery_embeddings = bge_m3_ef([query])\n","from pymilvus.model.sparse import BM25EmbeddingFunction\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Where was Turing born?\"\nbm25_ef = BM25EmbeddingFunction()\n\n# 2. fit the corpus to get BM25 model parameters on your documents.\nbm25_ef.fit(docs)\n\n# 3. store the fitted parameters to disk to expedite future processing.\nbm25_ef.save(\"bm25_params.json\")\n\n# 4. 
load the saved params\nnew_bm25_ef = BM25EmbeddingFunction()\nnew_bm25_ef.load(\"bm25_params.json\")\n\ndocs_embeddings = new_bm25_ef.encode_documents(docs)\nquery_embeddings = new_bm25_ef.encode_queries([query])\nprint(\"Dim:\", new_bm25_ef.dim, list(docs_embeddings)[0].shape)\n","Dim: 21 (1, 21)\n"],"headingContent":"Embedding Overview","anchorList":[{"label":"임베딩 개요","href":"Embedding-Overview","type":1,"isActive":false},{"label":"예제 1: 기본 임베딩 함수를 사용하여 고밀도 벡터 생성하기","href":"Example-1-Use-default-embedding-function-to-generate-dense-vectors","type":2,"isActive":false},{"label":"예 2: BGE M3 모델을 사용하여 한 번의 호출로 고밀도 및 스파스 벡터 생성하기","href":"Example-2-Generate-dense-and-sparse-vectors-in-one-call-with-BGE-M3-model","type":2,"isActive":false},{"label":"예 3: BM25 모델을 사용하여 스파스 벡터 생성하기","href":"Example-3-Generate--sparse-vectors-using-BM25-model","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/embeddings/embeddings.md b/localization/v2.4.x/site/ko/embeddings/embeddings.md index 1c035efb7..f43e5cf9c 100644 --- a/localization/v2.4.x/site/ko/embeddings/embeddings.md +++ b/localization/v2.4.x/site/ko/embeddings/embeddings.md @@ -40,9 +40,13 @@ title: 임베딩 개요 voyageaiDenseAPI jinaDenseAPI cohereDenseAPI +InstructorDense오픈 소스 +미스트랄 AIDenseAPI +NomicDenseAPI +mGTE하이브리드오픈 소스 -

Example 1: Use the default embedding function to generate dense vectors

    +

Still have questions?

You can:

• Check out Milvus on GitHub. Feel free to ask questions, share ideas, and help others.
• Join our Milvus Forum or Slack channel to find support and engage with our open-source community.
    • diff --git a/localization/v2.4.x/site/ko/faq/performance_faq.json b/localization/v2.4.x/site/ko/faq/performance_faq.json index 98a5c439b..03724f1d2 100644 --- a/localization/v2.4.x/site/ko/faq/performance_faq.json +++ b/localization/v2.4.x/site/ko/faq/performance_faq.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"","anchorList":[{"label":"성능 FAQ","href":"Performance-FAQ","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Performance FAQ","anchorList":[{"label":"성능 FAQ","href":"Performance-FAQ","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/faq/performance_faq.md b/localization/v2.4.x/site/ko/faq/performance_faq.md index e48b0461a..b18aadc15 100644 --- a/localization/v2.4.x/site/ko/faq/performance_faq.md +++ b/localization/v2.4.x/site/ko/faq/performance_faq.md @@ -26,7 +26,7 @@ title: 성능 FAQ Accuracy test 정확도 테스트 Performance test성능 테스트

  -

  Why does a query on a small dataset take longer?

  Query operations are performed on segments. Indexes reduce the time it takes to query a segment. If a segment is not indexed, Milvus resorts to brute-force search on the raw data, which drastically increases query time.

  +

  Why does a query on a small dataset take longer?

  Query operations are performed on segments. Indexes reduce the time it takes to query a segment. If a segment is not indexed, Milvus resorts to brute-force search on the raw data, which drastically increases query time.

  Therefore, querying a small dataset (collection) usually takes longer because no index has been built: the sizes of its segments have not reached the index-building threshold set by rootCoord.minSegmentSizeToEnableindex. Call create_index() to force-index segments that have reached the threshold but have not yet been indexed automatically, and query performance improves significantly; a minimal sketch follows below.
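A minimal sketch of that forced index build with the PyMilvus ORM API; the connection address, collection name, and field name are hypothetical:

```python
from pymilvus import Collection, connections

connections.connect(host="localhost", port="19530")

collection = Collection("demo_collection")  # hypothetical collection name

# Force-build the index so that segments below the auto-indexing
# threshold are indexed as well.
collection.create_index(
    field_name="embedding",  # hypothetical vector field name
    index_params={
        "index_type": "IVF_FLAT",
        "metric_type": "L2",
        "params": {"nlist": 128},
    },
)
```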

  What factors affect CPU usage?

  CPU usage rises when Milvus builds indexes or runs queries. In general, index building is CPU-intensive, except when using Annoy, which runs on a single thread.

  When running queries, CPU usage is affected by nq and nprobe. When nq and nprobe are small, concurrency is low and CPU usage stays low.

      @@ -41,7 +41,7 @@ title: 성능 FAQ

  When choosing a consistency level, weigh the trade-off between consistency and performance. Use the 'Strong' consistency level for operations that need immediate visibility; prioritize weaker consistency (data may not be immediately visible) if you want faster writes. For details, refer to Consistency; a sketch of setting the level follows below.
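A minimal sketch of picking a consistency level at collection creation; the names are hypothetical, and passing consistency_level to MilvusClient.create_collection is an assumption based on the trade-off described above:

```python
from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")

# 'Strong' makes prior writes immediately visible to reads;
# weaker levels trade visibility for faster writes.
client.create_collection(
    collection_name="demo_collection",  # hypothetical name
    dimension=768,
    consistency_level="Strong",
)
```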

  Does indexing a VARCHAR field speed up deletion?

  Yes. Indexing a VARCHAR field can speed up 'delete by expression' operations, but only under certain conditions:

    -
  • Inverted index: This index helps IN or == expressions on non-primary-key VARCHAR fields.
  • +
  • Inverted index: This index helps IN or == expressions on non-primary-key VARCHAR fields.
  • Trie index: This index helps prefix queries (for example, LIKE prefix%) on non-primary-key VARCHAR fields.

  However, indexing a VARCHAR field does not speed up:

      diff --git a/localization/v2.4.x/site/ko/faq/product_faq.json b/localization/v2.4.x/site/ko/faq/product_faq.json index 684595ff4..dc11d4b36 100644 --- a/localization/v2.4.x/site/ko/faq/product_faq.json +++ b/localization/v2.4.x/site/ko/faq/product_faq.json @@ -1 +1 @@ -{"codeList":["60 * 2 * 4 + 40 * 1 * 12 = 960\n"],"headingContent":"","anchorList":[{"label":"제품 FAQ","href":"Product-FAQ","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":["60 * 2 * 4 + 40 * 1 * 12 = 960\n"],"headingContent":"Product FAQ","anchorList":[{"label":"제품 FAQ","href":"Product-FAQ","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/faq/product_faq.md b/localization/v2.4.x/site/ko/faq/product_faq.md index af6d21c51..730293084 100644 --- a/localization/v2.4.x/site/ko/faq/product_faq.md +++ b/localization/v2.4.x/site/ko/faq/product_faq.md @@ -29,7 +29,7 @@ title: 제품 FAQ
• When new entities and collection-related schemas are added to Milvus (currently only MinIO is supported for data persistence), system storage determines the maximum allowable size of inserted data.

Where does Milvus store data?

Milvus deals with two types of data: inserted data and metadata.

-

Inserted data, including vector data, scalar data, and collection-specific schemas, is stored in persistent storage as incremental logs. Milvus supports multiple object storage backends, including MinIO, AWS S3, Google Cloud Storage (GCS), Azure Blob Storage, Alibaba Cloud OSS, and Tencent Cloud Object Storage (COS).

+

Inserted data, including vector data, scalar data, and collection-specific schemas, is stored in persistent storage as incremental logs. Milvus supports multiple object storage backends, including MinIO, AWS S3, Google Cloud Storage (GCS), Azure Blob Storage, Alibaba Cloud OSS, and Tencent Cloud Object Storage (COS).

Metadata is generated within Milvus. Each Milvus module has its own metadata, which is stored in etcd.

Why is there no vector data in etcd?

etcd stores Milvus module metadata; MinIO stores entities.

Does Milvus support inserting and searching data simultaneously?

Yes. Insert operations and query operations are handled by two separate modules that are mutually independent. From the client's perspective, an insert operation is complete once the inserted data enters the message queue. However, inserted data is not searchable until it is loaded to a query node. If the segment size does not reach the index-building threshold (512 MB by default), Milvus resorts to brute-force search, and query performance may be degraded.

    @@ -62,9 +62,9 @@ title: 제품 FAQ

To avoid this, try setting nprobe larger and nlist and k smaller, as in the sketch below.

For more information, refer to Vector Index.
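A hedged sketch of that tuning with the ORM search API; it assumes a loaded collection with a vector field named embedding and a prepared query_vector:

```python
# A larger nprobe scans more IVF clusters per query; a smaller limit (k)
# returns fewer results, reducing the chance of insufficient hits.
search_params = {"metric_type": "L2", "params": {"nprobe": 128}}

results = collection.search(
    data=[query_vector],     # hypothetical query vector
    anns_field="embedding",  # hypothetical vector field name
    param=search_params,
    limit=10,                # k: keep it modest relative to nlist
)
```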

What is the maximum vector dimension supported in Milvus?

Milvus can manage vectors with up to 32,768 dimensions by default. You can increase the value of Proxy.maxDimension to allow a vector of a larger dimension; a field schema at the default ceiling is sketched below.
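For illustration, a sketch of a vector field declared at that default ceiling; the field name is hypothetical:

```python
from pymilvus import DataType, FieldSchema

# dim must not exceed Proxy.maxDimension (32,768 by default)
vec_field = FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=32768)
```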

-

Does Milvus support Apple's M1 CPU?

The current Milvus release does not support Apple's M1 CPU.

+

Does Milvus support Apple's M1 CPU?

The current Milvus release does not directly support Apple's M1 CPU. Since Milvus 2.3, Docker images for the ARM64 architecture have been provided.

What data types does Milvus support on the primary key field?

In the current release, Milvus supports both INT64 and string.

-

Is Milvus scalable?

Yes. You can deploy a Milvus cluster with multiple nodes via Helm Chart on Kubernetes. Refer to the Scale Guide for more instructions.

+

Is Milvus scalable?

Yes. You can deploy a Milvus cluster with multiple nodes via Helm Chart on Kubernetes. Refer to the Scale Guide for more instructions.

Is a query performed in memory? What are incremental data and historical data?

Yes. When a query request comes, Milvus loads both incremental data and historical data into memory to search. Incremental data is in growing segments, which are buffered in memory before they reach the threshold to be persisted in the storage engine, while historical data is from sealed segments that are stored in object storage. Incremental data and historical data together constitute the whole dataset to search.

Yes. For queries on the same collection, Milvus concurrently searches the incremental and historical data. However, queries on different collections are conducted in series. Whereas historical data can be an extremely huge dataset, searches on it are relatively more time-consuming and essentially performed in series.

Why does data in MinIO remain after the corresponding collection is dropped?

Data in MinIO is designed to remain for a certain period of time for the convenience of data rollback.

    diff --git a/localization/v2.4.x/site/ko/getstarted/install_SDKs/install-java.json b/localization/v2.4.x/site/ko/getstarted/install_SDKs/install-java.json index f2d304337..3e3bc0e0f 100644 --- a/localization/v2.4.x/site/ko/getstarted/install_SDKs/install-java.json +++ b/localization/v2.4.x/site/ko/getstarted/install_SDKs/install-java.json @@ -1 +1 @@ -{"codeList":["\n io.milvus\n milvus-sdk-java\n 2.4.3\n\n","implementation 'io.milvus:milvus-sdk-java:2.4.3'\n"],"headingContent":"","anchorList":[{"label":"Milvus Java SDK 설치","href":"Install-Milvus-Java-SDK","type":1,"isActive":false},{"label":"요구 사항","href":"Requirement","type":2,"isActive":false},{"label":"Milvus Java SDK 설치","href":"Install-Milvus-Java-SDK","type":2,"isActive":false},{"label":"다음 단계","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["\n io.milvus\n milvus-sdk-java\n 2.4.5\n\n","implementation 'io.milvus:milvus-sdk-java:2.4.5'\n"],"headingContent":"Install Milvus Java SDK","anchorList":[{"label":"Milvus Java SDK 설치","href":"Install-Milvus-Java-SDK","type":1,"isActive":false},{"label":"요구 사항","href":"Requirement","type":2,"isActive":false},{"label":"Milvus Java SDK 설치","href":"Install-Milvus-Java-SDK","type":2,"isActive":false},{"label":"다음 단계","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/getstarted/install_SDKs/install-java.md b/localization/v2.4.x/site/ko/getstarted/install_SDKs/install-java.md index 5a3f0d630..12d0f1cfa 100644 --- a/localization/v2.4.x/site/ko/getstarted/install_SDKs/install-java.md +++ b/localization/v2.4.x/site/ko/getstarted/install_SDKs/install-java.md @@ -63,13 +63,13 @@ title: Milvus Java SDK 설치
    <dependency>
         <groupId>io.milvus</groupId>
         <artifactId>milvus-sdk-java</artifactId>
    -    <version>2.4.3</version>
    +    <version>2.4.5</version>
     </dependency>
     
    • Gradle/Grails
    -
    implementation 'io.milvus:milvus-sdk-java:2.4.3'
    +
    implementation 'io.milvus:milvus-sdk-java:2.4.5'
     

What's next

PyMilvus is available on the Python Package Index.

It is recommended to install a PyMilvus version that matches the version of the Milvus server you installed. For more information, refer to the Release Notes.
    -
    $ python3 -m pip install pymilvus==2.4.5
    +
    $ python3 -m pip install pymilvus==2.4.8
     

Verify the installation

    Open In Colab

    +

    Open In Colab +GitHub Repository

Vectors, the output data format of neural network models, can effectively encode information and play a pivotal role in AI applications such as knowledge bases, semantic search, Retrieval Augmented Generation (RAG), and more.

Milvus is an open-source vector database that suits AI applications of every size, from running a demo chatbot in a Jupyter notebook to building web-scale search that serves billions of users. In this guide, we walk you through how to set up Milvus locally within minutes and use the Python client library to generate, store, and search vectors.

Install Milvus

    data: ["[{'id': 2, 'distance': 0.5859944820404053, 'entity': {'text': 'Born in Maida Vale, London, Turing was raised in southern England.', 'subject': 'history'}}, {'id': 1, 'distance': 0.5118255615234375, 'entity': {'text': 'Alan Turing was the first person to conduct substantial research in AI.', 'subject': 'history'}}]"] , extra_info: {'cost': 0}
     
-

The output is a list of results, each mapping to a vector search query. Each query contains a list of results, where each result contains the entity primary key, the distance to the query vector, and the entity details with the specified output_fields.

+

The output is a list of results, each mapping to a vector search query. Each query contains a list of results, where each result contains the entity primary key, the distance to the query vector, and the entity details with the specified output_fields. You can walk that structure as sketched below.
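A small sketch of iterating over that structure; it assumes the res object returned by client.search() above:

```python
# Each element of `res` corresponds to one query vector;
# each hit carries the primary key, distance, and requested output fields.
for hits in res:
    for hit in hits:
        print(hit["id"], hit["distance"], hit["entity"]["text"])
```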

Vector search with metadata filtering

You can also conduct vector search while considering the values of the metadata (called "scalar" fields in Milvus, as scalar refers to non-vector data). This is done with a filter expression specifying certain criteria. Let's see how to search and filter with the subject field in the following example.

+

You can also conduct vector search while considering the values of the metadata (called "scalar" fields in Milvus, since scalar refers to non-vector data). This is done with a filter expression specifying certain criteria. Let's see how to search and filter with the subject field in the following example.

    # Insert more docs in another subject.
     docs = [
         "Machine learning has been used for drug design.",
    @@ -299,8 +301,8 @@ res = client.search(
     

By default, scalar fields are not indexed. If you need to perform metadata-filtered searches on large datasets, consider using a fixed schema and turning on indexing to improve search performance, as sketched below.
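A hedged sketch of turning on a scalar index with MilvusClient; the collection and field names are assumptions carried over from the examples above:

```python
# Build an inverted index on the scalar field used in filter expressions.
index_params = client.prepare_index_params()
index_params.add_index(field_name="subject", index_type="INVERTED")
client.create_index(collection_name="demo_collection", index_params=index_params)
```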

In addition to vector search, you can also perform other types of searches:

-

Query

A query() is an operation that retrieves all entities matching a criterion, such as a filter expression or matching some IDs.

-

For example, retrieving all entities whose scalar field has a particular value:

+

Query

A query() is an operation that retrieves all entities matching a criterion, such as a filter expression or matching some IDs.

+

For example, retrieving all entities whose scalar field has a particular value:

    res = client.query(
         collection_name="demo_collection",
         filter="subject == 'history'",
    diff --git a/localization/v2.4.x/site/ko/getstarted/run-milvus-docker/install_standalone-docker-compose.json b/localization/v2.4.x/site/ko/getstarted/run-milvus-docker/install_standalone-docker-compose.json
    index 6c47ed7e7..b1e8d8fed 100644
    --- a/localization/v2.4.x/site/ko/getstarted/run-milvus-docker/install_standalone-docker-compose.json
    +++ b/localization/v2.4.x/site/ko/getstarted/run-milvus-docker/install_standalone-docker-compose.json
    @@ -1 +1 @@
    -{"codeList":["# Download the configuration file\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml\n\n# Start Milvus\n$ sudo docker compose up -d\n\nCreating milvus-etcd  ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker compose ps\n\n      Name                     Command                  State                            Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd         etcd -advertise-client-url ...   Up             2379/tcp, 2380/tcp\nmilvus-minio        /usr/bin/docker-entrypoint ...   Up (healthy)   9000/tcp\nmilvus-standalone   /tini -- milvus run standalone   Up             0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","# Stop Milvus\n$ sudo docker compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n"],"headingContent":"","anchorList":[{"label":"Docker Compose로 Milvus 실행하기","href":"Run-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"전제 조건","href":"Prerequisites","type":2,"isActive":false},{"label":"Milvus 설치","href":"Install-Milvus","type":2,"isActive":false},{"label":"다음 단계","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["# Download the configuration file\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml\n\n# Start Milvus\n$ sudo docker-compose up -d\n\nCreating milvus-etcd  ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker-compose ps\n\n      Name                     Command                  State                            Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd         etcd -advertise-client-url ...   Up             2379/tcp, 2380/tcp\nmilvus-minio        /usr/bin/docker-entrypoint ...   Up (healthy)   9000/tcp\nmilvus-standalone   /tini -- milvus run standalone   Up             0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","# Stop Milvus\n$ sudo docker-compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n"],"headingContent":"Run Milvus with Docker Compose","anchorList":[{"label":"Docker Compose로 Milvus 실행하기","href":"Run-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"전제 조건","href":"Prerequisites","type":2,"isActive":false},{"label":"Milvus 설치","href":"Install-Milvus","type":2,"isActive":false},{"label":"다음 단계","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/ko/getstarted/run-milvus-docker/install_standalone-docker-compose.md b/localization/v2.4.x/site/ko/getstarted/run-milvus-docker/install_standalone-docker-compose.md
    index 08b425a4a..991ad0fb6 100644
    --- a/localization/v2.4.x/site/ko/getstarted/run-milvus-docker/install_standalone-docker-compose.md
    +++ b/localization/v2.4.x/site/ko/getstarted/run-milvus-docker/install_standalone-docker-compose.md
    @@ -3,7 +3,7 @@ id: install_standalone-docker-compose.md
     label: Docker Compose
     related_key: Docker Compose
     summary: Docker Compose를 사용하여 Milvus를 독립형으로 설치하는 방법을 알아보세요.
    -title: 도커 컴포즈로 Milvus 실행하기
    +title: Docker Compose로 Milvus 실행하기
     ---
     

    Docker Compose로 Milvus 실행하기

    Milvus는 Milvus 리포지토리에 Docker Compose 구성 파일을 제공합니다. Docker Compose를 사용하여 Milvus를 설치하려면 다음을 실행하세요.

    # Download the configuration file
    -$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml
    +$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml
     
     # Start Milvus
    -$ sudo docker compose up -d
    +$ sudo docker-compose up -d
     
     Creating milvus-etcd  ... done
     Creating milvus-minio ... done
    @@ -74,14 +74,14 @@ Creating milvus-standalone ... done
     
     

    Milvus를 시작한 후

  • milvus-standalone, milvus-minio, milvus-etcd라는 이름의 컨테이너가 가동됩니다.
  • milvus-etcd 컨테이너는 호스트에 포트를 노출하지 않으며 데이터를 현재 폴더의 volumes/etcd에 매핑합니다.
  • milvus-minio 컨테이너는 기본 인증 자격 증명을 사용하여 포트 9090과 9091을 로컬로 제공하고 해당 데이터를 현재 폴더의 volumes/minio에 매핑합니다.
  • milvus-standalone 컨테이너는 기본 설정으로 포트 19530을 로컬로 서비스하고 데이터를 현재 폴더의 volumes/milvus에 매핑합니다.

      다음 명령을 사용하여 컨테이너가 실행 중인지 확인할 수 있습니다:

-$ sudo docker compose ps
+$ sudo docker-compose ps
       
             Name                     Command                  State                            Ports
       --------------------------------------------------------------------------------------------------------------------
      @@ -91,7 +91,7 @@ milvus-standalone   /tini -- milvus run standalone   Up             0.0.0.0:1953
       

      다음과 같이 이 컨테이너를 중지하고 삭제할 수 있습니다.

      # Stop Milvus
      -$ sudo docker compose down
      +$ sudo docker-compose down
       
       # Delete service data
       $ sudo rm -rf volumes
      @@ -125,13 +125,13 @@ $ sudo rm
       
  • 헬름 차트를 사용하여 Milvus 업그레이드.

  • Milvus 클러스터 확장하기.

-  • Milvu 클러스터를 클라우드에 배포하세요:
+  • Milvus 클러스터를 클라우드에 배포하세요:

  • Milvus 데이터 백업을 위한 오픈 소스 도구인 Milvus Backup을 살펴보세요.

  • Milvus 디버깅 및 동적 구성 업데이트를 위한 오픈 소스 도구인 Birdwatcher를 살펴보세요.

  • 직관적인 Milvus 관리를 위한 오픈 소스 GUI 도구인 Attu를 살펴보세요.

  • Prometheus로 Milvus 모니터링.

  • diff --git a/localization/v2.4.x/site/ko/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json b/localization/v2.4.x/site/ko/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json index 13bb59da4..3483d08ba 100644 --- a/localization/v2.4.x/site/ko/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json +++ b/localization/v2.4.x/site/ko/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json @@ -1 +1 @@ -{"codeList":["$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml\n","...\nstandalone:\n ...\n deploy:\n resources:\n reservations:\n devices:\n - driver: nvidia\n capabilities: [\"gpu\"]\n device_ids: [\"0\"]\n...\n","...\nstandalone:\n ...\n deploy:\n resources:\n reservations:\n devices:\n - driver: nvidia\n capabilities: [\"gpu\"]\n device_ids: ['0', '1']\n...\n","$ sudo docker compose up -d\n\nCreating milvus-etcd ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker compose ps\n\n Name Command State Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd etcd -advertise-client-url ... Up 2379/tcp, 2380/tcp\nmilvus-minio /usr/bin/docker-entrypoint ... Up (healthy) 9000/tcp\nmilvus-standalone /tini -- milvus run standalone Up 0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","$ CUDA_VISIBLE_DEVICES=0 ./milvus run standalone\n","$ CUDA_VISIBLE_DEVICES=0,1 ./milvus run standalone\n","# Stop Milvus\n$ sudo docker compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n","docker cp :/milvus/configs/milvus.yaml milvus.yaml\n","vim milvus.yaml\n","...\ngpu:\n initMemSize: 0\n maxMemSize: 0\n...\n","docker cp milvus.yaml :/milvus/configs/milvus.yaml\n","docker stop \ndocker start \n"],"headingContent":"","anchorList":[{"label":"도커 컴포즈를 사용하여 GPU 지원으로 Milvus 실행하기","href":"Run-Milvus-with-GPU-Support-Using-Docker-Compose","type":1,"isActive":false},{"label":"전제 조건","href":"Prerequisites","type":2,"isActive":false},{"label":"Milvus 설치하기","href":"Install-Milvus","type":2,"isActive":false},{"label":"메모리 풀 구성","href":"Configure-memory-pool","type":2,"isActive":false},{"label":"다음 단계","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml\n","...\nstandalone:\n ...\n deploy:\n resources:\n reservations:\n devices:\n - driver: nvidia\n capabilities: [\"gpu\"]\n device_ids: [\"0\"]\n...\n","...\nstandalone:\n ...\n deploy:\n resources:\n reservations:\n devices:\n - driver: nvidia\n capabilities: [\"gpu\"]\n device_ids: ['0', '1']\n...\n","$ sudo docker compose up -d\n\nCreating milvus-etcd ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker compose ps\n\n Name Command State Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd etcd -advertise-client-url ... Up 2379/tcp, 2380/tcp\nmilvus-minio /usr/bin/docker-entrypoint ... 
Up (healthy) 9000/tcp\nmilvus-standalone /tini -- milvus run standalone Up 0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","$ CUDA_VISIBLE_DEVICES=0 ./milvus run standalone\n","$ CUDA_VISIBLE_DEVICES=0,1 ./milvus run standalone\n","# Stop Milvus\n$ sudo docker compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n","docker cp :/milvus/configs/milvus.yaml milvus.yaml\n","vim milvus.yaml\n","...\ngpu:\n initMemSize: 0\n maxMemSize: 0\n...\n","docker cp milvus.yaml :/milvus/configs/milvus.yaml\n","docker stop \ndocker start \n"],"headingContent":"Run Milvus with GPU Support Using Docker Compose","anchorList":[{"label":"도커 컴포즈를 사용하여 GPU 지원으로 Milvus 실행하기","href":"Run-Milvus-with-GPU-Support-Using-Docker-Compose","type":1,"isActive":false},{"label":"전제 조건","href":"Prerequisites","type":2,"isActive":false},{"label":"Milvus 설치하기","href":"Install-Milvus","type":2,"isActive":false},{"label":"메모리 풀 구성","href":"Configure-memory-pool","type":2,"isActive":false},{"label":"다음 단계","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md b/localization/v2.4.x/site/ko/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md index bb2fdefcf..2ff7a446b 100644 --- a/localization/v2.4.x/site/ko/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md +++ b/localization/v2.4.x/site/ko/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md @@ -59,8 +59,8 @@ title: 도커 컴포즈를 사용하여 GPU 지원으로 Milvus 실행하기 >

    Docker Compose를 사용하여 GPU를 지원하는 Milvus를 설치하려면 다음 단계를 따르세요.

1. YAML 파일 다운로드 및 구성

milvus-standalone-docker-compose-gpu.yml 파일을 내려받아 수동으로, 또는 다음 명령을 사용하여 docker-compose.yml로 저장합니다.

-$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml
+$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml
     

    YAML 파일에서 독립형 서비스의 환경 변수를 다음과 같이 몇 가지 변경해야 합니다:

      @@ -104,7 +104,7 @@ Creating milvus-standalone ... done

      Milvus를 시작한 후

  • milvus-standalone, milvus-minio, milvus-etcd라는 이름의 컨테이너가 가동됩니다.
  • milvus-etcd 컨테이너는 호스트에 포트를 노출하지 않으며 데이터를 현재 폴더의 volumes/etcd에 매핑합니다.
  • milvus-minio 컨테이너는 기본 인증 자격 증명을 사용하여 포트 9090과 9091을 로컬로 제공하고 해당 데이터를 현재 폴더의 volumes/minio에 매핑합니다.
  • milvus-standalone 컨테이너는 기본 설정으로 포트 19530을 로컬로 서비스하고 데이터를 현재 폴더의 volumes/milvus에 매핑합니다.

@@ -214,7 +214,7 @@ docker start <milvus_container_id>
          • 구글 클라우드
          • 마이크로소프트 애저
  • Milvus 데이터 백업을 위한 오픈 소스 도구인 Milvus Backup을 살펴보세요.

        • Milvus 디버깅 및 동적 구성 업데이트를 위한 오픈 소스 도구인 Birdwatcher를 살펴보세요.

        • 직관적인 Milvus 관리를 위한 오픈 소스 GUI 도구인 Attu를 살펴보세요.

        • Prometheus로 Milvus 모니터링.

diff --git a/localization/v2.4.x/site/ko/integrations/evaluation_with_deepeval.md b/localization/v2.4.x/site/ko/integrations/evaluation_with_deepeval.md
index 5ce2328c2..84dfdd88e 100644
--- a/localization/v2.4.x/site/ko/integrations/evaluation_with_deepeval.md
+++ b/localization/v2.4.x/site/ko/integrations/evaluation_with_deepeval.md
@@ -18,7 +18,8 @@ title: DeepEval을 사용한 평가

 Open In Colab
+GitHub Repository

          이 가이드에서는 DeepEval을 사용하여 Milvus를 기반으로 구축된 검색 증강 생성(RAG) 파이프라인을 평가하는 방법을 보여드립니다.

          RAG 시스템은 검색 시스템과 생성 모델을 결합하여 주어진 프롬프트에 따라 새로운 텍스트를 생성합니다. 시스템은 먼저 Milvus를 사용하여 말뭉치에서 관련 문서를 검색한 다음, 생성 모델을 사용하여 검색된 문서를 기반으로 새 텍스트를 생성합니다.

          DeepEval은 RAG 파이프라인을 평가하는 데 도움이 되는 프레임워크입니다. 이러한 파이프라인을 구축하는 데 도움이 되는 기존 도구와 프레임워크가 있지만, 이를 평가하고 파이프라인 성능을 정량화하는 것은 어려울 수 있습니다. 이것이 바로 DeepEval이 필요한 이유입니다.
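Before the evaluation details, a minimal sketch of the retrieve-then-generate loop described above may help. The collection name and the embed/generate callables are assumptions standing in for your own embedding model and LLM; only the MilvusClient.search call mirrors the code in these guides.

from pymilvus import MilvusClient

client = MilvusClient("milvus_demo.db")  # local Milvus Lite file, name assumed

def rag_answer(question, embed, generate):
    # embed: text -> vector; generate: prompt -> text (both supplied by your stack)
    hits = client.search(
        collection_name="rag_collection",  # hypothetical collection of text chunks
        data=[embed(question)],
        limit=3,
        output_fields=["text"],
    )
    context = " ".join(hit["entity"]["text"] for hit in hits[0])
    return generate(f"Answer only from this context: {context}\nQuestion: {question}")

An evaluation framework such as DeepEval then scores the question, retrieved context, and generated answer produced by a loop like this.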

diff --git a/localization/v2.4.x/site/ko/integrations/evaluation_with_phoenix.md b/localization/v2.4.x/site/ko/integrations/evaluation_with_phoenix.md
index 8bcec5331..7081f988d 100644
--- a/localization/v2.4.x/site/ko/integrations/evaluation_with_phoenix.md
+++ b/localization/v2.4.x/site/ko/integrations/evaluation_with_phoenix.md
@@ -20,7 +20,8 @@ title: 아리제 피닉스를 사용한 평가

 Open In Colab
+GitHub Repository

이 가이드에서는 Milvus를 기반으로 구축된 검색 증강 생성(RAG) 파이프라인을 평가하기 위해 Arize Phoenix를 사용하는 방법을 설명합니다.

          RAG 시스템은 검색 시스템과 생성 모델을 결합하여 주어진 프롬프트에 따라 새로운 텍스트를 생성합니다. 시스템은 먼저 Milvus를 사용하여 말뭉치에서 관련 문서를 검색한 다음, 생성 모델을 사용하여 검색된 문서를 기반으로 새 텍스트를 생성합니다.

아리제 피닉스는 RAG 파이프라인을 평가하는 데 도움이 되는 프레임워크입니다. 이러한 파이프라인을 구축하는 데 도움이 되는 기존 도구와 프레임워크가 있지만 이를 평가하고 파이프라인의 성능을 정량화하는 것은 어려울 수 있습니다. 이것이 바로 Arize Phoenix가 필요한 이유입니다.

          @@ -359,7 +360,7 @@ OpenAIInstrumentor().instrument()

-Alt Text
+Alt Text 대체 텍스트

    import nest_asyncio
     
    diff --git a/localization/v2.4.x/site/ko/integrations/integrate_with_bentoml.json b/localization/v2.4.x/site/ko/integrations/integrate_with_bentoml.json
    index 920ccc5df..906772694 100644
    --- a/localization/v2.4.x/site/ko/integrations/integrate_with_bentoml.json
    +++ b/localization/v2.4.x/site/ko/integrations/integrate_with_bentoml.json
    @@ -1 +1 @@
    -{"codeList":["$ pip install -U pymilvus bentoml\n","import bentoml\n\nBENTO_EMBEDDING_MODEL_END_POINT = \"BENTO_EMBEDDING_MODEL_END_POINT\"\nBENTO_API_TOKEN = \"BENTO_API_TOKEN\"\n\nembedding_client = bentoml.SyncHTTPClient(\n    BENTO_EMBEDDING_MODEL_END_POINT, token=BENTO_API_TOKEN\n)\n","# naively chunk on newlines\ndef chunk_text(filename: str) -> list:\n    with open(filename, \"r\") as f:\n        text = f.read()\n    sentences = text.split(\"\\n\")\n    return sentences\n","import os\nimport requests\nimport urllib.request\n\n# set up the data source\nrepo = \"ytang07/bento_octo_milvus_RAG\"\ndirectory = \"data\"\nsave_dir = \"./city_data\"\napi_url = f\"https://api.github.com/repos/{repo}/contents/{directory}\"\n\n\nresponse = requests.get(api_url)\ndata = response.json()\n\nif not os.path.exists(save_dir):\n    os.makedirs(save_dir)\n\nfor item in data:\n    if item[\"type\"] == \"file\":\n        file_url = item[\"download_url\"]\n        file_path = os.path.join(save_dir, item[\"name\"])\n        urllib.request.urlretrieve(file_url, file_path)\n","# please upload your data directory under this file's folder\ncities = os.listdir(\"city_data\")\n# store chunked text for each of the cities in a list of dicts\ncity_chunks = []\nfor city in cities:\n    chunked = chunk_text(f\"city_data/{city}\")\n    cleaned = []\n    for chunk in chunked:\n        if len(chunk) > 7:\n            cleaned.append(chunk)\n    mapped = {\"city_name\": city.split(\".\")[0], \"chunks\": cleaned}\n    city_chunks.append(mapped)\n","def get_embeddings(texts: list) -> list:\n    if len(texts) > 25:\n        splits = [texts[x : x + 25] for x in range(0, len(texts), 25)]\n        embeddings = []\n        for split in splits:\n            embedding_split = embedding_client.encode(sentences=split)\n            embeddings += embedding_split\n        return embeddings\n    return embedding_client.encode(\n        sentences=texts,\n    )\n","entries = []\nfor city_dict in city_chunks:\n    # No need for the embeddings list if get_embeddings already returns a list of lists\n    embedding_list = get_embeddings(city_dict[\"chunks\"])  # returns a list of lists\n    # Now match texts with embeddings and city name\n    for i, embedding in enumerate(embedding_list):\n        entry = {\n            \"embedding\": embedding,\n            \"sentence\": city_dict[\"chunks\"][\n                i\n            ],  # Assume \"chunks\" has the corresponding texts for the embeddings\n            \"city\": city_dict[\"city_name\"],\n        }\n        entries.append(entry)\n    print(entries)\n","from pymilvus import MilvusClient\n\nCOLLECTION_NAME = \"Bento_Milvus_RAG\"  # random name for your collection\nDIMENSION = 384\n\n# Initialize a Milvus Lite client\nmilvus_client = MilvusClient(\"milvus_demo.db\")\n","from pymilvus import connections\n\nconnections.connect(uri=\"milvus_demo.db\")\n","from pymilvus import MilvusClient, DataType, Collection\n\n# Create schema\nschema = MilvusClient.create_schema(\n    auto_id=True,\n    enable_dynamic_field=True,\n)\n\n# 3.2. 
Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"embedding\", datatype=DataType.FLOAT_VECTOR, dim=DIMENSION)\n","# prepare index parameters\nindex_params = milvus_client.prepare_index_params()\n\n# add index\nindex_params.add_index(\n    field_name=\"embedding\",\n    index_type=\"AUTOINDEX\",  # use autoindex instead of other complex indexing method\n    metric_type=\"COSINE\",  # L2, COSINE, or IP\n)\n\n# create collection\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n    collection_name=COLLECTION_NAME, schema=schema, index_params=index_params\n)\n\n# Outside the loop, now you upsert all the entries at once\nmilvus_client.insert(collection_name=COLLECTION_NAME, data=entries)\n","BENTO_LLM_END_POINT = \"BENTO_LLM_END_POINT\"\n\nllm_client = bentoml.SyncHTTPClient(BENTO_LLM_END_POINT, token=BENTO_API_TOKEN)\n","def dorag(question: str, context: str):\n\n    prompt = (\n        f\"You are a helpful assistant. The user has a question. Answer the user question based only on the context: {context}. \\n\"\n        f\"The user question is {question}\"\n    )\n\n    results = llm_client.generate(\n        max_tokens=1024,\n        prompt=prompt,\n    )\n\n    res = \"\"\n    for result in results:\n        res += result\n\n    return res\n","question = \"What state is Cambridge in?\"\n\n\ndef ask_a_question(question):\n    embeddings = get_embeddings([question])\n    res = milvus_client.search(\n        collection_name=COLLECTION_NAME,\n        data=embeddings,  # search for the one (1) embedding returned as a list of lists\n        anns_field=\"embedding\",  # Search across embeddings\n        limit=5,  # get me the top 5 results\n        output_fields=[\"sentence\"],  # get the sentence/chunk and city\n    )\n\n    sentences = []\n    for hits in res:\n        for hit in hits:\n            print(hit)\n            sentences.append(hit[\"entity\"][\"sentence\"])\n    context = \". \".join(sentences)\n    return context\n\n\ncontext = ask_a_question(question=question)\nprint(context)\n","print(dorag(question=question, context=context))\n"],"headingContent":"","anchorList":[{"label":"Milvus와 BentoML을 사용한 검색 증강 세대(RAG)","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-BentoML","type":1,"isActive":false},{"label":"소개","href":"Introduction","type":2,"isActive":false},{"label":"시작하기 전에","href":"Before-you-begin","type":2,"isActive":false},{"label":"BentoML/BentoCloud로 임베딩 서비스하기","href":"Serving-Embeddings-with-BentoMLBentoCloud","type":2,"isActive":false},{"label":"검색을 위해 벡터 데이터베이스에 데이터 삽입하기","href":"Inserting-Data-into-a-Vector-Database-for-Retrieval","type":2,"isActive":false},{"label":"Milvus Lite 컬렉션 생성하기","href":"Creating-Your-Milvus-Lite-Collection","type":2,"isActive":false},{"label":"RAG용 LLM 설정","href":"Set-up-Your-LLM-for-RAG","type":2,"isActive":false},{"label":"LLM 지침","href":"LLM-Instructions","type":2,"isActive":false},{"label":"RAG 예제","href":"A-RAG-Example","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ pip install -U pymilvus bentoml\n","import bentoml\n\nBENTO_EMBEDDING_MODEL_END_POINT = \"BENTO_EMBEDDING_MODEL_END_POINT\"\nBENTO_API_TOKEN = \"BENTO_API_TOKEN\"\n\nembedding_client = bentoml.SyncHTTPClient(\n    BENTO_EMBEDDING_MODEL_END_POINT, token=BENTO_API_TOKEN\n)\n","# naively chunk on newlines\ndef chunk_text(filename: str) -> list:\n    with open(filename, \"r\") as f:\n        text = f.read()\n    sentences = text.split(\"\\n\")\n    return sentences\n","import os\nimport requests\nimport urllib.request\n\n# set up the data source\nrepo = \"ytang07/bento_octo_milvus_RAG\"\ndirectory = \"data\"\nsave_dir = \"./city_data\"\napi_url = f\"https://api.github.com/repos/{repo}/contents/{directory}\"\n\n\nresponse = requests.get(api_url)\ndata = response.json()\n\nif not os.path.exists(save_dir):\n    os.makedirs(save_dir)\n\nfor item in data:\n    if item[\"type\"] == \"file\":\n        file_url = item[\"download_url\"]\n        file_path = os.path.join(save_dir, item[\"name\"])\n        urllib.request.urlretrieve(file_url, file_path)\n","# please upload your data directory under this file's folder\ncities = os.listdir(\"city_data\")\n# store chunked text for each of the cities in a list of dicts\ncity_chunks = []\nfor city in cities:\n    chunked = chunk_text(f\"city_data/{city}\")\n    cleaned = []\n    for chunk in chunked:\n        if len(chunk) > 7:\n            cleaned.append(chunk)\n    mapped = {\"city_name\": city.split(\".\")[0], \"chunks\": cleaned}\n    city_chunks.append(mapped)\n","def get_embeddings(texts: list) -> list:\n    if len(texts) > 25:\n        splits = [texts[x : x + 25] for x in range(0, len(texts), 25)]\n        embeddings = []\n        for split in splits:\n            embedding_split = embedding_client.encode(sentences=split)\n            embeddings += embedding_split\n        return embeddings\n    return embedding_client.encode(\n        sentences=texts,\n    )\n","entries = []\nfor city_dict in city_chunks:\n    # No need for the embeddings list if get_embeddings already returns a list of lists\n    embedding_list = get_embeddings(city_dict[\"chunks\"])  # returns a list of lists\n    # Now match texts with embeddings and city name\n    for i, embedding in enumerate(embedding_list):\n        entry = {\n            \"embedding\": embedding,\n            \"sentence\": city_dict[\"chunks\"][\n                i\n            ],  # Assume \"chunks\" has the corresponding texts for the embeddings\n            \"city\": city_dict[\"city_name\"],\n        }\n        entries.append(entry)\n    print(entries)\n","from pymilvus import MilvusClient\n\nCOLLECTION_NAME = \"Bento_Milvus_RAG\"  # random name for your collection\nDIMENSION = 384\n\n# Initialize a Milvus Lite client\nmilvus_client = MilvusClient(\"milvus_demo.db\")\n","from pymilvus import connections\n\nconnections.connect(uri=\"milvus_demo.db\")\n","from pymilvus import MilvusClient, DataType, Collection\n\n# Create schema\nschema = MilvusClient.create_schema(\n    auto_id=True,\n    enable_dynamic_field=True,\n)\n\n# 3.2. 
Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"embedding\", datatype=DataType.FLOAT_VECTOR, dim=DIMENSION)\n","# prepare index parameters\nindex_params = milvus_client.prepare_index_params()\n\n# add index\nindex_params.add_index(\n    field_name=\"embedding\",\n    index_type=\"AUTOINDEX\",  # use autoindex instead of other complex indexing method\n    metric_type=\"COSINE\",  # L2, COSINE, or IP\n)\n\n# create collection\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n    collection_name=COLLECTION_NAME, schema=schema, index_params=index_params\n)\n\n# Outside the loop, now you upsert all the entries at once\nmilvus_client.insert(collection_name=COLLECTION_NAME, data=entries)\n","BENTO_LLM_END_POINT = \"BENTO_LLM_END_POINT\"\n\nllm_client = bentoml.SyncHTTPClient(BENTO_LLM_END_POINT, token=BENTO_API_TOKEN)\n","def dorag(question: str, context: str):\n\n    prompt = (\n        f\"You are a helpful assistant. The user has a question. Answer the user question based only on the context: {context}. \\n\"\n        f\"The user question is {question}\"\n    )\n\n    results = llm_client.generate(\n        max_tokens=1024,\n        prompt=prompt,\n    )\n\n    res = \"\"\n    for result in results:\n        res += result\n\n    return res\n","question = \"What state is Cambridge in?\"\n\n\ndef ask_a_question(question):\n    embeddings = get_embeddings([question])\n    res = milvus_client.search(\n        collection_name=COLLECTION_NAME,\n        data=embeddings,  # search for the one (1) embedding returned as a list of lists\n        anns_field=\"embedding\",  # Search across embeddings\n        limit=5,  # get me the top 5 results\n        output_fields=[\"sentence\"],  # get the sentence/chunk and city\n    )\n\n    sentences = []\n    for hits in res:\n        for hit in hits:\n            print(hit)\n            sentences.append(hit[\"entity\"][\"sentence\"])\n    context = \". \".join(sentences)\n    return context\n\n\ncontext = ask_a_question(question=question)\nprint(context)\n","print(dorag(question=question, context=context))\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and BentoML","anchorList":[{"label":"Milvus와 BentoML을 사용한 검색 증강 세대(RAG)","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-BentoML","type":1,"isActive":false},{"label":"소개","href":"Introduction","type":2,"isActive":false},{"label":"시작하기 전에","href":"Before-you-begin","type":2,"isActive":false},{"label":"BentoML/BentoCloud로 임베딩 서비스하기","href":"Serving-Embeddings-with-BentoMLBentoCloud","type":2,"isActive":false},{"label":"검색을 위해 벡터 데이터베이스에 데이터 삽입하기","href":"Inserting-Data-into-a-Vector-Database-for-Retrieval","type":2,"isActive":false},{"label":"Milvus Lite 컬렉션 생성하기","href":"Creating-Your-Milvus-Lite-Collection","type":2,"isActive":false},{"label":"RAG용 LLM 설정","href":"Set-up-Your-LLM-for-RAG","type":2,"isActive":false},{"label":"LLM 지침","href":"LLM-Instructions","type":2,"isActive":false},{"label":"RAG 예제","href":"A-RAG-Example","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/ko/integrations/integrate_with_bentoml.md b/localization/v2.4.x/site/ko/integrations/integrate_with_bentoml.md
    index 0c6aff23f..95c728068 100644
    --- a/localization/v2.4.x/site/ko/integrations/integrate_with_bentoml.md
    +++ b/localization/v2.4.x/site/ko/integrations/integrate_with_bentoml.md
    @@ -3,7 +3,7 @@ id: integrate_with_bentoml.md
     summary: >-
       이 가이드에서는 Milvus 벡터 데이터베이스와 함께 BentoCloud에서 오픈 소스 임베딩 모델과 대규모 언어 모델을 사용하여 검색 증강
       생성(RAG) 애플리케이션을 구축하는 방법을 설명합니다.
    -title: '검색 증강 세대(RAG), Milvus 및 BentoML 사용'
    +title: Milvus와 BentoML을 사용한 검색 증강 세대(RAG)
     ---
     

    Milvus와 BentoML을 사용한 검색 증강 세대(RAG)

 Open In Colab
+GitHub Repository

    소개

 Open In Colab
+GitHub Repository

    이 가이드에서는 CAMEL과 Milvus를 사용하여 검색 증강 생성(RAG) 시스템을 구축하는 방법을 설명합니다.

    RAG 시스템은 검색 시스템과 생성 모델을 결합하여 주어진 프롬프트에 따라 새 텍스트를 생성합니다. 이 시스템은 먼저 Milvus를 사용하여 말뭉치에서 관련 문서를 검색한 다음 생성 모델을 사용하여 검색된 문서를 기반으로 새 텍스트를 생성합니다.

    CAMEL은 다중 에이전트 프레임워크입니다. Milvus는 세계에서 가장 진보된 오픈 소스 벡터 데이터베이스로, 임베딩 유사도 검색 및 AI 애플리케이션을 강화하기 위해 구축되었습니다.

    diff --git a/localization/v2.4.x/site/ko/integrations/integrate_with_dspy.json b/localization/v2.4.x/site/ko/integrations/integrate_with_dspy.json index b420fd00f..97cb97768 100644 --- a/localization/v2.4.x/site/ko/integrations/integrate_with_dspy.json +++ b/localization/v2.4.x/site/ko/integrations/integrate_with_dspy.json @@ -1 +1 @@ -{"codeList":["$ pip install \"dspy-ai[milvus]\"\n$ pip install -U pymilvus\n","from dspy.datasets import HotPotQA\n\n# Load the dataset.\ndataset = HotPotQA(\n train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0\n)\n\n# Tell DSPy that the 'question' field is the input. Any other fields are labels and/or metadata.\ntrainset = [x.with_inputs(\"question\") for x in dataset.train]\ndevset = [x.with_inputs(\"question\") for x in dataset.dev]\n","import requests\nimport os\n\nos.environ[\"OPENAI_API_KEY\"] = \"\"\nMILVUS_URI = \"example.db\"\nMILVUS_TOKEN = \"\"\n\nfrom pymilvus import MilvusClient, DataType, Collection\nfrom dspy.retrieve.milvus_rm import openai_embedding_function\n\nclient = MilvusClient(uri=MILVUS_URI, token=MILVUS_TOKEN)\n\nif \"dspy_example\" not in client.list_collections():\n client.create_collection(\n collection_name=\"dspy_example\",\n overwrite=True,\n dimension=1536,\n primary_field_name=\"id\",\n vector_field_name=\"embedding\",\n id_type=\"int\",\n metric_type=\"IP\",\n max_length=65535,\n enable_dynamic=True,\n )\ntext = requests.get(\n \"https://raw.githubusercontent.com/wxywb/dspy_dataset_sample/master/sample_data.txt\"\n).text\n\nfor idx, passage in enumerate(text.split(\"\\n\")):\n if len(passage) == 0:\n continue\n client.insert(\n collection_name=\"dspy_example\",\n data=[\n {\n \"id\": idx,\n \"embedding\": openai_embedding_function(passage)[0],\n \"text\": passage,\n }\n ],\n )\n","from dspy.retrieve.milvus_rm import MilvusRM\nimport dspy\n\nretriever_model = MilvusRM(\n collection_name=\"dspy_example\",\n uri=MILVUS_URI,\n token=MILVUS_TOKEN, # ignore this if no token is required for Milvus connection\n embedding_function=openai_embedding_function,\n)\nturbo = dspy.OpenAI(model=\"gpt-3.5-turbo\")\ndspy.settings.configure(lm=turbo)\n","class GenerateAnswer(dspy.Signature):\n \"\"\"Answer questions with short factoid answers.\"\"\"\n\n context = dspy.InputField(desc=\"may contain relevant facts\")\n question = dspy.InputField()\n answer = dspy.OutputField(desc=\"often between 1 and 5 words\")\n","class RAG(dspy.Module):\n def __init__(self, rm):\n super().__init__()\n self.retrieve = rm\n\n # This signature indicates the task imposed on the COT module.\n self.generate_answer = dspy.ChainOfThought(GenerateAnswer)\n\n def forward(self, question):\n # Use milvus_rm to retrieve context for the question.\n context = self.retrieve(question).passages\n # COT module takes \"context, query\" and output \"answer\".\n prediction = self.generate_answer(context=context, question=question)\n return dspy.Prediction(\n context=[item.long_text for item in context], answer=prediction.answer\n )\n","rag = RAG(retriever_model)\nprint(rag(\"who write At My Window\").answer)\n","from dspy.evaluate.evaluate import Evaluate\nfrom dspy.datasets import HotPotQA\n\nevaluate_on_hotpotqa = Evaluate(\n devset=devset, num_threads=1, display_progress=False, display_table=5\n)\n\nmetric = dspy.evaluate.answer_exact_match\nscore = evaluate_on_hotpotqa(rag, metric=metric)\nprint(\"rag:\", score)\n","from dspy.teleprompt import BootstrapFewShot\n\n# Validation logic: check that the predicted answer is correct.# Also check that the 
retrieved context does contain that answer.\n\n\ndef validate_context_and_answer(example, pred, trace=None):\n answer_EM = dspy.evaluate.answer_exact_match(example, pred)\n answer_PM = dspy.evaluate.answer_passage_match(example, pred)\n return answer_EM and answer_PM\n\n\n# Set up a basic teleprompter, which will compile our RAG program.\nteleprompter = BootstrapFewShot(metric=validate_context_and_answer)\n\n# Compile!\ncompiled_rag = teleprompter.compile(rag, trainset=trainset)\n\n# Now compiled_rag is optimized and ready to answer your new question!\n# Now, let’s evaluate the compiled RAG program.\nscore = evaluate_on_hotpotqa(compiled_rag, metric=metric)\nprint(score)\nprint(\"compile_rag:\", score)\n"],"headingContent":"","anchorList":[{"label":"Milvus와 DSPy 통합","href":"Integrate-Milvus-with-DSPy","type":1,"isActive":false},{"label":"DSPy란?","href":"What-is-DSPy","type":2,"isActive":false},{"label":"DSPy 사용의 이점","href":"Benefits-of-using-DSPy","type":2,"isActive":false},{"label":"모듈","href":"Modules","type":2,"isActive":false},{"label":"왜 밀버스가 필요한가?","href":"Why-Milvus-in-DSPy","type":2,"isActive":false},{"label":"예제","href":"Examples","type":2,"isActive":false},{"label":"요약","href":"Summary","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install \"dspy-ai[milvus]\"\n$ pip install -U pymilvus\n","from dspy.datasets import HotPotQA\n\n# Load the dataset.\ndataset = HotPotQA(\n train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0\n)\n\n# Tell DSPy that the 'question' field is the input. Any other fields are labels and/or metadata.\ntrainset = [x.with_inputs(\"question\") for x in dataset.train]\ndevset = [x.with_inputs(\"question\") for x in dataset.dev]\n","import requests\nimport os\n\nos.environ[\"OPENAI_API_KEY\"] = \"\"\nMILVUS_URI = \"example.db\"\nMILVUS_TOKEN = \"\"\n\nfrom pymilvus import MilvusClient, DataType, Collection\nfrom dspy.retrieve.milvus_rm import openai_embedding_function\n\nclient = MilvusClient(uri=MILVUS_URI, token=MILVUS_TOKEN)\n\nif \"dspy_example\" not in client.list_collections():\n client.create_collection(\n collection_name=\"dspy_example\",\n overwrite=True,\n dimension=1536,\n primary_field_name=\"id\",\n vector_field_name=\"embedding\",\n id_type=\"int\",\n metric_type=\"IP\",\n max_length=65535,\n enable_dynamic=True,\n )\ntext = requests.get(\n \"https://raw.githubusercontent.com/wxywb/dspy_dataset_sample/master/sample_data.txt\"\n).text\n\nfor idx, passage in enumerate(text.split(\"\\n\")):\n if len(passage) == 0:\n continue\n client.insert(\n collection_name=\"dspy_example\",\n data=[\n {\n \"id\": idx,\n \"embedding\": openai_embedding_function(passage)[0],\n \"text\": passage,\n }\n ],\n )\n","from dspy.retrieve.milvus_rm import MilvusRM\nimport dspy\n\nretriever_model = MilvusRM(\n collection_name=\"dspy_example\",\n uri=MILVUS_URI,\n token=MILVUS_TOKEN, # ignore this if no token is required for Milvus connection\n embedding_function=openai_embedding_function,\n)\nturbo = dspy.OpenAI(model=\"gpt-3.5-turbo\")\ndspy.settings.configure(lm=turbo)\n","class GenerateAnswer(dspy.Signature):\n \"\"\"Answer questions with short factoid answers.\"\"\"\n\n context = dspy.InputField(desc=\"may contain relevant facts\")\n question = dspy.InputField()\n answer = dspy.OutputField(desc=\"often between 1 and 5 words\")\n","class RAG(dspy.Module):\n def __init__(self, rm):\n super().__init__()\n self.retrieve = rm\n\n # This signature indicates the task imposed on the COT module.\n self.generate_answer = 
dspy.ChainOfThought(GenerateAnswer)\n\n def forward(self, question):\n # Use milvus_rm to retrieve context for the question.\n context = self.retrieve(question).passages\n # COT module takes \"context, query\" and output \"answer\".\n prediction = self.generate_answer(context=context, question=question)\n return dspy.Prediction(\n context=[item.long_text for item in context], answer=prediction.answer\n )\n","rag = RAG(retriever_model)\nprint(rag(\"who write At My Window\").answer)\n","from dspy.evaluate.evaluate import Evaluate\nfrom dspy.datasets import HotPotQA\n\nevaluate_on_hotpotqa = Evaluate(\n devset=devset, num_threads=1, display_progress=False, display_table=5\n)\n\nmetric = dspy.evaluate.answer_exact_match\nscore = evaluate_on_hotpotqa(rag, metric=metric)\nprint(\"rag:\", score)\n","from dspy.teleprompt import BootstrapFewShot\n\n# Validation logic: check that the predicted answer is correct.# Also check that the retrieved context does contain that answer.\n\n\ndef validate_context_and_answer(example, pred, trace=None):\n answer_EM = dspy.evaluate.answer_exact_match(example, pred)\n answer_PM = dspy.evaluate.answer_passage_match(example, pred)\n return answer_EM and answer_PM\n\n\n# Set up a basic teleprompter, which will compile our RAG program.\nteleprompter = BootstrapFewShot(metric=validate_context_and_answer)\n\n# Compile!\ncompiled_rag = teleprompter.compile(rag, trainset=trainset)\n\n# Now compiled_rag is optimized and ready to answer your new question!\n# Now, let’s evaluate the compiled RAG program.\nscore = evaluate_on_hotpotqa(compiled_rag, metric=metric)\nprint(score)\nprint(\"compile_rag:\", score)\n"],"headingContent":"Integrate Milvus with DSPy","anchorList":[{"label":"Milvus와 DSPy 통합","href":"Integrate-Milvus-with-DSPy","type":1,"isActive":false},{"label":"DSPy란?","href":"What-is-DSPy","type":2,"isActive":false},{"label":"DSPy 사용의 이점","href":"Benefits-of-using-DSPy","type":2,"isActive":false},{"label":"모듈","href":"Modules","type":2,"isActive":false},{"label":"왜 밀버스가 필요한가?","href":"Why-Milvus-in-DSPy","type":2,"isActive":false},{"label":"예제","href":"Examples","type":2,"isActive":false},{"label":"요약","href":"Summary","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/integrations/integrate_with_dspy.md b/localization/v2.4.x/site/ko/integrations/integrate_with_dspy.md index 6037fcd34..573e48517 100644 --- a/localization/v2.4.x/site/ko/integrations/integrate_with_dspy.md +++ b/localization/v2.4.x/site/ko/integrations/integrate_with_dspy.md @@ -18,7 +18,8 @@ title: Milvus와 DSPy 통합 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

 Open In Colab
+GitHub Repository

    DSPy란?

 Open In Colab
+GitHub Repository

    이 가이드에서는 Haystack과 Milvus를 사용하여 검색 증강 생성(RAG) 시스템을 구축하는 방법을 설명합니다.

    RAG 시스템은 검색 시스템과 생성 모델을 결합하여 주어진 프롬프트에 따라 새 텍스트를 생성합니다. 이 시스템은 먼저 Milvus를 사용하여 말뭉치에서 관련 문서를 검색한 다음 생성 모델을 사용하여 검색된 문서를 기반으로 새 텍스트를 생성합니다.

    헤이스택은 대규모 언어 모델(LLM)로 사용자 지정 앱을 구축하기 위한 딥셋의 오픈 소스 Python 프레임워크입니다. Milvus는 세계에서 가장 진보된 오픈 소스 벡터 데이터베이스로, 임베딩 유사도 검색 및 AI 애플리케이션을 강화하기 위해 구축되었습니다.

    diff --git a/localization/v2.4.x/site/ko/integrations/integrate_with_hugging-face.json b/localization/v2.4.x/site/ko/integrations/integrate_with_hugging-face.json index e8161594b..2f777d46e 100644 --- a/localization/v2.4.x/site/ko/integrations/integrate_with_hugging-face.json +++ b/localization/v2.4.x/site/ko/integrations/integrate_with_hugging-face.json @@ -1 +1 @@ -{"codeList":["$ pip install --upgrade pymilvus transformers datasets torch\n","from datasets import load_dataset\n\n\nDATASET = \"squad\" # Name of dataset from HuggingFace Datasets\nINSERT_RATIO = 0.001 # Ratio of example dataset to be inserted\n\ndata = load_dataset(DATASET, split=\"validation\")\n# Generates a fixed subset. To generate a random subset, remove the seed.\ndata = data.train_test_split(test_size=INSERT_RATIO, seed=42)[\"test\"]\n# Clean up the data structure in the dataset.\ndata = data.map(\n lambda val: {\"answer\": val[\"answers\"][\"text\"][0]},\n remove_columns=[\"id\", \"answers\", \"context\"],\n)\n\n# View summary of example data\nprint(data)\n","from transformers import AutoTokenizer, AutoModel\nimport torch\n\nMODEL = (\n \"sentence-transformers/all-MiniLM-L6-v2\" # Name of model from HuggingFace Models\n)\nINFERENCE_BATCH_SIZE = 64 # Batch size of model inference\n\n# Load tokenizer & model from HuggingFace Hub\ntokenizer = AutoTokenizer.from_pretrained(MODEL)\nmodel = AutoModel.from_pretrained(MODEL)\n\n\ndef encode_text(batch):\n # Tokenize sentences\n encoded_input = tokenizer(\n batch[\"question\"], padding=True, truncation=True, return_tensors=\"pt\"\n )\n\n # Compute token embeddings\n with torch.no_grad():\n model_output = model(**encoded_input)\n\n # Perform pooling\n token_embeddings = model_output[0]\n attention_mask = encoded_input[\"attention_mask\"]\n input_mask_expanded = (\n attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n )\n sentence_embeddings = torch.sum(\n token_embeddings * input_mask_expanded, 1\n ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n\n # Normalize embeddings\n batch[\"question_embedding\"] = torch.nn.functional.normalize(\n sentence_embeddings, p=2, dim=1\n )\n return batch\n\n\ndata = data.map(encode_text, batched=True, batch_size=INFERENCE_BATCH_SIZE)\ndata_list = data.to_list()\n","from pymilvus import MilvusClient\n\n\nMILVUS_URI = \"./huggingface_milvus_test.db\" # Connection URI\nCOLLECTION_NAME = \"huggingface_test\" # Collection name\nDIMENSION = 384 # Embedding dimension depending on model\n\nmilvus_client = MilvusClient(MILVUS_URI)\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n collection_name=COLLECTION_NAME,\n dimension=DIMENSION,\n auto_id=True, # Enable auto id\n enable_dynamic_field=True, # Enable dynamic fields\n vector_field_name=\"question_embedding\", # Map vector field name and embedding column in dataset\n consistency_level=\"Strong\", # To enable search with latest data\n)\n","milvus_client.insert(collection_name=COLLECTION_NAME, data=data_list)\n","questions = {\n \"question\": [\n \"What is LGM?\",\n \"When did Massachusetts first mandate that children be educated in schools?\",\n ]\n}\n\n# Generate question embeddings\nquestion_embeddings = [v.tolist() for v in encode_text(questions)[\"question_embedding\"]]\n\n# Search across Milvus\nsearch_results = milvus_client.search(\n collection_name=COLLECTION_NAME,\n data=question_embeddings,\n limit=3, # How many search results to output\n 
output_fields=[\"answer\", \"question\"], # Include these fields in search results\n)\n\n# Print out results\nfor q, res in zip(questions[\"question\"], search_results):\n print(\"Question:\", q)\n for r in res:\n print(\n {\n \"answer\": r[\"entity\"][\"answer\"],\n \"score\": r[\"distance\"],\n \"original question\": r[\"entity\"][\"question\"],\n }\n )\n print(\"\\n\")\n"],"headingContent":"","anchorList":[{"label":"밀버스와 허깅 페이스를 사용한 질문 답변하기","href":"Question-Answering-Using-Milvus-and-Hugging-Face","type":1,"isActive":false},{"label":"시작하기 전에","href":"Before-you-begin","type":2,"isActive":false},{"label":"데이터 준비","href":"Prepare-data","type":2,"isActive":false},{"label":"데이터 삽입하기","href":"Insert-data","type":2,"isActive":false},{"label":"질문하기","href":"Ask-questions","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install --upgrade pymilvus transformers datasets torch\n","from datasets import load_dataset\n\n\nDATASET = \"squad\" # Name of dataset from HuggingFace Datasets\nINSERT_RATIO = 0.001 # Ratio of example dataset to be inserted\n\ndata = load_dataset(DATASET, split=\"validation\")\n# Generates a fixed subset. To generate a random subset, remove the seed.\ndata = data.train_test_split(test_size=INSERT_RATIO, seed=42)[\"test\"]\n# Clean up the data structure in the dataset.\ndata = data.map(\n lambda val: {\"answer\": val[\"answers\"][\"text\"][0]},\n remove_columns=[\"id\", \"answers\", \"context\"],\n)\n\n# View summary of example data\nprint(data)\n","from transformers import AutoTokenizer, AutoModel\nimport torch\n\nMODEL = (\n \"sentence-transformers/all-MiniLM-L6-v2\" # Name of model from HuggingFace Models\n)\nINFERENCE_BATCH_SIZE = 64 # Batch size of model inference\n\n# Load tokenizer & model from HuggingFace Hub\ntokenizer = AutoTokenizer.from_pretrained(MODEL)\nmodel = AutoModel.from_pretrained(MODEL)\n\n\ndef encode_text(batch):\n # Tokenize sentences\n encoded_input = tokenizer(\n batch[\"question\"], padding=True, truncation=True, return_tensors=\"pt\"\n )\n\n # Compute token embeddings\n with torch.no_grad():\n model_output = model(**encoded_input)\n\n # Perform pooling\n token_embeddings = model_output[0]\n attention_mask = encoded_input[\"attention_mask\"]\n input_mask_expanded = (\n attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n )\n sentence_embeddings = torch.sum(\n token_embeddings * input_mask_expanded, 1\n ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n\n # Normalize embeddings\n batch[\"question_embedding\"] = torch.nn.functional.normalize(\n sentence_embeddings, p=2, dim=1\n )\n return batch\n\n\ndata = data.map(encode_text, batched=True, batch_size=INFERENCE_BATCH_SIZE)\ndata_list = data.to_list()\n","from pymilvus import MilvusClient\n\n\nMILVUS_URI = \"./huggingface_milvus_test.db\" # Connection URI\nCOLLECTION_NAME = \"huggingface_test\" # Collection name\nDIMENSION = 384 # Embedding dimension depending on model\n\nmilvus_client = MilvusClient(MILVUS_URI)\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n collection_name=COLLECTION_NAME,\n dimension=DIMENSION,\n auto_id=True, # Enable auto id\n enable_dynamic_field=True, # Enable dynamic fields\n vector_field_name=\"question_embedding\", # Map vector field name and embedding column in dataset\n consistency_level=\"Strong\", # To enable search with latest data\n)\n","milvus_client.insert(collection_name=COLLECTION_NAME, 
data=data_list)\n","questions = {\n \"question\": [\n \"What is LGM?\",\n \"When did Massachusetts first mandate that children be educated in schools?\",\n ]\n}\n\n# Generate question embeddings\nquestion_embeddings = [v.tolist() for v in encode_text(questions)[\"question_embedding\"]]\n\n# Search across Milvus\nsearch_results = milvus_client.search(\n collection_name=COLLECTION_NAME,\n data=question_embeddings,\n limit=3, # How many search results to output\n output_fields=[\"answer\", \"question\"], # Include these fields in search results\n)\n\n# Print out results\nfor q, res in zip(questions[\"question\"], search_results):\n print(\"Question:\", q)\n for r in res:\n print(\n {\n \"answer\": r[\"entity\"][\"answer\"],\n \"score\": r[\"distance\"],\n \"original question\": r[\"entity\"][\"question\"],\n }\n )\n print(\"\\n\")\n"],"headingContent":"Question Answering Using Milvus and Hugging Face","anchorList":[{"label":"밀버스와 허깅 페이스를 사용한 질문 답변하기","href":"Question-Answering-Using-Milvus-and-Hugging-Face","type":1,"isActive":false},{"label":"시작하기 전에","href":"Before-you-begin","type":2,"isActive":false},{"label":"데이터 준비","href":"Prepare-data","type":2,"isActive":false},{"label":"데이터 삽입하기","href":"Insert-data","type":2,"isActive":false},{"label":"질문하기","href":"Ask-questions","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/integrations/integrate_with_hugging-face.md b/localization/v2.4.x/site/ko/integrations/integrate_with_hugging-face.md index b1c26a754..b0b1b4a3d 100644 --- a/localization/v2.4.x/site/ko/integrations/integrate_with_hugging-face.md +++ b/localization/v2.4.x/site/ko/integrations/integrate_with_hugging-face.md @@ -3,7 +3,7 @@ id: integrate_with_hugging-face.md summary: >- 이 튜토리얼에서는 데이터 처리를 위한 데이터 로더 및 임베딩 생성기로 Hugging Face를, 시맨틱 검색을 위한 벡터 데이터베이스로 Milvus를 사용하여 질문 답변 시스템을 구축하는 방법을 보여드립니다. -title: 밀버스와 포옹하는 얼굴을 사용한 질문 답변 +title: 밀버스와 허깅 페이스를 사용한 질문 답변하기 ---

    밀버스와 허깅 페이스를 사용한 질문 답변하기

 Open In Colab
+GitHub Repository

    시맨틱 검색을 기반으로 하는 질문 답변 시스템은 주어진 쿼리 질문에 대한 질문-답변 쌍의 데이터 세트에서 가장 유사한 질문을 찾는 방식으로 작동합니다. 가장 유사한 질문이 식별되면 데이터 세트의 해당 답변이 쿼리에 대한 답변으로 간주됩니다. 이 접근 방식은 의미론적 유사성 측정값을 사용하여 질문 간의 유사성을 결정하고 관련 답변을 검색합니다.

    이 튜토리얼에서는 데이터 처리를 위한 데이터 로더 및 임베딩 생성기로 Hugging Face를, 시맨틱 검색을 위한 벡터 데이터베이스로 Milvus를 사용하여 질문 답변 시스템을 구축하는 방법을 보여드립니다.
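As a rough sketch of that nearest-question lookup, the snippet below assumes the huggingface_test collection and the encode_text helper that this tutorial builds; the answer paired with the most similar stored question is returned as the reply.

from pymilvus import MilvusClient

milvus_client = MilvusClient("./huggingface_milvus_test.db")
query = {"question": ["When did Massachusetts first mandate that children be educated in schools?"]}
vectors = [v.tolist() for v in encode_text(query)["question_embedding"]]  # encode_text is defined in this tutorial

res = milvus_client.search(
    collection_name="huggingface_test",
    data=vectors,
    limit=1,                               # the single most similar stored question
    output_fields=["answer", "question"],  # return its paired answer
)
print(res[0][0]["entity"]["answer"])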

    시작하기 전에

 Open In Colab
+GitHub Repository

    이 가이드에서는 Jina AI 임베딩과 Milvus를 사용하여 유사도 검색 및 검색 작업을 수행하는 방법을 설명합니다.

    Jina AI란?

    Jina AI의 핵심 임베딩 모델은 상세한 텍스트를 이해하는 데 탁월하여 시맨틱 검색, 콘텐츠 분류에 이상적이며 고급 감성 분석, 텍스트 요약 및 개인화된 추천 시스템을 지원합니다.

 from pymilvus.model.dense import JinaEmbeddingFunction
     
     jina_api_key = "<YOUR_JINA_API_KEY>"
    -ef = JinaEmbeddingFunction("jina-embeddings-v2-base-en", jina_api_key)
    +ef = JinaEmbeddingFunction(
    +    "jina-embeddings-v3", 
    +    jina_api_key,
    +    task="retrieval.passage",
    +    dimensions=1024
    +)
     
     query = "what is information retrieval?"
     doc = "Information retrieval is the process of finding relevant information from a large collection of data or documents."
     
    -qvecs = ef.encode_queries([query])
    -dvecs = ef.encode_documents([doc])
    +qvecs = ef.encode_queries([query])  # This method uses `retrieval.query` as the task
    +dvecs = ef.encode_documents([doc])  # This method uses `retrieval.passage` as the task
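
As a quick sanity check on these embeddings, a plain inner product between the query and passage vectors should come out high for this matched pair; the retrieval tasks are tuned for inner-product similarity. Treat this snippet as an illustrative sketch.

import numpy as np

score = np.dot(np.asarray(qvecs[0]), np.asarray(dvecs[0]))  # higher means more similar
print(score)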
     

    이중 언어 임베딩

 Open In Colab
+GitHub Repository

    이 가이드에서는 LlamaIndex와 Milvus를 사용하여 검색 증강 생성(RAG) 시스템을 구축하는 방법을 설명합니다.

    RAG 시스템은 검색 시스템과 생성 모델을 결합하여 주어진 프롬프트에 따라 새 텍스트를 생성합니다. 이 시스템은 먼저 Milvus를 사용하여 말뭉치에서 관련 문서를 검색한 다음 생성 모델을 사용하여 검색된 문서를 기반으로 새 텍스트를 생성합니다.

    LlamaIndex는 사용자 정의 데이터 소스를 대규모 언어 모델(LLM)에 연결하기 위한 간단하고 유연한 데이터 프레임워크입니다. Milvus는 세계에서 가장 진보된 오픈 소스 벡터 데이터베이스로, 임베딩 유사도 검색 및 AI 애플리케이션을 강화하기 위해 구축되었습니다.
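For context, a minimal sketch of wiring LlamaIndex to Milvus follows. The module path and constructor parameters match the llama-index Milvus integration as commonly documented, but treat the file names and the dim value as assumptions to adjust for your embedding model.

from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext
from llama_index.vector_stores.milvus import MilvusVectorStore

documents = SimpleDirectoryReader("./data").load_data()  # any folder of text files
vector_store = MilvusVectorStore(uri="./milvus_llamaindex.db", dim=1536, overwrite=True)  # dim must match your embeddings
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)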

    @@ -106,7 +107,7 @@ index = VectorStoreIndex.from_documents(documents, storage_context=storage_conte
• 밀버스의 완전 관리형 클라우드 서비스인 질리즈 클라우드를 사용하려면, 질리즈 클라우드의 퍼블릭 엔드포인트와 API 키에 해당하는 uri 및 token 을 조정하세요.
-데이터 쿼리하기
+데이터 조회하기

이제 문서가 인덱스에 저장되었으므로 인덱스에 대해 질문할 수 있습니다. 인덱스는 자체에 저장된 데이터를 chatgpt의 지식 베이스로 사용합니다.

    query_engine = index.as_query_engine()
     res = query_engine.query("What did the author learn?")
     print(res)
    diff --git a/localization/v2.4.x/site/ko/integrations/integrate_with_openai.json b/localization/v2.4.x/site/ko/integrations/integrate_with_openai.json
    index 4d06c35c4..2f24e687d 100644
    --- a/localization/v2.4.x/site/ko/integrations/integrate_with_openai.json
    +++ b/localization/v2.4.x/site/ko/integrations/integrate_with_openai.json
    @@ -1 +1 @@
    -{"codeList":["pip install --upgrade openai pymilvus\n","from openai import OpenAI\nfrom pymilvus import MilvusClient\n\nMODEL_NAME = \"text-embedding-3-small\"  # Which model to use, please check https://platform.openai.com/docs/guides/embeddings for available models\nDIMENSION = 1536  # Dimension of vector embedding\n\n# Connect to OpenAI with API Key.\nopenai_client = OpenAI(api_key=\"\")\n\ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = [\n    vec.embedding\n    for vec in openai_client.embeddings.create(input=docs, model=MODEL_NAME).data\n]\n\n# Prepare data to be stored in Milvus vector database.\n# We can store the id, vector representation, raw text and labels such as \"subject\" in this case in Milvus.\ndata = [\n    {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n    for i in range(len(docs))\n]\n\n\n# Connect to Milvus, all data is stored in a local file named \"milvus_openai_demo.db\"\n# in current directory. You can also connect to a remote Milvus server following this\n# instruction: https://milvus.io/docs/install_standalone-docker.md.\nmilvus_client = MilvusClient(uri=\"milvus_openai_demo.db\")\nCOLLECTION_NAME = \"demo_collection\"  # Milvus collection name\n# Create a collection to store the vectors and text.\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\n# Insert all data into Milvus vector database.\nres = milvus_client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res[\"insert_count\"])\n","queries = [\"When was artificial intelligence founded?\"]\n\nquery_vectors = [\n    vec.embedding\n    for vec in openai_client.embeddings.create(input=queries, model=MODEL_NAME).data\n]\n\nres = milvus_client.search(\n    collection_name=COLLECTION_NAME,  # target collection\n    data=query_vectors,  # query vectors\n    limit=2,  # number of returned entities\n    output_fields=[\"text\", \"subject\"],  # specifies fields to be returned\n)\n\nfor q in queries:\n    print(\"Query:\", q)\n    for result in res:\n        print(result)\n    print(\"\\n\")\n","[\n    {\n        \"id\": 0,\n        \"distance\": -0.772376537322998,\n        \"entity\": {\n            \"text\": \"Artificial intelligence was founded as an academic discipline in 1956.\",\n            \"subject\": \"history\",\n        },\n    },\n    {\n        \"id\": 1,\n        \"distance\": -0.58596271276474,\n        \"entity\": {\n            \"text\": \"Alan Turing was the first person to conduct substantial research in AI.\",\n            \"subject\": \"history\",\n        },\n    },\n]\n"],"headingContent":"","anchorList":[{"label":"Milvus와 OpenAI를 사용한 시맨틱 검색","href":"Semantic-Search-with-Milvus-and-OpenAI","type":1,"isActive":false},{"label":"시작하기","href":"Getting-started","type":2,"isActive":false},{"label":"OpenAI 및 Milvus로 책 제목 검색하기","href":"Searching-book-titles-with-OpenAI--Milvus","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["pip install --upgrade openai pymilvus\n","from openai import OpenAI\nfrom pymilvus import MilvusClient\n\nMODEL_NAME = \"text-embedding-3-small\"  # Which model to use, please check https://platform.openai.com/docs/guides/embeddings for available models\nDIMENSION = 1536  # Dimension of vector embedding\n\n# Connect to OpenAI with API Key.\nopenai_client = OpenAI(api_key=\"\")\n\ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = [\n    vec.embedding\n    for vec in openai_client.embeddings.create(input=docs, model=MODEL_NAME).data\n]\n\n# Prepare data to be stored in Milvus vector database.\n# We can store the id, vector representation, raw text and labels such as \"subject\" in this case in Milvus.\ndata = [\n    {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n    for i in range(len(docs))\n]\n\n\n# Connect to Milvus, all data is stored in a local file named \"milvus_openai_demo.db\"\n# in current directory. You can also connect to a remote Milvus server following this\n# instruction: https://milvus.io/docs/install_standalone-docker.md.\nmilvus_client = MilvusClient(uri=\"milvus_openai_demo.db\")\nCOLLECTION_NAME = \"demo_collection\"  # Milvus collection name\n# Create a collection to store the vectors and text.\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\n# Insert all data into Milvus vector database.\nres = milvus_client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res[\"insert_count\"])\n","queries = [\"When was artificial intelligence founded?\"]\n\nquery_vectors = [\n    vec.embedding\n    for vec in openai_client.embeddings.create(input=queries, model=MODEL_NAME).data\n]\n\nres = milvus_client.search(\n    collection_name=COLLECTION_NAME,  # target collection\n    data=query_vectors,  # query vectors\n    limit=2,  # number of returned entities\n    output_fields=[\"text\", \"subject\"],  # specifies fields to be returned\n)\n\nfor q in queries:\n    print(\"Query:\", q)\n    for result in res:\n        print(result)\n    print(\"\\n\")\n","[\n    {\n        \"id\": 0,\n        \"distance\": -0.772376537322998,\n        \"entity\": {\n            \"text\": \"Artificial intelligence was founded as an academic discipline in 1956.\",\n            \"subject\": \"history\",\n        },\n    },\n    {\n        \"id\": 1,\n        \"distance\": -0.58596271276474,\n        \"entity\": {\n            \"text\": \"Alan Turing was the first person to conduct substantial research in AI.\",\n            \"subject\": \"history\",\n        },\n    },\n]\n"],"headingContent":"Semantic Search with Milvus and OpenAI","anchorList":[{"label":"Milvus와 OpenAI를 사용한 시맨틱 검색","href":"Semantic-Search-with-Milvus-and-OpenAI","type":1,"isActive":false},{"label":"시작하기","href":"Getting-started","type":2,"isActive":false},{"label":"OpenAI 및 Milvus로 책 제목 검색하기","href":"Searching-book-titles-with-OpenAI--Milvus","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/ko/integrations/integrate_with_openai.md b/localization/v2.4.x/site/ko/integrations/integrate_with_openai.md
    index 9f92fdfba..62d30d6c4 100644
    --- a/localization/v2.4.x/site/ko/integrations/integrate_with_openai.md
    +++ b/localization/v2.4.x/site/ko/integrations/integrate_with_openai.md
    @@ -1,6 +1,6 @@
     ---
     id: integrate_with_openai.md
    -title: Milvus 및 OpenAI를 사용한 시맨틱 검색
    +title: Milvus와 OpenAI를 사용한 시맨틱 검색
     summary: 이 페이지에서는 OpenAI의 임베딩 API와 벡터 데이터베이스의 통합에 대해 설명합니다.
     ---
     

    Milvus와 OpenAI를 사용한 시맨틱 검색

 Open In Colab
+GitHub Repository

    이 가이드에서는 OpenAI의 임베딩 API를 Milvus 벡터 데이터베이스와 함께 사용하여 텍스트에 대한 의미론적 검색을 수행하는 방법을 소개합니다.
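In outline, the flow this guide implements looks like the following, condensed from the code examples on this page; the API key is a placeholder.

from openai import OpenAI
from pymilvus import MilvusClient

openai_client = OpenAI(api_key="<YOUR_OPENAI_API_KEY>")  # placeholder
milvus_client = MilvusClient(uri="milvus_openai_demo.db")

# Embed the query with OpenAI, then search the collection built earlier in this guide.
vec = openai_client.embeddings.create(
    input=["When was artificial intelligence founded?"],
    model="text-embedding-3-small",
).data[0].embedding

hits = milvus_client.search(
    collection_name="demo_collection",
    data=[vec],
    limit=2,
    output_fields=["text", "subject"],
)
print(hits)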

    시작하기

 Open In Colab
+GitHub Repository

    이 가이드에서는 Ragas를 사용하여 Milvus를 기반으로 구축된 검색 증강 생성(RAG) 파이프라인을 평가하는 방법을 보여드립니다.

    RAG 시스템은 검색 시스템과 생성 모델을 결합하여 주어진 프롬프트에 따라 새 텍스트를 생성합니다. 시스템은 먼저 Milvus를 사용하여 말뭉치에서 관련 문서를 검색한 다음, 생성 모델을 사용하여 검색된 문서를 기반으로 새 텍스트를 생성합니다.

    Ragas는 RAG 파이프라인을 평가하는 데 도움이 되는 프레임워크입니다. 이러한 파이프라인을 구축하는 데 도움이 되는 기존 도구와 프레임워크가 있지만, 이를 평가하고 파이프라인 성능을 정량화하는 것은 어려울 수 있습니다. 이것이 바로 Ragas(RAG 평가)가 필요한 이유입니다.
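A compressed sketch of what a Ragas evaluation call tends to look like follows; the metric names and dataset fields are taken from common Ragas examples and should be checked against your installed version.

from datasets import Dataset
from ragas import evaluate
from ragas.metrics import faithfulness, answer_relevancy

# One evaluated sample: the question, the pipeline's answer, and the retrieved contexts.
data = Dataset.from_dict({
    "question": ["What state is Cambridge in?"],
    "answer": ["Cambridge is in Massachusetts."],
    "contexts": [["Cambridge is a city in Massachusetts, USA."]],
})
print(evaluate(data, metrics=[faithfulness, answer_relevancy]))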

    diff --git a/localization/v2.4.x/site/ko/integrations/integrate_with_vanna.json b/localization/v2.4.x/site/ko/integrations/integrate_with_vanna.json index 17628e9ef..a73f16f1b 100644 --- a/localization/v2.4.x/site/ko/integrations/integrate_with_vanna.json +++ b/localization/v2.4.x/site/ko/integrations/integrate_with_vanna.json @@ -1 +1 @@ -{"codeList":["$ pip install \"vanna[milvus,openai]\"\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","from pymilvus import MilvusClient, model\nfrom vanna.milvus import Milvus_VectorStore\nfrom vanna.openai import OpenAI_Chat\n\n\nclass VannaMilvus(Milvus_VectorStore, OpenAI_Chat):\n def __init__(self, config=None):\n Milvus_VectorStore.__init__(self, config=config)\n OpenAI_Chat.__init__(self, config=config)\n","milvus_uri = \"./milvus_vanna.db\"\n\nmilvus_client = MilvusClient(uri=milvus_uri)\n\nvn_milvus = VannaMilvus(\n config={\n \"api_key\": os.getenv(\"OPENAI_API_KEY\"),\n \"model\": \"gpt-3.5-turbo\",\n \"milvus_client\": milvus_client,\n \"embedding_function\": model.DefaultEmbeddingFunction(),\n \"n_results\": 2, # The number of results to return from Milvus semantic search.\n }\n)\n","import sqlite3\n\nsqlite_path = \"./my-database.sqlite\"\nsql_connect = sqlite3.connect(sqlite_path)\nc = sql_connect.cursor()\n\ninit_sqls = \"\"\"\nCREATE TABLE IF NOT EXISTS Customer (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Name TEXT NOT NULL,\n Company TEXT NOT NULL,\n City TEXT NOT NULL,\n Phone TEXT NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS Company (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Name TEXT NOT NULL,\n Industry TEXT NOT NULL,\n Location TEXT NOT NULL,\n EmployeeCount INTEGER NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS User (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Username TEXT NOT NULL UNIQUE,\n Email TEXT NOT NULL UNIQUE\n);\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('John Doe', 'ABC Corp', 'New York', '123-456-7890');\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('Jane Smith', 'XYZ Inc', 'Los Angeles', '098-765-4321');\n\nINSERT INTO Company (Name, Industry, Location, EmployeeCount)\nVALUES ('ABC Corp', 'cutting-edge technology', 'New York', 100);\n\nINSERT INTO User (Username, Email)\nVALUES ('johndoe123', 'johndoe123@example.com');\n\"\"\"\n\nfor sql in init_sqls.split(\";\"):\n c.execute(sql)\n\nsql_connect.commit()\n\n# Connect to the SQLite database\nvn_milvus.connect_to_sqlite(sqlite_path)\n","# If there exists training data, we should remove it before training.\nexisting_training_data = vn_milvus.get_training_data()\nif len(existing_training_data) > 0:\n for _, training_data in existing_training_data.iterrows():\n vn_milvus.remove_training_data(training_data[\"id\"])\n\n# Get the DDL of the SQLite database\ndf_ddl = vn_milvus.run_sql(\"SELECT type, sql FROM sqlite_master WHERE sql is not null\")\n\n# Train the model on the DDL data\nfor ddl in df_ddl[\"sql\"].to_list():\n vn_milvus.train(ddl=ddl)\n","# Add documentation about your business terminology or definitions.\nvn_milvus.train(\n documentation=\"ABC Corp specializes in cutting-edge technology solutions and innovation.\"\n)\nvn_milvus.train(\n documentation=\"XYZ Inc is a global leader in manufacturing and supply chain management.\"\n)\n\n# You can also add SQL queries to your training data.\nvn_milvus.train(sql=\"SELECT * FROM Customer WHERE Name = 'John Doe'\")\n","training_data = vn_milvus.get_training_data()\ntraining_data\n","sql = vn_milvus.generate_sql(\"what is the phone number of John 
Doe?\")\nvn_milvus.run_sql(sql)\n","sql = vn_milvus.generate_sql(\"which customer works for a manufacturing corporation?\")\nvn_milvus.run_sql(sql)\n","sql_connect.close()\nmilvus_client.close()\n\nos.remove(sqlite_path)\nif os.path.exists(milvus_uri):\n os.remove(milvus_uri)\n"],"headingContent":"","anchorList":[{"label":"Vanna 및 Milvus로 SQL 작성","href":"Write-SQL-with-Vanna-and-Milvus","type":1,"isActive":false},{"label":"전제 조건","href":"Prerequisites","type":2,"isActive":false},{"label":"데이터 준비","href":"Data-preparation","type":2,"isActive":false},{"label":"데이터로 훈련하기","href":"Train-with-data","type":2,"isActive":false},{"label":"SQL 생성 및 실행","href":"Generate-SQLs-and-execute-them","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install \"vanna[milvus,openai]\"\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","from pymilvus import MilvusClient, model\nfrom vanna.milvus import Milvus_VectorStore\nfrom vanna.openai import OpenAI_Chat\n\n\nclass VannaMilvus(Milvus_VectorStore, OpenAI_Chat):\n def __init__(self, config=None):\n Milvus_VectorStore.__init__(self, config=config)\n OpenAI_Chat.__init__(self, config=config)\n","milvus_uri = \"./milvus_vanna.db\"\n\nmilvus_client = MilvusClient(uri=milvus_uri)\n\nvn_milvus = VannaMilvus(\n config={\n \"api_key\": os.getenv(\"OPENAI_API_KEY\"),\n \"model\": \"gpt-3.5-turbo\",\n \"milvus_client\": milvus_client,\n \"embedding_function\": model.DefaultEmbeddingFunction(),\n \"n_results\": 2, # The number of results to return from Milvus semantic search.\n }\n)\n","import sqlite3\n\nsqlite_path = \"./my-database.sqlite\"\nsql_connect = sqlite3.connect(sqlite_path)\nc = sql_connect.cursor()\n\ninit_sqls = \"\"\"\nCREATE TABLE IF NOT EXISTS Customer (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Name TEXT NOT NULL,\n Company TEXT NOT NULL,\n City TEXT NOT NULL,\n Phone TEXT NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS Company (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Name TEXT NOT NULL,\n Industry TEXT NOT NULL,\n Location TEXT NOT NULL,\n EmployeeCount INTEGER NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS User (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Username TEXT NOT NULL UNIQUE,\n Email TEXT NOT NULL UNIQUE\n);\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('John Doe', 'ABC Corp', 'New York', '123-456-7890');\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('Jane Smith', 'XYZ Inc', 'Los Angeles', '098-765-4321');\n\nINSERT INTO Company (Name, Industry, Location, EmployeeCount)\nVALUES ('ABC Corp', 'cutting-edge technology', 'New York', 100);\n\nINSERT INTO User (Username, Email)\nVALUES ('johndoe123', 'johndoe123@example.com');\n\"\"\"\n\nfor sql in init_sqls.split(\";\"):\n c.execute(sql)\n\nsql_connect.commit()\n\n# Connect to the SQLite database\nvn_milvus.connect_to_sqlite(sqlite_path)\n","# If there exists training data, we should remove it before training.\nexisting_training_data = vn_milvus.get_training_data()\nif len(existing_training_data) > 0:\n for _, training_data in existing_training_data.iterrows():\n vn_milvus.remove_training_data(training_data[\"id\"])\n\n# Get the DDL of the SQLite database\ndf_ddl = vn_milvus.run_sql(\"SELECT type, sql FROM sqlite_master WHERE sql is not null\")\n\n# Train the model on the DDL data\nfor ddl in df_ddl[\"sql\"].to_list():\n vn_milvus.train(ddl=ddl)\n","# Add documentation about your business terminology or definitions.\nvn_milvus.train(\n documentation=\"ABC Corp specializes in cutting-edge technology solutions and 
innovation.\"\n)\nvn_milvus.train(\n documentation=\"XYZ Inc is a global leader in manufacturing and supply chain management.\"\n)\n\n# You can also add SQL queries to your training data.\nvn_milvus.train(sql=\"SELECT * FROM Customer WHERE Name = 'John Doe'\")\n","training_data = vn_milvus.get_training_data()\ntraining_data\n","sql = vn_milvus.generate_sql(\"what is the phone number of John Doe?\")\nvn_milvus.run_sql(sql)\n","sql = vn_milvus.generate_sql(\"which customer works for a manufacturing corporation?\")\nvn_milvus.run_sql(sql)\n","sql_connect.close()\nmilvus_client.close()\n\nos.remove(sqlite_path)\nif os.path.exists(milvus_uri):\n os.remove(milvus_uri)\n"],"headingContent":"Write SQL with Vanna and Milvus","anchorList":[{"label":"Vanna 및 Milvus로 SQL 작성","href":"Write-SQL-with-Vanna-and-Milvus","type":1,"isActive":false},{"label":"전제 조건","href":"Prerequisites","type":2,"isActive":false},{"label":"데이터 준비","href":"Data-preparation","type":2,"isActive":false},{"label":"데이터로 훈련하기","href":"Train-with-data","type":2,"isActive":false},{"label":"SQL 생성 및 실행","href":"Generate-SQLs-and-execute-them","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/integrations/integrate_with_vanna.md b/localization/v2.4.x/site/ko/integrations/integrate_with_vanna.md index 46c665742..f47ac7c59 100644 --- a/localization/v2.4.x/site/ko/integrations/integrate_with_vanna.md +++ b/localization/v2.4.x/site/ko/integrations/integrate_with_vanna.md @@ -18,7 +18,9 @@ title: Vanna 및 Milvus로 SQL 작성 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

    Open In Colab +GitHub Repository

    Vanna is an open-source Python RAG (Retrieval-Augmented Generation) framework for SQL generation and related functionality. Milvus is the world's most advanced open-source vector database, built to power embedding similarity search and AI applications.

    Vanna works in two easy steps: train a RAG "model" on your data, and then ask questions that return SQL queries that can be set up to run on your database. This guide demonstrates how to use Vanna to generate and execute SQL queries based on your data stored in a database.
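
    Condensed into code, the two steps look like the following sketch, which reuses the vn_milvus instance defined in the tutorial code above (the DDL string and the question are illustrative):

    # Step 1: train the RAG "model" on schema (DDL), documentation, or example SQL.
    vn_milvus.train(ddl="CREATE TABLE Customer (ID INTEGER PRIMARY KEY, Name TEXT)")

    # Step 2: ask a question. Vanna retrieves related training data from Milvus,
    # prompts the LLM, and returns a SQL query you can run on your database.
    sql = vn_milvus.generate_sql("How many customers are there?")
    vn_milvus.run_sql(sql)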

    Prerequisites

    Open In Colab +GitHub Repository

    This guide shows how to use VoyageAI's Embedding API with the Milvus vector database to conduct semantic search on text.
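
    A minimal sketch of the flow, assuming the voyageai Python client and Milvus Lite (the model name voyage-2, file path, and collection name are illustrative, and VOYAGE_API_KEY is set in the environment):

    import voyageai
    from pymilvus import MilvusClient

    vo = voyageai.Client()  # reads VOYAGE_API_KEY from the environment
    docs = ["Milvus is a vector database.", "Paris is the capital of France."]
    vectors = vo.embed(docs, model="voyage-2", input_type="document").embeddings

    client = MilvusClient("./voyage_demo.db")
    client.create_collection("demo", dimension=len(vectors[0]))
    client.insert("demo", [{"id": i, "vector": v} for i, v in enumerate(vectors)])

    # Embed the query with input_type="query" and search the collection.
    query = vo.embed(["What is Milvus?"], model="voyage-2", input_type="query").embeddings
    print(client.search("demo", data=query, limit=1))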

    Getting started

    Open In Colab +GitHub Repository

    This guide explains how to build a Retrieval-Augmented Generation (RAG) system using LangChain and Milvus.

    A RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

    LangChain is a framework for developing applications powered by large language models (LLMs). Milvus is the world's most advanced open-source vector database, built to power embedding similarity search and AI applications.
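
    A minimal retrieval sketch under those assumptions, using the langchain-milvus and langchain-openai packages (import paths and sample texts are illustrative and vary across LangChain versions):

    from langchain_openai import OpenAIEmbeddings
    from langchain_milvus import Milvus

    # Build a Milvus-backed vector store from a couple of toy documents.
    vectorstore = Milvus.from_texts(
        texts=["Milvus separates storage from compute.", "LangChain orchestrates LLM apps."],
        embedding=OpenAIEmbeddings(),
        connection_args={"uri": "./milvus_langchain.db"},
    )

    # The retriever feeds the top-k documents into the generation step of the RAG chain.
    retriever = vectorstore.as_retriever(search_kwargs={"k": 2})
    print(retriever.invoke("How does Milvus scale?"))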

    diff --git a/localization/v2.4.x/site/ko/menuStructure/ko.json b/localization/v2.4.x/site/ko/menuStructure/ko.json index 9541e0b9a..d470ada3b 100644 --- a/localization/v2.4.x/site/ko/menuStructure/ko.json +++ b/localization/v2.4.x/site/ko/menuStructure/ko.json @@ -255,7 +255,7 @@ "children": [] }, { - "label": "유사성 메트릭", + "label": "유사성 지표", "id": "metric.md", "order": 5, "children": [] diff --git a/localization/v2.4.x/site/ko/reference/architecture/architecture_overview.json b/localization/v2.4.x/site/ko/reference/architecture/architecture_overview.json index b78dd6daa..d0c4ff6ab 100644 --- a/localization/v2.4.x/site/ko/reference/architecture/architecture_overview.json +++ b/localization/v2.4.x/site/ko/reference/architecture/architecture_overview.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"","anchorList":[{"label":"Milvus 아키텍처 개요","href":"Milvus-Architecture-Overview","type":1,"isActive":false},{"label":"다음 단계","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Milvus Architecture Overview","anchorList":[{"label":"Milvus 아키텍처 개요","href":"Milvus-Architecture-Overview","type":1,"isActive":false},{"label":"다음 단계","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/reference/architecture/architecture_overview.md b/localization/v2.4.x/site/ko/reference/architecture/architecture_overview.md index 7558ee59e..67689e020 100644 --- a/localization/v2.4.x/site/ko/reference/architecture/architecture_overview.md +++ b/localization/v2.4.x/site/ko/reference/architecture/architecture_overview.md @@ -19,12 +19,18 @@ title: Milvus 아키텍처 개요 >

    Milvus is designed for similarity search on dense vector datasets containing millions, billions, or even trillions of vectors, and is built on top of popular vector search libraries including Faiss, HNSW, DiskANN, and SCANN. Before you proceed, familiarize yourself with the basic principles of embedding retrieval.

    Milvus also supports data sharding, streaming data ingestion, dynamic schemas, combined search over vector and scalar data, multi-vector and hybrid search, sparse vectors, and many other advanced functions. The platform offers performance on demand and can be optimized to fit any embedding retrieval scenario. We recommend deploying Milvus using Kubernetes for optimal availability and elasticity.

    Milvus adopts a shared-storage architecture featuring storage and compute disaggregation and horizontal scalability for its compute nodes. Following the principle of data plane and control plane separation, Milvus comprises four layers: the access layer, the coordinator service, the worker nodes, and storage. These layers are mutually independent when it comes to scaling or disaster recovery.

    Architecture_diagram

    According to the figure, the interfaces can be classified into the following categories (see the sketch after this list):

    • DDL / DCL: createCollection / createPartition / dropCollection / dropPartition / hasCollection / hasPartition
    • DML / Produce: insert / delete / upsert
    • DQL: search / query
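
    To make the mapping concrete, this minimal sketch pairs one MilvusClient call with each category (the file-based Milvus Lite URI and collection name are illustrative; the gRPC interface names above correspond only loosely to these convenience-client methods):

    from pymilvus import MilvusClient

    client = MilvusClient("./arch_demo.db")

    # DDL / DCL: define collections and partitions.
    client.create_collection("demo", dimension=4)

    # DML / Produce: insert / delete / upsert.
    client.insert("demo", [{"id": 1, "vector": [0.1, 0.2, 0.3, 0.4]}])

    # DQL: search (vector similarity) and query (scalar filtering).
    print(client.search("demo", data=[[0.1, 0.2, 0.3, 0.4]], limit=1))
    print(client.query("demo", filter="id == 1"))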

    What's next

    diff --git a/localization/v2.4.x/site/ko/reference/disk_index.json b/localization/v2.4.x/site/ko/reference/disk_index.json index f893da395..df0da9c54 100644 --- a/localization/v2.4.x/site/ko/reference/disk_index.json +++ b/localization/v2.4.x/site/ko/reference/disk_index.json @@ -1 +1 @@ -{"codeList":["...\nDiskIndex:\n MaxDegree: 56\n SearchListSize: 100\n PQCodeBugetGBRatio: 0.125\n SearchCacheBudgetGBRatio: 0.125\n BeamWidthRatio: 4.0\n...\n"],"headingContent":"","anchorList":[{"label":"온디스크 인덱스","href":"On-disk-Index","type":1,"isActive":false},{"label":"전제 조건","href":"Prerequisites","type":2,"isActive":false},{"label":"제한 사항","href":"Limits","type":2,"isActive":false},{"label":"색인 및 검색 설정","href":"Index-and-search-settings","type":2,"isActive":false},{"label":"DiskANN 관련 Milvus 구성","href":"DiskANN-related-Milvus-configurations","type":2,"isActive":false},{"label":"문제 해결","href":"Troubleshooting","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["...\nDiskIndex:\n MaxDegree: 56\n SearchListSize: 100\n PQCodeBugetGBRatio: 0.125\n SearchCacheBudgetGBRatio: 0.125\n BeamWidthRatio: 4.0\n...\n"],"headingContent":"On-disk Index","anchorList":[{"label":"온디스크 인덱스","href":"On-disk-Index","type":1,"isActive":false},{"label":"전제 조건","href":"Prerequisites","type":2,"isActive":false},{"label":"제한 사항","href":"Limits","type":2,"isActive":false},{"label":"색인 및 검색 설정","href":"Index-and-search-settings","type":2,"isActive":false},{"label":"DiskANN 관련 Milvus 구성","href":"DiskANN-related-Milvus-configurations","type":2,"isActive":false},{"label":"문제 해결","href":"Troubleshooting","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/reference/disk_index.md b/localization/v2.4.x/site/ko/reference/disk_index.md index 6a07218bd..23102d46c 100644 --- a/localization/v2.4.x/site/ko/reference/disk_index.md +++ b/localization/v2.4.x/site/ko/reference/disk_index.md @@ -2,7 +2,7 @@ id: disk_index.md related_key: disk_index summary: Milvus의 디스크 인덱스 메커니즘. -title: 디스크 색인 +title: 온디스크 인덱스 ---

    On-disk Index

    To use DiskANN, make sure that you:

    • Use only float vectors with at least one dimension in your data.
    -• Use only Euclidean distance (L2) or inner product (IP) to measure the distance between vectors.
    +• Use only Euclidean distance (L2), inner product (IP), or COSINE to measure the distance between vectors.

    Index and search settings

    • Index building parameters

      When building a DiskANN index, use DISKANN as the index type. No index parameters are necessary (a build-and-search sketch follows this list).

    • Search parameters
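
    A hedged build-and-search sketch for the settings above (the collection name, dimension, and Milvus URI are illustrative; search_list is the DiskANN candidate-list search parameter, and DiskANN requires a full Milvus deployment rather than Milvus Lite):

    from pymilvus import MilvusClient

    client = MilvusClient(uri="http://localhost:19530")

    # Build: DISKANN needs no extra build-time parameters.
    index_params = client.prepare_index_params()
    index_params.add_index(
        field_name="vector",
        index_type="DISKANN",
        metric_type="L2",
    )
    client.create_index("my_collection", index_params)

    # Search: tune the size of the candidate list via search_list.
    res = client.search(
        "my_collection",
        data=[[0.1] * 128],
        limit=10,
        search_params={"params": {"search_list": 100}},
    )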

      diff --git a/localization/v2.4.x/site/ko/reference/replica.json b/localization/v2.4.x/site/ko/reference/replica.json index da9c308a5..ba9b455ca 100644 --- a/localization/v2.4.x/site/ko/reference/replica.json +++ b/localization/v2.4.x/site/ko/reference/replica.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"","anchorList":[{"label":"인메모리 복제본","href":"In-Memory-Replica","type":1,"isActive":false},{"label":"개요","href":"Overview","type":2,"isActive":false},{"label":"주요 개념","href":"Key-Concepts","type":2,"isActive":false},{"label":"설계 세부 정보","href":"Design-Details","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"In-Memory Replica","anchorList":[{"label":"인메모리 복제본","href":"In-Memory-Replica","type":1,"isActive":false},{"label":"개요","href":"Overview","type":2,"isActive":false},{"label":"주요 개념","href":"Key-Concepts","type":2,"isActive":false},{"label":"설계 세부 정보","href":"Design-Details","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/reference/replica.md b/localization/v2.4.x/site/ko/reference/replica.md index 3b67e6abe..290d81eb5 100644 --- a/localization/v2.4.x/site/ko/reference/replica.md +++ b/localization/v2.4.x/site/ko/reference/replica.md @@ -60,7 +60,7 @@ title: 인메모리 복제본

      In-memory replicas are organized as replica groups. Each replica group contains shard replicas. Each shard replica has a streaming replica and a historical replica that correspond to the growing and sealed segments in the shard (i.e., the DML channel).

      An illustration of how in-memory replica works

      Replica group

      A replica group consists of multiple query nodes that are responsible for handling historical data and replicas.

      Shard replica

      A shard replica consists of a streaming replica and a historical replica, both belonging to the same shard. The number of shard replicas in a replica group is determined by the number of shards in a specified collection.
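
      For reference, a minimal sketch of requesting multiple in-memory replicas with the ORM-style pymilvus API (the collection name and URI are illustrative; the cluster must have enough query nodes to host each replica group):

      from pymilvus import connections, Collection

      connections.connect(uri="http://localhost:19530")
      collection = Collection("my_collection")  # assumes this collection already exists

      # Load two shard-replica sets; searches are balanced across them, and one
      # replica can keep serving if the nodes backing the other fail.
      collection.load(replica_number=2)
      print(collection.get_replicas())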

      diff --git a/localization/v2.4.x/site/ko/release_notes.json b/localization/v2.4.x/site/ko/release_notes.json index e1b8282e9..2e06db4ae 100644 --- a/localization/v2.4.x/site/ko/release_notes.json +++ b/localization/v2.4.x/site/ko/release_notes.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"Release Notes","anchorList":[{"label":"릴리스 노트","href":"Release-Notes","type":1,"isActive":false},{"label":"v2.4.11","href":"v2411","type":2,"isActive":false},{"label":"v2.4.10","href":"v2410","type":2,"isActive":false},{"label":"v2.4.9","href":"v249","type":2,"isActive":false},{"label":"v2.4.8","href":"v248","type":2,"isActive":false},{"label":"v2.4.6","href":"v246","type":2,"isActive":false},{"label":"v2.4.5","href":"v245","type":2,"isActive":false},{"label":"v2.4.4","href":"v244","type":2,"isActive":false},{"label":"v2.4.3","href":"v243","type":2,"isActive":false},{"label":"v2.4.1","href":"v241","type":2,"isActive":false},{"label":"v2.4.0","href":"v240","type":2,"isActive":false},{"label":"v2.4.0-rc.1","href":"v240-rc1","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Release Notes","anchorList":[{"label":"릴리스 노트","href":"Release-Notes","type":1,"isActive":false},{"label":"v2.4.13-핫픽스","href":"v2413-hotfix","type":2,"isActive":false},{"label":"[사용 중단됨] v2.4.13","href":"Deprecated-v2413","type":2,"isActive":false},{"label":"v2.4.12","href":"v2412","type":2,"isActive":false},{"label":"v2.4.11","href":"v2411","type":2,"isActive":false},{"label":"v2.4.10","href":"v2410","type":2,"isActive":false},{"label":"v2.4.9","href":"v249","type":2,"isActive":false},{"label":"v2.4.8","href":"v248","type":2,"isActive":false},{"label":"v2.4.6","href":"v246","type":2,"isActive":false},{"label":"v2.4.5","href":"v245","type":2,"isActive":false},{"label":"v2.4.4","href":"v244","type":2,"isActive":false},{"label":"v2.4.3","href":"v243","type":2,"isActive":false},{"label":"v2.4.1","href":"v241","type":2,"isActive":false},{"label":"v2.4.0","href":"v240","type":2,"isActive":false},{"label":"v2.4.0-rc.1","href":"v240-rc1","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/release_notes.md b/localization/v2.4.x/site/ko/release_notes.md index 7620e3f71..921ef2bf9 100644 --- a/localization/v2.4.x/site/ko/release_notes.md +++ b/localization/v2.4.x/site/ko/release_notes.md @@ -19,6 +19,163 @@ title: 릴리스 노트 >

      Find out what's new in Milvus! This page summarizes the new features, improvements, known issues, and bug fixes in each release. You can find the release notes for each version released after v2.4.0 in this section. We suggest that you regularly visit this page to learn about updates.

      v2.4.13-hotfix

      Release date: October 17, 2024

      | Milvus version | Python SDK version | Java SDK version | Node.js SDK version |
      |----------------|--------------------|------------------|---------------------|
      | 2.4.13-hotfix  | 2.4.8              | 2.4.5            | 2.4.9               |

      Milvus v2.4.13-hotfix addresses a critical issue specific to v2.4.13, where Milvus may fail to retrieve collection information after a restart if all MetaKV snapshots were garbage collected (#36933). Users currently running v2.4.13 are advised to upgrade to v2.4.13-hotfix at their earliest convenience to avoid potential disruptions.

      Critical fixes

      • Load the original key if the timestamp is MaxTimestamp (#36935)

      [Deprecated] v2.4.13

      Release date: October 12, 2024

      | Milvus version | Python SDK version | Java SDK version | Node.js SDK version |
      |----------------|--------------------|------------------|---------------------|
      | 2.4.13         | 2.4.8              | 2.4.5            | 2.4.9               |

      Milvus 2.4.13 introduces dynamic replica load, allowing users to adjust the number of collection replicas without needing to release and reload the collection. This version also addresses several critical bugs related to bulk importing, expression parsing, load balancing, and failure recovery. Additionally, significant improvements have been made to MMAP resource usage and import performance, enhancing overall system efficiency. We highly recommend upgrading to this release for better performance and stability.

      Key features

      • Dynamic replica adjustment for loaded collections (#36417)
      • Sparse vector MMAP in growing segment types (#36565)

      Bug fixes

      • Fixed a flush performance issue (#36741)
      • Fixed JSON expression bugs in "[]" (#36722)
      • Removed neighbors if the compaction target is not indexed (#36694)
      • Improved Rocksmq performance when the channel is full (#36618)
      • Fixed an issue where errors during unpinning were not deferred (#36665)
      • Resolved a memory leak of imported segments in the segment manager (#36631)
      • Skipped unnecessary health checks for query nodes in the proxy (#36553)
      • Fixed an overflow issue in term expressions (#36534)
      • Recorded the node ID before task assignment to prevent task mis-assignment (#36493)
      • Resolved data race issues in clustering compaction (#36499)
      • Added a max-length check for string arrays after type matching (#36497)
      • Addressed race conditions in mixed or standalone mode (#36459)
      • Fixed segment imbalance after repeated load and release operations (#36543)
      • Fixed a corner case where segments could not be moved from a stopping node (#36475)
      • Updated segment information properly even when some segments were missing (#36729)
      • Kept etcd transactions from exceeding the maximum limit in snapshot KV (#36773)

      Improvements

      • Enhanced MMAP resource estimation:
        • Improved MMAP-related code in column.h (#36521)
        • Refined resource estimation when loading collections (#36728)
      • Performance enhancements:
        • Improved expression parsing efficiency by converting Unicode to ASCII (#36676)
        • Enabled parallel production of messages for multiple topics (#36462)
        • Reduced CPU overhead when calculating index file sizes (#36580)
        • Retrieved the message type from the header to minimize unmarshalling (#36454)
        • Optimized the workload-based replica selection policy (#36384)
      • Split delete task messages to fit within the maximum message size limit (#36574)
      • Added a new RESTful URL to describe import jobs (#36754)
      • Optimized import scheduling and added a time-cost metric (#36684)
      • Added a balance report log for the query coordinator balancer (#36749)
      • Switched to using a common GC configuration (#36670)
      • Added a streaming forward policy switch for the delegator (#36712)
      • Enabled manual compaction for collections without indexes (#36581)
      • Enabled load balancing across query nodes with varying memory capacities (#36625)
      • Unified the case of inbound labels using metrics.label (#36616)
      • Disabled transfer channel/segment operations (#36552)
      • Added metrics to monitor import throughput and the number of imported rows (#36588)
      • Prevented the creation of multiple timer objects in targets (#36573)
      • Updated the expression version and formatted the HTTP response for expressions (#36467)
      • Enhanced garbage collection in snapshot KV (#36793)
      • Added support for executing methods with context parameters (#36798)

      v2.4.12

      Release date: September 26, 2024

      | Milvus version | Python SDK version | Java SDK version | Node.js SDK version |
      |----------------|--------------------|------------------|---------------------|
      | 2.4.12         | 2.4.7              | 2.4.4            | 2.4.9               |

      Milvus 2.4.12 introduces significant enhancements and critical bug fixes. This release addresses data duplication issues and improves failure recovery speed, particularly when handling large numbers of deletions. However, a known issue persists where failure recovery can be slow when deleting large amounts of data. We are actively working on resolving this issue.

      Improvements

      • Implemented graceful stop for the flowgraph manager (#36358)
      • Disabled index checking for unloaded vector fields (#36280)
      • Filtered out delete records that were not hit during delta loading (#36272)
      • Improved error handling for std::stoi exceptions (#36296)
      • Disallowed keywords as field names or dynamic field names (#36108)
      • Added metrics for delete entries in L0 segments (#36227)
      • Implemented an L0 forward policy to support remote loading (#36208)
      • Added ANN field load checking in the proxy (#36194)
      • Enabled support for empty sparse rows (#36061)
      • Fixed a security vulnerability (#36156)
      • Implemented a stats handler for request/response size metrics (#36118)
      • Fixed size estimation for encoded array data (#36379)

      Bug fixes

      • Resolved metric type errors for collections with two vector fields (#36473)
      • Fixed long buffering issues that caused message queue receive failures (#36425)
      • Implemented proper compacted-to-segments returns after split support (#36429)
      • Resolved data race issues in the node ID check goroutine (#36377)
      • Removed element type checking (#36324)
      • Fixed concurrent access issues for growing and sealed segments (#36288)
      • Implemented a stateful lock for futures (#36333)
      • Fixed offset usage in hybrid search (#36287, #36253)
      • Resolved dirty segment/channel leaks in query nodes (#36259)
      • Fixed primary key deduplication handling (#36274)
      • Enforced metric type settings in search requests (#36279)
      • Fixed a stored index file size metric clearing issue (#36161)
      • Fixed read-write privilege group behavior for global API access (#36145)

      v2.4.11

      Release date: September 11, 2024

      @@ -45,48 +202,48 @@ title: Release Notes

      | Milvus version | Python SDK version | Java SDK version | Node.js SDK version |

      Milvus 2.4.11 is a bug-fix release that addresses several critical issues related to the MARISA trie index, compaction, and loading operations. This release introduces a new feature for viewing expressions and improves delete stability. All users in the 2.4.x series are encouraged to upgrade to this version to benefit from these improvements and fixes.

      Key features

      • Added a static view of expressions in 2.4 (#35954)
      • Implemented delete-buffer-related quota logic (#35997)
      Bug fixes

      • Addressed trie index range operation issues for GreaterThan and GreaterThanEqual comparisons (#36126)
      • Fixed marisa_label_order usage in trie index construction (#36060)
      • Enhanced trie.predictive_search value checks (#35999)
      • Enabled binary arithmetic expression support in the inverted index (#36097)
      • Fixed a segfault caused by SkipIndex (#35908)
      • Resolved a memory leak in the proxy meta cache (#36076)
      • Renamed mmap file paths to prevent directory conflicts (#35975)
      • Improved logging and cleanup for failed/timed-out tasks in mix compaction (#35967)
      • Resolved a logic deadlock while the delegator was under heavy memory use (#36066)
      • Implemented empty segment generation when compaction deletes all insertions (#36045)
      • Fixed load field list population from older-version load info in 2.4 (#36018)
      • Corrected the tracing config update logic in 2.4 (#35998)
      • Resolved search/query request failures during dynamic partition release (#36019)
      • Prevented overriding of fallback parameters (#36006)
      • Ensured proper privilege group registration for validation (#35938)
      • Prevented incorrect cleanup of DB limiter nodes (#35992)
      • Fixed an issue where replicas did not participate in queries after failure recovery (#35925)
      • Resolved a data race in clustering compaction writes (#35958)
      • Fixed variable references after move operations (#35904)
      • Implemented a check on clustering key skip-load behavior (#35899)
      • Ensured a single start of the querycoord observer in 2.4 (#35817)

      Improvements

      • Upgraded the Milvus and proto versions to 2.4.11 (#36069)
      • Fixed memory leaks in unit tests and enabled the use_asan option for unit test builds (#35857)
      • Adjusted the l0segmentsrowcount limit to a more appropriate value (#36015)
      • Corrected the deltalog memory estimation factor to one (#36035)
      • Implemented sliceSetEqual for load field list comparison (#36062)
      • Reduced log frequency for delete operations (#35981)
      • Upgraded the etcd version to 3.5.14 (#35977)
      • Optimized MMAP-RSS reduction after warmup (#35965)
      • Removed the cooling-off period in the rate limiter for read requests (#35936)
      • Enhanced load field checks for previously loaded collections (#35910)
      • Added support for deleting roles related to privilege lists in 2.4 (#35863)
      • Implemented a depguard rule to ban deprecated proto library usage (#35818)

      Others

      • Updated the Knowhere version (#36067)

      v2.4.10

      • Removed duplicate schema helper creation in the proxy (#35502).
      • Added support for compiling Milvus on Ubuntu 20.04 (#35457).
      • Optimized lock usage and prevented double flushing of clustering buffer writes (#35490).
      • Removed invalid logs (#35473).
      • Added documentation for clustering compaction usage (#35428).
      • Added support for dynamic fields in the schema helper (#35469).
      • Added a msgchannel section to the generated YAML (#35466).

      v2.4.8

    Features

    • Added support for Float16/BFloat16 vector data types in bulk insert (#32157)
    • Enhanced sparse float vectors to support brute-force iterator search and range search (#32635)

    Improvements

    • Added a declarative resource group API (#31930, #32297, #32536, #32666).
    • Rewrote the collection observer in QueryCoord to be task-driven (#32441).
    • Refactored the data structures used in DataNode's SyncManager to reduce memory usage and prevent errors (#32673).
    • Revised the garbage collection implementation to minimize list operations associated with object storage (#31740).
    • Reduced CPU usage when the collection count is high (#32245).
    • Improved milvus.yaml management by automatically generating the relevant configuration items in the milvus.yaml file through code (#31832, #32357).
    • Enhanced query performance by retrieving data after performing a local reduce (#32346).
    • Added the WithBlock option to etcd client creation (#32641).
    • Used the client-specified client_request_id as the TraceID if the client provides one (#32264).
    • Added a db label to the metrics for delete and bulk-insert operations (#32611).
    • Added logic to skip verification for the AutoID and PartitionKey columns via configuration (#32592).
    • Refined authentication-related errors (#32253).
    • Improved error logs for AllocSegmentID in DataCoord (#32351, #32335).
    • Removed duplicate metrics (#32380, #32308) and cleaned up unused ones (#32404, #32515).
    • Added a configuration option to control whether the partitionKey feature is enabled (#32433).
    • Added a configuration option to control the maximum amount of data that can be inserted in a single request (#32433).
    • Parallelized apply/delete operations at the segment level to accelerate the processing of delete messages by the delegator (#32291).
    • Accelerated frequent filtering operations in QueryCoord by using indexes (#32232, #32505, #32533, #32595) and adding a cache (#32580).
    • Accelerated common operations in DataCoord by rewriting data structures (#32273) and refactoring code (#32389).
    • Removed OpenBLAS from Conan (#32002).

    Bug fixes

    • Fixed building Milvus on rockylinux8 (#32619).
    • Fixed compilation errors for SVE on ARM (#32463, #32270).
    • Fixed a crash issue on ARM-based GPU images (#31980).
    • Fixed an issue where regex queries could not handle text containing newlines (#32569).
    • Fixed empty search results caused by GetShardLeaders returning an empty node list (#32685).
    • Fixed an error raised by BulkInsert when encountering dynamic fields in numpy files (#32596).
    • Fixed bugs related to the RESTfulV2 interface, including an important fix that allows numeric parameters in requests to accept numerical input instead of the string type (#32485, #32355).
    • Fixed a memory leak in the proxy by removing the watching of config events in the rate limiter (#32313).
    • Fixed an issue where the rate limiter incorrectly reported that the partition could not be found when the partition name was unspecified (#32647).
    • Added detection between the cases of a collection being in a recovering state and not being loaded in the error types (#32447).
    • Fixed the negative queryable-entity count metric (#32361).

    v2.4.0

    GPU Index - CAGRA

    We would like to express our sincere gratitude to the NVIDIA team for their invaluable contribution of CAGRA, a state-of-the-art (SoTA) GPU-based graph index that can be used online.

    Unlike previous GPU indexes, CAGRA demonstrates overwhelming superiority even on small-batch queries, an area where CPU indexes have traditionally excelled. In addition, CAGRA's performance in large-batch queries and index build speed, areas where GPU indexes already shine, is truly unrivaled.

    Sample code can be found in example_gpu_cagra.py.
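
    As a hedged sketch (not taken from the release notes): building a GPU_CAGRA index with pymilvus might look as follows, where the collection name is illustrative, the parameter names follow the Milvus GPU index documentation, and a GPU-enabled Milvus deployment is assumed:

    from pymilvus import MilvusClient

    client = MilvusClient(uri="http://localhost:19530")

    index_params = client.prepare_index_params()
    index_params.add_index(
        field_name="vector",
        index_type="GPU_CAGRA",
        metric_type="L2",
        params={
            "intermediate_graph_degree": 64,  # graph degree before pruning
            "graph_degree": 32,               # final graph degree after pruning
        },
    )
    client.create_index("my_collection", index_params)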

    Sparse Vector (Beta)

    In this release, we introduce a new type of vector field: the sparse vector. Unlike dense vectors, sparse vectors have orders of magnitude more dimensions, of which only a few tend to be non-zero. This feature offers better interpretability due to its term-based nature and can be more effective in certain domains. Learned sparse models such as SPLADEv2/BGE-M3 have proven very useful for common first-stage ranking tasks. The main use case of this new feature in Milvus is to enable efficient approximate semantic nearest-neighbor search over sparse vectors generated by neural models such as SPLADEv2/BGE-M3 and by statistical models such as the BM25 algorithm. Milvus now supports effective, high-performance storage, indexing, and search (MIPS, Maximum Inner Product Search) of sparse vectors.

    Sample code can be found in hello_sparse.py.
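
    A condensed, hedged sketch of the feature (collection and field names are illustrative; it requires pymilvus and a Milvus build with sparse-vector support, 2.4+): sparse inputs are dictionaries mapping dimension index to non-zero value, indexed with SPARSE_INVERTED_INDEX and searched with inner product:

    from pymilvus import MilvusClient, DataType

    client = MilvusClient("./sparse_demo.db")

    schema = client.create_schema(auto_id=True)
    schema.add_field("pk", DataType.INT64, is_primary=True)
    schema.add_field("embedding", DataType.SPARSE_FLOAT_VECTOR)

    index_params = client.prepare_index_params()
    index_params.add_index(
        field_name="embedding",
        index_type="SPARSE_INVERTED_INDEX",
        metric_type="IP",  # sparse vectors are searched with inner product (MIPS)
    )
    client.create_collection("sparse_demo", schema=schema, index_params=index_params)

    # A sparse row: only the non-zero dimensions are stored.
    client.insert("sparse_demo", [{"embedding": {102: 0.7, 509: 0.3}}])
    print(client.search("sparse_demo", data=[{102: 0.5}], limit=1))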

    Multi-vector support is a cornerstone for applications that require multi-model data processing or a mix of dense and sparse vectors. With multi-vector support, you can now:

      @@ -824,7 +981,7 @@ title: Release Notes

      Sample code can be found in example_group_by.py.

      Float16 and BFloat16 Vector Data Types

      Machine learning and neural networks often use half-precision data types such as Float16 and BFloat16. While these data types can improve query efficiency and reduce memory usage, they come with the tradeoff of reduced accuracy. With this release, Milvus now supports these data types for vector fields.

      Sample code can be found in float16_example.py and bfloat16_example.py.
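
      A brief sketch of declaring and filling a Float16 vector field (field and collection names are illustrative; BFloat16 works analogously with DataType.BFLOAT16_VECTOR):

      import numpy as np
      from pymilvus import MilvusClient, DataType

      client = MilvusClient("./fp16_demo.db")

      schema = client.create_schema()
      schema.add_field("id", DataType.INT64, is_primary=True)
      schema.add_field("vec", DataType.FLOAT16_VECTOR, dim=8)

      index_params = client.prepare_index_params()
      index_params.add_index(field_name="vec", index_type="AUTOINDEX", metric_type="L2")
      client.create_collection("fp16_demo", schema=schema, index_params=index_params)

      # pymilvus accepts numpy float16 arrays for FLOAT16_VECTOR fields.
      client.insert("fp16_demo", [{"id": 1, "vec": np.random.rand(8).astype(np.float16)}])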

      Upgraded Architecture

      L0 Segment

      This release includes a new segment type called the L0 segment, designed to record deleted data. This segment periodically compacts the stored deleted records and splits them into sealed segments, reducing the number of data flushes required for small deletions and leaving a small storage footprint. With this mechanism, Milvus completely separates data compaction from data flushing, enhancing the performance of delete and upsert operations.

      Refactored Bulk Insert

      This release also introduces improved bulk-insert logic, which allows you to import multiple files in a single bulk-insert request. With the refactored version, both the performance and the stability of bulk insert have improved significantly. The user experience has also been enhanced, with fine-tuned rate limiting and more user-friendly error messages. In addition, you can easily access the bulk-insert endpoints through Milvus's RESTful API.

      Memory-Mapped Storage

      Milvus uses memory-mapped storage (MMap) to optimize its memory usage. Instead of loading file content directly into memory, this mechanism maps the file content into memory. This approach comes with a tradeoff of some performance degradation. By enabling MMap for an HNSW-indexed collection on a host with 2 CPUs and 8 GB of RAM, you can load 4x more data with less than 10% performance degradation.

      In addition, this release also allows dynamic and fine-grained control over MMap without the need to restart Milvus.
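
      Following the Milvus mmap guide, toggling MMap per collection can be sketched as below (the collection name and URI are illustrative; the collection must be released before its properties can change):

      from pymilvus import connections, Collection

      connections.connect(uri="http://localhost:19530")
      collection = Collection("my_collection")

      collection.release()  # properties cannot change while the collection is loaded
      collection.set_properties(properties={"mmap.enabled": True})
      collection.load()     # reload; raw data is now memory-mapped instead of fully resident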

      diff --git a/localization/v2.4.x/site/ko/tutorials/build-rag-with-milvus.json b/localization/v2.4.x/site/ko/tutorials/build-rag-with-milvus.json index 66c453598..ad917b121 100644 --- a/localization/v2.4.x/site/ko/tutorials/build-rag-with-milvus.json +++ b/localization/v2.4.x/site/ko/tutorials/build-rag-with-milvus.json @@ -1 +1 @@ -{"codeList":["$ pip install --upgrade pymilvus openai requests tqdm\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","$ wget https://github.com/milvus-io/milvus-docs/releases/download/v2.4.6-preview/milvus_docs_2.4.x_en.zip\n$ unzip -q milvus_docs_2.4.x_en.zip -d milvus_docs\n","from glob import glob\n\ntext_lines = []\n\nfor file_path in glob(\"milvus_docs/en/faq/*.md\", recursive=True):\n with open(file_path, \"r\") as file:\n file_text = file.read()\n\n text_lines += file_text.split(\"# \")\n","from openai import OpenAI\n\nopenai_client = OpenAI()\n","def emb_text(text):\n return (\n openai_client.embeddings.create(input=text, model=\"text-embedding-3-small\")\n .data[0]\n .embedding\n )\n","test_embedding = emb_text(\"This is a test\")\nembedding_dim = len(test_embedding)\nprint(embedding_dim)\nprint(test_embedding[:10])\n","from pymilvus import MilvusClient\n\nmilvus_client = MilvusClient(uri=\"./milvus_demo.db\")\n\ncollection_name = \"my_rag_collection\"\n","if milvus_client.has_collection(collection_name):\n milvus_client.drop_collection(collection_name)\n","milvus_client.create_collection(\n collection_name=collection_name,\n dimension=embedding_dim,\n metric_type=\"IP\", # Inner product distance\n consistency_level=\"Strong\", # Strong consistency level\n)\n","from tqdm import tqdm\n\ndata = []\n\nfor i, line in enumerate(tqdm(text_lines, desc=\"Creating embeddings\")):\n data.append({\"id\": i, \"vector\": emb_text(line), \"text\": line})\n\nmilvus_client.insert(collection_name=collection_name, data=data)\n","question = \"How is data stored in milvus?\"\n","search_res = milvus_client.search(\n collection_name=collection_name,\n data=[\n emb_text(question)\n ], # Use the `emb_text` function to convert the question to an embedding vector\n limit=3, # Return top 3 results\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Inner product distance\n output_fields=[\"text\"], # Return the text field\n)\n","import json\n\nretrieved_lines_with_distances = [\n (res[\"entity\"][\"text\"], res[\"distance\"]) for res in search_res[0]\n]\nprint(json.dumps(retrieved_lines_with_distances, indent=4))\n","context = \"\\n\".join(\n [line_with_distance[0] for line_with_distance in retrieved_lines_with_distances]\n)\n","SYSTEM_PROMPT = \"\"\"\nHuman: You are an AI assistant. 
You are able to find answers to the questions from the contextual passage snippets provided.\n\"\"\"\nUSER_PROMPT = f\"\"\"\nUse the following pieces of information enclosed in tags to provide an answer to the question enclosed in tags.\n\n{context}\n\n\n{question}\n\n\"\"\"\n","response = openai_client.chat.completions.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n {\"role\": \"user\", \"content\": USER_PROMPT},\n ],\n)\nprint(response.choices[0].message.content)\n"],"headingContent":"","anchorList":[{"label":"Milvus로 RAG 구축하기","href":"Build-RAG-with-Milvus","type":1,"isActive":false},{"label":"준비","href":"Preparation","type":2,"isActive":false},{"label":"Milvus에 데이터 로드","href":"Load-data-into-Milvus","type":2,"isActive":false},{"label":"RAG 구축","href":"Build-RAG","type":2,"isActive":false},{"label":"빠른 배포","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install --upgrade pymilvus openai requests tqdm\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","$ wget https://github.com/milvus-io/milvus-docs/releases/download/v2.4.6-preview/milvus_docs_2.4.x_en.zip\n$ unzip -q milvus_docs_2.4.x_en.zip -d milvus_docs\n","from glob import glob\n\ntext_lines = []\n\nfor file_path in glob(\"milvus_docs/en/faq/*.md\", recursive=True):\n with open(file_path, \"r\") as file:\n file_text = file.read()\n\n text_lines += file_text.split(\"# \")\n","from openai import OpenAI\n\nopenai_client = OpenAI()\n","def emb_text(text):\n return (\n openai_client.embeddings.create(input=text, model=\"text-embedding-3-small\")\n .data[0]\n .embedding\n )\n","test_embedding = emb_text(\"This is a test\")\nembedding_dim = len(test_embedding)\nprint(embedding_dim)\nprint(test_embedding[:10])\n","from pymilvus import MilvusClient\n\nmilvus_client = MilvusClient(uri=\"./milvus_demo.db\")\n\ncollection_name = \"my_rag_collection\"\n","if milvus_client.has_collection(collection_name):\n milvus_client.drop_collection(collection_name)\n","milvus_client.create_collection(\n collection_name=collection_name,\n dimension=embedding_dim,\n metric_type=\"IP\", # Inner product distance\n consistency_level=\"Strong\", # Strong consistency level\n)\n","from tqdm import tqdm\n\ndata = []\n\nfor i, line in enumerate(tqdm(text_lines, desc=\"Creating embeddings\")):\n data.append({\"id\": i, \"vector\": emb_text(line), \"text\": line})\n\nmilvus_client.insert(collection_name=collection_name, data=data)\n","question = \"How is data stored in milvus?\"\n","search_res = milvus_client.search(\n collection_name=collection_name,\n data=[\n emb_text(question)\n ], # Use the `emb_text` function to convert the question to an embedding vector\n limit=3, # Return top 3 results\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Inner product distance\n output_fields=[\"text\"], # Return the text field\n)\n","import json\n\nretrieved_lines_with_distances = [\n (res[\"entity\"][\"text\"], res[\"distance\"]) for res in search_res[0]\n]\nprint(json.dumps(retrieved_lines_with_distances, indent=4))\n","context = \"\\n\".join(\n [line_with_distance[0] for line_with_distance in retrieved_lines_with_distances]\n)\n","SYSTEM_PROMPT = \"\"\"\nHuman: You are an AI assistant. 
You are able to find answers to the questions from the contextual passage snippets provided.\n\"\"\"\nUSER_PROMPT = f\"\"\"\nUse the following pieces of information enclosed in tags to provide an answer to the question enclosed in tags.\n\n{context}\n\n\n{question}\n\n\"\"\"\n","response = openai_client.chat.completions.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n {\"role\": \"user\", \"content\": USER_PROMPT},\n ],\n)\nprint(response.choices[0].message.content)\n"],"headingContent":"Build RAG with Milvus","anchorList":[{"label":"Milvus로 RAG 구축하기","href":"Build-RAG-with-Milvus","type":1,"isActive":false},{"label":"준비","href":"Preparation","type":2,"isActive":false},{"label":"Milvus에 데이터 로드","href":"Load-data-into-Milvus","type":2,"isActive":false},{"label":"RAG 구축","href":"Build-RAG","type":2,"isActive":false},{"label":"빠른 배포","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/tutorials/build-rag-with-milvus.md b/localization/v2.4.x/site/ko/tutorials/build-rag-with-milvus.md index d5c7ef943..057f992da 100644 --- a/localization/v2.4.x/site/ko/tutorials/build-rag-with-milvus.md +++ b/localization/v2.4.x/site/ko/tutorials/build-rag-with-milvus.md @@ -1,7 +1,7 @@ --- id: build-rag-with-milvus.md summary: 밀버스로 걸레 만들기 -title: Milvus로 RAG 구축 +title: Milvus로 RAG 구축하기 ---

      Build RAG with Milvus

      Open In Colab +GitHub Repository

      In this tutorial, we will show you how to build a RAG (Retrieval-Augmented Generation) pipeline with Milvus.

      A RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

      diff --git a/localization/v2.4.x/site/ko/tutorials/graph_rag_with_milvus.md b/localization/v2.4.x/site/ko/tutorials/graph_rag_with_milvus.md index 3e300ed2f..7b28545d9 100644 --- a/localization/v2.4.x/site/ko/tutorials/graph_rag_with_milvus.md +++ b/localization/v2.4.x/site/ko/tutorials/graph_rag_with_milvus.md @@ -18,7 +18,8 @@ title: Milvus를 사용한 그래프 RAG d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

      Open In Colab +GitHub Repository

      The widespread application of large language models highlights the importance of improving the accuracy and relevance of their responses. Retrieval-Augmented Generation (RAG) enhances models with external knowledge bases, providing more contextual information and mitigating problems such as hallucination and insufficient knowledge. However, relying solely on the simple RAG paradigm has its limitations, especially when dealing with complex entity relationships and multi-hop questions, where the model often struggles to provide accurate answers.

      Introducing knowledge graphs (KGs) into the RAG system offers a new solution. KGs present entities and their relationships in a structured way, providing more precise retrieval information and helping RAG better handle complex question-answering tasks. KG-RAG is still in its early stages, and there is no consensus yet on how to effectively retrieve entities and relationships from KGs or how to integrate vector similarity search with graph structures.

      In this notebook, we introduce a simple yet powerful approach that can greatly improve performance in this scenario. It is a simple RAG paradigm with multi-way retrieval followed by reranking, but it implements Graph RAG logically and achieves state-of-the-art performance in handling multi-hop questions. Let's see how it is implemented.

      @@ -371,7 +372,7 @@ relation_candidate_texts = [ ]

    We have obtained the candidate relationships by expanding the subgraph; in the next step, they are reranked by the LLM.

    LLM Reranking

    In this stage, we deploy the powerful self-attention mechanism of the LLM to further filter and refine the set of candidate relationships. We use a one-shot prompt that incorporates the query and the candidate relationship set, and instruct the LLM to select potential relationships that could assist in answering the query. Given that some queries may be complex, we adopt a chain-of-thought approach, allowing the LLM to articulate its thought process in its response. For convenient parsing, we stipulate that the LLM's response be in JSON format.

    query_prompt_one_shot_input = """I will provide you with a list of relationship descriptions. Your task is to select 3 relationships that may be useful to answer the given question. Please return a JSON object containing your thought process and a list of the selected relationships in order of their relevance.
     
     Question:
    diff --git a/localization/v2.4.x/site/ko/tutorials/hybrid_search_with_milvus.json b/localization/v2.4.x/site/ko/tutorials/hybrid_search_with_milvus.json
    index 6cc9ad7aa..0e4988b71 100644
    --- a/localization/v2.4.x/site/ko/tutorials/hybrid_search_with_milvus.json
    +++ b/localization/v2.4.x/site/ko/tutorials/hybrid_search_with_milvus.json
    @@ -1 +1 @@
    -{"codeList":["$ pip install --upgrade pymilvus \"pymilvus[model]\"\n","# Run this cell to download the dataset\n$ wget http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv\n","import pandas as pd\n\nfile_path = \"quora_duplicate_questions.tsv\"\ndf = pd.read_csv(file_path, sep=\"\\t\")\nquestions = set()\nfor _, row in df.iterrows():\n    obj = row.to_dict()\n    questions.add(obj[\"question1\"][:512])\n    questions.add(obj[\"question2\"][:512])\n    if len(questions) > 500:  # Skip this if you want to use the full dataset\n        break\n\ndocs = list(questions)\n\n# example question\nprint(docs[0])\n","from milvus_model.hybrid import BGEM3EmbeddingFunction\n\nef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\ndense_dim = ef.dim[\"dense\"]\n\n# Generate embeddings using BGE-M3 model\ndocs_embeddings = ef(docs)\n","from pymilvus import (\n    connections,\n    utility,\n    FieldSchema,\n    CollectionSchema,\n    DataType,\n    Collection,\n)\n\n# Connect to Milvus given URI\nconnections.connect(uri=\"./milvus.db\")\n\n# Specify the data schema for the new Collection\nfields = [\n    # Use auto generated id as primary key\n    FieldSchema(\n        name=\"pk\", dtype=DataType.VARCHAR, is_primary=True, auto_id=True, max_length=100\n    ),\n    # Store the original text to retrieve based on semantically distance\n    FieldSchema(name=\"text\", dtype=DataType.VARCHAR, max_length=512),\n    # Milvus now supports both sparse and dense vectors,\n    # we can store each in a separate field to conduct hybrid search on both vectors\n    FieldSchema(name=\"sparse_vector\", dtype=DataType.SPARSE_FLOAT_VECTOR),\n    FieldSchema(name=\"dense_vector\", dtype=DataType.FLOAT_VECTOR, dim=dense_dim),\n]\nschema = CollectionSchema(fields)\n\n# Create collection (drop the old one if exists)\ncol_name = \"hybrid_demo\"\nif utility.has_collection(col_name):\n    Collection(col_name).drop()\ncol = Collection(col_name, schema, consistency_level=\"Strong\")\n\n# To make vector search efficient, we need to create indices for the vector fields\nsparse_index = {\"index_type\": \"SPARSE_INVERTED_INDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"sparse_vector\", sparse_index)\ndense_index = {\"index_type\": \"AUTOINDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"dense_vector\", dense_index)\ncol.load()\n","# For efficiency, we insert 50 records in each small batch\nfor i in range(0, len(docs), 50):\n    batched_entities = [\n        docs[i : i + 50],\n        docs_embeddings[\"sparse\"][i : i + 50],\n        docs_embeddings[\"dense\"][i : i + 50],\n    ]\n    col.insert(batched_entities)\nprint(\"Number of entities inserted:\", col.num_entities)\n","# Enter your search query\nquery = input(\"Enter your search query: \")\nprint(query)\n\n# Generate embeddings for the query\nquery_embeddings = ef([query])\n# print(query_embeddings)\n","from pymilvus import (\n    AnnSearchRequest,\n    WeightedRanker,\n)\n\n\ndef dense_search(col, query_dense_embedding, limit=10):\n    search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    res = col.search(\n        [query_dense_embedding],\n        anns_field=\"dense_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef sparse_search(col, query_sparse_embedding, limit=10):\n    search_params = {\n        \"metric_type\": \"IP\",\n        \"params\": {},\n    }\n    res = col.search(\n        [query_sparse_embedding],\n        
anns_field=\"sparse_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef hybrid_search(\n    col,\n    query_dense_embedding,\n    query_sparse_embedding,\n    sparse_weight=1.0,\n    dense_weight=1.0,\n    limit=10,\n):\n    dense_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    dense_req = AnnSearchRequest(\n        [query_dense_embedding], \"dense_vector\", dense_search_params, limit=limit\n    )\n    sparse_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    sparse_req = AnnSearchRequest(\n        [query_sparse_embedding], \"sparse_vector\", sparse_search_params, limit=limit\n    )\n    rerank = WeightedRanker(sparse_weight, dense_weight)\n    res = col.hybrid_search(\n        [sparse_req, dense_req], rerank=rerank, limit=limit, output_fields=[\"text\"]\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n","dense_results = dense_search(col, query_embeddings[\"dense\"][0])\nsparse_results = sparse_search(col, query_embeddings[\"sparse\"][0])\nhybrid_results = hybrid_search(\n    col,\n    query_embeddings[\"dense\"][0],\n    query_embeddings[\"sparse\"][0],\n    sparse_weight=0.7,\n    dense_weight=1.0,\n)\n","def doc_text_formatting(ef, query, docs):\n    tokenizer = ef.model.tokenizer\n    query_tokens_ids = tokenizer.encode(query, return_offsets_mapping=True)\n    query_tokens = tokenizer.convert_ids_to_tokens(query_tokens_ids)\n    formatted_texts = []\n\n    for doc in docs:\n        ldx = 0\n        landmarks = []\n        encoding = tokenizer.encode_plus(doc, return_offsets_mapping=True)\n        tokens = tokenizer.convert_ids_to_tokens(encoding[\"input_ids\"])[1:-1]\n        offsets = encoding[\"offset_mapping\"][1:-1]\n        for token, (start, end) in zip(tokens, offsets):\n            if token in query_tokens:\n                if len(landmarks) != 0 and start == landmarks[-1]:\n                    landmarks[-1] = end\n                else:\n                    landmarks.append(start)\n                    landmarks.append(end)\n        close = False\n        formatted_text = \"\"\n        for i, c in enumerate(doc):\n            if ldx == len(landmarks):\n                pass\n            elif i == landmarks[ldx]:\n                if close:\n                    formatted_text += \"\"\n                else:\n                    formatted_text += \"\"\n                close = not close\n                ldx = ldx + 1\n            formatted_text += c\n        if close is True:\n            formatted_text += \"\"\n        formatted_texts.append(formatted_text)\n    return formatted_texts\n","from IPython.display import Markdown, display\n\n# Dense search results\ndisplay(Markdown(\"**Dense Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, dense_results)\nfor result in dense_results:\n    display(Markdown(result))\n\n# Sparse search results\ndisplay(Markdown(\"\\n**Sparse Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, sparse_results)\nfor result in formatted_results:\n    display(Markdown(result))\n\n# Hybrid search results\ndisplay(Markdown(\"\\n**Hybrid Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, hybrid_results)\nfor result in formatted_results:\n    display(Markdown(result))\n"],"headingContent":"","anchorList":[{"label":"Milvus를 사용한 하이브리드 검색","href":"Hybrid-Search-with-Milvus","type":1,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ pip install --upgrade pymilvus \"pymilvus[model]\"\n","# Run this cell to download the dataset\n$ wget http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv\n","import pandas as pd\n\nfile_path = \"quora_duplicate_questions.tsv\"\ndf = pd.read_csv(file_path, sep=\"\\t\")\nquestions = set()\nfor _, row in df.iterrows():\n    obj = row.to_dict()\n    questions.add(obj[\"question1\"][:512])\n    questions.add(obj[\"question2\"][:512])\n    if len(questions) > 500:  # Skip this if you want to use the full dataset\n        break\n\ndocs = list(questions)\n\n# example question\nprint(docs[0])\n","from milvus_model.hybrid import BGEM3EmbeddingFunction\n\nef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\ndense_dim = ef.dim[\"dense\"]\n\n# Generate embeddings using BGE-M3 model\ndocs_embeddings = ef(docs)\n","from pymilvus import (\n    connections,\n    utility,\n    FieldSchema,\n    CollectionSchema,\n    DataType,\n    Collection,\n)\n\n# Connect to Milvus given URI\nconnections.connect(uri=\"./milvus.db\")\n\n# Specify the data schema for the new Collection\nfields = [\n    # Use auto generated id as primary key\n    FieldSchema(\n        name=\"pk\", dtype=DataType.VARCHAR, is_primary=True, auto_id=True, max_length=100\n    ),\n    # Store the original text to retrieve based on semantically distance\n    FieldSchema(name=\"text\", dtype=DataType.VARCHAR, max_length=512),\n    # Milvus now supports both sparse and dense vectors,\n    # we can store each in a separate field to conduct hybrid search on both vectors\n    FieldSchema(name=\"sparse_vector\", dtype=DataType.SPARSE_FLOAT_VECTOR),\n    FieldSchema(name=\"dense_vector\", dtype=DataType.FLOAT_VECTOR, dim=dense_dim),\n]\nschema = CollectionSchema(fields)\n\n# Create collection (drop the old one if exists)\ncol_name = \"hybrid_demo\"\nif utility.has_collection(col_name):\n    Collection(col_name).drop()\ncol = Collection(col_name, schema, consistency_level=\"Strong\")\n\n# To make vector search efficient, we need to create indices for the vector fields\nsparse_index = {\"index_type\": \"SPARSE_INVERTED_INDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"sparse_vector\", sparse_index)\ndense_index = {\"index_type\": \"AUTOINDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"dense_vector\", dense_index)\ncol.load()\n","# For efficiency, we insert 50 records in each small batch\nfor i in range(0, len(docs), 50):\n    batched_entities = [\n        docs[i : i + 50],\n        docs_embeddings[\"sparse\"][i : i + 50],\n        docs_embeddings[\"dense\"][i : i + 50],\n    ]\n    col.insert(batched_entities)\nprint(\"Number of entities inserted:\", col.num_entities)\n","# Enter your search query\nquery = input(\"Enter your search query: \")\nprint(query)\n\n# Generate embeddings for the query\nquery_embeddings = ef([query])\n# print(query_embeddings)\n","from pymilvus import (\n    AnnSearchRequest,\n    WeightedRanker,\n)\n\n\ndef dense_search(col, query_dense_embedding, limit=10):\n    search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    res = col.search(\n        [query_dense_embedding],\n        anns_field=\"dense_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef sparse_search(col, query_sparse_embedding, limit=10):\n    search_params = {\n        \"metric_type\": \"IP\",\n        \"params\": {},\n    }\n    res = col.search(\n        [query_sparse_embedding],\n        
anns_field=\"sparse_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef hybrid_search(\n    col,\n    query_dense_embedding,\n    query_sparse_embedding,\n    sparse_weight=1.0,\n    dense_weight=1.0,\n    limit=10,\n):\n    dense_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    dense_req = AnnSearchRequest(\n        [query_dense_embedding], \"dense_vector\", dense_search_params, limit=limit\n    )\n    sparse_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    sparse_req = AnnSearchRequest(\n        [query_sparse_embedding], \"sparse_vector\", sparse_search_params, limit=limit\n    )\n    rerank = WeightedRanker(sparse_weight, dense_weight)\n    res = col.hybrid_search(\n        [sparse_req, dense_req], rerank=rerank, limit=limit, output_fields=[\"text\"]\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n","dense_results = dense_search(col, query_embeddings[\"dense\"][0])\nsparse_results = sparse_search(col, query_embeddings[\"sparse\"]._getrow(0))\nhybrid_results = hybrid_search(\n    col,\n    query_embeddings[\"dense\"][0],\n    query_embeddings[\"sparse\"]._getrow(0),\n    sparse_weight=0.7,\n    dense_weight=1.0,\n)\n","def doc_text_formatting(ef, query, docs):\n    tokenizer = ef.model.tokenizer\n    query_tokens_ids = tokenizer.encode(query, return_offsets_mapping=True)\n    query_tokens = tokenizer.convert_ids_to_tokens(query_tokens_ids)\n    formatted_texts = []\n\n    for doc in docs:\n        ldx = 0\n        landmarks = []\n        encoding = tokenizer.encode_plus(doc, return_offsets_mapping=True)\n        tokens = tokenizer.convert_ids_to_tokens(encoding[\"input_ids\"])[1:-1]\n        offsets = encoding[\"offset_mapping\"][1:-1]\n        for token, (start, end) in zip(tokens, offsets):\n            if token in query_tokens:\n                if len(landmarks) != 0 and start == landmarks[-1]:\n                    landmarks[-1] = end\n                else:\n                    landmarks.append(start)\n                    landmarks.append(end)\n        close = False\n        formatted_text = \"\"\n        for i, c in enumerate(doc):\n            if ldx == len(landmarks):\n                pass\n            elif i == landmarks[ldx]:\n                if close:\n                    formatted_text += \"\"\n                else:\n                    formatted_text += \"\"\n                close = not close\n                ldx = ldx + 1\n            formatted_text += c\n        if close is True:\n            formatted_text += \"\"\n        formatted_texts.append(formatted_text)\n    return formatted_texts\n","from IPython.display import Markdown, display\n\n# Dense search results\ndisplay(Markdown(\"**Dense Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, dense_results)\nfor result in dense_results:\n    display(Markdown(result))\n\n# Sparse search results\ndisplay(Markdown(\"\\n**Sparse Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, sparse_results)\nfor result in formatted_results:\n    display(Markdown(result))\n\n# Hybrid search results\ndisplay(Markdown(\"\\n**Hybrid Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, hybrid_results)\nfor result in formatted_results:\n    display(Markdown(result))\n"],"headingContent":"Hybrid Search with Milvus","anchorList":[{"label":"Milvus를 사용한 하이브리드 검색","href":"Hybrid-Search-with-Milvus","type":1,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/ko/tutorials/hybrid_search_with_milvus.md b/localization/v2.4.x/site/ko/tutorials/hybrid_search_with_milvus.md
    index aa3408630..750d7f1d1 100644
    --- a/localization/v2.4.x/site/ko/tutorials/hybrid_search_with_milvus.md
    +++ b/localization/v2.4.x/site/ko/tutorials/hybrid_search_with_milvus.md
    @@ -18,7 +18,8 @@ title: Milvus를 사용한 하이브리드 검색
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
-Open In Colab
+Open In Colab
+GitHub Repository

이 튜토리얼에서는 Milvus와 BGE-M3 모델을 사용하여 하이브리드 검색을 수행하는 방법을 보여드리겠습니다. BGE-M3 모델은 텍스트를 고밀도(dense) 벡터와 스파스(sparse) 벡터로 모두 변환할 수 있습니다. Milvus는 두 가지 유형의 벡터를 하나의 컬렉션에 저장할 수 있으므로, 결과의 연관성을 높이는 하이브리드 검색이 가능합니다.

Milvus는 고밀도(dense), 스파스(sparse), 하이브리드 검색 방식을 지원합니다:
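
참고로, 아래는 이 튜토리얼의 전체 코드에 정의된 컬렉션 col(hybrid_demo)과 BGE-M3 임베딩 함수 ef가 이미 준비되어 있다고 가정한 최소 스케치로, 세 가지 검색 방식이 하나의 하이브리드 검색으로 어떻게 조합되는지 보여줍니다. 질의 문장과 가중치 값은 임의의 예시입니다.

from pymilvus import AnnSearchRequest, WeightedRanker

# Minimal sketch: assumes `col` (the "hybrid_demo" collection) and `ef`
# (the BGEM3EmbeddingFunction) are set up as in the full tutorial code.
query_embeddings = ef(["How to start learning programming?"])  # arbitrary query

# One ANN request per vector field: dense and sparse
dense_req = AnnSearchRequest(
    [query_embeddings["dense"][0]], "dense_vector",
    {"metric_type": "IP", "params": {}}, limit=10,
)
sparse_req = AnnSearchRequest(
    [query_embeddings["sparse"]._getrow(0)], "sparse_vector",
    {"metric_type": "IP", "params": {}}, limit=10,
)

# Hybrid search fuses both result lists with a weighted reranker
# (sparse weight first, dense weight second, as in the tutorial code)
res = col.hybrid_search(
    [sparse_req, dense_req], rerank=WeightedRanker(0.7, 1.0),
    limit=10, output_fields=["text"],
)[0]
print([hit.get("text") for hit in res])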

    @@ -201,11 +202,11 @@ def dense_search(col,

    정의된 함수를 사용하여 세 가지 다른 검색을 실행해 보겠습니다:

    dense_results = dense_search(col, query_embeddings["dense"][0])
    -sparse_results = sparse_search(col, query_embeddings["sparse"][0])
    +sparse_results = sparse_search(col, query_embeddings["sparse"]._getrow(0))
     hybrid_results = hybrid_search(
         col,
         query_embeddings["dense"][0],
    -    query_embeddings["sparse"][0],
    +    query_embeddings["sparse"]._getrow(0),
         sparse_weight=0.7,
         dense_weight=1.0,
     )
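
위 변경의 배경: BGE-M3 임베딩 결과의 "sparse" 부분은 SciPy 희소 행렬처럼 동작하므로, 단일 질의 행을 꺼낼 때 일반 인덱싱 [0] 대신 _getrow(0)을 사용합니다. 아래는 ef가 앞서 정의된 BGEM3EmbeddingFunction이라고 가정한 간단한 확인용 스케치입니다.

# Minimal sketch: assumes `ef` is the BGEM3EmbeddingFunction defined earlier.
query_embeddings = ef(["How to learn robotics?"])  # arbitrary query

dense_vec = query_embeddings["dense"][0]            # one dense vector
sparse_row = query_embeddings["sparse"]._getrow(0)  # one sparse row vector

# The sparse output is a SciPy-style sparse matrix, hence _getrow()
print(type(sparse_row))
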
    @@ -283,30 +284,30 @@ formatted_results = doc_text_formatting(ef, query, hybrid_results)
     

    스파스 검색 결과:

    자바 프로그래밍이란 무엇인가요? 자바 프로그래밍 언어를 배우는 방법?

    로봇 공학을 배우는 가장 좋은 방법은 무엇인가요?

머신 러닝의 대안은 무엇인가요?

C 프로그래밍을 사용하여 Linux에서 새 터미널과 새 셸을 만들려면 어떻게 해야 합니까?

C 프로그래밍(Linux 터미널)을 사용하여 새 터미널에서 새 셸을 만들려면 어떻게 해야 하나요?

    하이데라바드에서 어떤 사업을 시작하는 것이 더 낫습니까?

    하이데라바드에서 어떤 사업을 시작하는 것이 좋은가요?

로봇 공학을 시작하는 가장 좋은 방법은 무엇인가요? 작업을 시작할 수 있는 최고의 개발 보드는 무엇입니까?

초보자가 컴퓨터 프로그래밍 알고리즘을 이해하려면 어떤 수학이 필요하나요? 완전 초보자에게 적합한 알고리즘 관련 서적은 무엇인가요?

-어떻게 하면 삶이 자신에게 적합하게 만들고 삶이 정신적, 정서적으로 학대하는 것을 막을 수 있습니까?
+어떻게 하면 삶이 자신에게 적합하게 만들고 삶이 정신적, 정서적으로 자신을 학대하는 것을 막을 수 있습니까?

    하이브리드 검색 결과:

로봇 공학을 시작하는 가장 좋은 방법은 무엇인가요? 작업을 시작할 수 있는 최고의 개발 보드는 무엇입니까?

    자바 프로그래밍이란 무엇인가요? 자바 프로그래밍 언어를 배우는 방법?

    로봇 공학을 배우는 가장 좋은 방법은 무엇인가요?

UPSC는 어떻게 준비하나요?

물리학을 쉽게 배우려면 어떻게 해야 하나요?

    프랑스어를 배우는 가장 좋은 방법은 무엇인가요?

영어를 유창하게 배우려면 어떻게 해야 하나요?

컴퓨터 보안은 어떻게 배울 수 있나요?

정보 보안을 배우려면 어떻게 시작해야 하나요?

자바 같은 컴퓨터 언어를 배우려면 어떻게 해야 하나요?

머신 러닝의 대안은 무엇인가요?

C 프로그래밍을 사용하여 Linux에서 새 터미널과 새 셸을 만들려면 어떻게 해야 하나요?

C 프로그래밍(Linux 터미널)을 사용하여 새 터미널에서 새 셸을 만들려면 어떻게 해야 하나요?

    하이데라바드에서 어떤 사업을 시작하는 것이 더 낫습니까?

    하이데라바드에서 어떤 사업을 시작하는 것이 좋은가요?

완전한 초보자가 컴퓨터 프로그래밍 알고리즘을 이해하려면 어떤 수학이 필요합니까? 완전한 초보자에게 적합한 알고리즘에 관한 책은 무엇입니까?

어떻게 삶을 자신에게 적합하게 만들고 삶이 정신적, 정서적으로 당신을 학대하는 것을 막을 수 있습니까?

    빠른 배포

    이 튜토리얼을 통해 온라인 데모를 시작하는 방법에 대해 알아보려면 예제 애플리케이션을 참조하세요.

    diff --git a/localization/v2.4.x/site/ko/tutorials/image_similarity_search.json b/localization/v2.4.x/site/ko/tutorials/image_similarity_search.json index ee14c8d82..beb0722f3 100644 --- a/localization/v2.4.x/site/ko/tutorials/image_similarity_search.json +++ b/localization/v2.4.x/site/ko/tutorials/image_similarity_search.json @@ -1 +1 @@ -{"codeList":["!wget https://github.com/milvus-io/pymilvus-assets/releases/download/imagedata/reverse_image_search.zip\n!unzip -q -o reverse_image_search.zip\n","$ pip install pymilvus --upgrade\n$ pip install timm\n","import torch\nfrom PIL import Image\nimport timm\nfrom sklearn.preprocessing import normalize\nfrom timm.data import resolve_data_config\nfrom timm.data.transforms_factory import create_transform\n\n\nclass FeatureExtractor:\n def __init__(self, modelname):\n # Load the pre-trained model\n self.model = timm.create_model(\n modelname, pretrained=True, num_classes=0, global_pool=\"avg\"\n )\n self.model.eval()\n\n # Get the input size required by the model\n self.input_size = self.model.default_cfg[\"input_size\"]\n\n config = resolve_data_config({}, model=modelname)\n # Get the preprocessing function provided by TIMM for the model\n self.preprocess = create_transform(**config)\n\n def __call__(self, imagepath):\n # Preprocess the input image\n input_image = Image.open(imagepath).convert(\"RGB\") # Convert to RGB if needed\n input_image = self.preprocess(input_image)\n\n # Convert the image to a PyTorch tensor and add a batch dimension\n input_tensor = input_image.unsqueeze(0)\n\n # Perform inference\n with torch.no_grad():\n output = self.model(input_tensor)\n\n # Extract the feature vector\n feature_vector = output.squeeze().numpy()\n\n return normalize(feature_vector.reshape(1, -1), norm=\"l2\").flatten()\n","from pymilvus import MilvusClient\n\n# Set up a Milvus client\nclient = MilvusClient(uri=\"example.db\")\n# Create a collection in quick setup mode\nif client.has_collection(collection_name=\"image_embeddings\"):\n client.drop_collection(collection_name=\"image_embeddings\")\nclient.create_collection(\n collection_name=\"image_embeddings\",\n vector_field_name=\"vector\",\n dimension=512,\n auto_id=True,\n enable_dynamic_field=True,\n metric_type=\"COSINE\",\n)\n","import os\n\nextractor = FeatureExtractor(\"resnet34\")\n\nroot = \"./train\"\ninsert = True\nif insert is True:\n for dirpath, foldername, filenames in os.walk(root):\n for filename in filenames:\n if filename.endswith(\".JPEG\"):\n filepath = dirpath + \"/\" + filename\n image_embedding = extractor(filepath)\n client.insert(\n \"image_embeddings\",\n {\"vector\": image_embedding, \"filename\": filepath},\n )\n","from IPython.display import display\n\nquery_image = \"./test/Afghan_hound/n02088094_4261.JPEG\"\n\nresults = client.search(\n \"image_embeddings\",\n data=[extractor(query_image)],\n output_fields=[\"filename\"],\n search_params={\"metric_type\": \"COSINE\"},\n)\nimages = []\nfor result in results:\n for hit in result[:10]:\n filename = hit[\"entity\"][\"filename\"]\n img = Image.open(filename)\n img = img.resize((150, 150))\n images.append(img)\n\nwidth = 150 * 5\nheight = 150 * 2\nconcatenated_image = Image.new(\"RGB\", (width, height))\n\nfor idx, img in enumerate(images):\n x = idx % 5\n y = idx // 5\n concatenated_image.paste(img, (x * 150, y * 150))\ndisplay(\"query\")\ndisplay(Image.open(query_image).resize((150, 150)))\ndisplay(\"results\")\ndisplay(concatenated_image)\n"],"headingContent":"","anchorList":[{"label":"Milvus로 이미지 
검색하기","href":"Image-Search-with-Milvus","type":1,"isActive":false},{"label":"데이터 세트 준비","href":"Dataset-Preparation","type":2,"isActive":false},{"label":"전제 조건","href":"Prequisites","type":2,"isActive":false},{"label":"특징 추출기 정의하기","href":"Define-the-Feature-Extractor","type":2,"isActive":false},{"label":"Milvus 컬렉션 만들기","href":"Create-a-Milvus-Collection","type":2,"isActive":false},{"label":"밀버스에 임베딩 삽입하기","href":"Insert-the-Embeddings-to-Milvus","type":2,"isActive":false},{"label":"빠른 배포","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["!wget https://github.com/milvus-io/pymilvus-assets/releases/download/imagedata/reverse_image_search.zip\n!unzip -q -o reverse_image_search.zip\n","$ pip install pymilvus --upgrade\n$ pip install timm\n","import torch\nfrom PIL import Image\nimport timm\nfrom sklearn.preprocessing import normalize\nfrom timm.data import resolve_data_config\nfrom timm.data.transforms_factory import create_transform\n\n\nclass FeatureExtractor:\n def __init__(self, modelname):\n # Load the pre-trained model\n self.model = timm.create_model(\n modelname, pretrained=True, num_classes=0, global_pool=\"avg\"\n )\n self.model.eval()\n\n # Get the input size required by the model\n self.input_size = self.model.default_cfg[\"input_size\"]\n\n config = resolve_data_config({}, model=modelname)\n # Get the preprocessing function provided by TIMM for the model\n self.preprocess = create_transform(**config)\n\n def __call__(self, imagepath):\n # Preprocess the input image\n input_image = Image.open(imagepath).convert(\"RGB\") # Convert to RGB if needed\n input_image = self.preprocess(input_image)\n\n # Convert the image to a PyTorch tensor and add a batch dimension\n input_tensor = input_image.unsqueeze(0)\n\n # Perform inference\n with torch.no_grad():\n output = self.model(input_tensor)\n\n # Extract the feature vector\n feature_vector = output.squeeze().numpy()\n\n return normalize(feature_vector.reshape(1, -1), norm=\"l2\").flatten()\n","from pymilvus import MilvusClient\n\n# Set up a Milvus client\nclient = MilvusClient(uri=\"example.db\")\n# Create a collection in quick setup mode\nif client.has_collection(collection_name=\"image_embeddings\"):\n client.drop_collection(collection_name=\"image_embeddings\")\nclient.create_collection(\n collection_name=\"image_embeddings\",\n vector_field_name=\"vector\",\n dimension=512,\n auto_id=True,\n enable_dynamic_field=True,\n metric_type=\"COSINE\",\n)\n","import os\n\nextractor = FeatureExtractor(\"resnet34\")\n\nroot = \"./train\"\ninsert = True\nif insert is True:\n for dirpath, foldername, filenames in os.walk(root):\n for filename in filenames:\n if filename.endswith(\".JPEG\"):\n filepath = dirpath + \"/\" + filename\n image_embedding = extractor(filepath)\n client.insert(\n \"image_embeddings\",\n {\"vector\": image_embedding, \"filename\": filepath},\n )\n","from IPython.display import display\n\nquery_image = \"./test/Afghan_hound/n02088094_4261.JPEG\"\n\nresults = client.search(\n \"image_embeddings\",\n data=[extractor(query_image)],\n output_fields=[\"filename\"],\n search_params={\"metric_type\": \"COSINE\"},\n)\nimages = []\nfor result in results:\n for hit in result[:10]:\n filename = hit[\"entity\"][\"filename\"]\n img = Image.open(filename)\n img = img.resize((150, 150))\n images.append(img)\n\nwidth = 150 * 5\nheight = 150 * 2\nconcatenated_image = Image.new(\"RGB\", (width, height))\n\nfor idx, img in enumerate(images):\n x = idx % 5\n y = idx // 5\n 
concatenated_image.paste(img, (x * 150, y * 150))\ndisplay(\"query\")\ndisplay(Image.open(query_image).resize((150, 150)))\ndisplay(\"results\")\ndisplay(concatenated_image)\n"],"headingContent":"Image Search with Milvus","anchorList":[{"label":"Milvus로 이미지 검색하기","href":"Image-Search-with-Milvus","type":1,"isActive":false},{"label":"데이터 세트 준비","href":"Dataset-Preparation","type":2,"isActive":false},{"label":"전제 조건","href":"Prequisites","type":2,"isActive":false},{"label":"특징 추출기 정의하기","href":"Define-the-Feature-Extractor","type":2,"isActive":false},{"label":"Milvus 컬렉션 만들기","href":"Create-a-Milvus-Collection","type":2,"isActive":false},{"label":"밀버스에 임베딩 삽입하기","href":"Insert-the-Embeddings-to-Milvus","type":2,"isActive":false},{"label":"빠른 배포","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/tutorials/image_similarity_search.md b/localization/v2.4.x/site/ko/tutorials/image_similarity_search.md index c0e4f3204..0756e6fab 100644 --- a/localization/v2.4.x/site/ko/tutorials/image_similarity_search.md +++ b/localization/v2.4.x/site/ko/tutorials/image_similarity_search.md @@ -1,7 +1,7 @@ --- id: image_similarity_search.md summary: Milvus로 이미지 검색 -title: Milvus로 이미지 검색 +title: Milvus로 이미지 검색하기 ---

    Milvus로 이미지 검색하기

-Open In Colab
+Open In Colab
+GitHub Repository

이 노트북에서는 Milvus를 사용해 데이터 세트에서 유사한 이미지를 검색하는 방법을 보여드립니다. 이를 위해 ImageNet 데이터 세트의 하위 집합을 사용한 다음, 아프간 사냥개 이미지를 검색해 보겠습니다.
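
아래는 이 노트북의 전체 코드(FeatureExtractor 클래스와 채워진 image_embeddings 컬렉션)가 이미 준비되어 있다고 가정하고, 검색 단계만 요약한 최소 스케치입니다.

from pymilvus import MilvusClient

# Minimal sketch: assumes the FeatureExtractor class and the populated
# "image_embeddings" collection from this notebook already exist.
client = MilvusClient(uri="example.db")
extractor = FeatureExtractor("resnet34")

# Embed the query image and retrieve similar images by cosine similarity
results = client.search(
    "image_embeddings",
    data=[extractor("./test/Afghan_hound/n02088094_4261.JPEG")],
    output_fields=["filename"],
    search_params={"metric_type": "COSINE"},
)
for hit in results[0][:10]:
    print(hit["entity"]["filename"])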

    데이터 세트 준비

-그런 다음 이미지 임베딩을 저장할 Milvus 컬렉션을 생성해야 합니다.
+그런 다음 이미지 임베딩을 저장할 Milvus 컬렉션을 만들어야 합니다.

    from pymilvus import MilvusClient
     
     # Set up a Milvus client
    diff --git a/localization/v2.4.x/site/ko/tutorials/multimodal_rag_with_milvus.json b/localization/v2.4.x/site/ko/tutorials/multimodal_rag_with_milvus.json
    index aa8c36e12..e98759bdb 100644
    --- a/localization/v2.4.x/site/ko/tutorials/multimodal_rag_with_milvus.json
    +++ b/localization/v2.4.x/site/ko/tutorials/multimodal_rag_with_milvus.json
    @@ -1 +1 @@
    -{"codeList":["$ pip install --upgrade pymilvus openai datasets opencv-python timm einops ftfy peft tqdm\n","$ git clone https://github.com/FlagOpen/FlagEmbedding.git\n$ pip install -e FlagEmbedding\n","$ wget https://github.com/milvus-io/bootcamp/releases/download/data/amazon_reviews_2023_subset.tar.gz\n$ tar -xzf amazon_reviews_2023_subset.tar.gz\n","$ wget https://huggingface.co/BAAI/bge-visualized/resolve/main/Visualized_base_en_v1.5.pth\n","import torch\nfrom FlagEmbedding.visual.modeling import Visualized_BGE\n\n\nclass Encoder:\n    def __init__(self, model_name: str, model_path: str):\n        self.model = Visualized_BGE(model_name_bge=model_name, model_weight=model_path)\n        self.model.eval()\n\n    def encode_query(self, image_path: str, text: str) -> list[float]:\n        with torch.no_grad():\n            query_emb = self.model.encode(image=image_path, text=text)\n        return query_emb.tolist()[0]\n\n    def encode_image(self, image_path: str) -> list[float]:\n        with torch.no_grad():\n            query_emb = self.model.encode(image=image_path)\n        return query_emb.tolist()[0]\n\n\nmodel_name = \"BAAI/bge-base-en-v1.5\"\nmodel_path = \"./Visualized_base_en_v1.5.pth\"  # Change to your own value if using a different model path\nencoder = Encoder(model_name, model_path)\n","import os\nfrom tqdm import tqdm\nfrom glob import glob\n\n\n# Generate embeddings for the image dataset\ndata_dir = (\n    \"./images_folder\"  # Change to your own value if using a different data directory\n)\nimage_list = glob(\n    os.path.join(data_dir, \"images\", \"*.jpg\")\n)  # We will only use images ending with \".jpg\"\nimage_dict = {}\nfor image_path in tqdm(image_list, desc=\"Generating image embeddings: \"):\n    try:\n        image_dict[image_path] = encoder.encode_image(image_path)\n    except Exception as e:\n        print(f\"Failed to generate embedding for {image_path}. 
Skipped.\")\n        continue\nprint(\"Number of encoded images:\", len(image_dict))\n","from pymilvus import MilvusClient\n\n\ndim = len(list(image_dict.values())[0])\ncollection_name = \"multimodal_rag_demo\"\n\n# Connect to Milvus client given URI\nmilvus_client = MilvusClient(uri=\"./milvus_demo.db\")\n\n# Create Milvus Collection\n# By default, vector field name is \"vector\"\nmilvus_client.create_collection(\n    collection_name=collection_name,\n    auto_id=True,\n    dimension=dim,\n    enable_dynamic_field=True,\n)\n\n# Insert data into collection\nmilvus_client.insert(\n    collection_name=collection_name,\n    data=[{\"image_path\": k, \"vector\": v} for k, v in image_dict.items()],\n)\n","query_image = os.path.join(\n    data_dir, \"leopard.jpg\"\n)  # Change to your own query image path\nquery_text = \"phone case with this image theme\"\n\n# Generate query embedding given image and text instructions\nquery_vec = encoder.encode_query(image_path=query_image, text=query_text)\n\nsearch_results = milvus_client.search(\n    collection_name=collection_name,\n    data=[query_vec],\n    output_fields=[\"image_path\"],\n    limit=9,  # Max number of search results to return\n    search_params={\"metric_type\": \"COSINE\", \"params\": {}},  # Search parameters\n)[0]\n\nretrieved_images = [hit.get(\"entity\").get(\"image_path\") for hit in search_results]\nprint(retrieved_images)\n","import numpy as np\nimport cv2\n\nimg_height = 300\nimg_width = 300\nrow_count = 3\n\n\ndef create_panoramic_view(query_image_path: str, retrieved_images: list) -> np.ndarray:\n    \"\"\"\n    creates a 5x5 panoramic view image from a list of images\n\n    args:\n        images: list of images to be combined\n\n    returns:\n        np.ndarray: the panoramic view image\n    \"\"\"\n    panoramic_width = img_width * row_count\n    panoramic_height = img_height * row_count\n    panoramic_image = np.full(\n        (panoramic_height, panoramic_width, 3), 255, dtype=np.uint8\n    )\n\n    # create and resize the query image with a blue border\n    query_image_null = np.full((panoramic_height, img_width, 3), 255, dtype=np.uint8)\n    query_image = Image.open(query_image_path).convert(\"RGB\")\n    query_array = np.array(query_image)[:, :, ::-1]\n    resized_image = cv2.resize(query_array, (img_width, img_height))\n\n    border_size = 10\n    blue = (255, 0, 0)  # blue color in BGR\n    bordered_query_image = cv2.copyMakeBorder(\n        resized_image,\n        border_size,\n        border_size,\n        border_size,\n        border_size,\n        cv2.BORDER_CONSTANT,\n        value=blue,\n    )\n\n    query_image_null[img_height * 2 : img_height * 3, 0:img_width] = cv2.resize(\n        bordered_query_image, (img_width, img_height)\n    )\n\n    # add text \"query\" below the query image\n    text = \"query\"\n    font_scale = 1\n    font_thickness = 2\n    text_org = (10, img_height * 3 + 30)\n    cv2.putText(\n        query_image_null,\n        text,\n        text_org,\n        cv2.FONT_HERSHEY_SIMPLEX,\n        font_scale,\n        blue,\n        font_thickness,\n        cv2.LINE_AA,\n    )\n\n    # combine the rest of the images into the panoramic view\n    retrieved_imgs = [\n        np.array(Image.open(img).convert(\"RGB\"))[:, :, ::-1] for img in retrieved_images\n    ]\n    for i, image in enumerate(retrieved_imgs):\n        image = cv2.resize(image, (img_width - 4, img_height - 4))\n        row = i // row_count\n        col = i % row_count\n        start_row = row * img_height\n        start_col = col * 
img_width\n\n        border_size = 2\n        bordered_image = cv2.copyMakeBorder(\n            image,\n            border_size,\n            border_size,\n            border_size,\n            border_size,\n            cv2.BORDER_CONSTANT,\n            value=(0, 0, 0),\n        )\n        panoramic_image[\n            start_row : start_row + img_height, start_col : start_col + img_width\n        ] = bordered_image\n\n        # add red index numbers to each image\n        text = str(i)\n        org = (start_col + 50, start_row + 30)\n        (font_width, font_height), baseline = cv2.getTextSize(\n            text, cv2.FONT_HERSHEY_SIMPLEX, 1, 2\n        )\n\n        top_left = (org[0] - 48, start_row + 2)\n        bottom_right = (org[0] - 48 + font_width + 5, org[1] + baseline + 5)\n\n        cv2.rectangle(\n            panoramic_image, top_left, bottom_right, (255, 255, 255), cv2.FILLED\n        )\n        cv2.putText(\n            panoramic_image,\n            text,\n            (start_col + 10, start_row + 30),\n            cv2.FONT_HERSHEY_SIMPLEX,\n            1,\n            (0, 0, 255),\n            2,\n            cv2.LINE_AA,\n        )\n\n    # combine the query image with the panoramic view\n    panoramic_image = np.hstack([query_image_null, panoramic_image])\n    return panoramic_image\n","from PIL import Image\n\ncombined_image_path = os.path.join(data_dir, \"combined_image.jpg\")\npanoramic_image = create_panoramic_view(query_image, retrieved_images)\ncv2.imwrite(combined_image_path, panoramic_image)\n\ncombined_image = Image.open(combined_image_path)\nshow_combined_image = combined_image.resize((300, 300))\nshow_combined_image.show()\n","import requests\nimport base64\n\nopenai_api_key = \"sk-***\"  # Change to your OpenAI API Key\n\n\ndef generate_ranking_explanation(\n    combined_image_path: str, caption: str, infos: dict = None\n) -> tuple[list[int], str]:\n    with open(combined_image_path, \"rb\") as image_file:\n        base64_image = base64.b64encode(image_file.read()).decode(\"utf-8\")\n\n    information = (\n        \"You are responsible for ranking results for a Composed Image Retrieval. \"\n        \"The user retrieves an image with an 'instruction' indicating their retrieval intent. \"\n        \"For example, if the user queries a red car with the instruction 'change this car to blue,' a similar type of car in blue would be ranked higher in the results. \"\n        \"Now you would receive instruction and query image with blue border. Every item has its red index number in its top left. Do not misunderstand it. \"\n        f\"User instruction: {caption} \\n\\n\"\n    )\n\n    # add additional information for each image\n    if infos:\n        for i, info in enumerate(infos[\"product\"]):\n            information += f\"{i}. {info}\\n\"\n\n    information += (\n        \"Provide a new ranked list of indices from most suitable to least suitable, followed by an explanation for the top 1 most suitable item only. 
\"\n        \"The format of the response has to be 'Ranked list: []' with the indices in brackets as integers, followed by 'Reasons:' plus the explanation why this most fit user's query intent.\"\n    )\n\n    headers = {\n        \"Content-Type\": \"application/json\",\n        \"Authorization\": f\"Bearer {openai_api_key}\",\n    }\n\n    payload = {\n        \"model\": \"gpt-4o\",\n        \"messages\": [\n            {\n                \"role\": \"user\",\n                \"content\": [\n                    {\"type\": \"text\", \"text\": information},\n                    {\n                        \"type\": \"image_url\",\n                        \"image_url\": {\"url\": f\"data:image/jpeg;base64,{base64_image}\"},\n                    },\n                ],\n            }\n        ],\n        \"max_tokens\": 300,\n    }\n\n    response = requests.post(\n        \"https://api.openai.com/v1/chat/completions\", headers=headers, json=payload\n    )\n    result = response.json()[\"choices\"][0][\"message\"][\"content\"]\n\n    # parse the ranked indices from the response\n    start_idx = result.find(\"[\")\n    end_idx = result.find(\"]\")\n    ranked_indices_str = result[start_idx + 1 : end_idx].split(\",\")\n    ranked_indices = [int(index.strip()) for index in ranked_indices_str]\n\n    # extract explanation\n    explanation = result[end_idx + 1 :].strip()\n\n    return ranked_indices, explanation\n","ranked_indices, explanation = generate_ranking_explanation(\n    combined_image_path, query_text\n)\n","print(explanation)\n\nbest_index = ranked_indices[0]\nbest_img = Image.open(retrieved_images[best_index])\nbest_img = best_img.resize((150, 150))\nbest_img.show()\n"],"headingContent":"","anchorList":[{"label":"Milvus를 사용한 멀티모달 RAG","href":"Multimodal-RAG-with-Milvus","type":1,"isActive":false},{"label":"준비","href":"Preparation","type":2,"isActive":false},{"label":"데이터 로드","href":"Load-Data","type":2,"isActive":false},{"label":"제너레이티브 리랭커로 멀티모달 검색하기","href":"Multimodal-Search-with-Generative-Reranker","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ pip install --upgrade pymilvus openai datasets opencv-python timm einops ftfy peft tqdm\n","$ git clone https://github.com/FlagOpen/FlagEmbedding.git\n$ pip install -e FlagEmbedding\n","$ wget https://github.com/milvus-io/bootcamp/releases/download/data/amazon_reviews_2023_subset.tar.gz\n$ tar -xzf amazon_reviews_2023_subset.tar.gz\n","$ wget https://huggingface.co/BAAI/bge-visualized/resolve/main/Visualized_base_en_v1.5.pth\n","import torch\nfrom FlagEmbedding.visual.modeling import Visualized_BGE\n\n\nclass Encoder:\n    def __init__(self, model_name: str, model_path: str):\n        self.model = Visualized_BGE(model_name_bge=model_name, model_weight=model_path)\n        self.model.eval()\n\n    def encode_query(self, image_path: str, text: str) -> list[float]:\n        with torch.no_grad():\n            query_emb = self.model.encode(image=image_path, text=text)\n        return query_emb.tolist()[0]\n\n    def encode_image(self, image_path: str) -> list[float]:\n        with torch.no_grad():\n            query_emb = self.model.encode(image=image_path)\n        return query_emb.tolist()[0]\n\n\nmodel_name = \"BAAI/bge-base-en-v1.5\"\nmodel_path = \"./Visualized_base_en_v1.5.pth\"  # Change to your own value if using a different model path\nencoder = Encoder(model_name, model_path)\n","import os\nfrom tqdm import tqdm\nfrom glob import glob\n\n\n# Generate embeddings for the image dataset\ndata_dir = (\n    \"./images_folder\"  # Change to your own value if using a different data directory\n)\nimage_list = glob(\n    os.path.join(data_dir, \"images\", \"*.jpg\")\n)  # We will only use images ending with \".jpg\"\nimage_dict = {}\nfor image_path in tqdm(image_list, desc=\"Generating image embeddings: \"):\n    try:\n        image_dict[image_path] = encoder.encode_image(image_path)\n    except Exception as e:\n        print(f\"Failed to generate embedding for {image_path}. 
Skipped.\")\n        continue\nprint(\"Number of encoded images:\", len(image_dict))\n","from pymilvus import MilvusClient\n\n\ndim = len(list(image_dict.values())[0])\ncollection_name = \"multimodal_rag_demo\"\n\n# Connect to Milvus client given URI\nmilvus_client = MilvusClient(uri=\"./milvus_demo.db\")\n\n# Create Milvus Collection\n# By default, vector field name is \"vector\"\nmilvus_client.create_collection(\n    collection_name=collection_name,\n    auto_id=True,\n    dimension=dim,\n    enable_dynamic_field=True,\n)\n\n# Insert data into collection\nmilvus_client.insert(\n    collection_name=collection_name,\n    data=[{\"image_path\": k, \"vector\": v} for k, v in image_dict.items()],\n)\n","query_image = os.path.join(\n    data_dir, \"leopard.jpg\"\n)  # Change to your own query image path\nquery_text = \"phone case with this image theme\"\n\n# Generate query embedding given image and text instructions\nquery_vec = encoder.encode_query(image_path=query_image, text=query_text)\n\nsearch_results = milvus_client.search(\n    collection_name=collection_name,\n    data=[query_vec],\n    output_fields=[\"image_path\"],\n    limit=9,  # Max number of search results to return\n    search_params={\"metric_type\": \"COSINE\", \"params\": {}},  # Search parameters\n)[0]\n\nretrieved_images = [hit.get(\"entity\").get(\"image_path\") for hit in search_results]\nprint(retrieved_images)\n","import numpy as np\nimport cv2\n\nimg_height = 300\nimg_width = 300\nrow_count = 3\n\n\ndef create_panoramic_view(query_image_path: str, retrieved_images: list) -> np.ndarray:\n    \"\"\"\n    creates a 5x5 panoramic view image from a list of images\n\n    args:\n        images: list of images to be combined\n\n    returns:\n        np.ndarray: the panoramic view image\n    \"\"\"\n    panoramic_width = img_width * row_count\n    panoramic_height = img_height * row_count\n    panoramic_image = np.full(\n        (panoramic_height, panoramic_width, 3), 255, dtype=np.uint8\n    )\n\n    # create and resize the query image with a blue border\n    query_image_null = np.full((panoramic_height, img_width, 3), 255, dtype=np.uint8)\n    query_image = Image.open(query_image_path).convert(\"RGB\")\n    query_array = np.array(query_image)[:, :, ::-1]\n    resized_image = cv2.resize(query_array, (img_width, img_height))\n\n    border_size = 10\n    blue = (255, 0, 0)  # blue color in BGR\n    bordered_query_image = cv2.copyMakeBorder(\n        resized_image,\n        border_size,\n        border_size,\n        border_size,\n        border_size,\n        cv2.BORDER_CONSTANT,\n        value=blue,\n    )\n\n    query_image_null[img_height * 2 : img_height * 3, 0:img_width] = cv2.resize(\n        bordered_query_image, (img_width, img_height)\n    )\n\n    # add text \"query\" below the query image\n    text = \"query\"\n    font_scale = 1\n    font_thickness = 2\n    text_org = (10, img_height * 3 + 30)\n    cv2.putText(\n        query_image_null,\n        text,\n        text_org,\n        cv2.FONT_HERSHEY_SIMPLEX,\n        font_scale,\n        blue,\n        font_thickness,\n        cv2.LINE_AA,\n    )\n\n    # combine the rest of the images into the panoramic view\n    retrieved_imgs = [\n        np.array(Image.open(img).convert(\"RGB\"))[:, :, ::-1] for img in retrieved_images\n    ]\n    for i, image in enumerate(retrieved_imgs):\n        image = cv2.resize(image, (img_width - 4, img_height - 4))\n        row = i // row_count\n        col = i % row_count\n        start_row = row * img_height\n        start_col = col * 
img_width\n\n        border_size = 2\n        bordered_image = cv2.copyMakeBorder(\n            image,\n            border_size,\n            border_size,\n            border_size,\n            border_size,\n            cv2.BORDER_CONSTANT,\n            value=(0, 0, 0),\n        )\n        panoramic_image[\n            start_row : start_row + img_height, start_col : start_col + img_width\n        ] = bordered_image\n\n        # add red index numbers to each image\n        text = str(i)\n        org = (start_col + 50, start_row + 30)\n        (font_width, font_height), baseline = cv2.getTextSize(\n            text, cv2.FONT_HERSHEY_SIMPLEX, 1, 2\n        )\n\n        top_left = (org[0] - 48, start_row + 2)\n        bottom_right = (org[0] - 48 + font_width + 5, org[1] + baseline + 5)\n\n        cv2.rectangle(\n            panoramic_image, top_left, bottom_right, (255, 255, 255), cv2.FILLED\n        )\n        cv2.putText(\n            panoramic_image,\n            text,\n            (start_col + 10, start_row + 30),\n            cv2.FONT_HERSHEY_SIMPLEX,\n            1,\n            (0, 0, 255),\n            2,\n            cv2.LINE_AA,\n        )\n\n    # combine the query image with the panoramic view\n    panoramic_image = np.hstack([query_image_null, panoramic_image])\n    return panoramic_image\n","from PIL import Image\n\ncombined_image_path = os.path.join(data_dir, \"combined_image.jpg\")\npanoramic_image = create_panoramic_view(query_image, retrieved_images)\ncv2.imwrite(combined_image_path, panoramic_image)\n\ncombined_image = Image.open(combined_image_path)\nshow_combined_image = combined_image.resize((300, 300))\nshow_combined_image.show()\n","import requests\nimport base64\n\nopenai_api_key = \"sk-***\"  # Change to your OpenAI API Key\n\n\ndef generate_ranking_explanation(\n    combined_image_path: str, caption: str, infos: dict = None\n) -> tuple[list[int], str]:\n    with open(combined_image_path, \"rb\") as image_file:\n        base64_image = base64.b64encode(image_file.read()).decode(\"utf-8\")\n\n    information = (\n        \"You are responsible for ranking results for a Composed Image Retrieval. \"\n        \"The user retrieves an image with an 'instruction' indicating their retrieval intent. \"\n        \"For example, if the user queries a red car with the instruction 'change this car to blue,' a similar type of car in blue would be ranked higher in the results. \"\n        \"Now you would receive instruction and query image with blue border. Every item has its red index number in its top left. Do not misunderstand it. \"\n        f\"User instruction: {caption} \\n\\n\"\n    )\n\n    # add additional information for each image\n    if infos:\n        for i, info in enumerate(infos[\"product\"]):\n            information += f\"{i}. {info}\\n\"\n\n    information += (\n        \"Provide a new ranked list of indices from most suitable to least suitable, followed by an explanation for the top 1 most suitable item only. 
\"\n        \"The format of the response has to be 'Ranked list: []' with the indices in brackets as integers, followed by 'Reasons:' plus the explanation why this most fit user's query intent.\"\n    )\n\n    headers = {\n        \"Content-Type\": \"application/json\",\n        \"Authorization\": f\"Bearer {openai_api_key}\",\n    }\n\n    payload = {\n        \"model\": \"gpt-4o\",\n        \"messages\": [\n            {\n                \"role\": \"user\",\n                \"content\": [\n                    {\"type\": \"text\", \"text\": information},\n                    {\n                        \"type\": \"image_url\",\n                        \"image_url\": {\"url\": f\"data:image/jpeg;base64,{base64_image}\"},\n                    },\n                ],\n            }\n        ],\n        \"max_tokens\": 300,\n    }\n\n    response = requests.post(\n        \"https://api.openai.com/v1/chat/completions\", headers=headers, json=payload\n    )\n    result = response.json()[\"choices\"][0][\"message\"][\"content\"]\n\n    # parse the ranked indices from the response\n    start_idx = result.find(\"[\")\n    end_idx = result.find(\"]\")\n    ranked_indices_str = result[start_idx + 1 : end_idx].split(\",\")\n    ranked_indices = [int(index.strip()) for index in ranked_indices_str]\n\n    # extract explanation\n    explanation = result[end_idx + 1 :].strip()\n\n    return ranked_indices, explanation\n","ranked_indices, explanation = generate_ranking_explanation(\n    combined_image_path, query_text\n)\n","print(explanation)\n\nbest_index = ranked_indices[0]\nbest_img = Image.open(retrieved_images[best_index])\nbest_img = best_img.resize((150, 150))\nbest_img.show()\n"],"headingContent":"Multimodal RAG with Milvus","anchorList":[{"label":"Milvus를 사용한 멀티모달 RAG","href":"Multimodal-RAG-with-Milvus","type":1,"isActive":false},{"label":"준비","href":"Preparation","type":2,"isActive":false},{"label":"데이터 로드","href":"Load-Data","type":2,"isActive":false},{"label":"제너레이티브 리랭커로 멀티모달 검색하기","href":"Multimodal-Search-with-Generative-Reranker","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/ko/tutorials/multimodal_rag_with_milvus.md b/localization/v2.4.x/site/ko/tutorials/multimodal_rag_with_milvus.md
    index dc47bc3b4..389b1a129 100644
    --- a/localization/v2.4.x/site/ko/tutorials/multimodal_rag_with_milvus.md
    +++ b/localization/v2.4.x/site/ko/tutorials/multimodal_rag_with_milvus.md
    @@ -18,7 +18,8 @@ title: Milvus를 사용한 멀티모달 RAG
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
-Open In Colab
+Open In Colab
+GitHub Repository

이 튜토리얼에서는 Milvus, 시각화된 BGE 모델 및 GPT-4o로 구동되는 멀티모달 RAG를 소개합니다. 이 시스템을 사용하면 사용자가 이미지를 업로드하고 텍스트 지침을 편집할 수 있으며, 이를 BGE의 조합형(composed) 검색 모델이 처리하여 후보 이미지를 검색합니다. 그런 다음 GPT-4o가 리랭커 역할을 하여 가장 적합한 이미지를 선택하고 선택의 근거를 제공합니다. 이 강력한 조합은 효율적인 검색을 위한 Milvus, 정밀한 이미지 처리 및 매칭을 위한 BGE 모델, 고급 리랭킹을 위한 GPT-4o를 활용하여 원활하고 직관적인 이미지 검색 환경을 구현합니다.
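
아래는 이 튜토리얼의 전체 코드에 정의된 encoder, milvus_client, create_panoramic_view, generate_ranking_explanation이 이미 준비되어 있다고 가정하고, 검색-리랭킹 파이프라인을 요약한 최소 스케치입니다. 경로와 질의 문구는 예시 값입니다.

# Minimal sketch: assumes `encoder`, `milvus_client`, and
# generate_ranking_explanation() are defined as in this tutorial.
query_text = "phone case with this image theme"

# 1. Encode the composed query (reference image + text instruction)
query_vec = encoder.encode_query(
    image_path="./images_folder/leopard.jpg", text=query_text
)

# 2. Retrieve candidate images from Milvus
hits = milvus_client.search(
    collection_name="multimodal_rag_demo",
    data=[query_vec],
    output_fields=["image_path"],
    limit=9,
    search_params={"metric_type": "COSINE", "params": {}},
)[0]
retrieved_images = [hit.get("entity").get("image_path") for hit in hits]

# 3. Rerank with GPT-4o over the combined panoramic image
#    (assumes create_panoramic_view() has written combined_image.jpg)
ranked_indices, explanation = generate_ranking_explanation(
    "./images_folder/combined_image.jpg", query_text
)
print(ranked_indices[0], explanation)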

    diff --git a/localization/v2.4.x/site/ko/tutorials/tutorials-overview.json b/localization/v2.4.x/site/ko/tutorials/tutorials-overview.json index 2c251b91f..0c72cc506 100644 --- a/localization/v2.4.x/site/ko/tutorials/tutorials-overview.json +++ b/localization/v2.4.x/site/ko/tutorials/tutorials-overview.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"","anchorList":[{"label":"튜토리얼 개요","href":"Tutorials-Overview","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Tutorials Overview","anchorList":[{"label":"튜토리얼 개요","href":"Tutorials-Overview","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/tutorials/tutorials-overview.md b/localization/v2.4.x/site/ko/tutorials/tutorials-overview.md index ffbabe727..6dd04f963 100644 --- a/localization/v2.4.x/site/ko/tutorials/tutorials-overview.md +++ b/localization/v2.4.x/site/ko/tutorials/tutorials-overview.md @@ -29,7 +29,6 @@ title: 튜토리얼 개요 Milvus를 사용한 이미지 검색시맨틱 검색벡터 검색, 동적 필드 Milvus를 사용한 하이브리드 검색하이브리드 검색하이브리드 검색, 멀티 벡터, 고밀도 임베딩, 스파스 임베딩 멀티 벡터를 사용한 멀티모달 검색시맨틱 검색멀티 벡터, 하이브리드 검색 - 질문 답변 시스템질문 답변벡터 검색 추천 시스템추천 시스템벡터 검색 동영상 유사도 검색시맨틱 검색벡터 검색 @@ -38,5 +37,7 @@ title: 튜토리얼 개요 텍스트 검색 엔진시맨틱 검색벡터 검색 텍스트로 이미지 검색시맨틱 검색벡터 검색 이미지 중복 제거중복 제거벡터 검색 +밀버스를 사용한 그래프 RAGRAG그래프 검색 +Milvus를 사용한 컨텍스트 검색빠른 시작벡터 검색 diff --git a/localization/v2.4.x/site/ko/userGuide/clustering-compaction.json b/localization/v2.4.x/site/ko/userGuide/clustering-compaction.json index d76ed13dd..2c54f4a0c 100644 --- a/localization/v2.4.x/site/ko/userGuide/clustering-compaction.json +++ b/localization/v2.4.x/site/ko/userGuide/clustering-compaction.json @@ -1 +1 @@ -{"codeList":["dataCoord:\n compaction:\n clustering:\n enable: true \n autoEnable: false \n triggerInterval: 600 \n minInterval: 3600 \n maxInterval: 259200 \n newDataSizeThreshold: 512m \n timeout: 7200\n \nqueryNode:\n enableSegmentPrune: true \n\ndatanode:\n clusteringCompaction:\n memoryBufferRatio: 0.1 \n workPoolSize: 8 \ncommon:\n usePartitionKeyAsClusteringKey: true \n","default_fields = [\n FieldSchema(name=\"id\", dtype=DataType.INT64, is_primary=True),\n FieldSchema(name=\"key\", dtype=DataType.INT64, is_clustering_key=True),\n FieldSchema(name=\"var\", dtype=DataType.VARCHAR, max_length=1000, is_primary=False),\n FieldSchema(name=\"embeddings\", dtype=DataType.FLOAT_VECTOR, dim=dim)\n]\n\ndefault_schema = CollectionSchema(\n fields=default_fields, \n description=\"test clustering-key collection\"\n)\n\ncoll1 = Collection(name=\"clustering_test\", schema=default_schema)\n","coll1.compact(is_clustering=True)\ncoll1.get_compaction_state(is_clustering=True)\ncoll1.wait_for_compaction_completed(is_clustering=True)\n"],"headingContent":"","anchorList":[{"label":"클러스터링 압축","href":"Clustering-Compaction","type":1,"isActive":false},{"label":"개요","href":"Overview","type":2,"isActive":false},{"label":"클러스터링 압축 사용","href":"Use-Clustering-Compaction","type":2,"isActive":false},{"label":"수집 구성","href":"Collection-Configuration","type":2,"isActive":false},{"label":"클러스터링 압축 트리거","href":"Trigger-Clustering-Compaction","type":2,"isActive":false},{"label":"모범 사례","href":"Best-practices","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["dataCoord:\n compaction:\n clustering:\n enable: true \n autoEnable: false \n triggerInterval: 600 \n minInterval: 3600 \n maxInterval: 259200 \n newDataSizeThreshold: 512m \n timeout: 7200\n \nqueryNode:\n enableSegmentPrune: true \n\ndatanode:\n clusteringCompaction:\n memoryBufferRatio: 0.1 \n 
workPoolSize: 8 \ncommon:\n usePartitionKeyAsClusteringKey: true \n","default_fields = [\n FieldSchema(name=\"id\", dtype=DataType.INT64, is_primary=True),\n FieldSchema(name=\"key\", dtype=DataType.INT64, is_clustering_key=True),\n FieldSchema(name=\"var\", dtype=DataType.VARCHAR, max_length=1000, is_primary=False),\n FieldSchema(name=\"embeddings\", dtype=DataType.FLOAT_VECTOR, dim=dim)\n]\n\ndefault_schema = CollectionSchema(\n fields=default_fields, \n description=\"test clustering-key collection\"\n)\n\ncoll1 = Collection(name=\"clustering_test\", schema=default_schema)\n","coll1.compact(is_clustering=True)\ncoll1.get_compaction_state(is_clustering=True)\ncoll1.wait_for_compaction_completed(is_clustering=True)\n"],"headingContent":"Clustering Compaction","anchorList":[{"label":"클러스터링 압축","href":"Clustering-Compaction","type":1,"isActive":false},{"label":"개요","href":"Overview","type":2,"isActive":false},{"label":"클러스터링 압축 사용","href":"Use-Clustering-Compaction","type":2,"isActive":false},{"label":"클러스터링 압축 트리거","href":"Trigger-Clustering-Compaction","type":2,"isActive":false},{"label":"모범 사례","href":"Best-practices","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/userGuide/clustering-compaction.md b/localization/v2.4.x/site/ko/userGuide/clustering-compaction.md index e2d4ac811..4453ecfec 100644 --- a/localization/v2.4.x/site/ko/userGuide/clustering-compaction.md +++ b/localization/v2.4.x/site/ko/userGuide/clustering-compaction.md @@ -3,8 +3,8 @@ id: clustering-compaction.md title: 클러스터링 압축 related_key: 'clustering, compaction' summary: >- - 클러스터링 압축은 대규모 컬렉션에서 검색 성능을 개선하고 비용을 절감하기 위해 고안되었습니다. 이 가이드는 클러스터링 압축을 이해하고 이 - 기능을 통해 검색 성능을 개선하는 방법을 이해하는 데 도움이 됩니다. + 클러스터링 압축은 대규모 컬렉션에서 검색 성능을 개선하고 비용을 절감하기 위해 고안되었습니다. 이 가이드는 클러스터링 압축과 이 기능이 검색 + 성능을 개선하는 방법을 이해하는 데 도움이 됩니다. ---

클러스터링 압축

+위의 변경 사항을 Milvus 클러스터에 적용하려면 헬름으로 Milvus 구성하기 및 Milvus 오퍼레이터로 Milvus 구성하기의 단계를 따르세요.

수집 구성

특정 컬렉션에서 클러스터링 압축을 하려면 컬렉션에서 스칼라 필드를 클러스터링 키로 선택해야 합니다.

    default_fields = [
         FieldSchema(name="id", dtype=DataType.INT64, is_primary=True),
         FieldSchema(name="key", dtype=DataType.INT64, is_clustering_key=True),
    diff --git a/localization/v2.4.x/site/ko/userGuide/insert-update-delete.json b/localization/v2.4.x/site/ko/userGuide/insert-update-delete.json
    index b8e4ff562..25bd15f14 100644
    --- a/localization/v2.4.x/site/ko/userGuide/insert-update-delete.json
    +++ b/localization/v2.4.x/site/ko/userGuide/insert-update-delete.json
    @@ -1 +1 @@
    -{"codeList":["from pymilvus import MilvusClient\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n    uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n    collection_name=\"quick_setup\",\n    dimension=5,\n    metric_type=\"IP\"\n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n    .uri(CLUSTER_ENDPOINT)\n    .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n    .collectionName(\"quick_setup\")\n    .dimension(5)\n    .metricType(\"IP\")\n    .build();\n\nclient.createCollection(quickSetupReq);\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n    collection_name: \"quick_setup\",\n    dimension: 5,\n    metric_type: \"IP\"\n});  \n","# 3. Insert some data\ndata=[\n    {\"id\": 0, \"vector\": [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592], \"color\": \"pink_8682\"},\n    {\"id\": 1, \"vector\": [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104], \"color\": \"red_7025\"},\n    {\"id\": 2, \"vector\": [0.43742130801983836, -0.5597502546264526, 0.6457887650909682, 0.7894058910881185, 0.20785793220625592], \"color\": \"orange_6781\"},\n    {\"id\": 3, \"vector\": [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345], \"color\": \"pink_9298\"},\n    {\"id\": 4, \"vector\": [0.4452349528804562, -0.8757026943054742, 0.8220779437047674, 0.46406290649483184, 0.30337481143159106], \"color\": \"red_4794\"},\n    {\"id\": 5, \"vector\": [0.985825131989184, -0.8144651566660419, 0.6299267002202009, 0.1206906911183383, -0.1446277761879955], \"color\": \"yellow_4222\"},\n    {\"id\": 6, \"vector\": [0.8371977790571115, -0.015764369584852833, -0.31062937026679327, -0.562666951622192, -0.8984947637863987], \"color\": \"red_9392\"},\n    {\"id\": 7, \"vector\": [-0.33445148015177995, -0.2567135004164067, 0.8987539745369246, 0.9402995886420709, 0.5378064918413052], \"color\": \"grey_8510\"},\n    {\"id\": 8, \"vector\": [0.39524717779832685, 0.4000257286739164, -0.5890507376891594, -0.8650502298996872, -0.6140360785406336], \"color\": \"white_9381\"},\n    {\"id\": 9, \"vector\": [0.5718280481994695, 0.24070317428066512, -0.3737913482606834, -0.06726932177492717, -0.6980531615588608], \"color\": \"purple_4976\"}\n]\n\nres = client.insert(\n    collection_name=\"quick_setup\",\n    data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"insert_count\": 10,\n#     \"ids\": [\n#         0,\n#         1,\n#         2,\n#         3,\n#         4,\n#         5,\n#         6,\n#         7,\n#         8,\n#         9\n#     ]\n# }\n","import java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp;\n\n// 3. 
Insert some data\nList data = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 0L, \"vector\", Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f), \"color\", \"pink_8682\")),\n    new JSONObject(Map.of(\"id\", 1L, \"vector\", Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f), \"color\", \"red_7025\")),\n    new JSONObject(Map.of(\"id\", 2L, \"vector\", Arrays.asList(0.43742130801983836f, -0.5597502546264526f, 0.6457887650909682f, 0.7894058910881185f, 0.20785793220625592f), \"color\", \"orange_6781\")),\n    new JSONObject(Map.of(\"id\", 3L, \"vector\", Arrays.asList(0.3172005263489739f, 0.9719044792798428f, -0.36981146090600725f, -0.4860894583077995f, 0.95791889146345f), \"color\", \"pink_9298\")),\n    new JSONObject(Map.of(\"id\", 4L, \"vector\", Arrays.asList(0.4452349528804562f, -0.8757026943054742f, 0.8220779437047674f, 0.46406290649483184f, 0.30337481143159106f), \"color\", \"red_4794\")),\n    new JSONObject(Map.of(\"id\", 5L, \"vector\", Arrays.asList(0.985825131989184f, -0.8144651566660419f, 0.6299267002202009f, 0.1206906911183383f, -0.1446277761879955f), \"color\", \"yellow_4222\")),\n    new JSONObject(Map.of(\"id\", 6L, \"vector\", Arrays.asList(0.8371977790571115f, -0.015764369584852833f, -0.31062937026679327f, -0.562666951622192f, -0.8984947637863987f), \"color\", \"red_9392\")),\n    new JSONObject(Map.of(\"id\", 7L, \"vector\", Arrays.asList(-0.33445148015177995f, -0.2567135004164067f, 0.8987539745369246f, 0.9402995886420709f, 0.5378064918413052f), \"color\", \"grey_8510\")),\n    new JSONObject(Map.of(\"id\", 8L, \"vector\", Arrays.asList(0.39524717779832685f, 0.4000257286739164f, -0.5890507376891594f, -0.8650502298996872f, -0.6140360785406336f), \"color\", \"white_9381\")),\n    new JSONObject(Map.of(\"id\", 9L, \"vector\", Arrays.asList(0.5718280481994695f, 0.24070317428066512f, -0.3737913482606834f, -0.06726932177492717f, -0.6980531615588608f), \"color\", \"purple_4976\"))\n);\n\nInsertReq insertReq = InsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 10}\n","// 3. 
Insert some data\n\nvar data = [\n    {id: 0, vector: [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592], color: \"pink_8682\"},\n    {id: 1, vector: [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104], color: \"red_7025\"},\n    {id: 2, vector: [0.43742130801983836, -0.5597502546264526, 0.6457887650909682, 0.7894058910881185, 0.20785793220625592], color: \"orange_6781\"},\n    {id: 3, vector: [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345], color: \"pink_9298\"},\n    {id: 4, vector: [0.4452349528804562, -0.8757026943054742, 0.8220779437047674, 0.46406290649483184, 0.30337481143159106], color: \"red_4794\"},\n    {id: 5, vector: [0.985825131989184, -0.8144651566660419, 0.6299267002202009, 0.1206906911183383, -0.1446277761879955], color: \"yellow_4222\"},\n    {id: 6, vector: [0.8371977790571115, -0.015764369584852833, -0.31062937026679327, -0.562666951622192, -0.8984947637863987], color: \"red_9392\"},\n    {id: 7, vector: [-0.33445148015177995, -0.2567135004164067, 0.8987539745369246, 0.9402995886420709, 0.5378064918413052], color: \"grey_8510\"},\n    {id: 8, vector: [0.39524717779832685, 0.4000257286739164, -0.5890507376891594, -0.8650502298996872, -0.6140360785406336], color: \"white_9381\"},\n    {id: 9, vector: [0.5718280481994695, 0.24070317428066512, -0.3737913482606834, -0.06726932177492717, -0.6980531615588608], color: \"purple_4976\"}        \n]\n\nvar res = await client.insert({\n    collection_name: \"quick_setup\",\n    data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 10\n// \n","# 4. Insert some more data into a specific partition\ndata=[\n    {\"id\": 10, \"vector\": [-0.5570353903748935, -0.8997887893201304, -0.7123782431855732, -0.6298990746450119, 0.6699215060604258], \"color\": \"red_1202\"},\n    {\"id\": 11, \"vector\": [0.6319019033373907, 0.6821488267878275, 0.8552303045704168, 0.36929791364943054, -0.14152860714878068], \"color\": \"blue_4150\"},\n    {\"id\": 12, \"vector\": [0.9483947484855766, -0.32294203351925344, 0.9759290319978025, 0.8262982148666174, -0.8351194181285713], \"color\": \"orange_4590\"},\n    {\"id\": 13, \"vector\": [-0.5449109892498731, 0.043511240563786524, -0.25105249484790804, -0.012030655265886425, -0.0010987671273892108], \"color\": \"pink_9619\"},\n    {\"id\": 14, \"vector\": [0.6603339372951424, -0.10866551787442225, -0.9435597754324891, 0.8230244263466688, -0.7986720938400362], \"color\": \"orange_4863\"},\n    {\"id\": 15, \"vector\": [-0.8825129181091456, -0.9204557711667729, -0.935350065513425, 0.5484069690287079, 0.24448151140671204], \"color\": \"orange_7984\"},\n    {\"id\": 16, \"vector\": [0.6285586391568163, 0.5389064528263487, -0.3163366239905099, 0.22036279378888013, 0.15077052220816167], \"color\": \"blue_9010\"},\n    {\"id\": 17, \"vector\": [-0.20151825016059233, -0.905239387635804, 0.6749305353372479, -0.7324272081377843, -0.33007998971889263], \"color\": \"blue_4521\"},\n    {\"id\": 18, \"vector\": [0.2432286610792349, 0.01785636564206139, -0.651356982731391, -0.35848148851027895, -0.7387383128324057], \"color\": \"orange_2529\"},\n    {\"id\": 19, \"vector\": [0.055512329053363674, 0.7100266349039421, 0.4956956543575197, 0.24541352586717702, 0.4209030729923515], \"color\": \"red_9437\"}\n]\n\nclient.create_partition(\n    collection_name=\"quick_setup\",\n    partition_name=\"partitionA\"\n)\n\nres = client.insert(\n    
collection_name=\"quick_setup\",\n    data=data,\n    partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"insert_count\": 10,\n#     \"ids\": [\n#         10,\n#         11,\n#         12,\n#         13,\n#         14,\n#         15,\n#         16,\n#         17,\n#         18,\n#         19\n#     ]\n# }\n","// 4. Insert some more data into a specific partition\ndata = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 10L, \"vector\", Arrays.asList(-0.5570353903748935f, -0.8997887893201304f, -0.7123782431855732f, -0.6298990746450119f, 0.6699215060604258f), \"color\", \"red_1202\")),\n    new JSONObject(Map.of(\"id\", 11L, \"vector\", Arrays.asList(0.6319019033373907f, 0.6821488267878275f, 0.8552303045704168f, 0.36929791364943054f, -0.14152860714878068f), \"color\", \"blue_4150\")),\n    new JSONObject(Map.of(\"id\", 12L, \"vector\", Arrays.asList(0.9483947484855766f, -0.32294203351925344f, 0.9759290319978025f, 0.8262982148666174f, -0.8351194181285713f), \"color\", \"orange_4590\")),\n    new JSONObject(Map.of(\"id\", 13L, \"vector\", Arrays.asList(-0.5449109892498731f, 0.043511240563786524f, -0.25105249484790804f, -0.012030655265886425f, -0.0010987671273892108f), \"color\", \"pink_9619\")),\n    new JSONObject(Map.of(\"id\", 14L, \"vector\", Arrays.asList(0.6603339372951424f, -0.10866551787442225f, -0.9435597754324891f, 0.8230244263466688f, -0.7986720938400362f), \"color\", \"orange_4863\")),\n    new JSONObject(Map.of(\"id\", 15L, \"vector\", Arrays.asList(-0.8825129181091456f, -0.9204557711667729f, -0.935350065513425f, 0.5484069690287079f, 0.24448151140671204f), \"color\", \"orange_7984\")),\n    new JSONObject(Map.of(\"id\", 16L, \"vector\", Arrays.asList(0.6285586391568163f, 0.5389064528263487f, -0.3163366239905099f, 0.22036279378888013f, 0.15077052220816167f), \"color\", \"blue_9010\")),\n    new JSONObject(Map.of(\"id\", 17L, \"vector\", Arrays.asList(-0.20151825016059233f, -0.905239387635804f, 0.6749305353372479f, -0.7324272081377843f, -0.33007998971889263f), \"color\", \"blue_4521\")),\n    new JSONObject(Map.of(\"id\", 18L, \"vector\", Arrays.asList(0.2432286610792349f, 0.01785636564206139f, -0.651356982731391f, -0.35848148851027895f, -0.7387383128324057f), \"color\", \"orange_2529\")),\n    new JSONObject(Map.of(\"id\", 19L, \"vector\", Arrays.asList(0.055512329053363674f, 0.7100266349039421f, 0.4956956543575197f, 0.24541352586717702f, 0.4209030729923515f), \"color\", \"red_9437\"))\n);\n\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n    .collectionName(\"quick_setup\")\n    .partitionName(\"partitionA\")\n    .build();\n\nclient.createPartition(createPartitionReq);\n\ninsertReq = InsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .partitionName(\"partitionA\")\n    .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 10}\n","// 4. 
Insert some more data into a specific partition\ndata = [\n    {id: 10, vector: [-0.5570353903748935, -0.8997887893201304, -0.7123782431855732, -0.6298990746450119, 0.6699215060604258], color: \"red_1202\"},\n    {id: 11, vector: [0.6319019033373907, 0.6821488267878275, 0.8552303045704168, 0.36929791364943054, -0.14152860714878068], color: \"blue_4150\"},\n    {id: 12, vector: [0.9483947484855766, -0.32294203351925344, 0.9759290319978025, 0.8262982148666174, -0.8351194181285713], color: \"orange_4590\"},\n    {id: 13, vector: [-0.5449109892498731, 0.043511240563786524, -0.25105249484790804, -0.012030655265886425, -0.0010987671273892108], color: \"pink_9619\"},\n    {id: 14, vector: [0.6603339372951424, -0.10866551787442225, -0.9435597754324891, 0.8230244263466688, -0.7986720938400362], color: \"orange_4863\"},\n    {id: 15, vector: [-0.8825129181091456, -0.9204557711667729, -0.935350065513425, 0.5484069690287079, 0.24448151140671204], color: \"orange_7984\"},\n    {id: 16, vector: [0.6285586391568163, 0.5389064528263487, -0.3163366239905099, 0.22036279378888013, 0.15077052220816167], color: \"blue_9010\"},\n    {id: 17, vector: [-0.20151825016059233, -0.905239387635804, 0.6749305353372479, -0.7324272081377843, -0.33007998971889263], color: \"blue_4521\"},\n    {id: 18, vector: [0.2432286610792349, 0.01785636564206139, -0.651356982731391, -0.35848148851027895, -0.7387383128324057], color: \"orange_2529\"},\n    {id: 19, vector: [0.055512329053363674, 0.7100266349039421, 0.4956956543575197, 0.24541352586717702, 0.4209030729923515], color: \"red_9437\"}\n]\n\nawait client.createPartition({\n    collection_name: \"quick_setup\",\n    partition_name: \"partitionA\"\n})\n\nres = await client.insert({\n    collection_name: \"quick_setup\",\n    data: data,\n    partition_name: \"partitionA\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 10\n// \n","# 5. 
Upsert some data\ndata=[\n    {\"id\": 0, \"vector\": [-0.619954382375778, 0.4479436794798608, -0.17493894838751745, -0.4248030059917294, -0.8648452746018911], \"color\": \"black_9898\"},\n    {\"id\": 1, \"vector\": [0.4762662251462588, -0.6942502138717026, -0.4490002642657902, -0.628696575798281, 0.9660395877041965], \"color\": \"red_7319\"},\n    {\"id\": 2, \"vector\": [-0.8864122635045097, 0.9260170474445351, 0.801326976181461, 0.6383943392381306, 0.7563037341572827], \"color\": \"white_6465\"},\n    {\"id\": 3, \"vector\": [0.14594326235891586, -0.3775407299900644, -0.3765479013078812, 0.20612075380355122, 0.4902678929632145], \"color\": \"orange_7580\"},\n    {\"id\": 4, \"vector\": [0.4548498669607359, -0.887610217681605, 0.5655081329910452, 0.19220509387904117, 0.016513983433433577], \"color\": \"red_3314\"},\n    {\"id\": 5, \"vector\": [0.11755001847051827, -0.7295149788999611, 0.2608115847524266, -0.1719167007897875, 0.7417611743754855], \"color\": \"black_9955\"},\n    {\"id\": 6, \"vector\": [0.9363032158314308, 0.030699901477745373, 0.8365910312319647, 0.7823840208444011, 0.2625222076909237], \"color\": \"yellow_2461\"},\n    {\"id\": 7, \"vector\": [0.0754823906014721, -0.6390658668265143, 0.5610517334334937, -0.8986261118798251, 0.9372056764266794], \"color\": \"white_5015\"},\n    {\"id\": 8, \"vector\": [-0.3038434006935904, 0.1279149203380523, 0.503958664270957, -0.2622661156746988, 0.7407627307791929], \"color\": \"purple_6414\"},\n    {\"id\": 9, \"vector\": [-0.7125086947677588, -0.8050968321012257, -0.32608864121785786, 0.3255654958645424, 0.26227968923834233], \"color\": \"brown_7231\"}\n]\n\nres = client.upsert(\n    collection_name='quick_setup',\n    data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"upsert_count\": 10\n# }\n","// 5. 
Upsert some data\ndata = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 0L, \"vector\", Arrays.asList(-0.619954382375778f, 0.4479436794798608f, -0.17493894838751745f, -0.4248030059917294f, -0.8648452746018911f), \"color\", \"black_9898\")),\n    new JSONObject(Map.of(\"id\", 1L, \"vector\", Arrays.asList(0.4762662251462588f, -0.6942502138717026f, -0.4490002642657902f, -0.628696575798281f, 0.9660395877041965f), \"color\", \"red_7319\")),\n    new JSONObject(Map.of(\"id\", 2L, \"vector\", Arrays.asList(-0.8864122635045097f, 0.9260170474445351f, 0.801326976181461f, 0.6383943392381306f, 0.7563037341572827f), \"color\", \"white_6465\")),\n    new JSONObject(Map.of(\"id\", 3L, \"vector\", Arrays.asList(0.14594326235891586f, -0.3775407299900644f, -0.3765479013078812f, 0.20612075380355122f, 0.4902678929632145f), \"color\", \"orange_7580\")),\n    new JSONObject(Map.of(\"id\", 4L, \"vector\", Arrays.asList(0.4548498669607359f, -0.887610217681605f, 0.5655081329910452f, 0.19220509387904117f, 0.016513983433433577f), \"color\", \"red_3314\")),\n    new JSONObject(Map.of(\"id\", 5L, \"vector\", Arrays.asList(0.11755001847051827f, -0.7295149788999611f, 0.2608115847524266f, -0.1719167007897875f, 0.7417611743754855f), \"color\", \"black_9955\")),\n    new JSONObject(Map.of(\"id\", 6L, \"vector\", Arrays.asList(0.9363032158314308f, 0.030699901477745373f, 0.8365910312319647f, 0.7823840208444011f, 0.2625222076909237f), \"color\", \"yellow_2461\")),\n    new JSONObject(Map.of(\"id\", 7L, \"vector\", Arrays.asList(0.0754823906014721f, -0.6390658668265143f, 0.5610517334334937f, -0.8986261118798251f, 0.9372056764266794f), \"color\", \"white_5015\")),\n    new JSONObject(Map.of(\"id\", 8L, \"vector\", Arrays.asList(-0.3038434006935904f, 0.1279149203380523f, 0.503958664270957f, -0.2622661156746988f, 0.7407627307791929f), \"color\", \"purple_6414\")),\n    new JSONObject(Map.of(\"id\", 9L, \"vector\", Arrays.asList(-0.7125086947677588f, -0.8050968321012257f, -0.32608864121785786f, 0.3255654958645424f, 0.26227968923834233f), \"color\", \"brown_7231\"))\n);\n\nUpsertReq upsertReq = UpsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .build();\n\nUpsertResp upsertResp = client.upsert(upsertReq);\n\nSystem.out.println(JSONObject.toJSON(upsertResp));\n\n// Output:\n// {\"upsertCnt\": 10}\n","// 5. 
Upsert some data\ndata = [\n    {id: 0, vector: [-0.619954382375778, 0.4479436794798608, -0.17493894838751745, -0.4248030059917294, -0.8648452746018911], color: \"black_9898\"},\n    {id: 1, vector: [0.4762662251462588, -0.6942502138717026, -0.4490002642657902, -0.628696575798281, 0.9660395877041965], color: \"red_7319\"},\n    {id: 2, vector: [-0.8864122635045097, 0.9260170474445351, 0.801326976181461, 0.6383943392381306, 0.7563037341572827], color: \"white_6465\"},\n    {id: 3, vector: [0.14594326235891586, -0.3775407299900644, -0.3765479013078812, 0.20612075380355122, 0.4902678929632145], color: \"orange_7580\"},\n    {id: 4, vector: [0.4548498669607359, -0.887610217681605, 0.5655081329910452, 0.19220509387904117, 0.016513983433433577], color: \"red_3314\"},\n    {id: 5, vector: [0.11755001847051827, -0.7295149788999611, 0.2608115847524266, -0.1719167007897875, 0.7417611743754855], color: \"black_9955\"},\n    {id: 6, vector: [0.9363032158314308, 0.030699901477745373, 0.8365910312319647, 0.7823840208444011, 0.2625222076909237], color: \"yellow_2461\"},\n    {id: 7, vector: [0.0754823906014721, -0.6390658668265143, 0.5610517334334937, -0.8986261118798251, 0.9372056764266794], color: \"white_5015\"},\n    {id: 8, vector: [-0.3038434006935904, 0.1279149203380523, 0.503958664270957, -0.2622661156746988, 0.7407627307791929], color: \"purple_6414\"},\n    {id: 9, vector: [-0.7125086947677588, -0.8050968321012257, -0.32608864121785786, 0.3255654958645424, 0.26227968923834233], color: \"brown_7231\"}\n]\n\nres = await client.upsert({\n    collection_name: \"quick_setup\",\n    data: data,\n})\n\nconsole.log(res.upsert_cnt)\n\n// Output\n// \n// 10\n// \n","# 6. Upsert data in partitions\ndata=[\n    {\"id\": 10, \"vector\": [0.06998888224297328, 0.8582816610326578, -0.9657938677934292, 0.6527905683627726, -0.8668460657158576], \"color\": \"black_3651\"},\n    {\"id\": 11, \"vector\": [0.6060703043917468, -0.3765080534566074, -0.7710758854987239, 0.36993888322346136, 0.5507513364206531], \"color\": \"grey_2049\"},\n    {\"id\": 12, \"vector\": [-0.9041813104515337, -0.9610546012461163, 0.20033003106083358, 0.11842506351635174, 0.8327356724591011], \"color\": \"blue_6168\"},\n    {\"id\": 13, \"vector\": [0.3202914977909075, -0.7279137773695252, -0.04747830871620273, 0.8266053056909548, 0.8277957187455489], \"color\": \"blue_1672\"},\n    {\"id\": 14, \"vector\": [0.2975811497890859, 0.2946936202691086, 0.5399463833894609, 0.8385334966677529, -0.4450543984655133], \"color\": \"pink_1601\"},\n    {\"id\": 15, \"vector\": [-0.04697464305600074, -0.08509022265734134, 0.9067184632552001, -0.2281912685064822, -0.9747503428652762], \"color\": \"yellow_9925\"},\n    {\"id\": 16, \"vector\": [-0.9363075919673911, -0.8153981031085669, 0.7943039120490902, -0.2093886809842529, 0.0771191335807897], \"color\": \"orange_9872\"},\n    {\"id\": 17, \"vector\": [-0.050451522820639916, 0.18931572752321935, 0.7522886192190488, -0.9071793089474034, 0.6032647330692296], \"color\": \"red_6450\"},\n    {\"id\": 18, \"vector\": [-0.9181544231141592, 0.6700755998126806, -0.014174674636136642, 0.6325780463623432, -0.49662222164032976], \"color\": \"purple_7392\"},\n    {\"id\": 19, \"vector\": [0.11426945899602536, 0.6089190684002581, -0.5842735738352236, 0.057050610092692855, -0.035163433018196244], \"color\": \"pink_4996\"}\n]\n\nres = client.upsert(\n    collection_name=\"quick_setup\",\n    data=data,\n    partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"upsert_count\": 10\n# }\n","import 
io.milvus.v2.service.vector.request.UpsertReq;\nimport io.milvus.v2.service.vector.response.UpsertResp;\n\n// 6. Upsert data in parition\n\ndata = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 10L, \"vector\", Arrays.asList(0.06998888224297328f, 0.8582816610326578f, -0.9657938677934292f, 0.6527905683627726f, -0.8668460657158576f), \"color\", \"black_3651\")),\n    new JSONObject(Map.of(\"id\", 11L, \"vector\", Arrays.asList(0.6060703043917468f, -0.3765080534566074f, -0.7710758854987239f, 0.36993888322346136f, 0.5507513364206531f), \"color\", \"grey_2049\")),\n    new JSONObject(Map.of(\"id\", 12L, \"vector\", Arrays.asList(-0.9041813104515337f, -0.9610546012461163f, 0.20033003106083358f, 0.11842506351635174f, 0.8327356724591011f), \"color\", \"blue_6168\")),\n    new JSONObject(Map.of(\"id\", 13L, \"vector\", Arrays.asList(0.3202914977909075f, -0.7279137773695252f, -0.04747830871620273f, 0.8266053056909548f, 0.8277957187455489f), \"color\", \"blue_1672\")),\n    new JSONObject(Map.of(\"id\", 14L, \"vector\", Arrays.asList(0.2975811497890859f, 0.2946936202691086f, 0.5399463833894609f, 0.8385334966677529f, -0.4450543984655133f), \"color\", \"pink_1601\")),\n    new JSONObject(Map.of(\"id\", 15L, \"vector\", Arrays.asList(-0.04697464305600074f, -0.08509022265734134f, 0.9067184632552001f, -0.2281912685064822f, -0.9747503428652762f), \"color\", \"yellow_9925\")),\n    new JSONObject(Map.of(\"id\", 16L, \"vector\", Arrays.asList(-0.9363075919673911f, -0.8153981031085669f, 0.7943039120490902f, -0.2093886809842529f, 0.0771191335807897f), \"color\", \"orange_9872\")),\n    new JSONObject(Map.of(\"id\", 17L, \"vector\", Arrays.asList(-0.050451522820639916f, 0.18931572752321935f, 0.7522886192190488f, -0.9071793089474034f, 0.6032647330692296f), \"color\", \"red_6450\")),\n    new JSONObject(Map.of(\"id\", 18L, \"vector\", Arrays.asList(-0.9181544231141592f, 0.6700755998126806f, -0.014174674636136642f, 0.6325780463623432f, -0.49662222164032976f), \"color\", \"purple_7392\")),\n    new JSONObject(Map.of(\"id\", 19L, \"vector\", Arrays.asList(0.11426945899602536f, 0.6089190684002581f, -0.5842735738352236f, 0.057050610092692855f, -0.035163433018196244f), \"color\", \"pink_4996\"))\n);\n\nupsertReq = UpsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .partitionName(\"partitionA\")\n    .build();\n\nupsertResp = client.upsert(upsertReq);\n\nSystem.out.println(JSONObject.toJSON(upsertResp));\n\n// Output:\n// {\"upsertCnt\": 10}\n","// 6. 
Upsert data in partitions\ndata = [\n    {id: 10, vector: [0.06998888224297328, 0.8582816610326578, -0.9657938677934292, 0.6527905683627726, -0.8668460657158576], color: \"black_3651\"},\n    {id: 11, vector: [0.6060703043917468, -0.3765080534566074, -0.7710758854987239, 0.36993888322346136, 0.5507513364206531], color: \"grey_2049\"},\n    {id: 12, vector: [-0.9041813104515337, -0.9610546012461163, 0.20033003106083358, 0.11842506351635174, 0.8327356724591011], color: \"blue_6168\"},\n    {id: 13, vector: [0.3202914977909075, -0.7279137773695252, -0.04747830871620273, 0.8266053056909548, 0.8277957187455489], color: \"blue_1672\"},\n    {id: 14, vector: [0.2975811497890859, 0.2946936202691086, 0.5399463833894609, 0.8385334966677529, -0.4450543984655133], color: \"pink_1601\"},\n    {id: 15, vector: [-0.04697464305600074, -0.08509022265734134, 0.9067184632552001, -0.2281912685064822, -0.9747503428652762], color: \"yellow_9925\"},\n    {id: 16, vector: [-0.9363075919673911, -0.8153981031085669, 0.7943039120490902, -0.2093886809842529, 0.0771191335807897], color: \"orange_9872\"},\n    {id: 17, vector: [-0.050451522820639916, 0.18931572752321935, 0.7522886192190488, -0.9071793089474034, 0.6032647330692296], color: \"red_6450\"},\n    {id: 18, vector: [-0.9181544231141592, 0.6700755998126806, -0.014174674636136642, 0.6325780463623432, -0.49662222164032976], color: \"purple_7392\"},\n    {id: 19, vector: [0.11426945899602536, 0.6089190684002581, -0.5842735738352236, 0.057050610092692855, -0.035163433018196244], color: \"pink_4996\"}\n]\n\nres = await client.upsert({\n    collection_name: \"quick_setup\",\n    data: data,\n    partition_name: \"partitionA\"\n})\n\nconsole.log(res.upsert_cnt)\n\n// Output\n// \n// 10\n// \n","# 7. Delete entities\nres = client.delete(\n    collection_name=\"quick_setup\",\n    filter=\"id in [4,5,6]\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"delete_count\": 3\n# }\n","import io.milvus.v2.service.vector.request.DeleteReq;\nimport io.milvus.v2.service.vector.response.DeleteResp;\n\n\n// 7. Delete entities\n\nDeleteReq deleteReq = DeleteReq.builder()\n    .collectionName(\"quick_setup\")\n    .filter(\"id in [4, 5, 6]\")\n    .build();\n\nDeleteResp deleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 3}\n","// 7. 
Delete entities\nres = await client.delete({\n    collection_name: \"quick_setup\",\n    filter: \"id in [4,5,6]\"\n})\n\nconsole.log(res.delete_cnt)\n\n// Output\n// \n// 3\n// \n","res = client.delete(\n    collection_name=\"quick_setup\",\n    ids=[18, 19],\n    partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"delete_count\": 2\n# }\n","deleteReq = DeleteReq.builder()\n    .collectionName(\"quick_setup\")\n    .ids(Arrays.asList(18L, 19L))\n    .partitionName(\"partitionA\")\n    .build();\n\ndeleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 2}\n","res = await client.delete({\n    collection_name: \"quick_setup\",\n    ids: [18, 19],\n    partition_name: \"partitionA\"\n})\n\nconsole.log(res.delete_cnt)\n\n// Output\n// \n// 2\n// \n","res = client.delete(\ncollection_name='quick_setup',\npartition_name='partitionA',\nfilter='color like \"blue%\"'\n)\n\nprint(\"Entities deleted from partitionA: \", res['delete_count'])\n\n# Output:\n# Entities deleted from partitionA:  3\n","deleteReq = DeleteReq.builder()\n    .collectionName(\"quick_setup\")\n    .filter('color like \"blue%\"')\n    .partitionName(\"partitionA\")\n    .build();\n\ndeleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 3}\n","res = await client.delete({\ncollection_name: \"quick_setup\",\npartition_name: \"partitionA\",\nfilter: 'color like \"blue%\"'\n})\n\nconsole.log(\"Entities deleted from partitionA: \" + res.delete_cnt)\n\n// Output:\n// Entities deleted from partitionA: 3\n"],"headingContent":"","anchorList":[{"label":"삽입, 위로 올리기 및 삭제","href":"Insert-Upsert--Delete","type":1,"isActive":false},{"label":"시작하기 전에","href":"Before-you-start","type":2,"isActive":false},{"label":"개요","href":"Overview","type":2,"isActive":false},{"label":"준비","href":"Preparations","type":2,"isActive":false},{"label":"엔티티 삽입","href":"Insert-entities","type":2,"isActive":false},{"label":"엔티티 삽입","href":"Upsert-entities","type":2,"isActive":false},{"label":"엔터티 삭제","href":"Delete-entities","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["from pymilvus import MilvusClient\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n    uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n    collection_name=\"quick_setup\",\n    dimension=5,\n    metric_type=\"IP\"\n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n    .uri(CLUSTER_ENDPOINT)\n    .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n    .collectionName(\"quick_setup\")\n    .dimension(5)\n    .metricType(\"IP\")\n    .build();\n\nclient.createCollection(quickSetupReq);\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n    collection_name: \"quick_setup\",\n    dimension: 5,\n    metric_type: \"IP\"\n});  \n","# 3. Insert some data\ndata=[\n    {\"id\": 0, \"vector\": [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592], \"color\": \"pink_8682\"},\n    {\"id\": 1, \"vector\": [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104], \"color\": \"red_7025\"},\n    {\"id\": 2, \"vector\": [0.43742130801983836, -0.5597502546264526, 0.6457887650909682, 0.7894058910881185, 0.20785793220625592], \"color\": \"orange_6781\"},\n    {\"id\": 3, \"vector\": [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345], \"color\": \"pink_9298\"},\n    {\"id\": 4, \"vector\": [0.4452349528804562, -0.8757026943054742, 0.8220779437047674, 0.46406290649483184, 0.30337481143159106], \"color\": \"red_4794\"},\n    {\"id\": 5, \"vector\": [0.985825131989184, -0.8144651566660419, 0.6299267002202009, 0.1206906911183383, -0.1446277761879955], \"color\": \"yellow_4222\"},\n    {\"id\": 6, \"vector\": [0.8371977790571115, -0.015764369584852833, -0.31062937026679327, -0.562666951622192, -0.8984947637863987], \"color\": \"red_9392\"},\n    {\"id\": 7, \"vector\": [-0.33445148015177995, -0.2567135004164067, 0.8987539745369246, 0.9402995886420709, 0.5378064918413052], \"color\": \"grey_8510\"},\n    {\"id\": 8, \"vector\": [0.39524717779832685, 0.4000257286739164, -0.5890507376891594, -0.8650502298996872, -0.6140360785406336], \"color\": \"white_9381\"},\n    {\"id\": 9, \"vector\": [0.5718280481994695, 0.24070317428066512, -0.3737913482606834, -0.06726932177492717, -0.6980531615588608], \"color\": \"purple_4976\"}\n]\n\nres = client.insert(\n    collection_name=\"quick_setup\",\n    data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"insert_count\": 10,\n#     \"ids\": [\n#         0,\n#         1,\n#         2,\n#         3,\n#         4,\n#         5,\n#         6,\n#         7,\n#         8,\n#         9\n#     ]\n# }\n","import java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp;\n\n// 3. 
Insert some data\nList data = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 0L, \"vector\", Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f), \"color\", \"pink_8682\")),\n    new JSONObject(Map.of(\"id\", 1L, \"vector\", Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f), \"color\", \"red_7025\")),\n    new JSONObject(Map.of(\"id\", 2L, \"vector\", Arrays.asList(0.43742130801983836f, -0.5597502546264526f, 0.6457887650909682f, 0.7894058910881185f, 0.20785793220625592f), \"color\", \"orange_6781\")),\n    new JSONObject(Map.of(\"id\", 3L, \"vector\", Arrays.asList(0.3172005263489739f, 0.9719044792798428f, -0.36981146090600725f, -0.4860894583077995f, 0.95791889146345f), \"color\", \"pink_9298\")),\n    new JSONObject(Map.of(\"id\", 4L, \"vector\", Arrays.asList(0.4452349528804562f, -0.8757026943054742f, 0.8220779437047674f, 0.46406290649483184f, 0.30337481143159106f), \"color\", \"red_4794\")),\n    new JSONObject(Map.of(\"id\", 5L, \"vector\", Arrays.asList(0.985825131989184f, -0.8144651566660419f, 0.6299267002202009f, 0.1206906911183383f, -0.1446277761879955f), \"color\", \"yellow_4222\")),\n    new JSONObject(Map.of(\"id\", 6L, \"vector\", Arrays.asList(0.8371977790571115f, -0.015764369584852833f, -0.31062937026679327f, -0.562666951622192f, -0.8984947637863987f), \"color\", \"red_9392\")),\n    new JSONObject(Map.of(\"id\", 7L, \"vector\", Arrays.asList(-0.33445148015177995f, -0.2567135004164067f, 0.8987539745369246f, 0.9402995886420709f, 0.5378064918413052f), \"color\", \"grey_8510\")),\n    new JSONObject(Map.of(\"id\", 8L, \"vector\", Arrays.asList(0.39524717779832685f, 0.4000257286739164f, -0.5890507376891594f, -0.8650502298996872f, -0.6140360785406336f), \"color\", \"white_9381\")),\n    new JSONObject(Map.of(\"id\", 9L, \"vector\", Arrays.asList(0.5718280481994695f, 0.24070317428066512f, -0.3737913482606834f, -0.06726932177492717f, -0.6980531615588608f), \"color\", \"purple_4976\"))\n);\n\nInsertReq insertReq = InsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 10}\n","// 3. 
Insert some data\n\nvar data = [\n    {id: 0, vector: [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592], color: \"pink_8682\"},\n    {id: 1, vector: [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104], color: \"red_7025\"},\n    {id: 2, vector: [0.43742130801983836, -0.5597502546264526, 0.6457887650909682, 0.7894058910881185, 0.20785793220625592], color: \"orange_6781\"},\n    {id: 3, vector: [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345], color: \"pink_9298\"},\n    {id: 4, vector: [0.4452349528804562, -0.8757026943054742, 0.8220779437047674, 0.46406290649483184, 0.30337481143159106], color: \"red_4794\"},\n    {id: 5, vector: [0.985825131989184, -0.8144651566660419, 0.6299267002202009, 0.1206906911183383, -0.1446277761879955], color: \"yellow_4222\"},\n    {id: 6, vector: [0.8371977790571115, -0.015764369584852833, -0.31062937026679327, -0.562666951622192, -0.8984947637863987], color: \"red_9392\"},\n    {id: 7, vector: [-0.33445148015177995, -0.2567135004164067, 0.8987539745369246, 0.9402995886420709, 0.5378064918413052], color: \"grey_8510\"},\n    {id: 8, vector: [0.39524717779832685, 0.4000257286739164, -0.5890507376891594, -0.8650502298996872, -0.6140360785406336], color: \"white_9381\"},\n    {id: 9, vector: [0.5718280481994695, 0.24070317428066512, -0.3737913482606834, -0.06726932177492717, -0.6980531615588608], color: \"purple_4976\"}        \n]\n\nvar res = await client.insert({\n    collection_name: \"quick_setup\",\n    data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 10\n// \n","# 4. Insert some more data into a specific partition\ndata=[\n    {\"id\": 10, \"vector\": [-0.5570353903748935, -0.8997887893201304, -0.7123782431855732, -0.6298990746450119, 0.6699215060604258], \"color\": \"red_1202\"},\n    {\"id\": 11, \"vector\": [0.6319019033373907, 0.6821488267878275, 0.8552303045704168, 0.36929791364943054, -0.14152860714878068], \"color\": \"blue_4150\"},\n    {\"id\": 12, \"vector\": [0.9483947484855766, -0.32294203351925344, 0.9759290319978025, 0.8262982148666174, -0.8351194181285713], \"color\": \"orange_4590\"},\n    {\"id\": 13, \"vector\": [-0.5449109892498731, 0.043511240563786524, -0.25105249484790804, -0.012030655265886425, -0.0010987671273892108], \"color\": \"pink_9619\"},\n    {\"id\": 14, \"vector\": [0.6603339372951424, -0.10866551787442225, -0.9435597754324891, 0.8230244263466688, -0.7986720938400362], \"color\": \"orange_4863\"},\n    {\"id\": 15, \"vector\": [-0.8825129181091456, -0.9204557711667729, -0.935350065513425, 0.5484069690287079, 0.24448151140671204], \"color\": \"orange_7984\"},\n    {\"id\": 16, \"vector\": [0.6285586391568163, 0.5389064528263487, -0.3163366239905099, 0.22036279378888013, 0.15077052220816167], \"color\": \"blue_9010\"},\n    {\"id\": 17, \"vector\": [-0.20151825016059233, -0.905239387635804, 0.6749305353372479, -0.7324272081377843, -0.33007998971889263], \"color\": \"blue_4521\"},\n    {\"id\": 18, \"vector\": [0.2432286610792349, 0.01785636564206139, -0.651356982731391, -0.35848148851027895, -0.7387383128324057], \"color\": \"orange_2529\"},\n    {\"id\": 19, \"vector\": [0.055512329053363674, 0.7100266349039421, 0.4956956543575197, 0.24541352586717702, 0.4209030729923515], \"color\": \"red_9437\"}\n]\n\nclient.create_partition(\n    collection_name=\"quick_setup\",\n    partition_name=\"partitionA\"\n)\n\nres = client.insert(\n    
collection_name=\"quick_setup\",\n    data=data,\n    partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"insert_count\": 10,\n#     \"ids\": [\n#         10,\n#         11,\n#         12,\n#         13,\n#         14,\n#         15,\n#         16,\n#         17,\n#         18,\n#         19\n#     ]\n# }\n","// 4. Insert some more data into a specific partition\ndata = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 10L, \"vector\", Arrays.asList(-0.5570353903748935f, -0.8997887893201304f, -0.7123782431855732f, -0.6298990746450119f, 0.6699215060604258f), \"color\", \"red_1202\")),\n    new JSONObject(Map.of(\"id\", 11L, \"vector\", Arrays.asList(0.6319019033373907f, 0.6821488267878275f, 0.8552303045704168f, 0.36929791364943054f, -0.14152860714878068f), \"color\", \"blue_4150\")),\n    new JSONObject(Map.of(\"id\", 12L, \"vector\", Arrays.asList(0.9483947484855766f, -0.32294203351925344f, 0.9759290319978025f, 0.8262982148666174f, -0.8351194181285713f), \"color\", \"orange_4590\")),\n    new JSONObject(Map.of(\"id\", 13L, \"vector\", Arrays.asList(-0.5449109892498731f, 0.043511240563786524f, -0.25105249484790804f, -0.012030655265886425f, -0.0010987671273892108f), \"color\", \"pink_9619\")),\n    new JSONObject(Map.of(\"id\", 14L, \"vector\", Arrays.asList(0.6603339372951424f, -0.10866551787442225f, -0.9435597754324891f, 0.8230244263466688f, -0.7986720938400362f), \"color\", \"orange_4863\")),\n    new JSONObject(Map.of(\"id\", 15L, \"vector\", Arrays.asList(-0.8825129181091456f, -0.9204557711667729f, -0.935350065513425f, 0.5484069690287079f, 0.24448151140671204f), \"color\", \"orange_7984\")),\n    new JSONObject(Map.of(\"id\", 16L, \"vector\", Arrays.asList(0.6285586391568163f, 0.5389064528263487f, -0.3163366239905099f, 0.22036279378888013f, 0.15077052220816167f), \"color\", \"blue_9010\")),\n    new JSONObject(Map.of(\"id\", 17L, \"vector\", Arrays.asList(-0.20151825016059233f, -0.905239387635804f, 0.6749305353372479f, -0.7324272081377843f, -0.33007998971889263f), \"color\", \"blue_4521\")),\n    new JSONObject(Map.of(\"id\", 18L, \"vector\", Arrays.asList(0.2432286610792349f, 0.01785636564206139f, -0.651356982731391f, -0.35848148851027895f, -0.7387383128324057f), \"color\", \"orange_2529\")),\n    new JSONObject(Map.of(\"id\", 19L, \"vector\", Arrays.asList(0.055512329053363674f, 0.7100266349039421f, 0.4956956543575197f, 0.24541352586717702f, 0.4209030729923515f), \"color\", \"red_9437\"))\n);\n\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n    .collectionName(\"quick_setup\")\n    .partitionName(\"partitionA\")\n    .build();\n\nclient.createPartition(createPartitionReq);\n\ninsertReq = InsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .partitionName(\"partitionA\")\n    .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 10}\n","// 4. 
Insert some more data into a specific partition\ndata = [\n    {id: 10, vector: [-0.5570353903748935, -0.8997887893201304, -0.7123782431855732, -0.6298990746450119, 0.6699215060604258], color: \"red_1202\"},\n    {id: 11, vector: [0.6319019033373907, 0.6821488267878275, 0.8552303045704168, 0.36929791364943054, -0.14152860714878068], color: \"blue_4150\"},\n    {id: 12, vector: [0.9483947484855766, -0.32294203351925344, 0.9759290319978025, 0.8262982148666174, -0.8351194181285713], color: \"orange_4590\"},\n    {id: 13, vector: [-0.5449109892498731, 0.043511240563786524, -0.25105249484790804, -0.012030655265886425, -0.0010987671273892108], color: \"pink_9619\"},\n    {id: 14, vector: [0.6603339372951424, -0.10866551787442225, -0.9435597754324891, 0.8230244263466688, -0.7986720938400362], color: \"orange_4863\"},\n    {id: 15, vector: [-0.8825129181091456, -0.9204557711667729, -0.935350065513425, 0.5484069690287079, 0.24448151140671204], color: \"orange_7984\"},\n    {id: 16, vector: [0.6285586391568163, 0.5389064528263487, -0.3163366239905099, 0.22036279378888013, 0.15077052220816167], color: \"blue_9010\"},\n    {id: 17, vector: [-0.20151825016059233, -0.905239387635804, 0.6749305353372479, -0.7324272081377843, -0.33007998971889263], color: \"blue_4521\"},\n    {id: 18, vector: [0.2432286610792349, 0.01785636564206139, -0.651356982731391, -0.35848148851027895, -0.7387383128324057], color: \"orange_2529\"},\n    {id: 19, vector: [0.055512329053363674, 0.7100266349039421, 0.4956956543575197, 0.24541352586717702, 0.4209030729923515], color: \"red_9437\"}\n]\n\nawait client.createPartition({\n    collection_name: \"quick_setup\",\n    partition_name: \"partitionA\"\n})\n\nres = await client.insert({\n    collection_name: \"quick_setup\",\n    data: data,\n    partition_name: \"partitionA\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 10\n// \n","# 5. 
Upsert some data\ndata=[\n    {\"id\": 0, \"vector\": [-0.619954382375778, 0.4479436794798608, -0.17493894838751745, -0.4248030059917294, -0.8648452746018911], \"color\": \"black_9898\"},\n    {\"id\": 1, \"vector\": [0.4762662251462588, -0.6942502138717026, -0.4490002642657902, -0.628696575798281, 0.9660395877041965], \"color\": \"red_7319\"},\n    {\"id\": 2, \"vector\": [-0.8864122635045097, 0.9260170474445351, 0.801326976181461, 0.6383943392381306, 0.7563037341572827], \"color\": \"white_6465\"},\n    {\"id\": 3, \"vector\": [0.14594326235891586, -0.3775407299900644, -0.3765479013078812, 0.20612075380355122, 0.4902678929632145], \"color\": \"orange_7580\"},\n    {\"id\": 4, \"vector\": [0.4548498669607359, -0.887610217681605, 0.5655081329910452, 0.19220509387904117, 0.016513983433433577], \"color\": \"red_3314\"},\n    {\"id\": 5, \"vector\": [0.11755001847051827, -0.7295149788999611, 0.2608115847524266, -0.1719167007897875, 0.7417611743754855], \"color\": \"black_9955\"},\n    {\"id\": 6, \"vector\": [0.9363032158314308, 0.030699901477745373, 0.8365910312319647, 0.7823840208444011, 0.2625222076909237], \"color\": \"yellow_2461\"},\n    {\"id\": 7, \"vector\": [0.0754823906014721, -0.6390658668265143, 0.5610517334334937, -0.8986261118798251, 0.9372056764266794], \"color\": \"white_5015\"},\n    {\"id\": 8, \"vector\": [-0.3038434006935904, 0.1279149203380523, 0.503958664270957, -0.2622661156746988, 0.7407627307791929], \"color\": \"purple_6414\"},\n    {\"id\": 9, \"vector\": [-0.7125086947677588, -0.8050968321012257, -0.32608864121785786, 0.3255654958645424, 0.26227968923834233], \"color\": \"brown_7231\"}\n]\n\nres = client.upsert(\n    collection_name='quick_setup',\n    data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"upsert_count\": 10\n# }\n","// 5. 
Upsert some data\ndata = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 0L, \"vector\", Arrays.asList(-0.619954382375778f, 0.4479436794798608f, -0.17493894838751745f, -0.4248030059917294f, -0.8648452746018911f), \"color\", \"black_9898\")),\n    new JSONObject(Map.of(\"id\", 1L, \"vector\", Arrays.asList(0.4762662251462588f, -0.6942502138717026f, -0.4490002642657902f, -0.628696575798281f, 0.9660395877041965f), \"color\", \"red_7319\")),\n    new JSONObject(Map.of(\"id\", 2L, \"vector\", Arrays.asList(-0.8864122635045097f, 0.9260170474445351f, 0.801326976181461f, 0.6383943392381306f, 0.7563037341572827f), \"color\", \"white_6465\")),\n    new JSONObject(Map.of(\"id\", 3L, \"vector\", Arrays.asList(0.14594326235891586f, -0.3775407299900644f, -0.3765479013078812f, 0.20612075380355122f, 0.4902678929632145f), \"color\", \"orange_7580\")),\n    new JSONObject(Map.of(\"id\", 4L, \"vector\", Arrays.asList(0.4548498669607359f, -0.887610217681605f, 0.5655081329910452f, 0.19220509387904117f, 0.016513983433433577f), \"color\", \"red_3314\")),\n    new JSONObject(Map.of(\"id\", 5L, \"vector\", Arrays.asList(0.11755001847051827f, -0.7295149788999611f, 0.2608115847524266f, -0.1719167007897875f, 0.7417611743754855f), \"color\", \"black_9955\")),\n    new JSONObject(Map.of(\"id\", 6L, \"vector\", Arrays.asList(0.9363032158314308f, 0.030699901477745373f, 0.8365910312319647f, 0.7823840208444011f, 0.2625222076909237f), \"color\", \"yellow_2461\")),\n    new JSONObject(Map.of(\"id\", 7L, \"vector\", Arrays.asList(0.0754823906014721f, -0.6390658668265143f, 0.5610517334334937f, -0.8986261118798251f, 0.9372056764266794f), \"color\", \"white_5015\")),\n    new JSONObject(Map.of(\"id\", 8L, \"vector\", Arrays.asList(-0.3038434006935904f, 0.1279149203380523f, 0.503958664270957f, -0.2622661156746988f, 0.7407627307791929f), \"color\", \"purple_6414\")),\n    new JSONObject(Map.of(\"id\", 9L, \"vector\", Arrays.asList(-0.7125086947677588f, -0.8050968321012257f, -0.32608864121785786f, 0.3255654958645424f, 0.26227968923834233f), \"color\", \"brown_7231\"))\n);\n\nUpsertReq upsertReq = UpsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .build();\n\nUpsertResp upsertResp = client.upsert(upsertReq);\n\nSystem.out.println(JSONObject.toJSON(upsertResp));\n\n// Output:\n// {\"upsertCnt\": 10}\n","// 5. 
Upsert some data\ndata = [\n    {id: 0, vector: [-0.619954382375778, 0.4479436794798608, -0.17493894838751745, -0.4248030059917294, -0.8648452746018911], color: \"black_9898\"},\n    {id: 1, vector: [0.4762662251462588, -0.6942502138717026, -0.4490002642657902, -0.628696575798281, 0.9660395877041965], color: \"red_7319\"},\n    {id: 2, vector: [-0.8864122635045097, 0.9260170474445351, 0.801326976181461, 0.6383943392381306, 0.7563037341572827], color: \"white_6465\"},\n    {id: 3, vector: [0.14594326235891586, -0.3775407299900644, -0.3765479013078812, 0.20612075380355122, 0.4902678929632145], color: \"orange_7580\"},\n    {id: 4, vector: [0.4548498669607359, -0.887610217681605, 0.5655081329910452, 0.19220509387904117, 0.016513983433433577], color: \"red_3314\"},\n    {id: 5, vector: [0.11755001847051827, -0.7295149788999611, 0.2608115847524266, -0.1719167007897875, 0.7417611743754855], color: \"black_9955\"},\n    {id: 6, vector: [0.9363032158314308, 0.030699901477745373, 0.8365910312319647, 0.7823840208444011, 0.2625222076909237], color: \"yellow_2461\"},\n    {id: 7, vector: [0.0754823906014721, -0.6390658668265143, 0.5610517334334937, -0.8986261118798251, 0.9372056764266794], color: \"white_5015\"},\n    {id: 8, vector: [-0.3038434006935904, 0.1279149203380523, 0.503958664270957, -0.2622661156746988, 0.7407627307791929], color: \"purple_6414\"},\n    {id: 9, vector: [-0.7125086947677588, -0.8050968321012257, -0.32608864121785786, 0.3255654958645424, 0.26227968923834233], color: \"brown_7231\"}\n]\n\nres = await client.upsert({\n    collection_name: \"quick_setup\",\n    data: data,\n})\n\nconsole.log(res.upsert_cnt)\n\n// Output\n// \n// 10\n// \n","# 6. Upsert data in partitions\ndata=[\n    {\"id\": 10, \"vector\": [0.06998888224297328, 0.8582816610326578, -0.9657938677934292, 0.6527905683627726, -0.8668460657158576], \"color\": \"black_3651\"},\n    {\"id\": 11, \"vector\": [0.6060703043917468, -0.3765080534566074, -0.7710758854987239, 0.36993888322346136, 0.5507513364206531], \"color\": \"grey_2049\"},\n    {\"id\": 12, \"vector\": [-0.9041813104515337, -0.9610546012461163, 0.20033003106083358, 0.11842506351635174, 0.8327356724591011], \"color\": \"blue_6168\"},\n    {\"id\": 13, \"vector\": [0.3202914977909075, -0.7279137773695252, -0.04747830871620273, 0.8266053056909548, 0.8277957187455489], \"color\": \"blue_1672\"},\n    {\"id\": 14, \"vector\": [0.2975811497890859, 0.2946936202691086, 0.5399463833894609, 0.8385334966677529, -0.4450543984655133], \"color\": \"pink_1601\"},\n    {\"id\": 15, \"vector\": [-0.04697464305600074, -0.08509022265734134, 0.9067184632552001, -0.2281912685064822, -0.9747503428652762], \"color\": \"yellow_9925\"},\n    {\"id\": 16, \"vector\": [-0.9363075919673911, -0.8153981031085669, 0.7943039120490902, -0.2093886809842529, 0.0771191335807897], \"color\": \"orange_9872\"},\n    {\"id\": 17, \"vector\": [-0.050451522820639916, 0.18931572752321935, 0.7522886192190488, -0.9071793089474034, 0.6032647330692296], \"color\": \"red_6450\"},\n    {\"id\": 18, \"vector\": [-0.9181544231141592, 0.6700755998126806, -0.014174674636136642, 0.6325780463623432, -0.49662222164032976], \"color\": \"purple_7392\"},\n    {\"id\": 19, \"vector\": [0.11426945899602536, 0.6089190684002581, -0.5842735738352236, 0.057050610092692855, -0.035163433018196244], \"color\": \"pink_4996\"}\n]\n\nres = client.upsert(\n    collection_name=\"quick_setup\",\n    data=data,\n    partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"upsert_count\": 10\n# }\n","import 
io.milvus.v2.service.vector.request.UpsertReq;\nimport io.milvus.v2.service.vector.response.UpsertResp;\n\n// 6. Upsert data in parition\n\ndata = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 10L, \"vector\", Arrays.asList(0.06998888224297328f, 0.8582816610326578f, -0.9657938677934292f, 0.6527905683627726f, -0.8668460657158576f), \"color\", \"black_3651\")),\n    new JSONObject(Map.of(\"id\", 11L, \"vector\", Arrays.asList(0.6060703043917468f, -0.3765080534566074f, -0.7710758854987239f, 0.36993888322346136f, 0.5507513364206531f), \"color\", \"grey_2049\")),\n    new JSONObject(Map.of(\"id\", 12L, \"vector\", Arrays.asList(-0.9041813104515337f, -0.9610546012461163f, 0.20033003106083358f, 0.11842506351635174f, 0.8327356724591011f), \"color\", \"blue_6168\")),\n    new JSONObject(Map.of(\"id\", 13L, \"vector\", Arrays.asList(0.3202914977909075f, -0.7279137773695252f, -0.04747830871620273f, 0.8266053056909548f, 0.8277957187455489f), \"color\", \"blue_1672\")),\n    new JSONObject(Map.of(\"id\", 14L, \"vector\", Arrays.asList(0.2975811497890859f, 0.2946936202691086f, 0.5399463833894609f, 0.8385334966677529f, -0.4450543984655133f), \"color\", \"pink_1601\")),\n    new JSONObject(Map.of(\"id\", 15L, \"vector\", Arrays.asList(-0.04697464305600074f, -0.08509022265734134f, 0.9067184632552001f, -0.2281912685064822f, -0.9747503428652762f), \"color\", \"yellow_9925\")),\n    new JSONObject(Map.of(\"id\", 16L, \"vector\", Arrays.asList(-0.9363075919673911f, -0.8153981031085669f, 0.7943039120490902f, -0.2093886809842529f, 0.0771191335807897f), \"color\", \"orange_9872\")),\n    new JSONObject(Map.of(\"id\", 17L, \"vector\", Arrays.asList(-0.050451522820639916f, 0.18931572752321935f, 0.7522886192190488f, -0.9071793089474034f, 0.6032647330692296f), \"color\", \"red_6450\")),\n    new JSONObject(Map.of(\"id\", 18L, \"vector\", Arrays.asList(-0.9181544231141592f, 0.6700755998126806f, -0.014174674636136642f, 0.6325780463623432f, -0.49662222164032976f), \"color\", \"purple_7392\")),\n    new JSONObject(Map.of(\"id\", 19L, \"vector\", Arrays.asList(0.11426945899602536f, 0.6089190684002581f, -0.5842735738352236f, 0.057050610092692855f, -0.035163433018196244f), \"color\", \"pink_4996\"))\n);\n\nupsertReq = UpsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .partitionName(\"partitionA\")\n    .build();\n\nupsertResp = client.upsert(upsertReq);\n\nSystem.out.println(JSONObject.toJSON(upsertResp));\n\n// Output:\n// {\"upsertCnt\": 10}\n","// 6. 
Upsert data in partitions\ndata = [\n    {id: 10, vector: [0.06998888224297328, 0.8582816610326578, -0.9657938677934292, 0.6527905683627726, -0.8668460657158576], color: \"black_3651\"},\n    {id: 11, vector: [0.6060703043917468, -0.3765080534566074, -0.7710758854987239, 0.36993888322346136, 0.5507513364206531], color: \"grey_2049\"},\n    {id: 12, vector: [-0.9041813104515337, -0.9610546012461163, 0.20033003106083358, 0.11842506351635174, 0.8327356724591011], color: \"blue_6168\"},\n    {id: 13, vector: [0.3202914977909075, -0.7279137773695252, -0.04747830871620273, 0.8266053056909548, 0.8277957187455489], color: \"blue_1672\"},\n    {id: 14, vector: [0.2975811497890859, 0.2946936202691086, 0.5399463833894609, 0.8385334966677529, -0.4450543984655133], color: \"pink_1601\"},\n    {id: 15, vector: [-0.04697464305600074, -0.08509022265734134, 0.9067184632552001, -0.2281912685064822, -0.9747503428652762], color: \"yellow_9925\"},\n    {id: 16, vector: [-0.9363075919673911, -0.8153981031085669, 0.7943039120490902, -0.2093886809842529, 0.0771191335807897], color: \"orange_9872\"},\n    {id: 17, vector: [-0.050451522820639916, 0.18931572752321935, 0.7522886192190488, -0.9071793089474034, 0.6032647330692296], color: \"red_6450\"},\n    {id: 18, vector: [-0.9181544231141592, 0.6700755998126806, -0.014174674636136642, 0.6325780463623432, -0.49662222164032976], color: \"purple_7392\"},\n    {id: 19, vector: [0.11426945899602536, 0.6089190684002581, -0.5842735738352236, 0.057050610092692855, -0.035163433018196244], color: \"pink_4996\"}\n]\n\nres = await client.upsert({\n    collection_name: \"quick_setup\",\n    data: data,\n    partition_name: \"partitionA\"\n})\n\nconsole.log(res.upsert_cnt)\n\n// Output\n// \n// 10\n// \n","# 7. Delete entities\nres = client.delete(\n    collection_name=\"quick_setup\",\n    filter=\"id in [4,5,6]\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"delete_count\": 3\n# }\n","import io.milvus.v2.service.vector.request.DeleteReq;\nimport io.milvus.v2.service.vector.response.DeleteResp;\n\n\n// 7. Delete entities\n\nDeleteReq deleteReq = DeleteReq.builder()\n    .collectionName(\"quick_setup\")\n    .filter(\"id in [4, 5, 6]\")\n    .build();\n\nDeleteResp deleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 3}\n","// 7. 
Delete entities\nres = await client.delete({\n    collection_name: \"quick_setup\",\n    filter: \"id in [4,5,6]\"\n})\n\nconsole.log(res.delete_cnt)\n\n// Output\n// \n// 3\n// \n","res = client.delete(\n    collection_name=\"quick_setup\",\n    ids=[18, 19],\n    partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"delete_count\": 2\n# }\n","deleteReq = DeleteReq.builder()\n    .collectionName(\"quick_setup\")\n    .ids(Arrays.asList(18L, 19L))\n    .partitionName(\"partitionA\")\n    .build();\n\ndeleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 2}\n","res = await client.delete({\n    collection_name: \"quick_setup\",\n    ids: [18, 19],\n    partition_name: \"partitionA\"\n})\n\nconsole.log(res.delete_cnt)\n\n// Output\n// \n// 2\n// \n","res = client.delete(\ncollection_name='quick_setup',\npartition_name='partitionA',\nfilter='color like \"blue%\"'\n)\n\nprint(\"Entities deleted from partitionA: \", res['delete_count'])\n\n# Output:\n# Entities deleted from partitionA:  3\n","deleteReq = DeleteReq.builder()\n    .collectionName(\"quick_setup\")\n    .filter('color like \"blue%\"')\n    .partitionName(\"partitionA\")\n    .build();\n\ndeleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 3}\n","res = await client.delete({\ncollection_name: \"quick_setup\",\npartition_name: \"partitionA\",\nfilter: 'color like \"blue%\"'\n})\n\nconsole.log(\"Entities deleted from partitionA: \" + res.delete_cnt)\n\n// Output:\n// Entities deleted from partitionA: 3\n"],"headingContent":"Insert, Upsert & Delete","anchorList":[{"label":"삽입, 위로 올리기 및 삭제","href":"Insert-Upsert--Delete","type":1,"isActive":false},{"label":"시작하기 전에","href":"Before-you-start","type":2,"isActive":false},{"label":"개요","href":"Overview","type":2,"isActive":false},{"label":"준비","href":"Preparations","type":2,"isActive":false},{"label":"엔티티 삽입","href":"Insert-entities","type":2,"isActive":false},{"label":"엔티티 삽입","href":"Upsert-entities","type":2,"isActive":false},{"label":"엔터티 삭제","href":"Delete-entities","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/ko/userGuide/insert-update-delete.md b/localization/v2.4.x/site/ko/userGuide/insert-update-delete.md
    index 85b236b82..6dc2e8ddf 100644
    --- a/localization/v2.4.x/site/ko/userGuide/insert-update-delete.md
    +++ b/localization/v2.4.x/site/ko/userGuide/insert-update-delete.md
    @@ -1,7 +1,7 @@
     ---
     id: insert-update-delete.md
 summary: '이 가이드에서는 삽입, 업서트, 삭제 등 컬렉션 내 데이터 조작 작업에 대해 안내합니다.'
    -title: '삽입, 업서트 및 삭제'
    +title: '삽입, 위로 올리기 및 삭제'
     ---
     

    삽입, 위로 올리기 및 삭제

    Milvus가 컬렉션의 거의 모든 것을 결정하는 대신, 사용자가 직접 컬렉션의 스키마와 인덱스 매개변수를 결정할 수 있습니다.

    -

    1단계: 스키마 설정

    스키마는 컬렉션의 구조를 정의합니다. 스키마 내에서 enable_dynamic_field 를 활성화 또는 비활성화하고, 미리 정의된 필드를 추가하고, 각 필드에 대한 속성을 설정하는 옵션이 있습니다. 스키마의 개념과 사용 가능한 데이터 유형에 대한 자세한 설명은 스키마 설명을 참조하세요.

    +

    1단계: 스키마 설정

    스키마는 컬렉션의 구조를 정의합니다. 스키마 내에서 enable_dynamic_field 를 활성화 또는 비활성화하고, 미리 정의된 필드를 추가하고, 각 필드에 대한 속성을 설정할 수 있는 옵션이 있습니다. 스키마의 개념과 사용 가능한 데이터 유형에 대한 자세한 설명은 스키마 설명을 참조하세요.

스키마를 설정하려면 create_schema() 를 사용하여 스키마 개체를 만들고 add_field() 를 사용하여 스키마에 필드를 추가합니다.
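다음은 위 두 메서드를 이어서 사용하는 최소 스케치입니다(pymilvus 의 MilvusClient 를 가정하며, 필드 이름 my_id 와 my_vector 는 설명용 예시입니다).

from pymilvus import MilvusClient, DataType

# Build a schema object, then add two predefined fields to it
schema = MilvusClient.create_schema(
    auto_id=False,
    enable_dynamic_field=True,
)

schema.add_field(field_name="my_id", datatype=DataType.INT64, is_primary=True)
schema.add_field(field_name="my_vector", datatype=DataType.FLOAT_VECTOR, dim=5)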

@@ -334,7 +334,7 @@ schema.addField(AddFieldReq.builder()

 dim
-벡터 임베딩의 차원. DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR, DataType.FLOAT16_VECTOR 또는 DataType.BFLOAT16_VECTOR 유형의 필드에 필수입니다. DataType.SPARSE_FLOAT_VECTOR를 사용하는 경우 이 매개변수는 생략하세요.
+벡터 임베딩의 차원. DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR, DataType.FLOAT16_VECTOR 또는 DataType.BFLOAT16_VECTOR 유형의 필드에 필수입니다. DataType.SPARSE_FLOAT_VECTOR를 사용하는 경우 이 매개변수는 생략하세요.

@@ -571,7 +571,7 @@ indexParams.add(indexParamForVectorField);

 index_type
-특정 필드에서 데이터를 정렬하는 데 사용되는 알고리즘의 이름입니다. 적용 가능한 알고리즘은 인메모리 인덱스 및 온디스크 인덱스를 참조하십시오.
+특정 필드에서 데이터를 정렬하는 데 사용되는 알고리즘의 이름입니다. 적용 가능한 알고리즘은 인메모리 인덱스 및 온디스크 인덱스를 참조하십시오.
 metric_type

@@ -881,11 +881,11 @@ $ curl -X POST "http://

 schema
-이 컬렉션의 스키마입니다. 이를 없음으로 설정하면 이 컬렉션이 기본 설정으로 생성됩니다. 사용자 정의 스키마로 컬렉션을 설정하려면 CollectionSchema 개체를 만들어 여기에서 참조해야 합니다. 이 경우 Milvus는 요청에 포함된 다른 모든 스키마 관련 설정을 무시합니다.
+이 컬렉션의 스키마입니다. 이를 없음으로 설정하면 이 컬렉션이 기본 설정으로 만들어집니다. 사용자 정의 스키마로 컬렉션을 설정하려면 CollectionSchema 개체를 만들어 여기에서 참조해야 합니다. 이 경우 Milvus는 요청에 포함된 다른 모든 스키마 관련 설정을 무시합니다.
 index_params
-이 컬렉션의 벡터 필드에 인덱스를 구축하기 위한 매개변수입니다. 사용자 정의 스키마로 컬렉션을 설정하고 컬렉션을 메모리에 자동으로 로드하려면 IndexParams 개체를 만들고 여기에서 참조해야 합니다. 이 컬렉션의 벡터 필드에 대한 인덱스는 최소한 추가해야 합니다. 나중에 인덱스 매개변수를 설정하려는 경우 이 매개변수를 건너뛸 수도 있습니다.
+이 컬렉션의 벡터 필드에 인덱스를 구축하기 위한 매개변수입니다. 사용자 정의 스키마로 컬렉션을 설정하고 컬렉션을 메모리에 자동으로 로드하려면 IndexParams 개체를 만들고 여기에서 참조해야 합니다. 이 컬렉션의 벡터 필드에 대한 인덱스를 최소한 추가해야 합니다. 나중에 인덱스 매개변수를 설정하려는 경우 이 매개변수를 건너뛸 수도 있습니다.

@@ -1096,7 +1096,7 @@ $ curl -X POST "http://

 index_params
-인덱스 파라미터 객체 목록이 포함된 인덱스 파라미터 객체입니다.
+인덱스 파라미터 객체 목록이 포함된 인덱스 파라미터 객체입니다.

@@ -1593,11 +1593,38 @@ $ curl -X POST "http://
    -

    컬렉션 릴리즈

    +

    컬렉션 부분 로드(공개 미리 보기)

    +

    이 기능은 현재 공개 미리 보기 중입니다. API와 기능은 향후 변경될 수 있습니다.

    +
    +

    로드 요청을 받으면 Milvus는 모든 벡터 필드 인덱스와 모든 스칼라 필드 데이터를 메모리에 로드합니다. 일부 필드가 검색 및 쿼리에 포함되지 않을 경우, 메모리 사용량을 줄이기 위해 로드에서 제외하여 검색 성능을 개선할 수 있습니다.

    +
    +
+# 7. Load the collection
+client.load_collection(
+    collection_name="customized_setup_2",
+    load_fields=["my_id", "my_vector"], # Load only the specified fields
+    skip_load_dynamic_field=True # Skip loading the dynamic field
+)
    +
    +res = client.get_load_state(
    +    collection_name="customized_setup_2"
    +)
    +
    +print(res)
    +
    +# Output
    +#
    +# {
    +#     "state": "<LoadState: Loaded>"
    +# }
    +
    +

    load_fields 에 나열된 필드만 검색 및 쿼리에서 필터링 조건 및 출력 필드로 사용할 수 있습니다. 목록에는 항상 기본 키를 포함해야 합니다. 로드에서 제외된 필드 이름은 필터링이나 출력에 사용할 수 없습니다.

    +

skip_load_dynamic_field=True 를 사용하여 동적 필드 로드를 건너뛸 수 있습니다. Milvus는 동적 필드를 단일 필드로 취급하므로 동적 필드의 모든 키가 함께 포함되거나 제외됩니다. 부분 로드된 컬렉션에서의 검색 예시는 아래 스케치를 참조하세요.

    +
    +
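다음은 부분 로드된 컬렉션에서의 검색을 보여 주는 최소 스케치입니다. 위 예시처럼 my_id 와 my_vector 만 로드했다고 가정하며, 쿼리 벡터 값은 설명용 예시입니다.

# Search the partially loaded collection.
# Only fields listed in load_fields can be used in filters or output_fields.
res = client.search(
    collection_name="customized_setup_2",
    data=[[0.1, 0.2, 0.3, 0.4, 0.5]],  # example query vector; match your collection's dim
    limit=3,
    output_fields=["my_id"]  # loaded fields only
)

print(res)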

    컬렉션 릴리스

컬렉션을 릴리스하려면 release_collection() 메서드를 사용하여 컬렉션 이름을 지정합니다.

-

컬렉션을 릴리스하려면 releaseCollection() 메서드를 사용하여 컬렉션 이름을 지정합니다.

+

컬렉션을 해제하려면 releaseCollection() 메서드를 사용하여 컬렉션 이름을 지정합니다.

컬렉션을 릴리스하려면 releaseCollection() 메서드를 사용하여 컬렉션 이름을 지정합니다.
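다음은 pymilvus 를 가정한 최소 스케치로, 컬렉션을 해제한 뒤 로드 상태를 확인합니다(컬렉션 이름은 앞 예시의 customized_setup_2 를 사용).

# Release the collection from memory, then check its load state
client.release_collection(collection_name="customized_setup_2")

res = client.get_load_state(collection_name="customized_setup_2")

print(res)

# Example output (for reference):
#
# {
#     "state": "<LoadState: NotLoad>"
# }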

@@ -1708,7 +1735,7 @@ $ curl -X POST "http://

-

    컬렉션에 별칭을 지정하여 컬렉션을 특정 맥락에서 더 의미 있게 만들 수 있습니다. 컬렉션에 여러 개의 별칭을 지정할 수 있지만 여러 컬렉션이 하나의 별칭을 공유할 수는 없습니다.

    +

    컬렉션에 별칭을 지정하여 컬렉션을 특정 맥락에서 더 의미 있게 만들 수 있습니다. 컬렉션에 별칭을 여러 개 지정할 수 있지만 여러 컬렉션이 별칭을 공유할 수는 없습니다.

    별칭 만들기

별칭을 만들려면 create_alias() 메서드를 사용하여 컬렉션 이름과 별칭을 지정합니다.
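다음은 pymilvus 를 가정한 최소 스케치입니다. 별칭 이름 my_alias 는 설명용 예시입니다.

# Create an alias; it can then be used wherever a collection name is expected
client.create_alias(
    collection_name="customized_setup_2",
    alias="my_alias"
)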

@@ -2323,7 +2350,7 @@ collection.set_properties(
    -

    MMAP 설정

    쿼리 성능을 개선하기 위해 데이터를 메모리에 매핑할지 여부를 결정하는 컬렉션의 메모리 매핑(MMAP) 속성을 구성합니다. 자세한 내용은 메모리 매핑 구성을 참조하세요 .

    +

    MMAP 설정

    쿼리 성능을 개선하기 위해 데이터를 메모리에 매핑할지 여부를 결정하는 컬렉션의 메모리 매핑(MMAP) 속성을 구성합니다. 자세한 내용은 메모리 매핑 구성을 참조하세요.

    MMAP 속성을 설정하기 전에 먼저 컬렉션을 해제하세요. 그렇지 않으면 오류가 발생합니다.
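다음은 위 diff 컨텍스트에 보이는 collection.set_properties( 호출을 바탕으로 한 최소 스케치입니다(pymilvus ORM 을 가정하며, 컬렉션 이름 quick_setup 은 앞 예시에서 가져온 것입니다).

from pymilvus import connections, Collection

connections.connect(uri="http://localhost:19530")

collection = Collection("quick_setup")

# Release the collection first; changing MMAP on a loaded collection raises an error
collection.release()

# Enable memory mapping (MMAP) for the collection
collection.set_properties(properties={"mmap.enabled": True})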

    diff --git a/localization/v2.4.x/site/ko/userGuide/manage-indexes/index-vector-fields.json b/localization/v2.4.x/site/ko/userGuide/manage-indexes/index-vector-fields.json index 3d867a4c8..40ac6e1f0 100644 --- a/localization/v2.4.x/site/ko/userGuide/manage-indexes/index-vector-fields.json +++ b/localization/v2.4.x/site/ko/userGuide/manage-indexes/index-vector-fields.json @@ -1 +1 @@ -{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create schema\n# 2.1. Create schema\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n)\n\n# 2.2. Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\n\n# 3. Create collection\nclient.create_collection(\n collection_name=\"customized_setup\", \n schema=schema, \n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder().fieldName(\"id\").dataType(DataType.Int64).isPrimaryKey(true).autoID(false).build());\nschema.addField(AddFieldReq.builder().fieldName(\"vector\").dataType(DataType.FloatVector).dimension(5).build());\n\n// 3 Create a collection without schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n.collectionName(\"customized_setup\")\n.collectionSchema(schema)\n.build();\n\nclient.createCollection(customizedSetupReq);\n","// 1. Set up a Milvus Client\nclient = new MilvusClient({address, token});\n\n// 2. Define fields for the collection\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n autoID: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n]\n\n// 3. Create a collection\nres = await client.createCollection({\n collection_name: \"customized_setup\",\n fields: fields,\n})\n\nconsole.log(res.error_code) \n\n// Output\n// \n// Success\n// \n","# 4.1. Set up the index parameters\nindex_params = MilvusClient.prepare_index_params()\n\n# 4.2. Add an index on the vector field.\nindex_params.add_index(\n field_name=\"vector\",\n metric_type=\"COSINE\",\n index_type=\"IVF_FLAT\",\n index_name=\"vector_index\",\n params={ \"nlist\": 128 }\n)\n\n# 4.3. 
Create an index file\nclient.create_index(\n collection_name=\"customized_setup\",\n index_params=index_params\n)\n","import io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.index.request.CreateIndexReq;\n\n// 4 Prepare index parameters\n\n// 4.2 Add an index for the vector field \"vector\"\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexName(\"vector_index\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.COSINE)\n .extraParams(Map.of(\"nlist\", 128))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n\n// 4.3 Crate an index file\nCreateIndexReq createIndexReq = CreateIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexParams(indexParams)\n .build();\n\nclient.createIndex(createIndexReq);\n","// 4. Set up index for the collection\n// 4.1. Set up the index parameters\nres = await client.createIndex({\n collection_name: \"customized_setup\",\n field_name: \"vector\",\n index_type: \"AUTOINDEX\",\n metric_type: \"COSINE\", \n index_name: \"vector_index\",\n params: { \"nlist\": 128 }\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","# 5. Describe index\nres = client.list_indexes(\n collection_name=\"customized_setup\"\n)\n\nprint(res)\n\n# Output\n#\n# [\n# \"vector_index\",\n# ]\n\nres = client.describe_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"index_type\": ,\n# \"metric_type\": \"COSINE\",\n# \"field_name\": \"vector\",\n# \"index_name\": \"vector_index\"\n# }\n","import io.milvus.v2.service.index.request.DescribeIndexReq;\nimport io.milvus.v2.service.index.response.DescribeIndexResp;\n\n// 5. Describe index\n// 5.1 List the index names\nListIndexesReq listIndexesReq = ListIndexesReq.builder()\n .collectionName(\"customized_setup\")\n .build();\n\nList indexNames = client.listIndexes(listIndexesReq);\n\nSystem.out.println(indexNames);\n\n// Output:\n// [\n// \"vector_index\"\n// ]\n\n// 5.2 Describe an index\nDescribeIndexReq describeIndexReq = DescribeIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nDescribeIndexResp describeIndexResp = client.describeIndex(describeIndexReq);\n\nSystem.out.println(JSONObject.toJSON(describeIndexResp));\n\n// Output:\n// {\n// \"metricType\": \"COSINE\",\n// \"indexType\": \"AUTOINDEX\",\n// \"fieldName\": \"vector\",\n// \"indexName\": \"vector_index\"\n// }\n","// 5. Describe the index\nres = await client.describeIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(JSON.stringify(res.index_descriptions, null, 2))\n\n// Output\n// \n// [\n// {\n// \"params\": [\n// {\n// \"key\": \"index_type\",\n// \"value\": \"AUTOINDEX\"\n// },\n// {\n// \"key\": \"metric_type\",\n// \"value\": \"COSINE\"\n// }\n// ],\n// \"index_name\": \"vector_index\",\n// \"indexID\": \"449007919953063141\",\n// \"field_name\": \"vector\",\n// \"indexed_rows\": \"0\",\n// \"total_rows\": \"0\",\n// \"state\": \"Finished\",\n// \"index_state_fail_reason\": \"\",\n// \"pending_index_rows\": \"0\"\n// }\n// ]\n// \n","# 6. Drop index\nclient.drop_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n","// 6. Drop index\n\nDropIndexReq dropIndexReq = DropIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nclient.dropIndex(dropIndexReq);\n","// 6. 
Drop the index\nres = await client.dropIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n"],"headingContent":"","anchorList":[{"label":"벡터 필드 색인 생성","href":"Index-Vector-Fields","type":1,"isActive":false},{"label":"개요","href":"Overview","type":2,"isActive":false},{"label":"준비","href":"Preparations","type":2,"isActive":false},{"label":"컬렉션 색인 생성","href":"Index-a-Collection","type":2,"isActive":false},{"label":"인덱스 세부 정보 확인","href":"Check-Index-Details","type":2,"isActive":false},{"label":"인덱스 삭제","href":"Drop-an-Index","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create schema\n# 2.1. Create schema\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n)\n\n# 2.2. Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\n\n# 3. Create collection\nclient.create_collection(\n collection_name=\"customized_setup\", \n schema=schema, \n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder().fieldName(\"id\").dataType(DataType.Int64).isPrimaryKey(true).autoID(false).build());\nschema.addField(AddFieldReq.builder().fieldName(\"vector\").dataType(DataType.FloatVector).dimension(5).build());\n\n// 3 Create a collection without schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n.collectionName(\"customized_setup\")\n.collectionSchema(schema)\n.build();\n\nclient.createCollection(customizedSetupReq);\n","// 1. Set up a Milvus Client\nclient = new MilvusClient({address, token});\n\n// 2. Define fields for the collection\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n autoID: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n]\n\n// 3. Create a collection\nres = await client.createCollection({\n collection_name: \"customized_setup\",\n fields: fields,\n})\n\nconsole.log(res.error_code) \n\n// Output\n// \n// Success\n// \n","# 4.1. Set up the index parameters\nindex_params = MilvusClient.prepare_index_params()\n\n# 4.2. Add an index on the vector field.\nindex_params.add_index(\n field_name=\"vector\",\n metric_type=\"COSINE\",\n index_type=\"IVF_FLAT\",\n index_name=\"vector_index\",\n params={ \"nlist\": 128 }\n)\n\n# 4.3. Create an index file\nclient.create_index(\n collection_name=\"customized_setup\",\n index_params=index_params,\n sync=False # Whether to wait for index creation to complete before returning. 
Defaults to True.\n)\n","import io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.index.request.CreateIndexReq;\n\n// 4 Prepare index parameters\n\n// 4.2 Add an index for the vector field \"vector\"\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexName(\"vector_index\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.COSINE)\n .extraParams(Map.of(\"nlist\", 128))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n\n// 4.3 Crate an index file\nCreateIndexReq createIndexReq = CreateIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexParams(indexParams)\n .build();\n\nclient.createIndex(createIndexReq);\n","// 4. Set up index for the collection\n// 4.1. Set up the index parameters\nres = await client.createIndex({\n collection_name: \"customized_setup\",\n field_name: \"vector\",\n index_type: \"AUTOINDEX\",\n metric_type: \"COSINE\", \n index_name: \"vector_index\",\n params: { \"nlist\": 128 }\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","# 5. Describe index\nres = client.list_indexes(\n collection_name=\"customized_setup\"\n)\n\nprint(res)\n\n# Output\n#\n# [\n# \"vector_index\",\n# ]\n\nres = client.describe_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"index_type\": ,\n# \"metric_type\": \"COSINE\",\n# \"field_name\": \"vector\",\n# \"index_name\": \"vector_index\"\n# }\n","import io.milvus.v2.service.index.request.DescribeIndexReq;\nimport io.milvus.v2.service.index.response.DescribeIndexResp;\n\n// 5. Describe index\n// 5.1 List the index names\nListIndexesReq listIndexesReq = ListIndexesReq.builder()\n .collectionName(\"customized_setup\")\n .build();\n\nList indexNames = client.listIndexes(listIndexesReq);\n\nSystem.out.println(indexNames);\n\n// Output:\n// [\n// \"vector_index\"\n// ]\n\n// 5.2 Describe an index\nDescribeIndexReq describeIndexReq = DescribeIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nDescribeIndexResp describeIndexResp = client.describeIndex(describeIndexReq);\n\nSystem.out.println(JSONObject.toJSON(describeIndexResp));\n\n// Output:\n// {\n// \"metricType\": \"COSINE\",\n// \"indexType\": \"AUTOINDEX\",\n// \"fieldName\": \"vector\",\n// \"indexName\": \"vector_index\"\n// }\n","// 5. Describe the index\nres = await client.describeIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(JSON.stringify(res.index_descriptions, null, 2))\n\n// Output\n// \n// [\n// {\n// \"params\": [\n// {\n// \"key\": \"index_type\",\n// \"value\": \"AUTOINDEX\"\n// },\n// {\n// \"key\": \"metric_type\",\n// \"value\": \"COSINE\"\n// }\n// ],\n// \"index_name\": \"vector_index\",\n// \"indexID\": \"449007919953063141\",\n// \"field_name\": \"vector\",\n// \"indexed_rows\": \"0\",\n// \"total_rows\": \"0\",\n// \"state\": \"Finished\",\n// \"index_state_fail_reason\": \"\",\n// \"pending_index_rows\": \"0\"\n// }\n// ]\n// \n","# 6. Drop index\nclient.drop_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n","// 6. Drop index\n\nDropIndexReq dropIndexReq = DropIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nclient.dropIndex(dropIndexReq);\n","// 6. 
Drop the index\nres = await client.dropIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n"],"headingContent":"Index Vector Fields","anchorList":[{"label":"벡터 필드 색인 생성","href":"Index-Vector-Fields","type":1,"isActive":false},{"label":"개요","href":"Overview","type":2,"isActive":false},{"label":"준비","href":"Preparations","type":2,"isActive":false},{"label":"컬렉션 색인 생성","href":"Index-a-Collection","type":2,"isActive":false},{"label":"인덱스 세부 정보 확인","href":"Check-Index-Details","type":2,"isActive":false},{"label":"인덱스 삭제","href":"Drop-an-Index","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/userGuide/manage-indexes/index-vector-fields.md b/localization/v2.4.x/site/ko/userGuide/manage-indexes/index-vector-fields.md index 0df473f6d..3d15607d0 100644 --- a/localization/v2.4.x/site/ko/userGuide/manage-indexes/index-vector-fields.md +++ b/localization/v2.4.x/site/ko/userGuide/manage-indexes/index-vector-fields.md @@ -2,7 +2,7 @@ id: index-vector-fields.md order: 1 summary: 이 가이드에서는 컬렉션의 벡터 필드에서 인덱스를 만들고 관리하는 기본 작업을 안내합니다. -title: 인덱스 벡터 필드 +title: 벡터 필드 색인 생성 ---

    Index Vector Fields

    import io.milvus.v2.common.IndexParam;
    @@ -327,7 +328,11 @@ res = await client.index_params
    -      An index parameters object containing a list of index parameter objects.
    +      An IndexParams object containing a list of IndexParams objects.
    +    
    +    
    +      sync
    +      Controls how the index is built in relation to the client's request. Valid values are as follows:
    • True (default): The client waits until the index is fully built before it returns. This means you will not receive a response until the process completes.
    • False: The client returns as soon as the request is received, and the index is built in the background. To check whether index creation is complete, use the describe_index() method.
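
    For instance, a minimal sketch of the non-blocking mode, assuming the client, collection, and index_params defined earlier in this guide:

    # Returns immediately; the index is built in the background
    client.create_index(
        collection_name="customized_setup",
        index_params=index_params,
        sync=False
    )

    # Poll the index state later to confirm the build has finished
    res = client.describe_index(
        collection_name="customized_setup",
        index_name="vector_index"
    )
    print(res)
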
    diff --git a/localization/v2.4.x/site/ko/userGuide/manage-partitions.json b/localization/v2.4.x/site/ko/userGuide/manage-partitions.json index f7ebdfc0f..6d439c520 100644 --- a/localization/v2.4.x/site/ko/userGuide/manage-partitions.json +++ b/localization/v2.4.x/site/ko/userGuide/manage-partitions.json @@ -1 +1 @@ -{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n)\n\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .build();\n\nclient.createCollection(quickSetupReq);\n","const address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n}); \n","# 3. List partitions\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\"]\n","import io.milvus.v2.service.partition.request.ListPartitionsReq;\n\n// 3. List all partitions in the collection\nListPartitionsReq listPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nList partitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\"_default\"]\n","// 3. List partitions\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default' ]\n// \n","# 4. Create more partitions\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\", \"partitionB\"]\n","import io.milvus.v2.service.partition.request.CreatePartitionReq;\n\n// 4. Create more partitions\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\ncreatePartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\nlistPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\npartitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\n// \"_default\",\n// \"partitionA\",\n// \"partitionB\"\n// ]\n","// 4. 
Create more partitions\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default', 'partitionA', 'partitionB' ]\n// \n","# 5. Check whether a partition exists\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\nprint(res)\n\n# Output\n#\n# True\n\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionC\"\n)\nprint(res)\n\n# Output\n#\n# False\n","import io.milvus.v2.service.partition.request.HasPartitionReq;\n\n// 5. Check whether a partition exists\nHasPartitionReq hasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nboolean exists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// true\n\nhasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionC\")\n .build();\n\nexists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// false\n","// 5. Check whether a partition exists\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// true\n// \n\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionC\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// false\n// \n","# Release the collection\nclient.release_collection(collection_name=\"quick_setup\")\n\n# Check the load status\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionB\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.collection.request.ReleaseCollectionReq;\nimport io.milvus.v2.service.partition.request.LoadPartitionsReq;\nimport io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 6. 
Load a partition independantly\n// 6.1 Release the collection\nReleaseCollectionReq releaseCollectionReq = ReleaseCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nclient.releaseCollection(releaseCollectionReq);\n\n// 6.2 Load partitionA\nLoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\nThread.sleep(3000);\n\n// 6.3 Check the load status of the collection and its partitions\nGetLoadStateReq getLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 6. Load a partition indenpendantly\nawait client.releaseCollection({\n collection_name: \"quick_setup\"\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n\nawait client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nawait sleep(3000)\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n//\n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n 
.collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\", \"partitionB\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n","# 7. Release a partition\nclient.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 7. Release a partition\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 7. Release a partition\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n","client.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"_default\", \"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","# 8. 
Drop a partition\nclient.drop_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\"]\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"_default\", \"partitionA\", \"partitionB\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"_default\", \"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// {\n// status: {\n// error_code: 'Success',\n// reason: '',\n// code: 0,\n// retriable: false,\n// detail: ''\n// },\n// state: 'LoadStateNotLoad'\n// }\n// \n"],"headingContent":"","anchorList":[{"label":"파티션 관리","href":"Manage-Partitions","type":1,"isActive":false},{"label":"개요","href":"Overview","type":2,"isActive":false},{"label":"준비","href":"Preparations","type":2,"isActive":false},{"label":"목록 파티션","href":"List-Partitions","type":2,"isActive":false},{"label":"파티션 만들기","href":"Create-Partitions","type":2,"isActive":false},{"label":"특정 파티션 확인","href":"Check-for-a-Specific-Partition","type":2,"isActive":false},{"label":"파티션 로드 및 해제","href":"Load--Release-Partitions","type":2,"isActive":false},{"label":"파티션 삭제","href":"Drop-Partitions","type":2,"isActive":false},{"label":"FAQ","href":"FAQ","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n)\n\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .build();\n\nclient.createCollection(quickSetupReq);\n","const address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n}); \n","# 3. List partitions\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\"]\n","import io.milvus.v2.service.partition.request.ListPartitionsReq;\n\n// 3. List all partitions in the collection\nListPartitionsReq listPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nList partitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\"_default\"]\n","// 3. 
List partitions\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default' ]\n// \n","# 4. Create more partitions\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\", \"partitionB\"]\n","import io.milvus.v2.service.partition.request.CreatePartitionReq;\n\n// 4. Create more partitions\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\ncreatePartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\nlistPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\npartitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\n// \"_default\",\n// \"partitionA\",\n// \"partitionB\"\n// ]\n","// 4. Create more partitions\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default', 'partitionA', 'partitionB' ]\n// \n","# 5. Check whether a partition exists\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\nprint(res)\n\n# Output\n#\n# True\n\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionC\"\n)\nprint(res)\n\n# Output\n#\n# False\n","import io.milvus.v2.service.partition.request.HasPartitionReq;\n\n// 5. Check whether a partition exists\nHasPartitionReq hasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nboolean exists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// true\n\nhasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionC\")\n .build();\n\nexists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// false\n","// 5. 
Check whether a partition exists\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// true\n// \n\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionC\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// false\n// \n","# Release the collection\nclient.release_collection(collection_name=\"quick_setup\")\n\n# Check the load status\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionB\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.collection.request.ReleaseCollectionReq;\nimport io.milvus.v2.service.partition.request.LoadPartitionsReq;\nimport io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 6. Load a partition independantly\n// 6.1 Release the collection\nReleaseCollectionReq releaseCollectionReq = ReleaseCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nclient.releaseCollection(releaseCollectionReq);\n\n// 6.2 Load partitionA\nLoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\nThread.sleep(3000);\n\n// 6.3 Check the load status of the collection and its partitions\nGetLoadStateReq getLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 6. 
Load a partition indenpendantly\nawait client.releaseCollection({\n collection_name: \"quick_setup\"\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n\nawait client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nawait sleep(3000)\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n//\n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\", \"partitionB\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"],\n load_fields=[\"id\", \"vector\"],\n 
skip_load_dynamic_field=True\n)\n","# 7. Release a partition\nclient.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 7. Release a partition\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 7. Release a partition\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n","client.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"_default\", \"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","# 8. Drop a partition\nclient.drop_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\"]\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"_default\", \"partitionA\", \"partitionB\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"_default\", \"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// {\n// status: {\n// error_code: 'Success',\n// reason: '',\n// code: 0,\n// retriable: false,\n// detail: ''\n// },\n// state: 'LoadStateNotLoad'\n// }\n// \n"],"headingContent":"Manage Partitions","anchorList":[{"label":"파티션 관리","href":"Manage-Partitions","type":1,"isActive":false},{"label":"개요","href":"Overview","type":2,"isActive":false},{"label":"준비","href":"Preparations","type":2,"isActive":false},{"label":"목록 파티션","href":"List-Partitions","type":2,"isActive":false},{"label":"파티션 만들기","href":"Create-Partitions","type":2,"isActive":false},{"label":"특정 파티션 확인","href":"Check-for-a-Specific-Partition","type":2,"isActive":false},{"label":"파티션 로드 및 해제","href":"Load--Release-Partitions","type":2,"isActive":false},{"label":"파티션 삭제","href":"Drop-Partitions","type":2,"isActive":false},{"label":"FAQ","href":"FAQ","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/userGuide/manage-partitions.md b/localization/v2.4.x/site/ko/userGuide/manage-partitions.md index 819fbe270..04139f7d6 100644 --- a/localization/v2.4.x/site/ko/userGuide/manage-partitions.md +++ 
b/localization/v2.4.x/site/ko/userGuide/manage-partitions.md @@ -1,7 +1,6 @@ --- id: manage-partitions.md title: 파티션 관리 -summary: '' ---

    Manage Partitions

    To load multiple partitions at a time, do as follows:

    + Python Java Node.js
    client.load_partitions(
         collection_name="quick_setup",
         partition_names=["partitionA", "partitionB"]
    )
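
    To verify, you can then check the load state; a sketch using get_load_state as it appears elsewhere in this guide:

    res = client.get_load_state(collection_name="quick_setup")
    print(res)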
    @@ -735,8 +734,18 @@ res = await client.// LoadStateLoaded
     // 
     
    +

    To load specified fields in one or more partitions, do as follows:

    +
    client.load_partitions(
    +    collection_name="quick_setup",
    +    partition_names=["partitionA"],
    +    load_fields=["id", "vector"],
    +    skip_load_dynamic_field=True
    +)
    +
    +

    Only the fields listed in load_fields are available as filtering conditions and output fields in searches and queries. Always include the primary key in the list. Field names excluded from loading cannot be used for filtering or output.

    +

    You can use skip_load_dynamic_field=True to skip loading the dynamic field. Milvus treats the dynamic field as a single field, so all keys in the dynamic field are included or excluded together.
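
    As a sketch of the effect, after the partial load above only the loaded fields remain usable (the query vector here is a placeholder):

    res = client.search(
        collection_name="quick_setup",
        data=[[0.1, 0.2, 0.3, 0.4, 0.5]],  # placeholder query vector
        limit=3,
        output_fields=["id"]  # valid because "id" is in load_fields
    )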

    Release Partitions

    -

    To release all partitions of a collection, just call release_collection(). To release specific partitions of a collection, use release_partitions().

    +

    To release all partitions of a collection, simply call release_collection(). To release specific partitions of a collection, use release_partitions().

    To release all partitions of a collection, call releaseCollection(). To release specific partitions of a collection, call releasePartitions().
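
    In Python, for example, the two calls look like this (a sketch using the quick_setup collection from this guide):

    # Release a single partition
    client.release_partitions(
        collection_name="quick_setup",
        partition_names=["partitionA"]
    )

    # Release the collection, including all of its partitions
    client.release_collection(collection_name="quick_setup")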

    @@ -930,7 +939,7 @@ res = await client.rootCoord.maxPartitionNum
    can be configured to adjust the maximum number of partitions. For details, refer to System Configuration.

    +

    By default, Milvus allows creating up to 1,024 partitions. You can adjust the maximum number of partitions by configuring rootCoord.maxPartitionNum. For details, refer to System Configuration.
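
    In a self-hosted deployment this setting lives in milvus.yaml; a sketch, with 4096 as an illustrative value only:

    rootCoord:
      maxPartitionNum: 4096  # raise the default limit of 1,024 partitions per collection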

  • How do I differentiate between a partition and a partition key?

    A partition is a physical storage unit, whereas a partition key is a logical concept that automatically assigns data to specific partitions based on a designated column.

    For example, if a collection in Milvus has its partition key defined as the color field, the system automatically assigns data to partitions based on the hash value of each entity's color field. This automated process relieves users of the need to manually specify the partition when inserting or searching data.
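
    A minimal sketch of such a setup; the collection name below is illustrative, not from this guide:

    from pymilvus import MilvusClient, DataType

    schema = MilvusClient.create_schema(auto_id=False, enable_dynamic_field=True)
    schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
    schema.add_field(field_name="vector", datatype=DataType.FLOAT_VECTOR, dim=5)
    # Entities are hashed into partitions by the value of this field
    schema.add_field(field_name="color", datatype=DataType.VARCHAR, max_length=64, is_partition_key=True)

    client.create_collection(collection_name="partition_key_demo", schema=schema)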

    diff --git a/localization/v2.4.x/site/ko/userGuide/search-query-get/single-vector-search.json b/localization/v2.4.x/site/ko/userGuide/search-query-get/single-vector-search.json index 18e9cc833..c7a0372b0 100644 --- a/localization/v2.4.x/site/ko/userGuide/search-query-get/single-vector-search.json +++ b/localization/v2.4.x/site/ko/userGuide/search-query-get/single-vector-search.json @@ -1 +1 @@ -{"codeList":["# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=CLUSTER_ENDPOINT,\n token=TOKEN \n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n metric_type=\"IP\"\n)\n\n# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"color_tag\": f\"{current_color}_{str(random.randint(1000, 9999))}\"\n })\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n\n# 6.1 Create partitions \nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"red\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"blue\"\n)\n\n# 6.1 Insert data into partitions\nred_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"red\", \"color_tag\": f\"red_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\nblue_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"blue\", \"color_tag\": f\"blue_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=red_data,\n partition_name=\"red\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=blue_data,\n partition_name=\"blue\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Random;\n\nimport com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp; \n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig); \n\n// 2. 
Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .metricType(\"IP\")\n .build();\n\nclient.createCollection(quickSetupReq);\n\nGetLoadStateReq loadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(loadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\n// 3. Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n\n// 6.1. Create a partition\nCreatePartitionReq partitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"red\")\n .build();\n\nclient.createPartition(partitionReq);\n\npartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"blue\")\n .build();\n\nclient.createPartition(partitionReq);\n\n// 6.2 Insert data into the partition\ndata = new ArrayList<>();\n\nfor (int i=1000; i<1500; i++) {\n Random rand = new Random();\n String current_color = \"red\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n} \n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"red\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n\ndata = new ArrayList<>();\n\nfor (int i=1500; i<2000; i++) {\n Random rand = new Random();\n String current_color = \"blue\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"blue\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. 
Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n metric_type: \"IP\"\n}); \n\n// 3. Insert randomly generated vectors\nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor (let i = 0; i < 1000; i++) {\n current_color = colors[Math.floor(Math.random() * colors.length)]\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n color_tag: `${current_color}_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nvar res = await client.insert({\n collection_name: \"quick_setup\",\n data: data\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"red\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"blue\"\n})\n\n// 6.1 Insert data into partitions\nvar red_data = []\nvar blue_data = []\n\nfor (let i = 1000; i < 1500; i++) {\n red_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"red\",\n color_tag: `red_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nfor (let i = 1500; i < 2000; i++) {\n blue_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"blue\",\n color_tag: `blue_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: red_data,\n partition_name: \"red\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: blue_data,\n partition_name: \"blue\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n","# Single vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n # Replace with your query vector\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\n# Convert the output to a formatted JSON string\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 4. Single vector search\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(3) // The number of results to return\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 4. 
Single vector search\nvar query_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 3, // The number of results to return\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {}\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {}\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {}\n },\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {}\n },\n {\n \"id\": 2,\n \"distance\": 0.5928734540939331,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [[\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\n \"vector\": [\n 0.87928146,\n 0.05324632,\n 0.6312755,\n 0.28005534,\n 0.9542448\n ],\n \"id\": 455\n }\n }\n]]}\n","[\n { score: 1.7463608980178833, id: '854' },\n { score: 1.744946002960205, id: '425' },\n { score: 1.7258622646331787, id: '718' }\n]\n","# Bulk-vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104],\n [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345]\n ], # Replace with your query vectors\n limit=2, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 5. Batch vector search\nquery_vectors = Arrays.asList(\n Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f),\n Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f)\n);\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(2)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 5. 
Batch vector search\nvar query_vectors = [\n [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104]\n]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: query_vectors,\n limit: 2,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 1,\n \"distance\": 1.3017789125442505,\n \"entity\": {}\n },\n {\n \"id\": 7,\n \"distance\": 1.2419954538345337,\n \"entity\": {}\n }\n ], # Result set 1\n [\n {\n \"id\": 3,\n \"distance\": 2.3358664512634277,\n \"entity\": {}\n },\n {\n \"id\": 8,\n \"distance\": 0.5642921924591064,\n \"entity\": {}\n }\n ] # Result set 2\n]\n","// Two sets of vectors are returned as expected\n\n{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n }\n ],\n [\n {\n \"score\": 1.8654699,\n \"fields\": {\n \"vector\": [\n 0.4671427,\n 0.8378432,\n 0.98844475,\n 0.82763994,\n 0.9729997\n ],\n \"id\": 638\n }\n },\n {\n \"score\": 1.8581753,\n \"fields\": {\n \"vector\": [\n 0.735541,\n 0.60140246,\n 0.86730254,\n 0.93152493,\n 0.98603314\n ],\n \"id\": 855\n }\n }\n ]\n]}\n","[\n [\n { score: 2.3590476512908936, id: '854' },\n { score: 2.2896690368652344, id: '59' }\n [\n { score: 2.664059638977051, id: '59' },\n { score: 2.59483003616333, id: '854' }\n ]\n]\n","# 6.2 Search within a partition\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"red\"]\n)\n\nprint(res)\n","// 6.3 Search within partitions\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"red\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 6.2 Search within partitions\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"red\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 16,\n \"distance\": 0.9200337529182434,\n \"entity\": {}\n },\n {\n \"id\": 14,\n \"distance\": 0.4505271911621094,\n \"entity\": {}\n },\n {\n \"id\": 15,\n \"distance\": 0.19924677908420563,\n \"entity\": {}\n },\n {\n \"id\": 17,\n \"distance\": 0.0075093843042850494,\n \"entity\": {}\n },\n {\n \"id\": 13,\n \"distance\": -0.14609718322753906,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1677284,\n \"fields\": {\n \"vector\": [\n 0.9986977,\n 0.17964739,\n 0.49086612,\n 0.23155272,\n 0.98438674\n ],\n \"id\": 1435\n }\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\n \"vector\": [\n 0.6952647,\n 0.13417172,\n 0.91045254,\n 0.119336545,\n 0.9338931\n ],\n \"id\": 1291\n }\n },\n {\n \"score\": 
1.0969629,\n \"fields\": {\n \"vector\": [\n 0.3363194,\n 0.028906643,\n 0.6675426,\n 0.030419827,\n 0.9735209\n ],\n \"id\": 1168\n }\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\n \"vector\": [\n 0.9980543,\n 0.36063594,\n 0.66427994,\n 0.17359233,\n 0.94954175\n ],\n \"id\": 1164\n }\n },\n {\n \"score\": 1.0584627,\n \"fields\": {\n \"vector\": [\n 0.7187005,\n 0.12674773,\n 0.987718,\n 0.3110777,\n 0.86093885\n ],\n \"id\": 1085\n }\n }\n ],\n [\n {\n \"score\": 1.8030131,\n \"fields\": {\n \"vector\": [\n 0.59726167,\n 0.7054632,\n 0.9573117,\n 0.94529945,\n 0.8664103\n ],\n \"id\": 1203\n }\n },\n {\n \"score\": 1.7728865,\n \"fields\": {\n \"vector\": [\n 0.6672442,\n 0.60448086,\n 0.9325822,\n 0.80272985,\n 0.8861626\n ],\n \"id\": 1448\n }\n },\n {\n \"score\": 1.7536311,\n \"fields\": {\n \"vector\": [\n 0.59663296,\n 0.77831805,\n 0.8578314,\n 0.88818026,\n 0.9030075\n ],\n \"id\": 1010\n }\n },\n {\n \"score\": 1.7520742,\n \"fields\": {\n \"vector\": [\n 0.854198,\n 0.72294194,\n 0.9245805,\n 0.86126596,\n 0.7969224\n ],\n \"id\": 1219\n }\n },\n {\n \"score\": 1.7452049,\n \"fields\": {\n \"vector\": [\n 0.96419,\n 0.943535,\n 0.87611496,\n 0.8268136,\n 0.79786557\n ],\n \"id\": 1149\n }\n }\n ]\n]}\n","[\n { score: 3.0258803367614746, id: '1201' },\n { score: 3.004319190979004, id: '1458' },\n { score: 2.880324363708496, id: '1187' },\n { score: 2.8246407508850098, id: '1347' },\n { score: 2.797295093536377, id: '1406' }\n]\n","res = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"blue\"]\n)\n\nprint(res)\n","searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"blue\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","res = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"blue\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 20,\n \"distance\": 2.363696813583374,\n \"entity\": {}\n },\n {\n \"id\": 26,\n \"distance\": 1.0665391683578491,\n \"entity\": {}\n },\n {\n \"id\": 23,\n \"distance\": 1.066049575805664,\n \"entity\": {}\n },\n {\n \"id\": 29,\n \"distance\": 0.8353596925735474,\n \"entity\": {}\n },\n {\n \"id\": 28,\n \"distance\": 0.7484277486801147,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1628494,\n \"fields\": {\n \"vector\": [\n 0.7442872,\n 0.046407282,\n 0.71031404,\n 0.3544345,\n 0.9819991\n ],\n \"id\": 1992\n }\n },\n {\n \"score\": 1.1470042,\n \"fields\": {\n \"vector\": [\n 0.5505825,\n 0.04367262,\n 0.9985836,\n 0.18922359,\n 0.93255126\n ],\n \"id\": 1977\n }\n },\n {\n \"score\": 1.1450152,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.1439825,\n \"fields\": {\n \"vector\": [\n 0.9253267,\n 0.15890503,\n 0.7999555,\n 0.19126713,\n 0.898583\n ],\n \"id\": 1552\n }\n },\n {\n \"score\": 1.1029172,\n \"fields\": {\n \"vector\": [\n 0.95661926,\n 0.18777144,\n 0.38115507,\n 0.14323527,\n 0.93137646\n ],\n \"id\": 1823\n }\n }\n ],\n [\n {\n \"score\": 1.8005109,\n \"fields\": {\n \"vector\": [\n 0.5953582,\n 0.7794224,\n 0.9388869,\n 0.79825854,\n 0.9197286\n ],\n \"id\": 1888\n }\n },\n {\n \"score\": 1.7714822,\n \"fields\": {\n \"vector\": [\n 0.56805456,\n 
0.89422905,\n 0.88187534,\n 0.914824,\n 0.8944365\n ],\n \"id\": 1648\n }\n },\n {\n \"score\": 1.7561421,\n \"fields\": {\n \"vector\": [\n 0.83421993,\n 0.39865613,\n 0.92319834,\n 0.42695504,\n 0.96633124\n ],\n \"id\": 1688\n }\n },\n {\n \"score\": 1.7553532,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.7543385,\n \"fields\": {\n \"vector\": [\n 0.16542226,\n 0.38248396,\n 0.9888778,\n 0.80913955,\n 0.9501492\n ],\n \"id\": 1544\n }\n }\n ]\n]}\n","[\n { score: 2.8421106338500977, id: '1745' },\n { score: 2.838560104370117, id: '1782' },\n { score: 2.8134000301361084, id: '1511' },\n { score: 2.718268871307373, id: '1679' },\n { score: 2.7014894485473633, id: '1597' }\n]\n","# Search with output fields\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"] # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 7. Search with output fields\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 7. Search with output fields\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n output_fields: [\"color\"],\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {\n \"color\": \"pink_8682\"\n }\n },\n {\n \"id\": 16,\n \"distance\": 1.0159327983856201,\n \"entity\": {\n \"color\": \"yellow_1496\"\n }\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {}\n }\n ]\n]}\n","\n[\n { score: 3.036271572113037, id: '59', color: 'orange' },\n { score: 3.0267879962921143, id: '1745', color: 'blue' },\n { score: 3.0069446563720703, id: '854', color: 'black' },\n { score: 2.984386682510376, id: '718', color: 'black' },\n { score: 2.916019916534424, id: '425', color: 'purple' }\n]\n","# Search with filter\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. 
number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"red%\"'\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color_tag like \\\"red%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"red%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n },\n {\n \"id\": 6,\n \"distance\": -0.4113418459892273,\n \"entity\": {\n \"color\": \"red_9392\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1677284,\n \"fields\": {\"color_tag\": \"red_9030\"}\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\"color_tag\": \"red_3744\"}\n },\n {\n \"score\": 1.0969629,\n \"fields\": {\"color_tag\": \"red_4168\"}\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\"color_tag\": \"red_9678\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'red_8904' },\n { score: 2.491129159927368, id: '425', color_tag: 'purple_8212' },\n { score: 2.4889798164367676, id: '1458', color_tag: 'red_6891' },\n { score: 2.42964243888855, id: '724', color_tag: 'black_9885' },\n { score: 2.4004223346710205, id: '854', color_tag: 'black_5990' }\n]\n","# Infix match on color field\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"%ll%\"' # Filter on color field, infix match on \"ll\"\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color like \\\"%ll%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. 
Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"%ll%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {\n \"color\": \"yellow_4222\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"yellow_4222\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'yellow_4222' }\n]\n","# Conduct a range search\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\n \"radius\": 0.8, # Radius of the search circle\n \"range_filter\": 1.0 # Range filter to filter out vectors that are not within the search circle\n }\n}\n\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=3, # Max. number of search results to return\n search_params=search_params, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 9. Range search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .searchParams(Map.of(\"radius\", 0.1, \"range\", 1.0))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 9. 
Range search\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n params: {\n radius: 0.1,\n range: 1.0\n },\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\"color_tag\": \"green_2052\"}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\"color_tag\": \"purple_3709\"}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {\"color_tag\": \"black_1646\"}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {\"color_tag\": \"green_4853\"}\n }\n ]\n]}\n","[\n { score: 2.3387961387634277, id: '718', color_tag: 'black_7154' },\n { score: 2.3352415561676025, id: '1745', color_tag: 'blue_8741' },\n { score: 2.290485382080078, id: '1408', color_tag: 'red_2324' },\n { score: 2.285870313644409, id: '854', color_tag: 'black_5990' },\n { score: 2.2593345642089844, id: '1309', color_tag: 'red_8458' }\n]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Group search results\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=[[0.14529211512077012, 0.9147257273453546, 0.7965055218724449, 0.7009258593102812, 0.5605206522382088]], # Query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=10, # Max. number of search results to return\n group_by_field=\"doc_id\", # Group results by document ID\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\n\nprint(doc_ids)\n","[5, 10, 1, 7, 9, 6, 3, 4, 8, 2]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Search without `group_by_field`\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=query_passage_vector, # Replace with your query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=10, # Max. 
number of search results to return\n # group_by_field=\"doc_id\", # Group results by document ID\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\n\nprint(doc_ids)\n","[1, 10, 3, 10, 1, 9, 4, 4, 8, 6]\n","# In normal cases, you do not need to set search parameters manually\n# Except for range searches.\nsearch_parameters = {\n 'metric_type': 'L2',\n 'params': {\n 'nprobe': 10,\n 'level': 1,\n 'radius': 1.0\n 'range_filter': 0.8\n }\n}\n"],"headingContent":"","anchorList":[{"label":"단일 벡터 검색","href":"Single-Vector-Search","type":1,"isActive":false},{"label":"개요","href":"Overview","type":2,"isActive":false},{"label":"준비 사항","href":"Preparations","type":2,"isActive":false},{"label":"기본 검색","href":"Basic-search","type":2,"isActive":false},{"label":"필터 검색","href":"Filtered-search","type":2,"isActive":false},{"label":"범위 검색","href":"Range-search","type":2,"isActive":false},{"label":"그룹 검색","href":"Grouping-search","type":2,"isActive":false},{"label":"검색 매개변수","href":"Search-parameters","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=CLUSTER_ENDPOINT,\n token=TOKEN \n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n metric_type=\"IP\"\n)\n\n# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"color_tag\": f\"{current_color}_{str(random.randint(1000, 9999))}\"\n })\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n\n# 6.1 Create partitions \nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"red\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"blue\"\n)\n\n# 6.1 Insert data into partitions\nred_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"red\", \"color_tag\": f\"red_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\nblue_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"blue\", \"color_tag\": f\"blue_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=red_data,\n partition_name=\"red\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=blue_data,\n partition_name=\"blue\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Random;\n\nimport com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport 
io.milvus.v2.service.collection.request.CreateCollectionReq;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp; \n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig); \n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .metricType(\"IP\")\n .build();\n\nclient.createCollection(quickSetupReq);\n\nGetLoadStateReq loadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(loadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\n// 3. Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n\n// 6.1. 
Create a partition\nCreatePartitionReq partitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"red\")\n .build();\n\nclient.createPartition(partitionReq);\n\npartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"blue\")\n .build();\n\nclient.createPartition(partitionReq);\n\n// 6.2 Insert data into the partition\ndata = new ArrayList<>();\n\nfor (int i=1000; i<1500; i++) {\n Random rand = new Random();\n String current_color = \"red\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n} \n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"red\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n\ndata = new ArrayList<>();\n\nfor (int i=1500; i<2000; i++) {\n Random rand = new Random();\n String current_color = \"blue\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"blue\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n metric_type: \"IP\"\n}); \n\n// 3. 
Insert randomly generated vectors\nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor (let i = 0; i < 1000; i++) {\n current_color = colors[Math.floor(Math.random() * colors.length)]\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n color_tag: `${current_color}_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nvar res = await client.insert({\n collection_name: \"quick_setup\",\n data: data\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"red\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"blue\"\n})\n\n// 6.1 Insert data into partitions\nvar red_data = []\nvar blue_data = []\n\nfor (let i = 1000; i < 1500; i++) {\n red_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"red\",\n color_tag: `red_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nfor (let i = 1500; i < 2000; i++) {\n blue_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"blue\",\n color_tag: `blue_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: red_data,\n partition_name: \"red\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: blue_data,\n partition_name: \"blue\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n","# Single vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n # Replace with your query vector\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\n# Convert the output to a formatted JSON string\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 4. Single vector search\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(3) // The number of results to return\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 4. 
Single vector search\nvar query_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 3, // The number of results to return\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {}\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {}\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {}\n },\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {}\n },\n {\n \"id\": 2,\n \"distance\": 0.5928734540939331,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [[\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\n \"vector\": [\n 0.87928146,\n 0.05324632,\n 0.6312755,\n 0.28005534,\n 0.9542448\n ],\n \"id\": 455\n }\n }\n]]}\n","[\n { score: 1.7463608980178833, id: '854' },\n { score: 1.744946002960205, id: '425' },\n { score: 1.7258622646331787, id: '718' }\n]\n","# Bulk-vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104],\n [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345]\n ], # Replace with your query vectors\n limit=2, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 5. Batch vector search\nquery_vectors = Arrays.asList(\n Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f),\n Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f)\n);\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(2)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 5. 
Batch vector search\nvar query_vectors = [\n [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104]\n]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: query_vectors,\n limit: 2,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 1,\n \"distance\": 1.3017789125442505,\n \"entity\": {}\n },\n {\n \"id\": 7,\n \"distance\": 1.2419954538345337,\n \"entity\": {}\n }\n ], # Result set 1\n [\n {\n \"id\": 3,\n \"distance\": 2.3358664512634277,\n \"entity\": {}\n },\n {\n \"id\": 8,\n \"distance\": 0.5642921924591064,\n \"entity\": {}\n }\n ] # Result set 2\n]\n","// Two sets of vectors are returned as expected\n\n{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n }\n ],\n [\n {\n \"score\": 1.8654699,\n \"fields\": {\n \"vector\": [\n 0.4671427,\n 0.8378432,\n 0.98844475,\n 0.82763994,\n 0.9729997\n ],\n \"id\": 638\n }\n },\n {\n \"score\": 1.8581753,\n \"fields\": {\n \"vector\": [\n 0.735541,\n 0.60140246,\n 0.86730254,\n 0.93152493,\n 0.98603314\n ],\n \"id\": 855\n }\n }\n ]\n]}\n","[\n [\n { score: 2.3590476512908936, id: '854' },\n { score: 2.2896690368652344, id: '59' }\n [\n { score: 2.664059638977051, id: '59' },\n { score: 2.59483003616333, id: '854' }\n ]\n]\n","# 6.2 Search within a partition\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"red\"]\n)\n\nprint(res)\n","// 6.3 Search within partitions\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"red\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 6.2 Search within partitions\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"red\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 16,\n \"distance\": 0.9200337529182434,\n \"entity\": {}\n },\n {\n \"id\": 14,\n \"distance\": 0.4505271911621094,\n \"entity\": {}\n },\n {\n \"id\": 15,\n \"distance\": 0.19924677908420563,\n \"entity\": {}\n },\n {\n \"id\": 17,\n \"distance\": 0.0075093843042850494,\n \"entity\": {}\n },\n {\n \"id\": 13,\n \"distance\": -0.14609718322753906,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1677284,\n \"fields\": {\n \"vector\": [\n 0.9986977,\n 0.17964739,\n 0.49086612,\n 0.23155272,\n 0.98438674\n ],\n \"id\": 1435\n }\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\n \"vector\": [\n 0.6952647,\n 0.13417172,\n 0.91045254,\n 0.119336545,\n 0.9338931\n ],\n \"id\": 1291\n }\n },\n {\n \"score\": 
1.0969629,\n \"fields\": {\n \"vector\": [\n 0.3363194,\n 0.028906643,\n 0.6675426,\n 0.030419827,\n 0.9735209\n ],\n \"id\": 1168\n }\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\n \"vector\": [\n 0.9980543,\n 0.36063594,\n 0.66427994,\n 0.17359233,\n 0.94954175\n ],\n \"id\": 1164\n }\n },\n {\n \"score\": 1.0584627,\n \"fields\": {\n \"vector\": [\n 0.7187005,\n 0.12674773,\n 0.987718,\n 0.3110777,\n 0.86093885\n ],\n \"id\": 1085\n }\n }\n ],\n [\n {\n \"score\": 1.8030131,\n \"fields\": {\n \"vector\": [\n 0.59726167,\n 0.7054632,\n 0.9573117,\n 0.94529945,\n 0.8664103\n ],\n \"id\": 1203\n }\n },\n {\n \"score\": 1.7728865,\n \"fields\": {\n \"vector\": [\n 0.6672442,\n 0.60448086,\n 0.9325822,\n 0.80272985,\n 0.8861626\n ],\n \"id\": 1448\n }\n },\n {\n \"score\": 1.7536311,\n \"fields\": {\n \"vector\": [\n 0.59663296,\n 0.77831805,\n 0.8578314,\n 0.88818026,\n 0.9030075\n ],\n \"id\": 1010\n }\n },\n {\n \"score\": 1.7520742,\n \"fields\": {\n \"vector\": [\n 0.854198,\n 0.72294194,\n 0.9245805,\n 0.86126596,\n 0.7969224\n ],\n \"id\": 1219\n }\n },\n {\n \"score\": 1.7452049,\n \"fields\": {\n \"vector\": [\n 0.96419,\n 0.943535,\n 0.87611496,\n 0.8268136,\n 0.79786557\n ],\n \"id\": 1149\n }\n }\n ]\n]}\n","[\n { score: 3.0258803367614746, id: '1201' },\n { score: 3.004319190979004, id: '1458' },\n { score: 2.880324363708496, id: '1187' },\n { score: 2.8246407508850098, id: '1347' },\n { score: 2.797295093536377, id: '1406' }\n]\n","res = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"blue\"]\n)\n\nprint(res)\n","searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"blue\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","res = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"blue\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 20,\n \"distance\": 2.363696813583374,\n \"entity\": {}\n },\n {\n \"id\": 26,\n \"distance\": 1.0665391683578491,\n \"entity\": {}\n },\n {\n \"id\": 23,\n \"distance\": 1.066049575805664,\n \"entity\": {}\n },\n {\n \"id\": 29,\n \"distance\": 0.8353596925735474,\n \"entity\": {}\n },\n {\n \"id\": 28,\n \"distance\": 0.7484277486801147,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1628494,\n \"fields\": {\n \"vector\": [\n 0.7442872,\n 0.046407282,\n 0.71031404,\n 0.3544345,\n 0.9819991\n ],\n \"id\": 1992\n }\n },\n {\n \"score\": 1.1470042,\n \"fields\": {\n \"vector\": [\n 0.5505825,\n 0.04367262,\n 0.9985836,\n 0.18922359,\n 0.93255126\n ],\n \"id\": 1977\n }\n },\n {\n \"score\": 1.1450152,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.1439825,\n \"fields\": {\n \"vector\": [\n 0.9253267,\n 0.15890503,\n 0.7999555,\n 0.19126713,\n 0.898583\n ],\n \"id\": 1552\n }\n },\n {\n \"score\": 1.1029172,\n \"fields\": {\n \"vector\": [\n 0.95661926,\n 0.18777144,\n 0.38115507,\n 0.14323527,\n 0.93137646\n ],\n \"id\": 1823\n }\n }\n ],\n [\n {\n \"score\": 1.8005109,\n \"fields\": {\n \"vector\": [\n 0.5953582,\n 0.7794224,\n 0.9388869,\n 0.79825854,\n 0.9197286\n ],\n \"id\": 1888\n }\n },\n {\n \"score\": 1.7714822,\n \"fields\": {\n \"vector\": [\n 0.56805456,\n 
0.89422905,\n 0.88187534,\n 0.914824,\n 0.8944365\n ],\n \"id\": 1648\n }\n },\n {\n \"score\": 1.7561421,\n \"fields\": {\n \"vector\": [\n 0.83421993,\n 0.39865613,\n 0.92319834,\n 0.42695504,\n 0.96633124\n ],\n \"id\": 1688\n }\n },\n {\n \"score\": 1.7553532,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.7543385,\n \"fields\": {\n \"vector\": [\n 0.16542226,\n 0.38248396,\n 0.9888778,\n 0.80913955,\n 0.9501492\n ],\n \"id\": 1544\n }\n }\n ]\n]}\n","[\n { score: 2.8421106338500977, id: '1745' },\n { score: 2.838560104370117, id: '1782' },\n { score: 2.8134000301361084, id: '1511' },\n { score: 2.718268871307373, id: '1679' },\n { score: 2.7014894485473633, id: '1597' }\n]\n","# Search with output fields\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"] # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 7. Search with output fields\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 7. Search with output fields\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n output_fields: [\"color\"],\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {\n \"color\": \"pink_8682\"\n }\n },\n {\n \"id\": 16,\n \"distance\": 1.0159327983856201,\n \"entity\": {\n \"color\": \"yellow_1496\"\n }\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {}\n }\n ]\n]}\n","\n[\n { score: 3.036271572113037, id: '59', color: 'orange' },\n { score: 3.0267879962921143, id: '1745', color: 'blue' },\n { score: 3.0069446563720703, id: '854', color: 'black' },\n { score: 2.984386682510376, id: '718', color: 'black' },\n { score: 2.916019916534424, id: '425', color: 'purple' }\n]\n","# Search with filter\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. 
number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"red%\"'\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color_tag like \\\"red%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"red%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n },\n {\n \"id\": 6,\n \"distance\": -0.4113418459892273,\n \"entity\": {\n \"color\": \"red_9392\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1677284,\n \"fields\": {\"color_tag\": \"red_9030\"}\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\"color_tag\": \"red_3744\"}\n },\n {\n \"score\": 1.0969629,\n \"fields\": {\"color_tag\": \"red_4168\"}\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\"color_tag\": \"red_9678\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'red_8904' },\n { score: 2.491129159927368, id: '425', color_tag: 'purple_8212' },\n { score: 2.4889798164367676, id: '1458', color_tag: 'red_6891' },\n { score: 2.42964243888855, id: '724', color_tag: 'black_9885' },\n { score: 2.4004223346710205, id: '854', color_tag: 'black_5990' }\n]\n","# Infix match on color field\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"%ll%\"' # Filter on color field, infix match on \"ll\"\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color like \\\"%ll%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. 
Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"%ll%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {\n \"color\": \"yellow_4222\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"yellow_4222\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'yellow_4222' }\n]\n","# Conduct a range search\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\n \"radius\": 0.8, # Radius of the search circle\n \"range_filter\": 1.0 # Range filter to filter out vectors that are not within the search circle\n }\n}\n\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=3, # Max. number of search results to return\n search_params=search_params, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 9. Range search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .searchParams(Map.of(\"radius\", 0.1, \"range\", 1.0))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 9. 
Range search\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n params: {\n radius: 0.1,\n range: 1.0\n },\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\"color_tag\": \"green_2052\"}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\"color_tag\": \"purple_3709\"}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {\"color_tag\": \"black_1646\"}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {\"color_tag\": \"green_4853\"}\n }\n ]\n]}\n","[\n { score: 2.3387961387634277, id: '718', color_tag: 'black_7154' },\n { score: 2.3352415561676025, id: '1745', color_tag: 'blue_8741' },\n { score: 2.290485382080078, id: '1408', color_tag: 'red_2324' },\n { score: 2.285870313644409, id: '854', color_tag: 'black_5990' },\n { score: 2.2593345642089844, id: '1309', color_tag: 'red_8458' }\n]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Group search results\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=[[0.14529211512077012, 0.9147257273453546, 0.7965055218724449, 0.7009258593102812, 0.5605206522382088]], # Query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=5, # Max. number of groups to return\n group_by_field=\"doc_id\", # Group results by document ID\n group_size=2, # returned at most 2 passages per document, the default value is 1\n group_strict_size=True, # ensure every group contains exactly 3 passages\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\npassage_ids = [result['entity']['passage_id'] for result in res[0]]\n\nprint(doc_ids)\nprint(passage_ids)\n","[\"doc_11\", \"doc_11\", \"doc_7\", \"doc_7\", \"doc_3\", \"doc_3\", \"doc_2\", \"doc_2\", \"doc_8\", \"doc_8\"]\n[5, 10, 11, 10, 9, 6, 5, 4, 9, 2]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Search without `group_by_field`\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=query_passage_vector, # Replace with your query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=5, # Max. 
number of search results to return\n # group_by_field=\"doc_id\", # Group results by document ID\n # group_size=2, \n # group_strict_size=True,\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\npassage_ids = [result['entity']['passage_id'] for result in res[0]]\n\nprint(doc_ids)\nprint(passage_ids)\n","[\"doc_11\", \"doc_11\", \"doc_11\", \"doc_11\", \"doc_11\"]\n[1, 10, 3, 12, 9]\n","# In normal cases, you do not need to set search parameters manually\n# Except for range searches.\nsearch_parameters = {\n 'metric_type': 'L2',\n 'params': {\n 'nprobe': 10,\n 'level': 1,\n 'radius': 1.0\n 'range_filter': 0.8\n }\n}\n"],"headingContent":"Single-Vector Search","anchorList":[{"label":"단일 벡터 검색","href":"Single-Vector-Search","type":1,"isActive":false},{"label":"개요","href":"Overview","type":2,"isActive":false},{"label":"준비 사항","href":"Preparations","type":2,"isActive":false},{"label":"기본 검색","href":"Basic-search","type":2,"isActive":false},{"label":"필터 검색","href":"Filtered-search","type":2,"isActive":false},{"label":"범위 검색","href":"Range-search","type":2,"isActive":false},{"label":"그룹 검색","href":"Grouping-search","type":2,"isActive":false},{"label":"검색 매개변수","href":"Search-parameters","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/userGuide/search-query-get/single-vector-search.md b/localization/v2.4.x/site/ko/userGuide/search-query-get/single-vector-search.md index 412f2748e..9b169ecf5 100644 --- a/localization/v2.4.x/site/ko/userGuide/search-query-get/single-vector-search.md +++ b/localization/v2.4.x/site/ko/userGuide/search-query-get/single-vector-search.md @@ -46,7 +46,7 @@ title: 단일 벡터 검색
  • Basic search: Includes single-vector search, bulk-vector search, partition search, and search with specified output fields.

  • Filtered search: Applies filtering criteria based on scalar fields to refine search results.

  • Range search: Finds vectors within a specific distance range from the query vector.

  • - Grouping search: Groups search results by a specific field to ensure diversity in the results.

  • + Group search: Groups search results by a specific field to ensure diversity in the results.

  • Preparations

    In Milvus, grouping search by a specific field avoids redundancy of the same field item in the results, so you get a varied set of results for that field.

    - Consider a collection of documents, where each document is split into multiple passages. Each passage is represented by one vector embedding and belongs to one document. To find relevant documents instead of similar passages, you can include the group_by_field argument in the search() operation to group results by document ID. This helps return the most relevant and unique documents, rather than separate passages from the same document.

    + In Milvus, grouping search is designed to improve the comprehensiveness and accuracy of search results.

    + Consider a RAG scenario where piles of documents are split into passages, each represented by one vector embedding. Users want to find the most relevant passages to prompt an LLM accurately. The ordinary Milvus search function can meet this requirement, but it may yield highly skewed and biased results: most of the passages come from only a few documents, and the comprehensiveness of the search results is poor. This can seriously impair the accuracy of the LLM's output and negatively affect the LLM user's experience.

    + Grouping search solves this problem effectively. By passing a group-by field and a group size, Milvus users can bucket the search results into several groups and ensure that the number of entities from each group does not exceed the given group size. This feature significantly improves the comprehensiveness and fairness of the search results, noticeably improving the quality of LLM output.

    The following is example code that groups search results by field:

    # Connect to Milvus
     client = MilvusClient(uri='http://localhost:19530') # Milvus server address
    @@ -1732,21 +1733,26 @@ res = client.search(
         "metric_type": "L2",
         "params": {"nprobe": 10},
         }, # Search parameters
    -    limit=10, # Max. number of search results to return
    +    limit=5, # Max. number of groups to return
         group_by_field="doc_id", # Group results by document ID
    +    group_size=2, # return at most 2 passages per document; the default value is 1
    +    group_strict_size=True, # try to ensure every group contains exactly 2 passages
         output_fields=["doc_id", "passage_id"]
     )
     
     # Retrieve the values in the `doc_id` column
     doc_ids = [result['entity']['doc_id'] for result in res[0]]
    +passage_ids = [result['entity']['passage_id'] for result in res[0]]
     
     print(doc_ids)
    +print(passage_ids)
     

    The output is similar to the following:

    - [5, 10, 1, 7, 9, 6, 3, 4, 8, 2]

    + ["doc_11", "doc_11", "doc_7", "doc_7", "doc_3", "doc_3", "doc_2", "doc_2", "doc_8", "doc_8"]
    + [5, 10, 11, 10, 9, 6, 5, 4, 9, 2]
     
    - In the given output, you can see that the returned entities contain no duplicate doc_id values.

    - For comparison, let's comment out group_by_field and conduct a regular search:

    + In the given output, you can see that exactly two passages are retrieved for each document, and a total of five documents collectively make up the results.

    + For comparison, let's comment out the group-related parameters and conduct a regular search:

    # Connect to Milvus
     client = MilvusClient(uri='http://localhost:19530') # Milvus server address
     
    @@ -1761,27 +1767,33 @@ res = client.search(
         "metric_type": "L2",
         "params": {"nprobe": 10},
         }, # Search parameters
    -    limit=10, # Max. number of search results to return
    +    limit=5, # Max. number of search results to return
         # group_by_field="doc_id", # Group results by document ID
    +    # group_size=2, 
    +    # group_strict_size=True,
         output_fields=["doc_id", "passage_id"]
     )
     
     # Retrieve the values in the `doc_id` column
     doc_ids = [result['entity']['doc_id'] for result in res[0]]
    +passage_ids = [result['entity']['passage_id'] for result in res[0]]
     
     print(doc_ids)
    +print(passage_ids)
     

    The output is similar to the following:

    - [1, 10, 3, 10, 1, 9, 4, 4, 8, 6]

    + ["doc_11", "doc_11", "doc_11", "doc_11", "doc_11"]
    + [1, 10, 3, 12, 9]
     
    - In the given output, you can see that the returned entities contain duplicate doc_id values.

    + In the given output, you can see that 'doc_11' completely dominates the search results, overshadowing the high-quality passages from other documents, which can make for a poor prompt to the LLM.

    + One more thing to note: by default, grouping search returns results instantly once it has enough groups, which may leave the number of results in each group short of group_size. If you care about the result count per group, set group_strict_size=True as shown in the code above. This makes Milvus strive to obtain enough results for each group, at a slight cost to performance.
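
    To make the trade-off concrete, the following is a minimal sketch contrasting the two modes. It assumes the group_search collection and 5-dimensional query vector from the example above, and a pymilvus version whose MilvusClient.search() accepts the group_size and group_strict_size arguments:

    from pymilvus import MilvusClient

    client = MilvusClient(uri='http://localhost:19530')  # assumed local Milvus server
    query_vector = [0.14, 0.91, 0.79, 0.70, 0.56]        # assumed 5-dim query vector

    common = dict(
        collection_name="group_search",
        data=[query_vector],
        search_params={"metric_type": "L2", "params": {"nprobe": 10}},
        limit=5,                        # max. number of groups to return
        group_by_field="doc_id",
        group_size=2,
        output_fields=["doc_id", "passage_id"],
    )

    # Default mode: returns as soon as 5 groups are collected,
    # so some groups may hold fewer than 2 passages.
    res_fast = client.search(**common)

    # Strict mode: trades a little latency to fill each group
    # with exactly 2 passages where possible.
    res_strict = client.search(**common, group_strict_size=True)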

    Limitations

    • Indexing: This grouping feature works only for collections indexed with the HNSW, IVF_FLAT, or FLAT type. For more information, refer to In-memory Index.

    • Vector: Currently, grouping search does not support vector fields of the BINARY_VECTOR type. For more information on data types, refer to Supported data types.

    • Field: Currently, grouping search allows only a single column; you cannot specify multiple field names in the group_by_field config. Additionally, grouping search is incompatible with data types of JSON, FLOAT, DOUBLE, ARRAY, or vector fields.

    • Performance impact: Be aware that performance degrades as the number of query vectors increases. Using a cluster with 2 CPU cores and 8 GB of memory as an example, the execution time of grouping search increases proportionally with the number of input query vectors.

    • - Functionality: Currently, grouping search is not supported by range search, search iterators, or hybrid search.

    • + Functionality: Currently, grouping search is not supported by range search or search iterators.

    Search parameters
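
    A minimal sketch of the search-parameter dictionary this section covers, assuming an IVF-indexed collection with the L2 metric (for L2, radius sets the outer distance bound and range_filter the inner one, so hits satisfy range_filter <= distance < radius):

    # In normal cases, you do not need to set search parameters manually,
    # except for range searches.
    search_parameters = {
        'metric_type': 'L2',
        'params': {
            'nprobe': 10,         # number of IVF clusters to probe
            'level': 1,           # search precision level
            'radius': 1.0,        # outer bound of the search range
            'range_filter': 0.8   # inner bound of the search range
        }
    }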

    - Milvus provides search and query iterators for iterating results with a large volume of entities. Since Milvus limits TopK to 16384, users can use iterators to return large numbers or even whole collections of entities in a batch mode.

    + Milvus provides search and query iterators for iterating through a large volume of entities. Since Milvus limits TopK to 16384, users can use iterators to return large numbers or even whole collections of entities in a batch mode.

    Overview

    - Iterators are powerful tools that help you iterate through a large volume of data, or all the data within a collection, using primary key values and Boolean expressions. This can significantly improve the way you retrieve data. Unlike the traditional use of offset and limit parameters, which may become less efficient over time, iterators offer a more scalable solution.

    + Iterators are efficient tools for scanning a whole collection or iterating through a large volume of entities by specifying primary key values or a filter expression. Compared with a search or query call with offset and limit parameters, using iterators is more efficient and scalable.
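
    As a concrete illustration, the following is a minimal sketch of paging through results with a search iterator. It assumes a running local Milvus server, an existing quick_setup collection with a 5-dimensional vector field, and the pymilvus ORM Collection.search_iterator() API:

    from pymilvus import connections, Collection

    connections.connect(uri="http://localhost:19530")  # assumed local Milvus server
    collection = Collection("quick_setup")             # assumed existing collection
    collection.load()

    # Fetch up to 300 hits in pages of 10 instead of one oversized TopK call.
    iterator = collection.search_iterator(
        data=[[0.36, -0.60, 0.18, -0.26, 0.90]],  # assumed 5-dim query vector
        anns_field="vector",
        param={"metric_type": "IP", "params": {}},
        batch_size=10,
        limit=300,
        output_fields=["color_tag"],
    )

    results = []
    while True:
        page = iterator.next()
        if not page:           # an empty page marks the end of the stream
            iterator.close()
            break
        results.extend(page)

    print(len(results))

    Compared with raising limit on a single search call, this keeps each round-trip small while still walking past the 16384 TopK ceiling.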

    Benefits of using iterators

    • - Simplicity: Removes the need for complex offset and limit settings.

    • + Simplicity: No need for complex offset and limit settings.

    • Efficiency: Provides scalable data retrieval by fetching only the data in need.

    • Consistency: Ensures a consistent dataset size with Boolean filters.

    @@ -62,12 +62,12 @@ title: Using Iterators

    - In the following steps, we repurpose the code to connect to Milvus, quickly set up a collection, and insert over 10,000 randomly generated entities into the collection.

    + In the following preparation steps, we connect to Milvus and insert randomly generated entities into a collection.

    Step 1: Create a collection

    Use MilvusClient to connect to the Milvus server and create_collection() to create a collection.

    - Use MilvusClientV2 to connect to the Milvus server, and createCollection() to create a collection.

    + Use MilvusClientV2 to connect to the Milvus server and createCollection() to create a collection.
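
    For the Python client, a minimal sketch of this step (assuming a local Milvus server; the collection name, dimension, and metric mirror the quick-setup example earlier in this patch):

    from pymilvus import MilvusClient

    # 1. Connect to the Milvus server
    client = MilvusClient(uri="http://localhost:19530")

    # 2. Create a collection in quick-setup mode
    client.create_collection(
        collection_name="quick_setup",
        dimension=5,          # dimension of the vector field
        metric_type="IP"      # inner-product similarity
    )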

    @@ -264,8 +264,9 @@ iterator = collection.search_iterator( batch_size=10, param=search_params, output_fields=["color_tag"], - limit=3 + limit=300 ) +# search 300 entities totally with 10 entities per page results = [] diff --git a/localization/v2.4.x/site/ko/userGuide/tools/cli_commands.json b/localization/v2.4.x/site/ko/userGuide/tools/cli_commands.json index 8c199fc4d..c8324540b 100644 --- a/localization/v2.4.x/site/ko/userGuide/tools/cli_commands.json +++ b/localization/v2.4.x/site/ko/userGuide/tools/cli_commands.json @@ -1 +1 @@ -{"codeList":["clear\n","connect [-uri (text)] [-t (text)]\n","milvus_cli > connect -uri http://127.0.0.1:19530 \n","create database -db (text) \n","milvus_cli > create database -db testdb\n","use database -db (text) \n","milvus_cli > use database -db testdb\n","list databases \n","milvus_cli > list databases\n","delete database -db (text) \n","milvus_cli > delete database -db testdb\n","create user -u (text) -p (text)\n","milvus_cli > create user -u zilliz -p zilliz\n","create alias -c (text) -a (text) [-A] \n","milvus_cli > create alias -c car -a carAlias1\n","milvus_cli > create alias -c car2 -A -a carAlias1\n","create collection -c (text) -f (text) -p (text) [-a] [-d (text)]\n","## For array field: --schema-field support :::(:if Varchar)\n\nmilvus_cli > create collection -c car -f id:INT64:primary_field -f vector:FLOAT_VECTOR:128 -f color:INT64:color -f brand:ARRAY:64:VARCHAR:128 -p id -A -d 'car_collection'\n","create partition -c (text) -p (text) [-d (text)]\n","milvus_cli > create partition -c car -p new_partition -d test_add_partition\n","create index\n","milvus_cli > create index\n\nCollection name (car, car2): car2\n\nThe name of the field to create an index for (vector): vector\n\nIndex name: vectorIndex\n\n# Default is ''\nIndex type FLAT, IVF_FLAT, IVF_SQ8, IVF_PQ, RNSG, HNSW, ANNOY, AUTOINDEX, DISKANN, GPU_IVF_FLAT, GPU_IVF_PQ, SPARSE_INVERTED_INDEX, SPARSE_WAND, SCANN, STL_SORT, Trie, INVERTED, ) []: IVF_FLAT \n\n# Default is ''\nIndex metric type (L2, IP, HAMMING, TANIMOTO, COSINE, ) []: \n\nTimeout []:\n","delete user -u (text)\n","milvus_cli > delete user -u zilliz\n","delete alias -a (text) \n","delete collection -c (text) \n","milvus_cli > delete collection -c car\n","delete entities -c (text) -p (text) \n","milvus_cli > delete entities -c car\n\nThe expression to specify entities to be deleted, such as \"film_id in [ 0, 1 ]\": film_id in [ 0, 1 ]\n\nYou are trying to delete the entities of collection. This action cannot be undone!\n\nDo you want to continue? [y/N]: y\n","delete partition -c (text) -p (text)\n","milvus_cli > delete partition -c car -p new_partition\n","delete index -c (text) -in (text)\n","milvus_cli > delete index -c car -in indexName\n","show collection -c (text)\n","milvus_cli > show collection -c test_collection_insert\n","show partition -c (text) -p (text)\n","milvus_cli > show partition -c test_collection_insert -p _default\n","show index -c (text) -in (text)\n","milvus_cli > show index -c test_collection -in index_name\n","exit\n","help \n","import -c (text)[-p (text)] \n","milvus_cli > import -c car 'examples/import_csv/vectors.csv'\n\nReading csv file... 
[####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","milvus_cli > import -c car 'https://raw.githubusercontent.com/milvus-\nio/milvus_cli/main/examples/import_csv/vectors.csv'\n\nReading file from remote URL.\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","list users\n","list collections\n","list indexes -c (text)\n","list partitions -c (text)\n","load -c (text) [-p (text)]\n","query\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id in [ 428960801420883491, 428960801420883492,\n428960801420883493 ]\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: color, brand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id > 428960801420883491\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: id, color,\nbrand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","release -c (text) [-p (text)]\n","search\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file\nout headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. 
If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers):\n [[0.71, 0.76, 0.17, 0.13, 0.42, 0.07, 0.15, 0.67, 0.58, 0.02, 0.39, 0.47, 0.58, 0.88, 0.73, 0.31, 0.23, 0.57, 0.33, 0.2, 0.03, 0.43, 0.78, 0.49, 0.17, 0.56, 0.76, 0.54, 0.45, 0.46, 0.05, 0.1, 0.43, 0.63, 0.29, 0.44, 0.65, 0.01, 0.35, 0.46, 0.66, 0.7, 0.88, 0.07, 0.49, 0.92, 0.57, 0.5, 0.16, 0.77, 0.98, 0.1, 0.44, 0.88, 0.82, 0.16, 0.67, 0.63, 0.57, 0.55, 0.95, 0.13, 0.64, 0.43, 0.71, 0.81, 0.43, 0.65, 0.76, 0.7, 0.05, 0.24, 0.03, 0.9, 0.46, 0.28, 0.92, 0.25, 0.97, 0.79, 0.73, 0.97, 0.49, 0.28, 0.64, 0.19, 0.23, 0.51, 0.09, 0.1, 0.53, 0.03, 0.23, 0.94, 0.87, 0.14, 0.42, 0.82, 0.91, 0.11, 0.91, 0.37, 0.26, 0.6, 0.89, 0.6, 0.32, 0.11, 0.98, 0.67, 0.12, 0.66, 0.47, 0.02, 0.15, 0.6, 0.64, 0.57, 0.14, 0.81, 0.75, 0.11, 0.49, 0.78, 0.16, 0.63, 0.57, 0.18]]\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, car2): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []:\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []:\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. 
If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","list connections \n","show index_progress -c (text) [-i (text)]\n","show loading_progress -c (text) [-p (text)]\n","version\n","$ milvus_cli --version\nMilvus_CLI v0.4.0\n"],"headingContent":"","anchorList":[{"label":"Milvus_CLI 명령 참조","href":"MilvusCLI-Command-Reference","type":1,"isActive":false},{"label":"clear","href":"clear","type":2,"isActive":false},{"label":"connect","href":"connect","type":2,"isActive":false},{"label":"데이터베이스 생성","href":"create-Database","type":2,"isActive":false},{"label":"데이터베이스 사용","href":"use-Database","type":2,"isActive":false},{"label":"데이터베이스 나열","href":"List-Databases","type":2,"isActive":false},{"label":"데이터베이스 삭제","href":"Delete-Database","type":2,"isActive":false},{"label":"사용자 만들기","href":"create-user","type":2,"isActive":false},{"label":"별칭 만들기","href":"create-alias","type":2,"isActive":false},{"label":"컬렉션 만들기","href":"create-collection","type":2,"isActive":false},{"label":"create partition","href":"create-partition","type":2,"isActive":false},{"label":"create index","href":"create-index","type":2,"isActive":false},{"label":"delete user","href":"delete-user","type":2,"isActive":false},{"label":"삭제 별칭","href":"delete-alias","type":2,"isActive":false},{"label":"컬렉션 삭제","href":"delete-collection","type":2,"isActive":false},{"label":"delete entities","href":"delete-entities","type":2,"isActive":false},{"label":"삭제 파티션","href":"delete-partition","type":2,"isActive":false},{"label":"삭제 인덱스","href":"delete-index","type":2,"isActive":false},{"label":"show collection","href":"show-collection","type":2,"isActive":false},{"label":"show partition","href":"show-partition","type":2,"isActive":false},{"label":"show index","href":"show-index","type":2,"isActive":false},{"label":"exit","href":"exit","type":2,"isActive":false},{"label":"help","href":"help","type":2,"isActive":false},{"label":"가져오기","href":"import","type":2,"isActive":false},{"label":"사용자 목록","href":"list-users","type":2,"isActive":false},{"label":"컬렉션 목록","href":"list-collections","type":2,"isActive":false},{"label":"인덱스 목록","href":"list-indexes","type":2,"isActive":false},{"label":"파티션 목록","href":"list-partitions","type":2,"isActive":false},{"label":"load","href":"load","type":2,"isActive":false},{"label":"쿼리","href":"query","type":2,"isActive":false},{"label":"release","href":"release","type":2,"isActive":false},{"label":"검색","href":"search","type":2,"isActive":false},{"label":"목록 연결","href":"List-connection","type":2,"isActive":false},{"label":"show index_progress","href":"show-indexprogress","type":2,"isActive":false},{"label":"show loading_progress","href":"show-loadingprogress","type":2,"isActive":false},{"label":"version","href":"version","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["clear\n","connect [-uri (text)] [-t (text)]\n","milvus_cli > connect -uri http://127.0.0.1:19530 \n","create database -db (text) \n","milvus_cli > create database -db testdb\n","use database -db (text) \n","milvus_cli > use database -db testdb\n","list databases \n","milvus_cli > list databases\n","delete database -db (text) \n","milvus_cli > delete database -db testdb\n","create user -u (text) -p (text)\n","milvus_cli > create user -u zilliz -p zilliz\n","create alias -c (text) -a (text) [-A] \n","milvus_cli > create alias -c car -a carAlias1\n","milvus_cli > create alias -c car2 -A -a carAlias1\n","create collection -c (text) -f (text) -p (text) [-a] [-d (text)]\n","## For array field: 
--schema-field support :::(:if Varchar)\n\nmilvus_cli > create collection -c car -f id:INT64:primary_field -f vector:FLOAT_VECTOR:128 -f color:INT64:color -f brand:ARRAY:64:VARCHAR:128 -p id -A -d 'car_collection'\n","create partition -c (text) -p (text) [-d (text)]\n","milvus_cli > create partition -c car -p new_partition -d test_add_partition\n","create index\n","milvus_cli > create index\n\nCollection name (car, car2): car2\n\nThe name of the field to create an index for (vector): vector\n\nIndex name: vectorIndex\n\n# Default is ''\nIndex type FLAT, IVF_FLAT, IVF_SQ8, IVF_PQ, RNSG, HNSW, ANNOY, AUTOINDEX, DISKANN, GPU_IVF_FLAT, GPU_IVF_PQ, SPARSE_INVERTED_INDEX, SPARSE_WAND, SCANN, STL_SORT, Trie, INVERTED, ) []: IVF_FLAT \n\n# Default is ''\nIndex metric type (L2, IP, HAMMING, TANIMOTO, COSINE, ) []: \n\nTimeout []:\n","delete user -u (text)\n","milvus_cli > delete user -u zilliz\n","delete alias -a (text) \n","delete collection -c (text) \n","milvus_cli > delete collection -c car\n","delete entities -c (text) -p (text) \n","milvus_cli > delete entities -c car\n\nThe expression to specify entities to be deleted, such as \"film_id in [ 0, 1 ]\": film_id in [ 0, 1 ]\n\nYou are trying to delete the entities of collection. This action cannot be undone!\n\nDo you want to continue? [y/N]: y\n","delete partition -c (text) -p (text)\n","milvus_cli > delete partition -c car -p new_partition\n","delete index -c (text) -in (text)\n","milvus_cli > delete index -c car -in indexName\n","show collection -c (text)\n","milvus_cli > show collection -c test_collection_insert\n","show partition -c (text) -p (text)\n","milvus_cli > show partition -c test_collection_insert -p _default\n","show index -c (text) -in (text)\n","milvus_cli > show index -c test_collection -in index_name\n","exit\n","help \n","import -c (text)[-p (text)] \n","milvus_cli > import -c car 'examples/import_csv/vectors.csv'\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","milvus_cli > import -c car 'https://raw.githubusercontent.com/milvus-\nio/milvus_cli/main/examples/import_csv/vectors.csv'\n\nReading file from remote URL.\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","list users\n","list collections\n","list indexes -c (text)\n","list partitions -c (text)\n","load -c (text) [-p (text)]\n","query\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id in [ 428960801420883491, 428960801420883492,\n428960801420883493 ]\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: color, brand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. 
If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id > 428960801420883491\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: id, color,\nbrand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","release -c (text) [-p (text)]\n","search\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file\nout headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers):\n [[0.71, 0.76, 0.17, 0.13, 0.42, 0.07, 0.15, 0.67, 0.58, 0.02, 0.39, 0.47, 0.58, 0.88, 0.73, 0.31, 0.23, 0.57, 0.33, 0.2, 0.03, 0.43, 0.78, 0.49, 0.17, 0.56, 0.76, 0.54, 0.45, 0.46, 0.05, 0.1, 0.43, 0.63, 0.29, 0.44, 0.65, 0.01, 0.35, 0.46, 0.66, 0.7, 0.88, 0.07, 0.49, 0.92, 0.57, 0.5, 0.16, 0.77, 0.98, 0.1, 0.44, 0.88, 0.82, 0.16, 0.67, 0.63, 0.57, 0.55, 0.95, 0.13, 0.64, 0.43, 0.71, 0.81, 0.43, 0.65, 0.76, 0.7, 0.05, 0.24, 0.03, 0.9, 0.46, 0.28, 0.92, 0.25, 0.97, 0.79, 0.73, 0.97, 0.49, 0.28, 0.64, 0.19, 0.23, 0.51, 0.09, 0.1, 0.53, 0.03, 0.23, 0.94, 0.87, 0.14, 0.42, 0.82, 0.91, 0.11, 0.91, 0.37, 0.26, 0.6, 0.89, 0.6, 0.32, 0.11, 0.98, 0.67, 0.12, 0.66, 0.47, 0.02, 0.15, 0.6, 0.64, 0.57, 0.14, 0.81, 0.75, 0.11, 0.49, 0.78, 0.16, 0.63, 0.57, 0.18]]\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. 
If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, car2): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []:\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []:\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","list connections \n","show index_progress -c (text) [-i (text)]\n","show loading_progress -c (text) [-p (text)]\n","version\n","$ milvus_cli --version\nMilvus_CLI v0.4.0\n"],"headingContent":"Milvus_CLI Command Reference","anchorList":[{"label":"Milvus_CLI 명령 참조","href":"MilvusCLI-Command-Reference","type":1,"isActive":false},{"label":"clear","href":"clear","type":2,"isActive":false},{"label":"connect","href":"connect","type":2,"isActive":false},{"label":"데이터베이스 생성","href":"create-Database","type":2,"isActive":false},{"label":"데이터베이스 사용","href":"use-Database","type":2,"isActive":false},{"label":"데이터베이스 목록","href":"list-Databases","type":2,"isActive":false},{"label":"데이터베이스 삭제","href":"delete-Database","type":2,"isActive":false},{"label":"사용자 만들기","href":"create-user","type":2,"isActive":false},{"label":"별칭 만들기","href":"create-alias","type":2,"isActive":false},{"label":"컬렉션 만들기","href":"create-collection","type":2,"isActive":false},{"label":"create partition","href":"create-partition","type":2,"isActive":false},{"label":"create index","href":"create-index","type":2,"isActive":false},{"label":"delete user","href":"delete-user","type":2,"isActive":false},{"label":"삭제 별칭","href":"delete-alias","type":2,"isActive":false},{"label":"컬렉션 삭제","href":"delete-collection","type":2,"isActive":false},{"label":"delete entities","href":"delete-entities","type":2,"isActive":false},{"label":"삭제 파티션","href":"delete-partition","type":2,"isActive":false},{"label":"삭제 인덱스","href":"delete-index","type":2,"isActive":false},{"label":"show collection","href":"show-collection","type":2,"isActive":false},{"label":"show partition","href":"show-partition","type":2,"isActive":false},{"label":"show index","href":"show-index","type":2,"isActive":false},{"label":"exit","href":"exit","type":2,"isActive":false},{"label":"help","href":"help","type":2,"isActive":false},{"label":"가져오기","href":"import","type":2,"isActive":false},{"label":"사용자 목록","href":"list-users","type":2,"isActive":false},{"label":"컬렉션 목록","href":"list-collections","type":2,"isActive":false},{"label":"인덱스 목록","href":"list-indexes","type":2,"isActive":false},{"label":"파티션 목록","href":"list-partitions","type":2,"isActive":false},{"label":"load","href":"load","type":2,"isActive":false},{"label":"쿼리","href":"query","type":2,"isActive":false},{"label":"release","href":"release","type":2,"isActive":false},{"label":"검색","href":"search","type":2,"isActive":false},{"label":"목록 연결","href":"list-connection","type":2,"isActive":false},{"label":"show 
index_progress","href":"show-indexprogress","type":2,"isActive":false},{"label":"show loading_progress","href":"show-loadingprogress","type":2,"isActive":false},{"label":"version","href":"version","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/userGuide/tools/cli_commands.md b/localization/v2.4.x/site/ko/userGuide/tools/cli_commands.md index 1c3691338..507a69d47 100644 --- a/localization/v2.4.x/site/ko/userGuide/tools/cli_commands.md +++ b/localization/v2.4.x/site/ko/userGuide/tools/cli_commands.md @@ -143,7 +143,7 @@ title: Milvus_CLI 명령 참조

    예제

    예제 1

    다음 예에서는 밀버스에서 testdb 데이터베이스를 사용합니다.

    milvus_cli > use database -db testdb
     
    -

    데이터베이스 나열

    -

    예제

    예제 1

    다음 예제는 밀버스에서 데이터베이스를 나열합니다.

    +

    예제

    예제 1

    다음 예제는 밀버스로 데이터베이스를 나열합니다.

    milvus_cli > list databases
     
    -

    데이터베이스 삭제

    -

    색인되지 않은 컬렉션에서 검색을 수행하고 필요한 입력을 묻는 메시지를 표시하려면 다음과 같이 하세요:

    +

    인덱싱되지 않은 컬렉션에서 검색을 수행하고 필요한 입력을 묻는 메시지를 표시하려면 다음과 같이 하세요:

    milvus_cli > search
     
     Collection name (car, car2): car
    @@ -1165,7 +1165,7 @@ timeout []:
     Guarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:
     
     
    -

    목록 연결

    엔티티 로딩 진행률을 표시합니다.

    +

    컬렉션 로딩 진행률을 표시합니다.

    구문

    show loading_progress -c (text) [-p (text)]
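
예제

The hunk carries only the syntax for this command, with no usage example; below is a minimal illustration in the style of the other CLI examples in this reference (the collection name car is borrowed from those examples and is an assumption here):

milvus_cli > show loading_progress -c car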
     
    diff --git a/localization/v2.4.x/site/ko/userGuide/tools/milvus_backup_overview.json b/localization/v2.4.x/site/ko/userGuide/tools/milvus_backup_overview.json index c9b78600c..5fdf5ed3f 100644 --- a/localization/v2.4.x/site/ko/userGuide/tools/milvus_backup_overview.json +++ b/localization/v2.4.x/site/ko/userGuide/tools/milvus_backup_overview.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"","anchorList":[{"label":"Milvus 백업","href":"Milvus-Backup","type":1,"isActive":false},{"label":"전제 조건","href":"Prerequisites","type":2,"isActive":false},{"label":"아키텍처","href":"Architecture","type":2,"isActive":false},{"label":"최신 릴리스","href":"Latest-release","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Milvus Backup","anchorList":[{"label":"Milvus 백업","href":"Milvus-Backup","type":1,"isActive":false},{"label":"전제 조건","href":"Prerequisites","type":2,"isActive":false},{"label":"아키텍처","href":"Architecture","type":2,"isActive":false},{"label":"최신 릴리스","href":"Latest-release","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/userGuide/tools/milvus_backup_overview.md b/localization/v2.4.x/site/ko/userGuide/tools/milvus_backup_overview.md index a756f8036..5cafab0c6 100644 --- a/localization/v2.4.x/site/ko/userGuide/tools/milvus_backup_overview.md +++ b/localization/v2.4.x/site/ko/userGuide/tools/milvus_backup_overview.md @@ -77,5 +77,5 @@ title: Milvus 백업 > diff --git a/localization/v2.4.x/site/ko/userGuide/use-partition-key.json b/localization/v2.4.x/site/ko/userGuide/use-partition-key.json index e7630a3f7..bc249f258 100644 --- a/localization/v2.4.x/site/ko/userGuide/use-partition-key.json +++ b/localization/v2.4.x/site/ko/userGuide/use-partition-key.json @@ -1 +1 @@ -{"codeList":["import random, time\nfrom pymilvus import connections, MilvusClient, DataType\n\nSERVER_ADDR = \"http://localhost:19530\"\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=SERVER_ADDR\n)\n\n# 2. Create a collection\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n partition_key_field=\"color\",\n num_partitions=16 # Number of partitions. Defaults to 16.\n)\n\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\nschema.add_field(field_name=\"color\", datatype=DataType.VARCHAR, max_length=512)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.collection.request.AddFieldReq;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. 
Create a collection in customized setup mode\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder()\n .fieldName(\"id\")\n .dataType(DataType.Int64)\n .isPrimaryKey(true)\n .autoID(false)\n .build());\n\nschema.addField(AddFieldReq.builder()\n .fieldName(\"vector\")\n .dataType(DataType.FloatVector)\n .dimension(5)\n .build());\n \nschema.addField(AddFieldReq.builder()\n .fieldName(\"color\")\n .dataType(DataType.VarChar)\n .maxLength(512)\n .isPartitionKey(true)\n .build());\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\nasync function main() {\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address}); \n\n// 2. Create a collection\n// 2.1 Define fields\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n auto_id: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n {\n name: \"color\",\n data_type: DataType.VarChar,\n max_length: 512,\n is_partition_key: true\n }\n]\n","index_params = MilvusClient.prepare_index_params()\n\nindex_params.add_index(\n field_name=\"id\",\n index_type=\"STL_SORT\"\n)\n\nindex_params.add_index(\n field_name=\"color\",\n index_type=\"Trie\"\n)\n\nindex_params.add_index(\n field_name=\"vector\",\n index_type=\"IVF_FLAT\",\n metric_type=\"L2\",\n params={\"nlist\": 1024}\n)\n","// 2.3 Prepare index parameters\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.IP)\n .extraParams(Map.of(\"nlist\", 1024))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n","// 2.2 Prepare index parameters\nconst index_params = [{\n field_name: \"color\",\n index_type: \"Trie\"\n},{\n field_name: \"id\",\n index_type: \"STL_SORT\"\n},{\n field_name: \"vector\",\n index_type: \"IVF_FLAT\",\n metric_type: \"IP\",\n params: { nlist: 1024}\n}]\n","client.create_collection(\n collection_name=\"test_collection\",\n schema=schema,\n index_params=index_params\n)\n","// 2.4 Create a collection with schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n .collectionName(\"test_collection\")\n .collectionSchema(schema)\n .indexParams(indexParams) \n .build();\n\nclient.createCollection(customizedSetupReq);\n","// 2.3 Create a collection with fields and index parameters\nres = await client.createCollection({\n collection_name: \"test_collection\",\n fields: fields, \n index_params: index_params,\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n//\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n","// 3. 
Insert randomly generated vectors\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n int current_tag = rand.nextInt(8999) + 1000;\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"tag\", current_tag);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nSystem.out.println(JSONObject.toJSON(data.get(0))); \n","// 3. Insert randomly generated vectors \nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\nvar data = []\n\nfor (let i = 0; i < 1000; i++) {\n const current_color = colors[Math.floor(Math.random() * colors.length)]\n const current_tag = Math.floor(Math.random() * 8999 + 1000)\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n tag: current_tag,\n color_tag: `${current_color}_${current_tag}`\n })\n}\n\nconsole.log(data[0])\n","{\n id: 0,\n vector: [\n 0.1275656405044483,\n 0.47417858592773277,\n 0.13858264437643286,\n 0.2390904907020377,\n 0.8447862593689635\n ],\n color: 'blue',\n tag: 2064,\n color_tag: 'blue_2064'\n}\n","res = client.insert(\n collection_name=\"test_collection\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n","// 3.1 Insert data into the collection\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"test_collection\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n","res = await client.insert({\n collection_name: \"test_collection\",\n data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n","# 4. Search with partition key\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\n\nres = client.search(\n collection_name=\"test_collection\",\n data=query_vectors,\n filter=\"color == 'green'\",\n search_params={\"metric_type\": \"L2\", \"params\": {\"nprobe\": 10}},\n output_fields=[\"id\", \"color_tag\"],\n limit=3\n)\n\nprint(res)\n\n# Output\n#\n# [\n# [\n# {\n# \"id\": 970,\n# \"distance\": 0.5770174264907837,\n# \"entity\": {\n# \"id\": 970,\n# \"color_tag\": \"green_9828\"\n# }\n# },\n# {\n# \"id\": 115,\n# \"distance\": 0.6898155808448792,\n# \"entity\": {\n# \"id\": 115,\n# \"color_tag\": \"green_4073\"\n# }\n# },\n# {\n# \"id\": 899,\n# \"distance\": 0.7028976678848267,\n# \"entity\": {\n# \"id\": 899,\n# \"color_tag\": \"green_9897\"\n# }\n# }\n# ]\n# ]\n","// 4. 
Search with partition key\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"test_collection\")\n .data(query_vectors)\n .filter(\"color == \\\"green\\\"\")\n .topK(3)\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp)); \n\n// Output:\n// {\"searchResults\": [[\n// {\n// \"distance\": 1.0586997,\n// \"id\": 414,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.981384,\n// \"id\": 293,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.9548756,\n// \"id\": 325,\n// \"entity\": {}\n// }\n// ]]}\n","// 4. Search with partition key\nconst query_vectors = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"test_collection\",\n data: query_vectors,\n filter: \"color == 'green'\",\n output_fields: [\"color_tag\"],\n limit: 3\n})\n\nconsole.log(res.results)\n\n// Output\n// \n// [\n// { score: 2.402090549468994, id: '135', color_tag: 'green_2694' },\n// { score: 2.3938629627227783, id: '326', color_tag: 'green_7104' },\n// { score: 2.3235254287719727, id: '801', color_tag: 'green_3162' }\n// ]\n// \n"],"headingContent":"","anchorList":[{"label":"Use Partition Key","href":"Use-Partition-Key","type":1,"isActive":false},{"label":"Overview","href":"Overview","type":2,"isActive":false},{"label":"Enable partition key","href":"Enable-partition-key","type":2,"isActive":false},{"label":"List partitions","href":"List-partitions","type":2,"isActive":false},{"label":"Insert data","href":"Insert-data","type":2,"isActive":false},{"label":"Use partition key","href":"Use-partition-key","type":2,"isActive":false},{"label":"Typical use cases","href":"Typical-use-cases","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["import random, time\nfrom pymilvus import connections, MilvusClient, DataType\n\nSERVER_ADDR = \"http://localhost:19530\"\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=SERVER_ADDR\n)\n\n# 2. Create a collection\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n partition_key_field=\"color\",\n num_partitions=64 # Number of partitions. Defaults to 64.\n)\n\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\nschema.add_field(field_name=\"color\", datatype=DataType.VARCHAR, max_length=512)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.collection.request.AddFieldReq;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. 
Create a collection in customized setup mode\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder()\n .fieldName(\"id\")\n .dataType(DataType.Int64)\n .isPrimaryKey(true)\n .autoID(false)\n .build());\n\nschema.addField(AddFieldReq.builder()\n .fieldName(\"vector\")\n .dataType(DataType.FloatVector)\n .dimension(5)\n .build());\n \nschema.addField(AddFieldReq.builder()\n .fieldName(\"color\")\n .dataType(DataType.VarChar)\n .maxLength(512)\n .isPartitionKey(true)\n .build());\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\nasync function main() {\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address}); \n\n// 2. Create a collection\n// 2.1 Define fields\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n auto_id: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n {\n name: \"color\",\n data_type: DataType.VarChar,\n max_length: 512,\n is_partition_key: true\n }\n]\n","index_params = MilvusClient.prepare_index_params()\n\nindex_params.add_index(\n field_name=\"id\",\n index_type=\"STL_SORT\"\n)\n\nindex_params.add_index(\n field_name=\"color\",\n index_type=\"Trie\"\n)\n\nindex_params.add_index(\n field_name=\"vector\",\n index_type=\"IVF_FLAT\",\n metric_type=\"L2\",\n params={\"nlist\": 1024}\n)\n","// 2.3 Prepare index parameters\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.IP)\n .extraParams(Map.of(\"nlist\", 1024))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n","// 2.2 Prepare index parameters\nconst index_params = [{\n field_name: \"color\",\n index_type: \"Trie\"\n},{\n field_name: \"id\",\n index_type: \"STL_SORT\"\n},{\n field_name: \"vector\",\n index_type: \"IVF_FLAT\",\n metric_type: \"IP\",\n params: { nlist: 1024}\n}]\n","client.create_collection(\n collection_name=\"test_collection\",\n schema=schema,\n index_params=index_params\n)\n","// 2.4 Create a collection with schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n .collectionName(\"test_collection\")\n .collectionSchema(schema)\n .indexParams(indexParams) \n .build();\n\nclient.createCollection(customizedSetupReq);\n","// 2.3 Create a collection with fields and index parameters\nres = await client.createCollection({\n collection_name: \"test_collection\",\n fields: fields, \n index_params: index_params,\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n//\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n","// 3. 
Insert randomly generated vectors\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n int current_tag = rand.nextInt(8999) + 1000;\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"tag\", current_tag);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nSystem.out.println(JSONObject.toJSON(data.get(0))); \n","// 3. Insert randomly generated vectors \nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\nvar data = []\n\nfor (let i = 0; i < 1000; i++) {\n const current_color = colors[Math.floor(Math.random() * colors.length)]\n const current_tag = Math.floor(Math.random() * 8999 + 1000)\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n tag: current_tag,\n color_tag: `${current_color}_${current_tag}`\n })\n}\n\nconsole.log(data[0])\n","{\n id: 0,\n vector: [\n 0.1275656405044483,\n 0.47417858592773277,\n 0.13858264437643286,\n 0.2390904907020377,\n 0.8447862593689635\n ],\n color: 'blue',\n tag: 2064,\n color_tag: 'blue_2064'\n}\n","res = client.insert(\n collection_name=\"test_collection\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n","// 3.1 Insert data into the collection\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"test_collection\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n","res = await client.insert({\n collection_name: \"test_collection\",\n data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n","# 4. Search with partition key\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\n\nres = client.search(\n collection_name=\"test_collection\",\n data=query_vectors,\n filter=\"color == 'green'\",\n search_params={\"metric_type\": \"L2\", \"params\": {\"nprobe\": 10}},\n output_fields=[\"id\", \"color_tag\"],\n limit=3\n)\n\nprint(res)\n\n# Output\n#\n# [\n# [\n# {\n# \"id\": 970,\n# \"distance\": 0.5770174264907837,\n# \"entity\": {\n# \"id\": 970,\n# \"color_tag\": \"green_9828\"\n# }\n# },\n# {\n# \"id\": 115,\n# \"distance\": 0.6898155808448792,\n# \"entity\": {\n# \"id\": 115,\n# \"color_tag\": \"green_4073\"\n# }\n# },\n# {\n# \"id\": 899,\n# \"distance\": 0.7028976678848267,\n# \"entity\": {\n# \"id\": 899,\n# \"color_tag\": \"green_9897\"\n# }\n# }\n# ]\n# ]\n","// 4. 
Search with partition key\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"test_collection\")\n .data(query_vectors)\n .filter(\"color == \\\"green\\\"\")\n .topK(3)\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp)); \n\n// Output:\n// {\"searchResults\": [[\n// {\n// \"distance\": 1.0586997,\n// \"id\": 414,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.981384,\n// \"id\": 293,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.9548756,\n// \"id\": 325,\n// \"entity\": {}\n// }\n// ]]}\n","// 4. Search with partition key\nconst query_vectors = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"test_collection\",\n data: query_vectors,\n filter: \"color == 'green'\",\n output_fields: [\"color_tag\"],\n limit: 3\n})\n\nconsole.log(res.results)\n\n// Output\n// \n// [\n// { score: 2.402090549468994, id: '135', color_tag: 'green_2694' },\n// { score: 2.3938629627227783, id: '326', color_tag: 'green_7104' },\n// { score: 2.3235254287719727, id: '801', color_tag: 'green_3162' }\n// ]\n// \n"],"headingContent":"Use Partition Key","anchorList":[{"label":"파티션 키 사용","href":"Use-Partition-Key","type":1,"isActive":false},{"label":"개요","href":"Overview","type":2,"isActive":false},{"label":"파티션 키 사용","href":"Enable-partition-key","type":2,"isActive":false},{"label":"목록 파티션","href":"List-partitions","type":2,"isActive":false},{"label":"데이터 삽입","href":"Insert-data","type":2,"isActive":false},{"label":"파티션 키 사용","href":"Use-partition-key","type":2,"isActive":false},{"label":"일반적인 사용 사례","href":"Typical-use-cases","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/ko/userGuide/use-partition-key.md b/localization/v2.4.x/site/ko/userGuide/use-partition-key.md index 990c59b03..16494a645 100644 --- a/localization/v2.4.x/site/ko/userGuide/use-partition-key.md +++ b/localization/v2.4.x/site/ko/userGuide/use-partition-key.md @@ -1,9 +1,8 @@ --- id: use-partition-key.md -title: Use Partition Key -summary: '' +title: 파티션 키 사용 --- -

    Use Partition Key

    This guide walks you through using the partition key to accelerate data retrieval from your collection.

    -

    Overview

    이 가이드에서는 컬렉션에서 데이터 검색 속도를 높이기 위해 파티션 키를 사용하는 방법을 안내합니다.

    +

    개요

    You can set a particular field in a collection as the partition key so that Milvus distributes incoming entities into different partitions according to their respective partition values in this field. This allows entities with the same key value to be grouped in a partition, accelerating search performance by avoiding the need to scan irrelevant partitions when filtering by the key field. When compared to traditional filtering methods, the partition key can greatly enhance query performance.

    -

    You can use the partition key to implement multi-tenancy. For details on multi-tenancy, read Multi-tenancy for more.

    -

    Enable partition key

    컬렉션의 특정 필드를 파티션 키로 설정하면 Milvus가 이 필드의 각 파티션 값에 따라 들어오는 엔티티를 다른 파티션으로 분배할 수 있습니다. 이렇게 하면 키 값이 동일한 엔티티를 파티션에 그룹화할 수 있으므로 키 필드로 필터링할 때 관련 없는 파티션을 스캔할 필요가 없어 검색 성능이 빨라집니다. 기존 필터링 방법과 비교했을 때, 파티션 키는 쿼리 성능을 크게 향상시킬 수 있습니다.

    +

파티션 키를 사용하여 멀티테넌시를 구현할 수 있습니다. 자세한 내용은 멀티 테넌시를 참조하세요.

    +

    파티션 키 사용

    To set a field as the partition key, specify partition_key_field when creating a collection schema.

    -

    In the example code below, num_partitions determines the number of partitions that will be created. By default, it is set to 16. We recommend you retain the default value.

    +

    필드를 파티션 키로 설정하려면 컬렉션 스키마를 만들 때 partition_key_field 을 지정합니다.

    +

아래 예제 코드에서 num_partitions 은 생성될 파티션의 수를 결정합니다. 기본적으로 64로 설정되어 있습니다. 기본값을 그대로 유지하는 것이 좋습니다.

    -

    For more information on parameters, refer to MilvusClient, create_schema(), and add_field() in the SDK reference.

    +

매개 변수에 대한 자세한 내용은 MilvusClient, create_schema()add_field()를 참조하세요.

    -

    For more information on parameters, refer to MilvusClientV2, createSchema(), and addField() in the SDK reference.

    +

파라미터에 대한 자세한 내용은 MilvusClientV2, createSchema()addField()를 참조하세요.

    -

    For more information on parameters, refer to MilvusClient and createCollection() in the SDK reference.

    +

파라미터에 대한 자세한 내용은 MilvusClientcreateCollection()를 참조하세요.

    + 파이썬 자바 Node.js
    import random, time
     from pymilvus import connections, MilvusClient, DataType
     
    @@ -82,7 +78,7 @@ schema = MilvusClient.create_schema(
         auto_id=False,
         enable_dynamic_field=True,
         partition_key_field="color",
    -    num_partitions=16 # Number of partitions. Defaults to 16.
    +    num_partitions=64 # Number of partitions. Defaults to 64.
     )
     
     schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
    @@ -161,12 +157,9 @@ client = new M
         }
     ]
     
    -

    After you have defined the fields, set up the index parameters.

    +

    필드를 정의한 후 인덱스 매개변수를 설정합니다.

    + 파이썬 자바 Node.js
    index_params = MilvusClient.prepare_index_params()
     
     index_params.add_index(
    @@ -211,12 +204,9 @@ indexParams.add(indexParamForVectorFie
         params: { nlist: 1024}
     }]
     
    -

    Finally, you can create a collection.

    +

    마지막으로 컬렉션을 만들 수 있습니다.

+ 파이썬 자바 Node.js
    client.create_collection(
         collection_name="test_collection",
         schema=schema,
    @@ -246,7 +236,7 @@ res = await client.// Success
     //
     
    -

    List partitions

    Once a field of a collection is used as the partition key, Milvus creates the specified number of partitions and manages them on your behalf. Therefore, you cannot manipulate the partitions in this collection anymore.

    -

The following snippet demonstrates that a collection contains 64 partitions once one of its fields is used as the partition key.

    -

    Insert data

    컬렉션의 필드가 파티션 키로 사용되면 Milvus는 지정된 수의 파티션을 생성하고 사용자를 대신하여 관리합니다. 따라서 이 컬렉션의 파티션은 더 이상 조작할 수 없습니다.

    +

    다음 코드 조각은 컬렉션의 필드 중 하나가 파티션 키로 사용되면 컬렉션에 64개의 파티션이 생성되는 것을 보여줍니다.
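
The snippet referenced above is not part of this hunk; here is a minimal pymilvus sketch, assuming the client and test_collection created earlier in this guide:

# List the partitions Milvus created automatically for the partition-key field.
# With num_partitions=64 this is expected to print 64 names,
# e.g. _default_0 through _default_63.
res = client.list_partitions(collection_name="test_collection")
print(res)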

    +

    데이터 삽입

    Once the collection is ready, start inserting data as follows:

    -

    Prepare data

    +

    컬렉션이 준비되면 다음과 같이 데이터 삽입을 시작합니다:

    +

    데이터 준비

    # 3. Insert randomly generated vectors 
     colors = ["green", "blue", "yellow", "red", "black", "white", "purple", "pink", "orange", "brown", "grey"]
     data = []
    @@ -338,7 +325,7 @@ data = []
     
     console.log(data[0])
     
    -

    You can view the structure of the generated data by checking its first entry.

    +

    첫 번째 항목을 확인하여 생성된 데이터의 구조를 볼 수 있습니다.

    {
         id: 0,
         vector: [
    @@ -353,20 +340,17 @@ data = []
         color_tag: 'blue_2064'
     }
     
    -

    Insert data

    -

    Use the insert() method to insert the data into the collection.

    +

    데이터 삽입

    +

insert() 메서드를 사용하여 컬렉션에 데이터를 삽입합니다.

    -

    Use the insert() method to insert the data into the collection.

    +

insert() 메서드를 사용하여 컬렉션에 데이터를 삽입합니다.

    -

    Use the insert() method to insert the data into the collection.

    +

insert() 메서드를 사용하여 컬렉션에 데이터를 삽입합니다.

    + 파이썬 자바 Node.js
    res = client.insert(
         collection_name="test_collection",
         data=data
    @@ -418,7 +402,7 @@ data = []
     // 1000
     // 
     
    -

    Use partition key

    Once you have indexed and loaded the collection as well as inserted data, you can conduct a similarity search using the partition key.

    +

    컬렉션을 색인하고 로드하고 데이터를 삽입한 후에는 파티션 키를 사용하여 유사성 검색을 수행할 수 있습니다.
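
The indexing and loading steps referenced here are not shown in this hunk; as a minimal pymilvus sketch of the loading step, assuming the same client and collection as above:

# Load the collection into memory so that searches can be served.
client.load_collection(collection_name="test_collection")

# Optionally verify the load state before searching.
print(client.get_load_state(collection_name="test_collection"))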

    -

    For more information on parameters, refer to search() in the SDK reference.

    +

    매개변수에 대한 자세한 내용은 search() 를 참조하세요.

    -

    For more information on parameters, refer to search() in the SDK reference.

    +

    매개변수에 대한 자세한 내용은 search() 를 참조하세요.

    -

    For more information on parameters, refer to search() in the SDK reference.

    +

    파라미터에 대한 자세한 내용은 search() 를 참조하세요.

    -

    notes

    -

    To conduct a similarity search using the partition key, you should include either of the following in the boolean expression of the search request:

    +

    참고

    +

    파티션 키를 사용하여 유사도 검색을 수행하려면 검색 요청의 부울 표현식에 다음 중 하나를 포함해야 합니다:

    • expr='<partition_key>=="xxxx"'

    • expr='<partition_key> in ["xxx", "xxx"]'

    -

    Do replace <partition_key> with the name of the field that is designated as the partition key.

    +

    <partition_key> 을 파티션 키로 지정된 필드의 이름으로 대체하세요.

+ 파이썬 자바 Node.js
    # 4. Search with partition key
     query_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]
     
    @@ -557,7 +538,7 @@ res = await client.// ]
     // 
     
    -

    Typical use cases

    You can utilize the partition key feature to achieve better search performance and enable multi-tenancy. This can be done by assigning a tenant-specific value as the partition key field for each entity. When searching or querying the collection, you can filter entities by the tenant-specific value by including the partition key field in the boolean expression. This approach ensures data isolation by tenants and avoids scanning unnecessary partitions.

    +

    파티션 키 기능을 활용하여 검색 성능을 개선하고 멀티테넌시를 활성화할 수 있습니다. 각 엔티티의 파티션 키 필드에 테넌트별 값을 할당하면 됩니다. 컬렉션을 검색하거나 쿼리할 때 부울 표현식에 파티션 키 필드를 포함시켜 테넌트별 값을 기준으로 엔티티를 필터링할 수 있습니다. 이 접근 방식은 테넌트별 데이터 격리를 보장하고 불필요한 파티션 스캔을 피할 수 있습니다.
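
As a hedged sketch of this pattern (the field name tenant_id, the collection name, and the tenant value are hypothetical, not taken from this guide): make the tenant field the partition key when creating the schema, then filter on it in every search so each tenant only touches its own partition.

# Hypothetical multi-tenancy setup: "tenant_id" is the partition key.
schema = MilvusClient.create_schema(
    auto_id=False,
    partition_key_field="tenant_id"
)
schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
schema.add_field(field_name="vector", datatype=DataType.FLOAT_VECTOR, dim=5)
schema.add_field(field_name="tenant_id", datatype=DataType.VARCHAR, max_length=64)

# Scope every search to one tenant via the partition-key filter.
res = client.search(
    collection_name="tenant_collection",
    data=query_vectors,
    filter='tenant_id == "tenant_A"',
    limit=3
)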

    diff --git a/localization/v2.4.x/site/pt/adminGuide/clouds/aws/s3.json b/localization/v2.4.x/site/pt/adminGuide/clouds/aws/s3.json index ef8642c81..59db4005e 100644 --- a/localization/v2.4.x/site/pt/adminGuide/clouds/aws/s3.json +++ b/localization/v2.4.x/site/pt/adminGuide/clouds/aws/s3.json @@ -1 +1 @@ -{"codeList":["milvus_bucket_name=\"milvus-bucket-$(openssl rand -hex 12)\"\n\naws s3api create-bucket --bucket \"$milvus_bucket_name\" --region 'us-east-2' --acl private --object-ownership ObjectWriter --create-bucket-configuration LocationConstraint='us-east-2'\n\n\n# Output\n#\n# \"Location\": \"http://milvus-bucket-039dd013c0712f085d60e21f.s3.amazonaws.com/\"\n","echo '{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n}' > milvus-s3-policy.json\n\naws iam create-policy --policy-name MilvusS3ReadWrite --policy-document file://milvus-s3-policy.json\n\n\n# Get the ARN from the command output as follows:\n# {\n# \"Policy\": {\n# \"PolicyName\": \"MilvusS3ReadWrite\",\n# \"PolicyId\": \"AN5QQVVPM1BVTFlBNkdZT\",\n# \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n# \"Path\": \"/\",\n# \"DefaultVersionId\": \"v1\",\n# \"AttachmentCount\": 0,\n# \"PermissionsBoundaryUsageCount\": 0,\n# \"IsAttachable\": true,\n# \"CreateDate\": \"2023-11-16T06:00:01+00:00\",\n# \"UpdateDate\": \"2023-11-16T06:00:01+00:00\"\n# }\n# } \n","eksctl create iamserviceaccount --name milvus-s3-access-sa --namespace milvus --cluster milvus-eks-cluster --role-name milvus-s3-access-sa \\\n --attach-policy-arn arn:aws:iam:::policy/MilvusS3ReadWrite --approve\n","aws iam get-role --role-name milvus-s3-access-sa --query Role.AssumeRolePolicyDocument\n# An example output is as follows\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Federated\": \"arn:aws:iam::111122223333:oidc-provider/oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE\"\n },\n \"Action\": \"sts:AssumeRoleWithWebIdentity\",\n \"Condition\": {\n \"StringEquals\": {\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:sub\": \"system:serviceaccount:default:my-service-account\",\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:aud\": \"sts.amazonaws.com\"\n }\n }\n }\n ]\n}\n","aws iam list-attached-role-policies --role-name milvus-s3-access-sa --query 'AttachedPolicies[].PolicyArn' --output text\n# An example output is as follows\narn:aws:iam::12345678901:policy/MilvusS3ReadWrite\n","export policy_arn='arn:aws:iam::12345678901:policy/MilvusS3ReadWrite'\naws iam get-policy --policy-arn $policy_arn\n# An example output is as follows\n{\n \"Policy\": {\n \"PolicyName\": \"MilvusS3ReadWrite\",\n \"PolicyId\": \"EXAMPLEBIOWGLDEXAMPLE\",\n \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n \"Path\": \"/\",\n \"DefaultVersionId\": \"v2\",\n [...]\n }\n}\n","aws iam get-policy-version --policy-arn $policy_arn --version-id v2\n# An example output is as follows\n{\n \"PolicyVersion\": {\n \"Document\": {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n },\n [...]\n }\n}\n","kubectl describe 
serviceaccount milvus-s3-access-sa -n milvus\n# An example output is as follows\nName: milvus-s3-access-sa\nNamespace: milvus\nLabels: app.kubernetes.io/managed-by=eksctl\nAnnotations: eks.amazonaws.com/role-arn: arn:aws:iam::12345678901:role/milvus-s3-access-sa\n[...]\n","helm repo add milvus https://zilliztech.github.io/milvus-helm/\nhelm repo update\n","cluster:\n enabled: true\n\nservice:\n type: LoadBalancer\n port: 19530\n annotations: \n service.beta.kubernetes.io/aws-load-balancer-type: external\n service.beta.kubernetes.io/aws-load-balancer-name: milvus-service\n service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing\n service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip\n\nserviceAccount:\n create: false\n name: milvus-s3-access-sa\n\nminio:\n enabled: false\n\nexternalS3:\n enabled: true\n host: \"s3.us-east-2.amazonaws.com\"\n port: \"443\"\n useSSL: true\n bucketName: \"\"\n useIAM: true\n cloudProvider: \"aws\"\n iamEndpoint: \"\"\n\nrootCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: 1\n memory: 2Gi\n\nindexCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nqueryCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\ndataCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nproxy:\n replicas: 2\n resources: \n limits:\n cpu: 1\n memory: 2Gi \n","helm upgrade --install milvus-demo milvus/milvus -n milvus -f milvus.yaml\n"],"headingContent":"","anchorList":[{"label":"Configurar o acesso ao S3 por função do IAM","href":"Configure-S3-Access-by-IAM-Role","type":1,"isActive":false},{"label":"Antes de começar","href":"Before-you-start","type":2,"isActive":false},{"label":"Associar uma função do IAM a uma conta de serviço do Kubernetes","href":"Associate-an-IAM-role-with-a-Kubernetes-service-account","type":2,"isActive":false},{"label":"Verifique a configuração da função e da conta de serviço","href":"Verify-the-role-and-service-account-setup","type":2,"isActive":false},{"label":"Implantar o Milvus","href":"Deploy-Milvus","type":2,"isActive":false},{"label":"Verificar a instalação","href":"Verify-the-installation","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["milvus_bucket_name=\"milvus-bucket-$(openssl rand -hex 12)\"\n\naws s3api create-bucket --bucket \"$milvus_bucket_name\" --region 'us-east-2' --acl private --object-ownership ObjectWriter --create-bucket-configuration LocationConstraint='us-east-2'\n\n\n# Output\n#\n# \"Location\": \"http://milvus-bucket-039dd013c0712f085d60e21f.s3.amazonaws.com/\"\n","echo '{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:ListBucket\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\"\n ]\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:DeleteObject\",\n \"s3:GetObject\",\n \"s3:PutObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n}' > milvus-s3-policy.json\n\naws iam create-policy --policy-name MilvusS3ReadWrite --policy-document file://milvus-s3-policy.json\n\n\n# Get the ARN from the command output as follows:\n# {\n# \"Policy\": {\n# \"PolicyName\": \"MilvusS3ReadWrite\",\n# \"PolicyId\": \"AN5QQVVPM1BVTFlBNkdZT\",\n# \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n# \"Path\": \"/\",\n# \"DefaultVersionId\": \"v1\",\n# \"AttachmentCount\": 0,\n# 
\"PermissionsBoundaryUsageCount\": 0,\n# \"IsAttachable\": true,\n# \"CreateDate\": \"2023-11-16T06:00:01+00:00\",\n# \"UpdateDate\": \"2023-11-16T06:00:01+00:00\"\n# }\n# } \n","eksctl create iamserviceaccount --name milvus-s3-access-sa --namespace milvus --cluster milvus-eks-cluster --role-name milvus-s3-access-sa \\\n --attach-policy-arn arn:aws:iam:::policy/MilvusS3ReadWrite --approve\n","aws iam get-role --role-name milvus-s3-access-sa --query Role.AssumeRolePolicyDocument\n# An example output is as follows\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Federated\": \"arn:aws:iam::111122223333:oidc-provider/oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE\"\n },\n \"Action\": \"sts:AssumeRoleWithWebIdentity\",\n \"Condition\": {\n \"StringEquals\": {\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:sub\": \"system:serviceaccount:default:my-service-account\",\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:aud\": \"sts.amazonaws.com\"\n }\n }\n }\n ]\n}\n","aws iam list-attached-role-policies --role-name milvus-s3-access-sa --query 'AttachedPolicies[].PolicyArn' --output text\n# An example output is as follows\narn:aws:iam::12345678901:policy/MilvusS3ReadWrite\n","export policy_arn='arn:aws:iam::12345678901:policy/MilvusS3ReadWrite'\naws iam get-policy --policy-arn $policy_arn\n# An example output is as follows\n{\n \"Policy\": {\n \"PolicyName\": \"MilvusS3ReadWrite\",\n \"PolicyId\": \"EXAMPLEBIOWGLDEXAMPLE\",\n \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n \"Path\": \"/\",\n \"DefaultVersionId\": \"v2\",\n [...]\n }\n}\n","aws iam get-policy-version --policy-arn $policy_arn --version-id v2\n# An example output is as follows\n{\n \"PolicyVersion\": {\n \"Document\": {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n },\n [...]\n }\n}\n","kubectl describe serviceaccount milvus-s3-access-sa -n milvus\n# An example output is as follows\nName: milvus-s3-access-sa\nNamespace: milvus\nLabels: app.kubernetes.io/managed-by=eksctl\nAnnotations: eks.amazonaws.com/role-arn: arn:aws:iam::12345678901:role/milvus-s3-access-sa\n[...]\n","helm repo add milvus https://zilliztech.github.io/milvus-helm/\nhelm repo update\n","cluster:\n enabled: true\n\nservice:\n type: LoadBalancer\n port: 19530\n annotations: \n service.beta.kubernetes.io/aws-load-balancer-type: external\n service.beta.kubernetes.io/aws-load-balancer-name: milvus-service\n service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing\n service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip\n\nserviceAccount:\n create: false\n name: milvus-s3-access-sa\n\nminio:\n enabled: false\n\nexternalS3:\n enabled: true\n host: \"s3.us-east-2.amazonaws.com\"\n port: \"443\"\n useSSL: true\n bucketName: \"\"\n useIAM: true\n cloudProvider: \"aws\"\n iamEndpoint: \"\"\n\nrootCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: 1\n memory: 2Gi\n\nindexCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nqueryCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\ndataCoordinator:\n replicas: 2\n activeStandby:\n 
enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nproxy:\n replicas: 2\n resources: \n limits:\n cpu: 1\n memory: 2Gi \n","helm upgrade --install milvus-demo milvus/milvus -n milvus -f milvus.yaml\n"],"headingContent":"Configure S3 Access by IAM Role","anchorList":[{"label":"Configurar o acesso ao S3 por função do IAM","href":"Configure-S3-Access-by-IAM-Role","type":1,"isActive":false},{"label":"Antes de começar","href":"Before-you-start","type":2,"isActive":false},{"label":"Associar uma função do IAM a uma conta de serviço do Kubernetes","href":"Associate-an-IAM-role-with-a-Kubernetes-service-account","type":2,"isActive":false},{"label":"Verifique a configuração da função e da conta de serviço","href":"Verify-the-role-and-service-account-setup","type":2,"isActive":false},{"label":"Implantar o Milvus","href":"Deploy-Milvus","type":2,"isActive":false},{"label":"Verificar a instalação","href":"Verify-the-installation","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/adminGuide/clouds/aws/s3.md b/localization/v2.4.x/site/pt/adminGuide/clouds/aws/s3.md index 8c5ac04a7..aa283c1bf 100644 --- a/localization/v2.4.x/site/pt/adminGuide/clouds/aws/s3.md +++ b/localization/v2.4.x/site/pt/adminGuide/clouds/aws/s3.md @@ -1,6 +1,6 @@ --- id: s3.md -title: Configurar o acesso ao S3 por função de IAM +title: Configurar o acesso ao S3 por função do IAM related_key: 's3, storage, iam' summary: Saiba como configurar o s3 com a função IAM. --- @@ -73,13 +73,20 @@ aws s3api create-bucket --bucket "
  • Aplique o ficheiro:

    kubectl apply -f milvus-operator-certificate.yaml
    diff --git a/localization/v2.4.x/site/pt/adminGuide/configure-docker.json b/localization/v2.4.x/site/pt/adminGuide/configure-docker.json
    index 9589f6640..a7d8186b5 100644
    --- a/localization/v2.4.x/site/pt/adminGuide/configure-docker.json
    +++ b/localization/v2.4.x/site/pt/adminGuide/configure-docker.json
    @@ -1 +1 @@
    -{"codeList":["$ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.9/configs/milvus.yaml\n","# For Milvus standalone\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml\n","...\n  standalone:\n    container_name: milvus-standalone\n    image: milvusdb/milvus:v2.2.13\n    command: [\"milvus\", \"run\", \"standalone\"]\n    environment:\n      ETCD_ENDPOINTS: etcd:2379\n      MINIO_ADDRESS: minio:9000\n    volumes:\n      - /local/path/to/your/milvus.yaml:/milvus/configs/milvus.yaml   # Map the local path to the container path\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n    ports:\n      - \"19530:19530\"\n      - \"9091:9091\"\n    depends_on:\n      - \"etcd\"\n      - \"minio\"\n...\n","$ sudo docker compose up -d\n"],"headingContent":"","anchorList":[{"label":"Configurar o Milvus com o Docker Compose","href":"Configure-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"Descarregar um ficheiro de configuração","href":"Download-a-configuration-file","type":2,"isActive":false},{"label":"Modificar o ficheiro de configuração","href":"Modify-the-configuration-file","type":2,"isActive":false},{"label":"Descarregar um ficheiro de instalação","href":"Download-an-installation-file","type":2,"isActive":false},{"label":"Modificar o ficheiro de instalação","href":"Modify-the-installation-file","type":2,"isActive":false},{"label":"Iniciar o Milvus","href":"Start-Milvus","type":2,"isActive":false},{"label":"O que se segue","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.13-hotfix/configs/milvus.yaml\n","# For Milvus standalone\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml\n","...\n  standalone:\n    container_name: milvus-standalone\n    image: milvusdb/milvus:v2.2.13\n    command: [\"milvus\", \"run\", \"standalone\"]\n    environment:\n      ETCD_ENDPOINTS: etcd:2379\n      MINIO_ADDRESS: minio:9000\n    volumes:\n      - /local/path/to/your/milvus.yaml:/milvus/configs/milvus.yaml   # Map the local path to the container path\n      - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n    ports:\n      - \"19530:19530\"\n      - \"9091:9091\"\n    depends_on:\n      - \"etcd\"\n      - \"minio\"\n...\n","$ sudo docker compose up -d\n"],"headingContent":"Configure Milvus with Docker Compose","anchorList":[{"label":"Configurar o Milvus com o Docker Compose","href":"Configure-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"Descarregar um ficheiro de configuração","href":"Download-a-configuration-file","type":2,"isActive":false},{"label":"Modificar o ficheiro de configuração","href":"Modify-the-configuration-file","type":2,"isActive":false},{"label":"Descarregar um ficheiro de instalação","href":"Download-an-installation-file","type":2,"isActive":false},{"label":"Modificar o ficheiro de instalação","href":"Modify-the-installation-file","type":2,"isActive":false},{"label":"Iniciar o Milvus","href":"Start-Milvus","type":2,"isActive":false},{"label":"O que se segue","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/pt/adminGuide/configure-docker.md b/localization/v2.4.x/site/pt/adminGuide/configure-docker.md
    index 1f4947d85..6e5975bd6 100644
    --- a/localization/v2.4.x/site/pt/adminGuide/configure-docker.md
    +++ b/localization/v2.4.x/site/pt/adminGuide/configure-docker.md
    @@ -38,8 +38,8 @@ Na versão atual, todos os parâmetros entram em vigor somente após o reinício
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

    Faça o download de milvus.yaml diretamente ou com o seguinte comando.

    -
    $ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.9/configs/milvus.yaml
    +    

    Faça o download de milvus.yaml diretamente ou com o seguinte comando.

    +
    $ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.13-hotfix/configs/milvus.yaml
     

    Modificar o ficheiro de configuração

    Descarregue o ficheiro de instalação do Milvus standalone e guarde-o como docker-compose.yml.

    +

    Descarregue o ficheiro de instalação do Milvus standalone e guarde-o como docker-compose.yml.

    Também pode simplesmente executar o seguinte comando.

    # For Milvus standalone
    -$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml
    +$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml
     

    Modificar o ficheiro de instalação

    rootCoord:
    -    maxGeneralCapacity: 1024
    +    maxGeneralCapacity: 65536
     
    -

    O parâmetro maxGeneralCapacity define o número máximo de coleções que a instância atual do Milvus pode conter. O valor predefinido é 1024.

    +

    O parâmetro maxGeneralCapacity define o número máximo de coleções que a instância atual do Milvus pode conter. O valor predefinido é 65536.

    Cálculo do número de colecções
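O corpo deste cálculo está omitido neste hunk. A short sketch of the counting rule as stated in the upstream English guide, under the assumption that each collection contributes its shard count times its partition count toward maxGeneralCapacity (the numbers below are illustrative only):

def general_capacity_used(collections):
    # collections: iterable of (shards, partitions) pairs, one per collection
    return sum(shards * partitions for shards, partitions in collections)

# Example: 60 collections with 2 shards and 4 partitions each, plus
# 40 collections with 1 shard and 12 partitions each.
print(general_capacity_used([(2, 4)] * 60 + [(1, 12)] * 40))
# 960 -- well under the default maxGeneralCapacity of 65536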

    O exemplo seguinte demonstra como conceder a permissão de pesquisar todas as colecções à função denominada roleA.

    -

    O object_type especifica o tipo de objeto, que também pode ser entendido como o tipo de recurso. Atualmente, os valores válidos incluem Coleção/Utilizador/Global, etc., em que Global significa que não existe um tipo de recurso específico. O object_name é o nome do recurso. Se o tipo de objetofor Coleção, então o nome do objeto pode ser referido a um nome de coleção específico, ou pode utilizar * para especificar todas as colecções. Seotipo de objetofor Global, o nome do objeto só pode ser especificado como *. Consulte Utilizadores e funções para outros tipos de privilégios que pode conceder.

    +

O object_type especifica o tipo de objeto, que também pode ser entendido como o tipo de recurso. Atualmente, os valores válidos incluem Coleção/Utilizador/Global, etc., em que Global significa que não existe um tipo de recurso específico. O object_name é o nome do recurso. Se o tipo de objeto for Coleção, o nome do objeto pode referir-se a um nome de coleção específico, ou pode utilizar * para especificar todas as coleções. Se o tipo de objeto for Global, o nome do objeto só pode ser especificado como *. Consulte Utilizadores e funções para outros tipos de privilégios que pode conceder.

    Antes de gerir os privilégios de função, certifique-se de que activou a autenticação do utilizador. Caso contrário, poderá ocorrer um erro. Para obter informações sobre como ativar a autenticação do utilizador, consulte Autenticar o acesso do utilizador.

    # grant privilege to a role
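# The body of this snippet is elided by the diff (the hunk header below
# confirms the doc calls client.grant_privilege). A minimal sketch with
# the PyMilvus 2.4 MilvusClient API, matching the text above (grant the
# Search privilege on all collections to roleA); the connection URI and
# token are assumptions, not values from this page:
from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530", token="root:Milvus")
client.grant_privilege(
    role_name="roleA",
    object_type="Collection",  # Collection / User / Global, per the text above
    object_name="*",           # '*' targets all collections
    privilege="Search",
)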
     
    @@ -182,7 +182,7 @@ client.grant_privilege(
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

    Conceder a função a um utilizador para que este utilizador possa herdar todos os privilégios da função.

    +

    Conceder a função a um utilizador para que este possa herdar todos os privilégios da função.

    # grant a role to a user
     
     client.grant_role(
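# The call above is truncated by the diff; a minimal sketch of the full
# call with the PyMilvus 2.4 MilvusClient API. The user name is a
# placeholder; the role name follows the example above.
client.grant_role(
    user_name="user_1",
    role_name="roleA",
)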
    diff --git a/localization/v2.4.x/site/pt/adminGuide/resource_group.json b/localization/v2.4.x/site/pt/adminGuide/resource_group.json
    index e59af17ad..6f6374996 100644
    --- a/localization/v2.4.x/site/pt/adminGuide/resource_group.json
    +++ b/localization/v2.4.x/site/pt/adminGuide/resource_group.json
    @@ -1 +1 @@
    -{"codeList":["{\n    \"requests\": { \"nodeNum\": 1 },\n    \"limits\": { \"nodeNum\": 1 },\n    \"transfer_from\": [{ \"resource_group\": \"rg1\" }],\n    \"transfer_to\": [{ \"resource_group\": \"rg2\" }]\n}\n","import pymilvus\n\n# A resource group name should be a string of 1 to 255 characters, starting with a letter or an underscore (_) and containing only numbers, letters, and underscores (_).\nname = \"rg\"\nnode_num = 0\n\n# create a resource group that exactly hold no query node.\ntry:\n    utility.create_resource_group(name, config=utility.ResourceGroupConfig(\n        requests={\"node_num\": node_num},\n        limits={\"node_num\": node_num},\n    ), using='default')\n    print(f\"Succeeded in creating resource group {name}.\")\nexcept Exception:\n    print(\"Failed to create the resource group.\")\n","rgs = utility.list_resource_groups(using='default')\nprint(f\"Resource group list: {rgs}\")\n\n# Resource group list: ['__default_resource_group', 'rg']\n","info = utility.describe_resource_group(name, using=\"default\")\nprint(f\"Resource group description: {info}\")\n\n# Resource group description: \n#        ,           // string, rg name\n#        ,            // int, num_node which has been transfer to this rg\n#        ,  // int, available node_num, some node may shutdown\n#        , // map[string]int, from collection_name to loaded replica of each collecion in this rg\n#        ,  // map[string]int, from collection_name to outgoging accessed node num by replica loaded in this rg \n#        .  // map[string]int, from collection_name to incoming accessed node num by replica loaded in other rg\n","source = '__default_resource_group'\ntarget = 'rg'\nexpected_num_nodes_in_default = 0\nexpected_num_nodes_in_rg = 1\n\ntry:\n    utility.update_resource_groups({\n        source: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_default},\n            limits={\"node_num\": expected_num_nodes_in_default},\n        ),\n        target: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_rg},\n            limits={\"node_num\": expected_num_nodes_in_rg},\n        )\n    }, using=\"default\")\n    print(f\"Succeeded in move 1 node(s) from {source} to {target}.\")\nexcept Exception:\n    print(\"Something went wrong while moving nodes.\")\n\n# After a while, succeeded in moving 1 node(s) from __default_resource_group to rg.\n","from pymilvus import Collection\n\ncollection = Collection('demo')\n\n# Milvus loads the collection to the default resource group.\ncollection.load(replica_number=2)\n\n# Or, you can ask Milvus load the collection to the desired resource group.\n# make sure that query nodes num should be greater or equal to replica_number\nresource_groups = ['rg']\ncollection.load(replica_number=2, _resource_groups=resource_groups) \n","collection = Collection(\"Books\")\n\n# Use the load method of a collection to load one of its partition\ncollection.load([\"Novels\"], replica_number=2, _resource_groups=resource_groups)\n\n# Or, you can use the load method of a partition directly\npartition = Partition(collection, \"Novels\")\npartition.load(replica_number=2, _resource_groups=resource_groups)\n","source = '__default_resource_group'\ntarget = 'rg'\ncollection_name = 'c'\nnum_replicas = 1\n\ntry:\n    utility.transfer_replica(source, target, collection_name, num_replicas, using=\"default\")\n    print(f\"Succeeded in moving {num_node} replica(s) of {collection_name} from {source} to {target}.\")\nexcept Exception:\n    
print(\"Something went wrong while moving replicas.\")\n\n# Succeeded in moving 1 replica(s) of c from __default_resource_group to rg.\n","try:\n    utility.update_resource_groups({\n        \"rg\": utility.ResourceGroupConfig(\n            requests={\"node_num\": 0},\n            limits={\"node_num\": 0},\n        ),\n    }, using=\"default\")\n    utility.drop_resource_group(\"rg\", using=\"default\")\n    print(f\"Succeeded in dropping {source}.\")\nexcept Exception:\n    print(f\"Something went wrong while dropping {source}.\")\n","from pymilvus import utility\nfrom pymilvus.client.types import ResourceGroupConfig\n\n_PENDING_NODES_RESOURCE_GROUP=\"__pending_nodes\"\n\ndef init_cluster(node_num: int):\n    print(f\"Init cluster with {node_num} nodes, all nodes will be put in default resource group\")\n    # create a pending resource group, which can used to hold the pending nodes that do not hold any data.\n    utility.create_resource_group(name=_PENDING_NODES_RESOURCE_GROUP, config=ResourceGroupConfig(\n        requests={\"node_num\": 0}, # this resource group can hold 0 nodes, no data will be load on it.\n        limits={\"node_num\": 10000}, # this resource group can hold at most 10000 nodes \n    ))\n\n    # update default resource group, which can used to hold the nodes that all initial node in it.\n    utility.update_resource_groups({\n        \"__default_resource_group\": ResourceGroupConfig(\n            requests={\"node_num\": node_num},\n            limits={\"node_num\": node_num},\n            transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover missing node from pending resource group at high priority.\n            transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover redundant node to pending resource group at low priority.\n        )})\n    utility.create_resource_group(name=\"rg1\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n    utility.create_resource_group(name=\"rg2\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n\ninit_cluster(1)\n","\ndef scale_to(node_num: int):\n    # scale the querynode number in Milvus into node_num.\n    pass\n","# scale rg1 into 3 nodes, rg2 into 1 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 3},\n        limits={\"node_num\": 3},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n    \"rg2\": ResourceGroupConfig(\n        requests={\"node_num\": 1},\n        limits={\"node_num\": 1},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\nscale_to(5)\n# rg1 has 3 nodes, rg2 has 1 node, __default_resource_group has 1 node.\n","# scale rg1 from 3 nodes into 2 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 2},\n        limits={\"node_num\": 2},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    
    transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\n\n# rg1 has 2 nodes, rg2 has 1 node, __default_resource_group has 1 node, __pending_nodes has 1 node.\nscale_to(4)\n# scale the node in __pending_nodes\n"],"headingContent":"","anchorList":[{"label":"Gerenciar grupos de recursos","href":"Manage-Resource-Groups","type":1,"isActive":false},{"label":"O que é um grupo de recursos","href":"What-is-a-resource-group","type":2,"isActive":false},{"label":"Conceitos de grupo de recursos","href":"Concepts-of-resource-group","type":2,"isActive":false},{"label":"Usar a API declarativa para gerenciar o grupo de recursos","href":"Use-declarative-api-to-manage-resource-group","type":2,"isActive":false},{"label":"Uma boa prática para gerir o escalonamento do cluster","href":"A-good-practice-to-manage-cluster-scaling","type":2,"isActive":false},{"label":"Como os grupos de recursos interagem com várias réplicas","href":"How-resource-groups-interacts-with-multiple-replicas","type":2,"isActive":false},{"label":"O que vem a seguir","href":"Whats-next","type":1,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["{\n    \"requests\": { \"nodeNum\": 1 },\n    \"limits\": { \"nodeNum\": 1 },\n    \"transfer_from\": [{ \"resource_group\": \"rg1\" }],\n    \"transfer_to\": [{ \"resource_group\": \"rg2\" }]\n}\n","import pymilvus\n\n# A resource group name should be a string of 1 to 255 characters, starting with a letter or an underscore (_) and containing only numbers, letters, and underscores (_).\nname = \"rg\"\nnode_num = 0\n\n# create a resource group that exactly hold no query node.\ntry:\n    utility.create_resource_group(name, config=utility.ResourceGroupConfig(\n        requests={\"node_num\": node_num},\n        limits={\"node_num\": node_num},\n    ), using='default')\n    print(f\"Succeeded in creating resource group {name}.\")\nexcept Exception:\n    print(\"Failed to create the resource group.\")\n","rgs = utility.list_resource_groups(using='default')\nprint(f\"Resource group list: {rgs}\")\n\n# Resource group list: ['__default_resource_group', 'rg']\n","info = utility.describe_resource_group(name, using=\"default\")\nprint(f\"Resource group description: {info}\")\n\n# Resource group description: \n#        ,           // string, rg name\n#        ,            // int, num_node which has been transfer to this rg\n#        ,  // int, available node_num, some node may shutdown\n#        , // map[string]int, from collection_name to loaded replica of each collecion in this rg\n#        ,  // map[string]int, from collection_name to outgoging accessed node num by replica loaded in this rg \n#        .  // map[string]int, from collection_name to incoming accessed node num by replica loaded in other rg\n","source = '__default_resource_group'\ntarget = 'rg'\nexpected_num_nodes_in_default = 0\nexpected_num_nodes_in_rg = 1\n\ntry:\n    utility.update_resource_groups({\n        source: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_default},\n            limits={\"node_num\": expected_num_nodes_in_default},\n        ),\n        target: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_rg},\n            limits={\"node_num\": expected_num_nodes_in_rg},\n        )\n    }, using=\"default\")\n    print(f\"Succeeded in move 1 node(s) from {source} to {target}.\")\nexcept Exception:\n    print(\"Something went wrong while moving nodes.\")\n\n# After a while, succeeded in moving 1 node(s) from __default_resource_group to rg.\n","from pymilvus import Collection\n\ncollection = Collection('demo')\n\n# Milvus loads the collection to the default resource group.\ncollection.load(replica_number=2)\n\n# Or, you can ask Milvus load the collection to the desired resource group.\n# make sure that query nodes num should be greater or equal to replica_number\nresource_groups = ['rg']\ncollection.load(replica_number=2, _resource_groups=resource_groups) \n","collection = Collection(\"Books\")\n\n# Use the load method of a collection to load one of its partition\ncollection.load([\"Novels\"], replica_number=2, _resource_groups=resource_groups)\n\n# Or, you can use the load method of a partition directly\npartition = Partition(collection, \"Novels\")\npartition.load(replica_number=2, _resource_groups=resource_groups)\n","source = '__default_resource_group'\ntarget = 'rg'\ncollection_name = 'c'\nnum_replicas = 1\n\ntry:\n    utility.transfer_replica(source, target, collection_name, num_replicas, using=\"default\")\n    print(f\"Succeeded in moving {num_node} replica(s) of {collection_name} from {source} to {target}.\")\nexcept Exception:\n    
print(\"Something went wrong while moving replicas.\")\n\n# Succeeded in moving 1 replica(s) of c from __default_resource_group to rg.\n","try:\n    utility.update_resource_groups({\n        \"rg\": utility.ResourceGroupConfig(\n            requests={\"node_num\": 0},\n            limits={\"node_num\": 0},\n        ),\n    }, using=\"default\")\n    utility.drop_resource_group(\"rg\", using=\"default\")\n    print(f\"Succeeded in dropping {source}.\")\nexcept Exception:\n    print(f\"Something went wrong while dropping {source}.\")\n","from pymilvus import utility\nfrom pymilvus.client.types import ResourceGroupConfig\n\n_PENDING_NODES_RESOURCE_GROUP=\"__pending_nodes\"\n\ndef init_cluster(node_num: int):\n    print(f\"Init cluster with {node_num} nodes, all nodes will be put in default resource group\")\n    # create a pending resource group, which can used to hold the pending nodes that do not hold any data.\n    utility.create_resource_group(name=_PENDING_NODES_RESOURCE_GROUP, config=ResourceGroupConfig(\n        requests={\"node_num\": 0}, # this resource group can hold 0 nodes, no data will be load on it.\n        limits={\"node_num\": 10000}, # this resource group can hold at most 10000 nodes \n    ))\n\n    # update default resource group, which can used to hold the nodes that all initial node in it.\n    utility.update_resource_groups({\n        \"__default_resource_group\": ResourceGroupConfig(\n            requests={\"node_num\": node_num},\n            limits={\"node_num\": node_num},\n            transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover missing node from pending resource group at high priority.\n            transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover redundant node to pending resource group at low priority.\n        )})\n    utility.create_resource_group(name=\"rg1\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n    utility.create_resource_group(name=\"rg2\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n\ninit_cluster(1)\n","\ndef scale_to(node_num: int):\n    # scale the querynode number in Milvus into node_num.\n    pass\n","# scale rg1 into 3 nodes, rg2 into 1 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 3},\n        limits={\"node_num\": 3},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n    \"rg2\": ResourceGroupConfig(\n        requests={\"node_num\": 1},\n        limits={\"node_num\": 1},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\nscale_to(5)\n# rg1 has 3 nodes, rg2 has 1 node, __default_resource_group has 1 node.\n","# scale rg1 from 3 nodes into 2 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 2},\n        limits={\"node_num\": 2},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    
    transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\n\n# rg1 has 2 nodes, rg2 has 1 node, __default_resource_group has 1 node, __pending_nodes has 1 node.\nscale_to(4)\n# scale the node in __pending_nodes\n"],"headingContent":"Manage Resource Groups","anchorList":[{"label":"Gerenciar grupos de recursos","href":"Manage-Resource-Groups","type":1,"isActive":false},{"label":"O que é um grupo de recursos","href":"What-is-a-resource-group","type":2,"isActive":false},{"label":"Conceitos de grupo de recursos","href":"Concepts-of-resource-group","type":2,"isActive":false},{"label":"Usar a API declarativa para gerenciar o grupo de recursos","href":"Use-declarative-api-to-manage-resource-group","type":2,"isActive":false},{"label":"Uma boa prática para gerir o escalonamento do cluster","href":"A-good-practice-to-manage-cluster-scaling","type":2,"isActive":false},{"label":"Como os grupos de recursos interagem com várias réplicas","href":"How-resource-groups-interacts-with-multiple-replicas","type":2,"isActive":false},{"label":"O que vem a seguir","href":"Whats-next","type":1,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/pt/adminGuide/resource_group.md b/localization/v2.4.x/site/pt/adminGuide/resource_group.md
    index 629343b89..08c53360d 100644
    --- a/localization/v2.4.x/site/pt/adminGuide/resource_group.md
    +++ b/localization/v2.4.x/site/pt/adminGuide/resource_group.md
    @@ -2,7 +2,7 @@
     id: resource_group.md
     related_key: Manage Resource Groups
     summary: Saiba como gerir grupos de recursos.
    -title: Gerir grupos de recursos
    +title: Gerenciar grupos de recursos
     ---
     

    Gerenciar grupos de recursos

    -

    Todos os exemplos de código nesta página estão no PyMilvus 2.4.5. Atualize sua instalação do PyMilvus antes de executá-los.

    +

Todos os exemplos de código nesta página utilizam o PyMilvus 2.4.8. Atualize sua instalação do PyMilvus antes de executá-los.

    1. Criar um grupo de recursos.
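O código deste passo está omitido neste hunk; the matching snippet from this page's own code list (PyMilvus utility API) is:

from pymilvus import utility

# A resource group name should be a string of 1 to 255 characters, starting
# with a letter or an underscore (_) and containing only numbers, letters,
# and underscores (_).
name = "rg"
node_num = 0

# create a resource group that holds exactly no query node
try:
    utility.create_resource_group(name, config=utility.ResourceGroupConfig(
        requests={"node_num": node_num},
        limits={"node_num": node_num},
    ), using="default")
    print(f"Succeeded in creating resource group {name}.")
except Exception:
    print("Failed to create the resource group.")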

      diff --git a/localization/v2.4.x/site/pt/adminGuide/tls.json b/localization/v2.4.x/site/pt/adminGuide/tls.json index 46bf260a3..1d820eba8 100644 --- a/localization/v2.4.x/site/pt/adminGuide/tls.json +++ b/localization/v2.4.x/site/pt/adminGuide/tls.json @@ -1 +1 @@ -{"codeList":["openssl version\n","sudo apt install openssl\n","mkdir cert && cd cert\ntouch openssl.cnf gen.sh\n","#\n# OpenSSL example configuration file.\n# This is mostly being used for generation of certificate requests.\n#\n\n# This definition stops the following lines choking if HOME isn't\n# defined.\nHOME = .\nRANDFILE = $ENV::HOME/.rnd\n\n# Extra OBJECT IDENTIFIER info:\n#oid_file = $ENV::HOME/.oid\noid_section = new_oids\n\n# To use this configuration file with the \"-extfile\" option of the\n# \"openssl x509\" utility, name here the section containing the\n# X.509v3 extensions to use:\n# extensions = \n# (Alternatively, use a configuration file that has only\n# X.509v3 extensions in its main [= default] section.)\n\n[ new_oids ]\n\n# We can add new OIDs in here for use by 'ca', 'req' and 'ts'.\n# Add a simple OID like this:\n# testoid1=1.2.3.4\n# Or use config file substitution like this:\n# testoid2=${testoid1}.5.6\n\n# Policies used by the TSA examples.\ntsa_policy1 = 1.2.3.4.1\ntsa_policy2 = 1.2.3.4.5.6\ntsa_policy3 = 1.2.3.4.5.7\n\n####################################################################\n[ ca ]\ndefault_ca = CA_default # The default ca section\n\n####################################################################\n[ CA_default ]\n\ndir = ./demoCA # Where everything is kept\ncerts = $dir/certs # Where the issued certs are kept\ncrl_dir = $dir/crl # Where the issued crl are kept\ndatabase = $dir/index.txt # database index file.\n#unique_subject = no # Set to 'no' to allow creation of\n # several ctificates with same subject.\nnew_certs_dir = $dir/newcerts # default place for new certs.\n\ncertificate = $dir/cacert.pem # The CA certificate\nserial = $dir/serial # The current serial number\ncrlnumber = $dir/crlnumber # the current crl number\n # must be commented out to leave a V1 CRL\ncrl = $dir/crl.pem # The current CRL\nprivate_key = $dir/private/cakey.pem# The private key\nRANDFILE = $dir/private/.rand # private random number file\n\nx509_extensions = usr_cert # The extentions to add to the cert\n\n# Comment out the following two lines for the \"traditional\"\n# (and highly broken) format.\nname_opt = ca_default # Subject Name options\ncert_opt = ca_default # Certificate field options\n\n# Extension copying option: use with caution.\ncopy_extensions = copy\n\n# Extensions to add to a CRL. 
Note: Netscape communicator chokes on V2 CRLs\n# so this is commented out by default to leave a V1 CRL.\n# crlnumber must also be commented out to leave a V1 CRL.\n# crl_extensions = crl_ext\n\ndefault_days = 365 # how long to certify for\ndefault_crl_days= 30 # how long before next CRL\ndefault_md = default # use public key default MD\npreserve = no # keep passed DN ordering\n\n# A few difference way of specifying how similar the request should look\n# For type CA, the listed attributes must be the same, and the optional\n# and supplied fields are just that :-)\npolicy = policy_match\n\n# For the CA policy\n[ policy_match ]\ncountryName = match\nstateOrProvinceName = match\norganizationName = match\norganizationalUnitName = optional\ncommonName = supplied\nemailAddress = optional\n\n# For the 'anything' policy\n# At this point in time, you must list all acceptable 'object'\n# types.\n[ policy_anything ]\ncountryName = optional\nstateOrProvinceName = optional\nlocalityName = optional\norganizationName = optional\norganizationalUnitName = optional\ncommonName = supplied\nemailAddress = optional\n\n####################################################################\n[ req ]\ndefault_bits = 2048\ndefault_keyfile = privkey.pem\ndistinguished_name = req_distinguished_name\nattributes = req_attributes\nx509_extensions = v3_ca # The extentions to add to the self signed cert\n\n# Passwords for private keys if not present they will be prompted for\n# input_password = secret\n# output_password = secret\n\n# This sets a mask for permitted string types. There are several options. \n# default: PrintableString, T61String, BMPString.\n# pkix : PrintableString, BMPString (PKIX recommendation before 2004)\n# utf8only: only UTF8Strings (PKIX recommendation after 2004).\n# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).\n# MASK:XXXX a literal mask value.\n# WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings.\nstring_mask = utf8only\n\nreq_extensions = v3_req # The extensions to add to a certificate request\n\n[ req_distinguished_name ]\ncountryName = Country Name (2 letter code)\ncountryName_default = AU\ncountryName_min = 2\ncountryName_max = 2\n\nstateOrProvinceName = State or Province Name (full name)\nstateOrProvinceName_default = Some-State\n\nlocalityName = Locality Name (eg, city)\n\n0.organizationName = Organization Name (eg, company)\n0.organizationName_default = Internet Widgits Pty Ltd\n\n# we can do this but it is not needed normally :-)\n#1.organizationName = Second Organization Name (eg, company)\n#1.organizationName_default = World Wide Web Pty Ltd\n\norganizationalUnitName = Organizational Unit Name (eg, section)\n#organizationalUnitName_default =\n\ncommonName = Common Name (e.g. server FQDN or YOUR name)\ncommonName_max = 64\n\nemailAddress = Email Address\nemailAddress_max = 64\n\n# SET-ex3 = SET extension number 3\n\n[ req_attributes ]\nchallengePassword = A challenge password\nchallengePassword_min = 4\nchallengePassword_max = 20\n\nunstructuredName = An optional company name\n\n[ usr_cert ]\n\n# These extensions are added when 'ca' signs a request.\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. 
If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This is required for TSA certificates.\n# extendedKeyUsage = critical,timeStamping\n\n[ v3_req ]\n\n# Extensions to add to a certificate request\n\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n\n[ v3_ca ]\n\n\n# Extensions for a typical CA\n\n\n# PKIX recommendation.\n\nsubjectKeyIdentifier=hash\n\nauthorityKeyIdentifier=keyid:always,issuer\n\n# This is what PKIX recommends but some broken software chokes on critical\n# extensions.\n#basicConstraints = critical,CA:true\n# So we do this instead.\nbasicConstraints = CA:true\n\n# Key usage: this is typical for a CA certificate. However since it will\n# prevent it being used as an test self-signed certificate it is best\n# left out by default.\n# keyUsage = cRLSign, keyCertSign\n\n# Some might want this also\n# nsCertType = sslCA, emailCA\n\n# Include email address in subject alt name: another PKIX recommendation\n# subjectAltName=email:copy\n# Copy issuer details\n# issuerAltName=issuer:copy\n\n# DER hex encoding of an extension: beware experts only!\n# obj=DER:02:03\n# Where 'obj' is a standard or added object\n# You can even override a supported extension:\n# basicConstraints= critical, DER:30:03:01:01:FF\n\n[ crl_ext ]\n\n# CRL extensions.\n# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.\n\n# issuerAltName=issuer:copy\nauthorityKeyIdentifier=keyid:always\n\n[ proxy_cert_ext ]\n# These extensions should be added when creating a proxy certificate\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. 
If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This really needs to be in place for it to be a proxy certificate.\nproxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo\n\n####################################################################\n[ tsa ]\n\ndefault_tsa = tsa_config1 # the default TSA section\n\n[ tsa_config1 ]\n\n# These are used by the TSA reply generation only.\ndir = ./demoCA # TSA root directory\nserial = $dir/tsaserial # The current serial number (mandatory)\ncrypto_device = builtin # OpenSSL engine to use for signing\nsigner_cert = $dir/tsacert.pem # The TSA signing certificate\n # (optional)\ncerts = $dir/cacert.pem # Certificate chain to include in reply\n # (optional)\nsigner_key = $dir/private/tsakey.pem # The TSA private key (optional)\n\ndefault_policy = tsa_policy1 # Policy if request did not specify it\n # (optional)\nother_policies = tsa_policy2, tsa_policy3 # acceptable policies (optional)\ndigests = md5, sha1 # Acceptable message digests (mandatory)\naccuracy = secs:1, millisecs:500, microsecs:100 # (optional)\nclock_precision_digits = 0 # number of digits after dot. 
(optional)\nordering = yes # Is ordering defined for timestamps?\n # (optional, default: no)\ntsa_name = yes # Must the TSA name be included in the reply?\n # (optional, default: no)\ness_cert_id_chain = no # Must the ESS cert id chain be included?\n # (optional, default: no)\n","#!/usr/bin/env sh\n# your variables\nCountry=\"CN\"\nState=\"Shanghai\"\nLocation=\"Shanghai\"\nOrganization=\"milvus\"\nOrganizational=\"milvus\"\nCommonName=\"localhost\"\n\necho \"generate ca.key\"\nopenssl genrsa -out ca.key 2048\n\necho \"generate ca.pem\"\nopenssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n\necho \"generate server SAN certificate\"\nopenssl genpkey -algorithm RSA -out server.key\nopenssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\necho \"generate client SAN certificate\"\nopenssl genpkey -algorithm RSA -out client.key\nopenssl req -new -nodes -key client.key -out client.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in client.csr -out client.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\n","chmod +x gen.sh\n./gen.sh\n","openssl genpkey -algorithm RSA -out ca.key\n","openssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n","openssl genpkey -algorithm RSA -out server.key\n","openssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\n","openssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n","tls:\n serverPemPath: /milvus/tls/server.pem\n serverKeyPath: /milvus/tls/server.key\n caPemPath: /milvus/tls/ca.pem\n\ncommon:\n security:\n tlsMode: 1\n","├── docker-compose.yml\n├── milvus.yaml\n└── tls\n ├── server.pem\n ├── server.key\n └── ca.pem\n"," standalone:\n container_name: milvus-standalone\n image: milvusdb/milvus:latest\n command: [\"milvus\", \"run\", \"standalone\"]\n security_opt:\n - seccomp:unconfined\n environment:\n ETCD_ENDPOINTS: etcd:2379\n MINIO_ADDRESS: minio:9000\n volumes:\n - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n - ${DOCKER_VOLUME_DIRECTORY:-.}/tls:/milvus/tls\n - ${DOCKER_VOLUME_DIRECTORY:-.}/milvus.yaml:/milvus/configs/milvus.yaml\n","sudo docker compose up -d\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri=\"http://localhost:19530\",\n secure=True,\n server_pem_path=\"path_to/server.pem\",\n server_name=\"localhost\"\n)\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri=\"http://localhost:19530\",\n secure=True,\n client_pem_path=\"path_to/client.pem\",\n client_key_path=\"path_to/client.key\",\n ca_pem_path=\"path_to/ca.pem\",\n server_name=\"localhost\"\n)\n"],"headingContent":"","anchorList":[{"label":"Encriptação em trânsito","href":"Encryption-in-Transit","type":1,"isActive":false},{"label":"Criar seu próprio 
certificado","href":"Create-your-own-certificate","type":2,"isActive":false},{"label":"Configurar um servidor Milvus com TLS","href":"Set-up-a-Milvus-server-with-TLS","type":2,"isActive":false},{"label":"Conectar-se ao servidor Milvus com TLS","href":"Connect-to-the-Milvus-server-with-TLS","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["openssl version\n","sudo apt install openssl\n","mkdir cert && cd cert\ntouch openssl.cnf gen.sh\n","#\n# OpenSSL example configuration file.\n# This is mostly being used for generation of certificate requests.\n#\n\n# This definition stops the following lines choking if HOME isn't\n# defined.\nHOME = .\nRANDFILE = $ENV::HOME/.rnd\n\n# Extra OBJECT IDENTIFIER info:\n#oid_file = $ENV::HOME/.oid\noid_section = new_oids\n\n# To use this configuration file with the \"-extfile\" option of the\n# \"openssl x509\" utility, name here the section containing the\n# X.509v3 extensions to use:\n# extensions = \n# (Alternatively, use a configuration file that has only\n# X.509v3 extensions in its main [= default] section.)\n\n[ new_oids ]\n\n# We can add new OIDs in here for use by 'ca', 'req' and 'ts'.\n# Add a simple OID like this:\n# testoid1=1.2.3.4\n# Or use config file substitution like this:\n# testoid2=${testoid1}.5.6\n\n# Policies used by the TSA examples.\ntsa_policy1 = 1.2.3.4.1\ntsa_policy2 = 1.2.3.4.5.6\ntsa_policy3 = 1.2.3.4.5.7\n\n####################################################################\n[ ca ]\ndefault_ca = CA_default # The default ca section\n\n####################################################################\n[ CA_default ]\n\ndir = ./demoCA # Where everything is kept\ncerts = $dir/certs # Where the issued certs are kept\ncrl_dir = $dir/crl # Where the issued crl are kept\ndatabase = $dir/index.txt # database index file.\n#unique_subject = no # Set to 'no' to allow creation of\n # several ctificates with same subject.\nnew_certs_dir = $dir/newcerts # default place for new certs.\n\ncertificate = $dir/cacert.pem # The CA certificate\nserial = $dir/serial # The current serial number\ncrlnumber = $dir/crlnumber # the current crl number\n # must be commented out to leave a V1 CRL\ncrl = $dir/crl.pem # The current CRL\nprivate_key = $dir/private/cakey.pem# The private key\nRANDFILE = $dir/private/.rand # private random number file\n\nx509_extensions = usr_cert # The extentions to add to the cert\n\n# Comment out the following two lines for the \"traditional\"\n# (and highly broken) format.\nname_opt = ca_default # Subject Name options\ncert_opt = ca_default # Certificate field options\n\n# Extension copying option: use with caution.\ncopy_extensions = copy\n\n# Extensions to add to a CRL. 
Note: Netscape communicator chokes on V2 CRLs\n# so this is commented out by default to leave a V1 CRL.\n# crlnumber must also be commented out to leave a V1 CRL.\n# crl_extensions = crl_ext\n\ndefault_days = 365 # how long to certify for\ndefault_crl_days= 30 # how long before next CRL\ndefault_md = default # use public key default MD\npreserve = no # keep passed DN ordering\n\n# A few difference way of specifying how similar the request should look\n# For type CA, the listed attributes must be the same, and the optional\n# and supplied fields are just that :-)\npolicy = policy_match\n\n# For the CA policy\n[ policy_match ]\ncountryName = match\nstateOrProvinceName = match\norganizationName = match\norganizationalUnitName = optional\ncommonName = supplied\nemailAddress = optional\n\n# For the 'anything' policy\n# At this point in time, you must list all acceptable 'object'\n# types.\n[ policy_anything ]\ncountryName = optional\nstateOrProvinceName = optional\nlocalityName = optional\norganizationName = optional\norganizationalUnitName = optional\ncommonName = supplied\nemailAddress = optional\n\n####################################################################\n[ req ]\ndefault_bits = 2048\ndefault_keyfile = privkey.pem\ndistinguished_name = req_distinguished_name\nattributes = req_attributes\nx509_extensions = v3_ca # The extentions to add to the self signed cert\n\n# Passwords for private keys if not present they will be prompted for\n# input_password = secret\n# output_password = secret\n\n# This sets a mask for permitted string types. There are several options. \n# default: PrintableString, T61String, BMPString.\n# pkix : PrintableString, BMPString (PKIX recommendation before 2004)\n# utf8only: only UTF8Strings (PKIX recommendation after 2004).\n# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).\n# MASK:XXXX a literal mask value.\n# WARNING: ancient versions of Netscape crash on BMPStrings or UTF8Strings.\nstring_mask = utf8only\n\nreq_extensions = v3_req # The extensions to add to a certificate request\n\n[ req_distinguished_name ]\ncountryName = Country Name (2 letter code)\ncountryName_default = AU\ncountryName_min = 2\ncountryName_max = 2\n\nstateOrProvinceName = State or Province Name (full name)\nstateOrProvinceName_default = Some-State\n\nlocalityName = Locality Name (eg, city)\n\n0.organizationName = Organization Name (eg, company)\n0.organizationName_default = Internet Widgits Pty Ltd\n\n# we can do this but it is not needed normally :-)\n#1.organizationName = Second Organization Name (eg, company)\n#1.organizationName_default = World Wide Web Pty Ltd\n\norganizationalUnitName = Organizational Unit Name (eg, section)\n#organizationalUnitName_default =\n\ncommonName = Common Name (e.g. server FQDN or YOUR name)\ncommonName_max = 64\n\nemailAddress = Email Address\nemailAddress_max = 64\n\n# SET-ex3 = SET extension number 3\n\n[ req_attributes ]\nchallengePassword = A challenge password\nchallengePassword_min = 4\nchallengePassword_max = 20\n\nunstructuredName = An optional company name\n\n[ usr_cert ]\n\n# These extensions are added when 'ca' signs a request.\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. 
If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This is required for TSA certificates.\n# extendedKeyUsage = critical,timeStamping\n\n[ v3_req ]\n\n# Extensions to add to a certificate request\n\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n\n[ v3_ca ]\n\n\n# Extensions for a typical CA\n\n\n# PKIX recommendation.\n\nsubjectKeyIdentifier=hash\n\nauthorityKeyIdentifier=keyid:always,issuer\n\n# This is what PKIX recommends but some broken software chokes on critical\n# extensions.\n#basicConstraints = critical,CA:true\n# So we do this instead.\nbasicConstraints = CA:true\n\n# Key usage: this is typical for a CA certificate. However since it will\n# prevent it being used as an test self-signed certificate it is best\n# left out by default.\n# keyUsage = cRLSign, keyCertSign\n\n# Some might want this also\n# nsCertType = sslCA, emailCA\n\n# Include email address in subject alt name: another PKIX recommendation\n# subjectAltName=email:copy\n# Copy issuer details\n# issuerAltName=issuer:copy\n\n# DER hex encoding of an extension: beware experts only!\n# obj=DER:02:03\n# Where 'obj' is a standard or added object\n# You can even override a supported extension:\n# basicConstraints= critical, DER:30:03:01:01:FF\n\n[ crl_ext ]\n\n# CRL extensions.\n# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.\n\n# issuerAltName=issuer:copy\nauthorityKeyIdentifier=keyid:always\n\n[ proxy_cert_ext ]\n# These extensions should be added when creating a proxy certificate\n\n# This goes against PKIX guidelines but some CAs do it and some software\n# requires this to avoid interpreting an end user certificate as a CA.\n\nbasicConstraints=CA:FALSE\n\n# Here are some examples of the usage of nsCertType. 
If it is omitted\n# the certificate can be used for anything *except* object signing.\n\n# This is OK for an SSL server.\n# nsCertType = server\n\n# For an object signing certificate this would be used.\n# nsCertType = objsign\n\n# For normal client use this is typical\n# nsCertType = client, email\n\n# and for everything including object signing:\n# nsCertType = client, email, objsign\n\n# This is typical in keyUsage for a client certificate.\n# keyUsage = nonRepudiation, digitalSignature, keyEncipherment\n\n# This will be displayed in Netscape's comment listbox.\nnsComment = \"OpenSSL Generated Certificate\"\n\n# PKIX recommendations harmless if included in all certificates.\nsubjectKeyIdentifier=hash\nauthorityKeyIdentifier=keyid,issuer\n\n# This stuff is for subjectAltName and issuerAltname.\n# Import the email address.\n# subjectAltName=email:copy\n# An alternative to produce certificates that aren't\n# deprecated according to PKIX.\n# subjectAltName=email:move\n\n# Copy subject details\n# issuerAltName=issuer:copy\n\n#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem\n#nsBaseUrl\n#nsRevocationUrl\n#nsRenewalUrl\n#nsCaPolicyUrl\n#nsSslServerName\n\n# This really needs to be in place for it to be a proxy certificate.\nproxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo\n\n####################################################################\n[ tsa ]\n\ndefault_tsa = tsa_config1 # the default TSA section\n\n[ tsa_config1 ]\n\n# These are used by the TSA reply generation only.\ndir = ./demoCA # TSA root directory\nserial = $dir/tsaserial # The current serial number (mandatory)\ncrypto_device = builtin # OpenSSL engine to use for signing\nsigner_cert = $dir/tsacert.pem # The TSA signing certificate\n # (optional)\ncerts = $dir/cacert.pem # Certificate chain to include in reply\n # (optional)\nsigner_key = $dir/private/tsakey.pem # The TSA private key (optional)\n\ndefault_policy = tsa_policy1 # Policy if request did not specify it\n # (optional)\nother_policies = tsa_policy2, tsa_policy3 # acceptable policies (optional)\ndigests = md5, sha1 # Acceptable message digests (mandatory)\naccuracy = secs:1, millisecs:500, microsecs:100 # (optional)\nclock_precision_digits = 0 # number of digits after dot. 
(optional)\nordering = yes # Is ordering defined for timestamps?\n # (optional, default: no)\ntsa_name = yes # Must the TSA name be included in the reply?\n # (optional, default: no)\ness_cert_id_chain = no # Must the ESS cert id chain be included?\n # (optional, default: no)\n","#!/usr/bin/env sh\n# your variables\nCountry=\"CN\"\nState=\"Shanghai\"\nLocation=\"Shanghai\"\nOrganization=\"milvus\"\nOrganizational=\"milvus\"\nCommonName=\"localhost\"\n\necho \"generate ca.key\"\nopenssl genrsa -out ca.key 2048\n\necho \"generate ca.pem\"\nopenssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n\necho \"generate server SAN certificate\"\nopenssl genpkey -algorithm RSA -out server.key\nopenssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\necho \"generate client SAN certificate\"\nopenssl genpkey -algorithm RSA -out client.key\nopenssl req -new -nodes -key client.key -out client.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\nopenssl x509 -req -days 3650 -in client.csr -out client.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n\n","chmod +x gen.sh\n./gen.sh\n","openssl genpkey -algorithm RSA -out ca.key\n","openssl req -new -x509 -key ca.key -out ca.pem -days 3650 -subj \"/C=$Country/ST=$State/L=$Location/O=$Organization/OU=$Organizational/CN=$CommonName\"\n","openssl genpkey -algorithm RSA -out server.key\n","openssl req -new -nodes -key server.key -out server.csr -days 3650 -subj \"/C=$Country/O=$Organization/OU=$Organizational/CN=$CommonName\" -config ./openssl.cnf -extensions v3_req\n","openssl x509 -req -days 3650 -in server.csr -out server.pem -CA ca.pem -CAkey ca.key -CAcreateserial -extfile ./openssl.cnf -extensions v3_req\n","tls:\n serverPemPath: /milvus/tls/server.pem\n serverKeyPath: /milvus/tls/server.key\n caPemPath: /milvus/tls/ca.pem\n\ncommon:\n security:\n tlsMode: 1\n","├── docker-compose.yml\n├── milvus.yaml\n└── tls\n ├── server.pem\n ├── server.key\n └── ca.pem\n"," standalone:\n container_name: milvus-standalone\n image: milvusdb/milvus:latest\n command: [\"milvus\", \"run\", \"standalone\"]\n security_opt:\n - seccomp:unconfined\n environment:\n ETCD_ENDPOINTS: etcd:2379\n MINIO_ADDRESS: minio:9000\n volumes:\n - ${DOCKER_VOLUME_DIRECTORY:-.}/volumes/milvus:/var/lib/milvus\n - ${DOCKER_VOLUME_DIRECTORY:-.}/tls:/milvus/tls\n - ${DOCKER_VOLUME_DIRECTORY:-.}/milvus.yaml:/milvus/configs/milvus.yaml\n","sudo docker compose up -d\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri=\"https://localhost:19530\",\n secure=True,\n server_pem_path=\"path_to/server.pem\",\n server_name=\"localhost\"\n)\n","from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri=\"https://localhost:19530\",\n secure=True,\n client_pem_path=\"path_to/client.pem\",\n client_key_path=\"path_to/client.key\",\n ca_pem_path=\"path_to/ca.pem\",\n server_name=\"localhost\"\n)\n","curl --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list\n","curl --cert path_to/client.pem --key path_to/client.key --cacert path_to/ca.pem 
https://localhost:19530/v2/vectordb/collections/list\n"],"headingContent":"Encryption in Transit","anchorList":[{"label":"Encriptação em trânsito","href":"Encryption-in-Transit","type":1,"isActive":false},{"label":"Criar seu próprio certificado","href":"Create-your-own-certificate","type":2,"isActive":false},{"label":"Configurar um servidor Milvus com TLS","href":"Set-up-a-Milvus-server-with-TLS","type":2,"isActive":false},{"label":"Conectar-se ao servidor Milvus com TLS","href":"Connect-to-the-Milvus-server-with-TLS","type":2,"isActive":false},{"label":"Ligar ao servidor Milvus RESTful com TLS","href":"Connect-to-the-Milvus-RESTful-server-with-TLS","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/adminGuide/tls.md b/localization/v2.4.x/site/pt/adminGuide/tls.md index 06404e85c..aabbe2e0b 100644 --- a/localization/v2.4.x/site/pt/adminGuide/tls.md +++ b/localization/v2.4.x/site/pt/adminGuide/tls.md @@ -19,9 +19,9 @@ summary: Saiba como ativar o proxy TLS no Milvus. >

      O TLS (Transport Layer Security) é um protocolo de encriptação que garante a segurança das comunicações. O proxy Milvus utiliza a autenticação unidirecional e bidirecional TLS.

      -

      Este tópico descreve como ativar o proxy TLS no Milvus.

      +

Este tópico descreve como ativar o TLS no proxy Milvus para o tráfego gRPC e RESTful.

      -

      O TLS e a autenticação do utilizador são duas abordagens de segurança distintas. Se tiver ativado a autenticação do utilizador e o TLS no seu sistema Milvus, terá de fornecer um nome de utilizador, uma palavra-passe e caminhos de ficheiros de certificados. Para obter informações sobre como ativar a autenticação do utilizador, consulte Autenticar o acesso do utilizador.

      +

      O TLS e a autenticação do utilizador são duas abordagens de segurança distintas. Se tiver ativado a autenticação do utilizador e o TLS no seu sistema Milvus, terá de fornecer um nome de utilizador, uma palavra-passe e caminhos de ficheiros de certificados. Para obter informações sobre como ativar a autenticação do utilizador, consulte Autenticar o acesso do utilizador.
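A sketch of combining user authentication with one-way TLS in PyMilvus; the username, password, and certificate paths below are placeholders, and the TLS kwargs mirror the connection examples later on this page:

from pymilvus import MilvusClient

# One-way TLS plus user authentication; all values are placeholders.
client = MilvusClient(
    uri="https://localhost:19530",
    user="root",        # placeholder username
    password="Milvus",  # placeholder password
    secure=True,
    server_pem_path="path_to/server.pem",
    server_name="localhost",
)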

      Criar seu próprio certificado

    Consulte example_tls1.py e example_tls2.py para obter mais informações.

    +

    Ligar ao servidor Milvus RESTful com TLS

Para APIs RESTful, pode verificar o TLS utilizando o comando curl.

    +

    Conexão TLS unidirecional

    curl --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list
    +
    +

    Ligação TLS bidirecional

    curl --cert path_to/client.pem --key path_to/client.key --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list
    +
    diff --git a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-docker.json b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-docker.json index b705248f0..f972166b3 100644 --- a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-docker.json +++ b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-docker.json @@ -1 +1 @@ -{"codeList":["...\nrootcoord:\n container_name: milvus-rootcoord\n image: milvusdb/milvus:v2.4.9\n...\nproxy:\n container_name: milvus-proxy\n image: milvusdb/milvus:v2.4.9\n...\nquerycoord:\n container_name: milvus-querycoord\n image: milvusdb/milvus:v2.4.9 \n...\nquerynode:\n container_name: milvus-querynode\n image: milvusdb/milvus:v2.4.9\n...\nindexcoord:\n container_name: milvus-indexcoord\n image: milvusdb/milvus:v2.4.9\n...\nindexnode:\n container_name: milvus-indexnode\n image: milvusdb/milvus:v2.4.9 \n...\ndatacoord:\n container_name: milvus-datacoord\n image: milvusdb/milvus:v2.4.9 \n...\ndatanode:\n container_name: milvus-datanode\n image: milvusdb/milvus:v2.4.9\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n # Option: run/backup/rollback\n type: run\n runWithBackup: true\nconfig:\n sourceVersion: 2.1.4 # Specify your milvus version\n targetVersion: 2.4.9\n backupFilePath: /tmp/migration.bak\nmetastore:\n type: etcd\netcd:\n endpoints:\n - milvus-etcd:2379 # Use the etcd container name\n rootPath: by-dev # The root path where data is stored in etcd\n metaSubPath: meta\n kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvus/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","Update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"","anchorList":[{"label":"Atualizar o cluster do Milvus com o Docker Compose","href":"Upgrade-Milvus-Cluster-with-Docker-Compose","type":1,"isActive":false},{"label":"Atualizar o Milvus alterando a sua imagem","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrar os metadados","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"O que se segue","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["...\nrootcoord:\n container_name: milvus-rootcoord\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nproxy:\n container_name: milvus-proxy\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nquerycoord:\n container_name: milvus-querycoord\n image: milvusdb/milvus:v2.4.13-hotfix \n...\nquerynode:\n container_name: milvus-querynode\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nindexcoord:\n container_name: milvus-indexcoord\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nindexnode:\n container_name: milvus-indexnode\n image: milvusdb/milvus:v2.4.13-hotfix \n...\ndatacoord:\n container_name: milvus-datacoord\n image: milvusdb/milvus:v2.4.13-hotfix \n...\ndatanode:\n container_name: milvus-datanode\n image: milvusdb/milvus:v2.4.13-hotfix\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n # Option: run/backup/rollback\n type: run\n runWithBackup: true\nconfig:\n sourceVersion: 2.1.4 # Specify your milvus version\n targetVersion: 2.4.13-hotfix\n backupFilePath: /tmp/migration.bak\nmetastore:\n type: etcd\netcd:\n endpoints:\n - 
milvus-etcd:2379 # Use the etcd container name\n rootPath: by-dev # The root path where data is stored in etcd\n metaSubPath: meta\n kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvus/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","Update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"Upgrade Milvus Cluster with Docker Compose","anchorList":[{"label":"Atualizar o cluster do Milvus com o Docker Compose","href":"Upgrade-Milvus-Cluster-with-Docker-Compose","type":1,"isActive":false},{"label":"Atualizar o Milvus alterando a sua imagem","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrar os metadados","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"O que se segue","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-docker.md b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-docker.md index f5b834fab..91df4a50f 100644 --- a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-docker.md +++ b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-docker.md @@ -1,7 +1,7 @@ --- id: upgrade_milvus_cluster-docker.md summary: Saiba como atualizar o cluster Milvus com o Docker Compose. -title: Atualizar o Cluster Milvus com o Docker Compose +title: Atualizar o cluster do Milvus com o Docker Compose ---

Upgrade Milvus Cluster with Docker Compose

This topic describes how to upgrade your Milvus using Docker Compose.

-In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before any upgrade from v2.1.x to v2.4.9.

+In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before any upgrade from v2.1.x to v2.4.13-hotfix.

Upgrade Milvus by changing its image

In normal cases, you can upgrade Milvus as follows:

1. Change the Milvus image tags in docker-compose.yaml.

  Note that you need to change the image tags for the Proxy, all coordinators, and all worker nodes.

      ...
       rootcoord:
         container_name: milvus-rootcoord
      -  image: milvusdb/milvus:v2.4.9
      +  image: milvusdb/milvus:v2.4.13-hotfix
       ...
       proxy:
         container_name: milvus-proxy
      -  image: milvusdb/milvus:v2.4.9
      +  image: milvusdb/milvus:v2.4.13-hotfix
       ...
       querycoord:
         container_name: milvus-querycoord
      -  image: milvusdb/milvus:v2.4.9  
      +  image: milvusdb/milvus:v2.4.13-hotfix  
       ...
       querynode:
         container_name: milvus-querynode
      -  image: milvusdb/milvus:v2.4.9
      +  image: milvusdb/milvus:v2.4.13-hotfix
       ...
       indexcoord:
         container_name: milvus-indexcoord
      -  image: milvusdb/milvus:v2.4.9
      +  image: milvusdb/milvus:v2.4.13-hotfix
       ...
       indexnode:
         container_name: milvus-indexnode
      -  image: milvusdb/milvus:v2.4.9 
      +  image: milvusdb/milvus:v2.4.13-hotfix 
       ...
       datacoord:
         container_name: milvus-datacoord
      -  image: milvusdb/milvus:v2.4.9   
      +  image: milvusdb/milvus:v2.4.13-hotfix   
       ...
       datanode:
         container_name: milvus-datanode
      -  image: milvusdb/milvus:v2.4.9
      +  image: milvusdb/milvus:v2.4.13-hotfix
       
2. Run the following commands to perform the upgrade.

      docker compose down
      @@ -105,7 +105,7 @@ cmd:
         runWithBackup: true
       config:
         sourceVersion: 2.1.4   # Specify your milvus version
      -  targetVersion: 2.4.9
      +  targetVersion: 2.4.13-hotfix
         backupFilePath: /tmp/migration.bak
       metastore:
         type: etcd
      @@ -148,7 +148,7 @@ docker compose up -d
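
With migration.yaml updated, the metadata migration itself is run through the meta-migration container. The command below is reproduced from this page's code list; it assumes your Compose stack runs on the default milvus network and that migration.yaml sits next to docker-compose.yaml:

  # Suppose your docker-compose run with the default milvus network,
  # and you put migration.yaml in the same directory with docker-compose.yaml.
  docker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvus/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml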
       
3. If you are ready to deploy your cluster on clouds:
    4. diff --git a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-helm.json b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-helm.json index b9157d283..dc68df51a 100644 --- a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-helm.json +++ b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-helm.json @@ -1 +1 @@ -{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME CHART VERSION APP VERSION DESCRIPTION \nzilliztech/milvus 4.1.34 2.4.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.33 2.4.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.32 2.4.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.31 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.30 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.29 2.4.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.24 2.3.11 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.23 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.22 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.21 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.20 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.18 2.3.10 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.18 2.3.9 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.17 2.3.8 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.16 2.3.7 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.15 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.14 2.3.6 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.13 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.12 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.11 2.3.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.10 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.9 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.8 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.7 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.6 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.5 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.4 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.3 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.2 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.1 2.3.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.0 2.3.0 Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'\n","helm repo update\nhelm upgrade my-release zilliztech/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION \nnew-release default 1 
2022-11-21 15:41:25.51539 +0800 CST deployed milvus-3.2.18 2.1.4 \n","NAME READY STATUS RESTARTS AGE\nmy-release-etcd-0 1/1 Running 0 21m\nmy-release-etcd-1 1/1 Running 0 21m\nmy-release-etcd-2 1/1 Running 0 21m\nmy-release-milvus-datacoord-664c58798d-fl75s 1/1 Running 0 21m\nmy-release-milvus-datanode-5f75686c55-xfg2r 1/1 Running 0 21m\nmy-release-milvus-indexcoord-5f98b97589-2l48r 1/1 Running 0 21m\nmy-release-milvus-indexnode-857b4ddf98-vmd75 1/1 Running 0 21m\nmy-release-milvus-proxy-6c548f787f-scspp 1/1 Running 0 21m\nmy-release-milvus-querycoord-c454f44cd-dwmwq 1/1 Running 0 21m\nmy-release-milvus-querynode-76bb4946d-lbrz6 1/1 Running 0 21m\nmy-release-milvus-rootcoord-7764c5b686-62msm 1/1 Running 0 21m\nmy-release-minio-0 1/1 Running 0 21m\nmy-release-minio-1 1/1 Running 0 21m\nmy-release-minio-2 1/1 Running 0 21m\nmy-release-minio-3 1/1 Running 0 21m\nmy-release-pulsar-bookie-0 1/1 Running 0 21m\nmy-release-pulsar-bookie-1 1/1 Running 0 21m\nmy-release-pulsar-bookie-2 1/1 Running 0 21m\nmy-release-pulsar-bookie-init-tjxpj 0/1 Completed 0 21m\nmy-release-pulsar-broker-0 1/1 Running 0 21m\nmy-release-pulsar-proxy-0 1/1 Running 0 21m\nmy-release-pulsar-pulsar-init-c8vvc 0/1 Completed 0 21m\nmy-release-pulsar-recovery-0 1/1 Running 0 21m\nmy-release-pulsar-zookeeper-0 1/1 Running 0 21m\nmy-release-pulsar-zookeeper-1 1/1 Running 0 20m\nmy-release-pulsar-zookeeper-2 1/1 Running 0 20m\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9\n"],"headingContent":"","anchorList":[{"label":"Atualizar o Cluster Milvus com o Gráfico Helm","href":"Upgrade-Milvus-Cluster-with-Helm-Chart","type":1,"isActive":false},{"label":"Verificar o gráfico do Milvus Helm","href":"Check-Milvus-Helm-Chart","type":2,"isActive":false},{"label":"Realizar uma atualização contínua","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Atualizar o Milvus usando o Helm","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"Migrar os metadados","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME CHART VERSION APP VERSION DESCRIPTION \nzilliztech/milvus 4.1.34 2.4.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.33 2.4.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.32 2.4.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.31 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.30 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.29 2.4.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.24 2.3.11 
Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.23 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.22 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.21 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.20 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.18 2.3.10 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.18 2.3.9 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.17 2.3.8 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.16 2.3.7 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.15 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.14 2.3.6 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.13 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.12 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.11 2.3.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.10 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.9 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.8 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.7 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.6 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.5 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.4 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.3 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.2 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.1 2.3.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.0 2.3.0 Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'\n","helm repo update\nhelm upgrade my-release zilliztech/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION \nnew-release default 1 2022-11-21 15:41:25.51539 +0800 CST deployed milvus-3.2.18 2.1.4 \n","NAME READY STATUS RESTARTS AGE\nmy-release-etcd-0 1/1 Running 0 21m\nmy-release-etcd-1 1/1 Running 0 21m\nmy-release-etcd-2 1/1 Running 0 21m\nmy-release-milvus-datacoord-664c58798d-fl75s 1/1 Running 0 21m\nmy-release-milvus-datanode-5f75686c55-xfg2r 1/1 Running 0 21m\nmy-release-milvus-indexcoord-5f98b97589-2l48r 1/1 Running 0 21m\nmy-release-milvus-indexnode-857b4ddf98-vmd75 1/1 Running 0 21m\nmy-release-milvus-proxy-6c548f787f-scspp 1/1 Running 0 21m\nmy-release-milvus-querycoord-c454f44cd-dwmwq 1/1 Running 0 21m\nmy-release-milvus-querynode-76bb4946d-lbrz6 1/1 Running 0 21m\nmy-release-milvus-rootcoord-7764c5b686-62msm 1/1 Running 0 21m\nmy-release-minio-0 1/1 Running 0 21m\nmy-release-minio-1 1/1 Running 0 21m\nmy-release-minio-2 1/1 Running 0 21m\nmy-release-minio-3 1/1 Running 0 21m\nmy-release-pulsar-bookie-0 1/1 Running 0 21m\nmy-release-pulsar-bookie-1 1/1 Running 0 21m\nmy-release-pulsar-bookie-2 1/1 Running 0 21m\nmy-release-pulsar-bookie-init-tjxpj 0/1 Completed 0 21m\nmy-release-pulsar-broker-0 1/1 Running 0 21m\nmy-release-pulsar-proxy-0 1/1 Running 0 
21m\nmy-release-pulsar-pulsar-init-c8vvc 0/1 Completed 0 21m\nmy-release-pulsar-recovery-0 1/1 Running 0 21m\nmy-release-pulsar-zookeeper-0 1/1 Running 0 21m\nmy-release-pulsar-zookeeper-1 1/1 Running 0 20m\nmy-release-pulsar-zookeeper-2 1/1 Running 0 20m\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix\n"],"headingContent":"Upgrade Milvus Cluster with Helm Chart","anchorList":[{"label":"Atualizar o Cluster Milvus com o Gráfico Helm","href":"Upgrade-Milvus-Cluster-with-Helm-Chart","type":1,"isActive":false},{"label":"Verificar o gráfico do Milvus Helm","href":"Check-Milvus-Helm-Chart","type":2,"isActive":false},{"label":"Realizar uma atualização contínua","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Atualizar o Milvus usando o Helm","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"Migrar os metadados","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-helm.md b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-helm.md index 8493d7660..c41dd7018 100644 --- a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-helm.md +++ b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-helm.md @@ -5,7 +5,7 @@ order: 1 group: upgrade_milvus_cluster-operator.md related_key: upgrade Milvus Cluster summary: Saiba como atualizar o cluster Milvus com o Helm Chart. -title: Atualizar o Milvus Cluster com a carta do leme +title: Atualizar o Cluster Milvus com o Gráfico Helm ---

Upgrade Milvus Cluster with Helm Chart

You can choose the upgrade path for your Milvus as follows:

-- [Conduct a rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.9.
+- [Conduct a rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.13-hotfix.
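
Before picking a path, you can check which chart versions are available in the zilliztech repository; the commands below are taken from this page's code list:

  $ helm repo update
  $ helm search repo zilliztech/milvus --versions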

Conduct a rolling upgrade

      @@ -231,29 +231,29 @@ my-release-pulsar-zookeeper-2

1. Migrate the metadata

  1. Download the migration script.
  2. Stop the Milvus components. Any live session in the Milvus etcd can cause a migration failure.
  3. Create a backup of the Milvus metadata.
  4. Migrate the Milvus metadata.
  5. Start the Milvus components with a new image.
-2. Upgrade Milvus from v2.1.x to 2.4.9

-   The following commands assume that you upgrade Milvus from v2.1.4 to 2.4.9. Change them to the versions that fit your needs.

+2. Upgrade Milvus from v2.1.x to 2.4.13-hotfix

+   The following commands assume that you upgrade Milvus from v2.1.4 to 2.4.13-hotfix. Change them to the versions that fit your needs.

1. Specify the Milvus instance name, the source Milvus version, and the target Milvus version.

  -./migrate.sh -i my-release -s 2.1.4 -t 2.4.9
  +./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix
         
2. Specify the namespace with -n if your Milvus is not installed in the default K8s namespace.

  -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9
  +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix
         
3. Specify the root path with -r if your Milvus is installed with a custom rootpath.

  -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev
  +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev
         
4. Specify the image tag with -w if your Milvus is installed with a custom image.

  -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9
  +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix
         
5. Set -d true if you want to automatically remove the migration pod after the migration is completed.

  -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true
  +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true
         
6. Roll back and migrate again if the migration fails.

  -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
  -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9
  +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
  +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix
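
After the migration finishes and the components come back up, you can confirm that a pod now runs the target image in the same way this guide checks the running version earlier; the pod name below is an example, so substitute one from your own kubectl get pods output:

  $ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'
  # After a successful migration this should print milvusdb/milvus:v2.4.13-hotfix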
         
      diff --git a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-operator.json b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-operator.json index 873ea2301..9173a5905 100644 --- a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-operator.json +++ b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-operator.json @@ -1 +1 @@ -{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.9\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.9\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.9\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://github.com/zilliztech/milvus-operator/blob/main/config/samples/beta/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"","anchorList":[{"label":"Atualização do Cluster Milvus com o Milvus Operator","href":"Upgrade-Milvus-Cluster-with-Milvus-Operator","type":1,"isActive":false},{"label":"Atualize seu operador Milvus","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"Conduzir uma atualização contínua","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Atualizar o Milvus alterando sua imagem","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrar os metadados","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n","kubectl apply -f 
milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.13-hotfix\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.13-hotfix\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.13-hotfix\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://github.com/zilliztech/milvus-operator/blob/main/config/samples/beta/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"Upgrade Milvus Cluster with Milvus Operator","anchorList":[{"label":"Atualização do Cluster Milvus com o Milvus Operator","href":"Upgrade-Milvus-Cluster-with-Milvus-Operator","type":1,"isActive":false},{"label":"Atualize seu operador Milvus","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"Realizar uma atualização contínua","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Atualizar o Milvus alterando sua imagem","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrar os metadados","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-operator.md b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-operator.md index e39a09786..30f9da1af 100644 --- a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-operator.md +++ b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_cluster-operator.md @@ -5,7 +5,7 @@ order: 0 group: upgrade_milvus_cluster-operator.md related_key: upgrade Milvus Cluster summary: Saiba como atualizar o cluster Milvus com o Milvus Operator. -title: Atualizar o Cluster Milvus com o Milvus Operator +title: Atualização do Cluster Milvus com o Milvus Operator ---

Upgrade Milvus Cluster with Milvus Operator

Once you have upgraded your Milvus Operator to the latest version, you have the following choices:

Conduct a rolling upgrade

Since Milvus 2.2.3, you can configure Milvus coordinators to work in active-standby mode and enable the rolling upgrade feature for them, so that Milvus can respond to incoming requests during the coordinator upgrades. In previous releases, coordinators are to be removed and then created during an upgrade, which may introduce certain downtime of the service.

Based on the rolling update capabilities provided by Kubernetes, the Milvus operator enforces an ordered update of the deployments according to their dependencies. In addition, Milvus implements a mechanism to ensure that its components remain compatible with those depending on them during the upgrade, significantly reducing the potential service downtime.

The rolling upgrade feature is disabled by default. You need to enable it explicitly through a configuration file.

    apiVersion: milvus.io/v1beta1
     kind: Milvus
    @@ -76,7 +76,7 @@ spec:
       components:
         enableRollingUpdate: true
         imageUpdateMode: rollingUpgrade # Default value, can be omitted
    -    image: milvusdb/milvus:v2.4.9
    +    image: milvusdb/milvus:v2.4.13-hotfix
     

In the configuration file above, set spec.components.enableRollingUpdate to true and set spec.components.image to the desired Milvus version.

By default, Milvus conducts a rolling upgrade for the coordinators in an ordered way, replacing the coordinator pod images one after another. To reduce the upgrade time, consider setting spec.components.imageUpdateMode to all so that Milvus replaces all pod images at the same time.

@@ -88,7 +88,7 @@ spec:
   components:
     enableRollingUpdate: true
     imageUpdateMode: all
-    image: milvusdb/milvus:v2.4.9
+    image: milvusdb/milvus:v2.4.13-hotfix
• You can set spec.components.imageUpdateMode to rollingDowngrade to have Milvus replace the coordinator pod images with a lower version.

    apiVersion: milvus.io/v1beta1
    @@ -119,7 +119,7 @@ spec:
In normal cases, you can simply upgrade your Milvus to the latest version by changing its image. However, note that there will be a certain downtime when upgrading Milvus in this way.

Create a configuration file as follows and save it as milvusupgrade.yaml:

    apiVersion: milvus.io/v1beta1
     kind: Milvus
    @@ -128,7 +128,7 @@ metadata:
     spec:
       # Omit other fields ...
       components:
    -   image: milvusdb/milvus:v2.4.9
    +   image: milvusdb/milvus:v2.4.13-hotfix
     

Then run the following to perform the upgrade:

    kubectl apply -f milvusupgrade.yaml
    @@ -148,8 +148,8 @@ spec:
-Since Milvus 2.2.0, the metadata are incompatible with those in previous releases. The following examples assume an upgrade from Milvus 2.1.4 to Milvus 2.4.9.
+Since Milvus 2.2.0, the metadata are incompatible with those in previous releases. The following examples assume an upgrade from Milvus 2.1.4 to Milvus 2.4.13-hotfix.

1. Create a .yaml file for metadata migration

-Create a metadata migration file. The following is an example. You need to specify name, sourceVersion, and targetVersion in the configuration file. The following example sets name to my-release-upgrade, sourceVersion to v2.1.4, and targetVersion to v2.4.9. This means that your Milvus cluster will be upgraded from v2.1.4 to v2.4.9.
+Create a metadata migration file. The following is an example. You need to specify name, sourceVersion, and targetVersion in the configuration file. The following example sets name to my-release-upgrade, sourceVersion to v2.1.4, and targetVersion to v2.4.13-hotfix. This means that your Milvus cluster will be upgraded from v2.1.4 to v2.4.13-hotfix.

    apiVersion: milvus.io/v1beta1
     kind: MilvusUpgrade
     metadata:
    @@ -159,9 +159,9 @@ spec:
         namespace: default
         name: my-release
       sourceVersion: "v2.1.4"
    -  targetVersion: "v2.4.9"
    +  targetVersion: "v2.4.13-hotfix"
       # below are some omit default values:
    -  # targetImage: "milvusdb/milvus:v2.4.9"
    +  # targetImage: "milvusdb/milvus:v2.4.13-hotfix"
       # toolImage: "milvusdb/meta-migration:v2.2.0"
       # operation: upgrade
       # rollbackIfFailed: true
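
After saving the configuration above (the file name my-release-upgrade.yaml below is an assumption; use whatever name you saved it under), apply it and watch the progress with the kubectl commands that already appear in this page's code list:

  # Apply the MilvusUpgrade resource (the file name is an assumption)
  $ kubectl apply -f my-release-upgrade.yaml

  # Check the upgrade progress on the Milvus custom resource
  $ kubectl describe milvus my-release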
    diff --git a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-docker.json b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-docker.json
    index beb3d48b9..a8e06ab99 100644
    --- a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-docker.json
    +++ b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-docker.json
    @@ -1 +1 @@
    -{"codeList":["...\nstandalone:\n  container_name: milvus-standalone\n  image: milvusdb/milvus:v2.4.9\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n  # Option: run/backup/rollback\n  type: run\n  runWithBackup: true\nconfig:\n  sourceVersion: 2.1.4   # Specify your milvus version\n  targetVersion: 2.4.9\n  backupFilePath: /tmp/migration.bak\nmetastore:\n  type: etcd\netcd:\n  endpoints:\n    - milvus-etcd:2379  # Use the etcd container name\n  rootPath: by-dev # The root path where data is stored in etcd\n  metaSubPath: meta\n  kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvusdb/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","// Run the following only after update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"","anchorList":[{"label":"Atualizar o Milvus Standalone com o Docker Compose","href":"Upgrade-Milvus-Standalone-with-Docker-Compose","type":1,"isActive":false},{"label":"Atualizar o Milvus alterando a sua imagem","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrar os metadados","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"O que se segue","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["...\nstandalone:\n  container_name: milvus-standalone\n  image: milvusdb/milvus:v2.4.13-hotfix\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n  # Option: run/backup/rollback\n  type: run\n  runWithBackup: true\nconfig:\n  sourceVersion: 2.1.4   # Specify your milvus version\n  targetVersion: 2.4.13-hotfix\n  backupFilePath: /tmp/migration.bak\nmetastore:\n  type: etcd\netcd:\n  endpoints:\n    - milvus-etcd:2379  # Use the etcd container name\n  rootPath: by-dev # The root path where data is stored in etcd\n  metaSubPath: meta\n  kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvusdb/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","// Run the following only after update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"Upgrade Milvus Standalone with Docker Compose","anchorList":[{"label":"Atualizar o Milvus Standalone com o Docker Compose","href":"Upgrade-Milvus-Standalone-with-Docker-Compose","type":1,"isActive":false},{"label":"Atualizar o Milvus alterando a sua imagem","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrar os metadados","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"O que se segue","href":"Whats-next","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-docker.md b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-docker.md
    index b0f20fa99..bb7e2b18a 100644
    --- a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-docker.md
    +++ b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-docker.md
    @@ -24,9 +24,9 @@ title: Atualizar o Milvus Standalone com o Docker Compose

This topic describes how to upgrade your Milvus using Docker Compose.

-In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before any upgrade from v2.1.x to v2.4.9.

+In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before any upgrade from v2.1.x to v2.4.13-hotfix.

For security concerns, Milvus upgrades its MinIO to RELEASE.2023-03-20T20-16-18Z with the release of v2.2.5. Before any upgrades from previous Milvus Standalone releases installed using Docker Compose, you should create a Single-Node Single-Drive MinIO deployment and migrate the existing MinIO settings and content to the new deployment. For details, refer to this guide.

Upgrade Milvus by changing its image

• Run the following commands to perform the upgrade.

    docker compose down
    @@ -83,7 +83,7 @@ cmd:
       runWithBackup: true
     config:
       sourceVersion: 2.1.4   # Specify your milvus version
    -  targetVersion: 2.4.9
    +  targetVersion: 2.4.13-hotfix
       backupFilePath: /tmp/migration.bak
     metastore:
       type: etcd
    @@ -126,7 +126,7 @@ docker compose up -d
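
As in the cluster guide, the migration is performed by the meta-migration container; the command below is reproduced verbatim from this page's code list:

  # Suppose your docker-compose run with the default milvus network,
  # and you put migration.yaml in the same directory with docker-compose.yaml.
  docker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvusdb/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml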
     
• If you are ready to deploy your cluster on clouds:
  • diff --git a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-helm.json b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-helm.json index d3629739c..3b06bc6da 100644 --- a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-helm.json +++ b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-helm.json @@ -1 +1 @@ -{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME CHART VERSION APP VERSION DESCRIPTION \nzilliztech/milvus 4.1.34 2.4.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.33 2.4.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.32 2.4.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.31 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.30 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.29 2.4.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.24 2.3.11 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.23 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.22 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.21 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.20 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.18 2.3.10 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.18 2.3.9 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.17 2.3.8 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.16 2.3.7 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.15 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.14 2.3.6 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.13 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.12 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.11 2.3.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.10 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.9 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.8 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.7 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.6 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.5 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.4 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.3 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.2 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.1 2.3.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.0 2.3.0 Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'\n","helm repo update\nhelm upgrade my-release milvus/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION \nmy-release default 1 
2022-11-21 15:41:25.51539 +0800 CST deployed milvus-3.2.18 2.1.4\n","NAME READY STATUS RESTARTS AGE\nmy-release-etcd-0 1/1 Running 0 84s\nmy-release-milvus-standalone-75c599fffc-6rwlj 1/1 Running 0 84s\nmy-release-minio-744dd9586f-qngzv 1/1 Running 0 84s\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9\n"],"headingContent":"","anchorList":[{"label":"Atualizar o Milvus Standalone com o Helm Chart","href":"Upgrade-Milvus-Standalone-with-Helm-Chart","type":1,"isActive":false},{"label":"Verificar a versão do Milvus","href":"Check-the-Milvus-version","type":2,"isActive":false},{"label":"Realizar uma atualização contínua","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Atualizar o Milvus usando o Helm","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"Migrar os metadados","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME CHART VERSION APP VERSION DESCRIPTION \nzilliztech/milvus 4.1.34 2.4.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.33 2.4.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.32 2.4.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.31 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.30 2.4.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.29 2.4.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.24 2.3.11 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.23 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.22 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.21 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.20 2.3.10 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.18 2.3.10 Milvus is an open-source vector database built ... \nzilliztech/milvus 4.1.18 2.3.9 Milvus is an open-source vector database built ... 
\nzilliztech/milvus 4.1.17 2.3.8 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.16 2.3.7 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.15 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.14 2.3.6 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.13 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.12 2.3.5 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.11 2.3.4 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.10 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.9 2.3.3 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.8 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.7 2.3.2 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.6 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.5 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.4 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.3 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.2 2.3.1 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.1 2.3.0 Milvus is an open-source vector database built ...\nzilliztech/milvus 4.1.0 2.3.0 Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'\n","helm repo update\nhelm upgrade my-release milvus/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION \nmy-release default 1 2022-11-21 15:41:25.51539 +0800 CST deployed milvus-3.2.18 2.1.4\n","NAME READY STATUS RESTARTS AGE\nmy-release-etcd-0 1/1 Running 0 84s\nmy-release-milvus-standalone-75c599fffc-6rwlj 1/1 Running 0 84s\nmy-release-minio-744dd9586f-qngzv 1/1 Running 0 84s\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix\n"],"headingContent":"Upgrade Milvus Standalone with Helm Chart","anchorList":[{"label":"Atualizar o Milvus Standalone com o Helm Chart","href":"Upgrade-Milvus-Standalone-with-Helm-Chart","type":1,"isActive":false},{"label":"Verificar a versão do Milvus","href":"Check-the-Milvus-version","type":2,"isActive":false},{"label":"Realizar uma atualização contínua","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Atualizar o Milvus usando o Helm","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"Migrar os metadados","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git 
a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-helm.md b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-helm.md index dbafccfe2..f9905f7ed 100644 --- a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-helm.md +++ b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-helm.md @@ -5,7 +5,7 @@ order: 1 group: upgrade_milvus_standalone-operator.md related_key: upgrade Milvus Standalone summary: Saiba como atualizar o Milvus standalone com o Helm Chart. -title: Atualizar Milvus Standalone com Helm Chart +title: Atualizar o Milvus Standalone com o Helm Chart ---

Upgrade Milvus Standalone with Helm Chart

The Milvus Helm Charts repo at https://milvus-io.github.io/milvus-helm/ has been archived and you can get further updates from https://zilliztech.github.io/milvus-helm/ as follows:

    helm repo add zilliztech https://zilliztech.github.io/milvus-helm
     helm repo update
     # upgrade existing helm release
    @@ -86,10 +86,10 @@ zilliztech/milvus       4.1.1           2.3.0                   Milvus is an ope
     zilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...
     

You can choose the upgrade path for your Milvus as follows:

-- [Conduct a rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.9.
+- [Conduct a rolling upgrade](#conduct-a-rolling-upgrade) from Milvus v2.2.3 and later releases to v2.4.13-hotfix.

Conduct a rolling upgrade

      @@ -213,25 +213,25 @@ my-release-minio-744dd9586f-qngzv 1/1 Running 0 84s
1. Migrate the Milvus metadata.
2. Start the Milvus components with a new image.
-2. Upgrade Milvus from v2.1.x to 2.4.9

-   The following commands assume that you upgrade Milvus from v2.1.4 to 2.4.9. Change them to the versions that fit your needs.

+2. Upgrade Milvus from v2.1.x to 2.4.13-hotfix

+   The following commands assume that you upgrade Milvus from v2.1.4 to 2.4.13-hotfix. Change them to the versions that fit your needs.

1. Specify the Milvus instance name, the source Milvus version, and the target Milvus version.

  -./migrate.sh -i my-release -s 2.1.4 -t 2.4.9
  +./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix
       
2. Specify the namespace with -n if your Milvus is not installed in the default K8s namespace.

  -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9
  +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix
       
3. Specify the root path with -r if your Milvus is installed with a custom rootpath.

  -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev
  +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev
       
4. Specify the image tag with -w if your Milvus is installed with a custom image.

  -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9
  +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix
       
5. Set -d true if you want to automatically remove the migration pod after the migration is completed.

  -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true
  +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true
       
6. Roll back and migrate again if the migration fails.

  -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
  -./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9
  +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
  +./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix
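
Once the migration completes and the pods are back up, a quick sanity check using the commands already shown in this page's code list (the pod name is the sample one from this guide's output; substitute your own):

  $ kubectl get pods
  $ kubectl get pods my-release-milvus-standalone-75c599fffc-6rwlj -o=jsonpath='{$.spec.containers[0].image}'
  # After a successful migration this should print milvusdb/milvus:v2.4.13-hotfix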
       
    diff --git a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-operator.json b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-operator.json index 6abfdd6f5..deb979615 100644 --- a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-operator.json +++ b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-operator.json @@ -1 +1 @@ -{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.9\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nlabels:\n app: milvus\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.9\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.9\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.9\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://raw.githubusercontent.com/zilliztech/milvus-operator/main/config/samples/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"","anchorList":[{"label":"Atualizar o Milvus Standalone com o Milvus Operator","href":"Upgrade-Milvus-Standalone-with-Milvus-Operator","type":1,"isActive":false},{"label":"Atualizar o seu Milvus Operator","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"Conduzir uma atualização contínua","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Atualizar o Milvus alterando sua imagem","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrar os metadados","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: 
milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nlabels:\n app: milvus\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.13-hotfix\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.13-hotfix\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.13-hotfix\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://raw.githubusercontent.com/zilliztech/milvus-operator/main/config/samples/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"Upgrade Milvus Standalone with Milvus Operator","anchorList":[{"label":"Atualizar o Milvus Standalone com o Milvus Operator","href":"Upgrade-Milvus-Standalone-with-Milvus-Operator","type":1,"isActive":false},{"label":"Atualizar o seu Milvus Operator","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"Realizar uma atualização contínua","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"Atualizar o Milvus alterando sua imagem","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"Migrar os metadados","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-operator.md b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-operator.md index ef4dadb89..ac3f648ac 100644 --- a/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-operator.md +++ b/localization/v2.4.x/site/pt/adminGuide/upgrade_milvus_standalone-operator.md @@ -5,7 +5,7 @@ order: 0 group: upgrade_milvus_standalone-operator.md related_key: upgrade Milvus Standalone summary: Saiba como atualizar o Milvus standalone com o Milvus operator. -title: Atualizar Milvus Standalone com Milvus Operator +title: Atualizar o Milvus Standalone com o Milvus Operator ---

Upgrade Milvus Standalone with Milvus Operator

After you have upgraded your Milvus operator to the latest version, you have the following options:

Conduct a rolling upgrade

Since Milvus 2.2.3, you can configure Milvus coordinators to work in active-standby mode and enable the rolling upgrade feature for them, so that Milvus can respond to incoming requests during the coordinator upgrades. In previous releases, coordinators had to be removed and then re-created during an upgrade, which could cause certain downtime of the service.

Based on the rolling update capabilities provided by Kubernetes, the Milvus operator enforces an ordered update of the deployments according to their dependencies. In addition, Milvus implements a mechanism to ensure that its components remain compatible with those that depend on them during the upgrade, significantly reducing potential service downtime.

The rolling upgrade feature is disabled by default. You need to enable it explicitly through a configuration file.

apiVersion: milvus.io/v1beta1
kind: Milvus
metadata:
  name: my-release
spec:
  components:
    enableRollingUpdate: true
    imageUpdateMode: rollingUpgrade # Default value, can be omitted
    image: milvusdb/milvus:v2.4.13-hotfix


In the configuration file above, set spec.components.enableRollingUpdate to true and set spec.components.image to the desired Milvus version.

By default, Milvus performs a rolling upgrade for coordinators in an ordered way, replacing the coordinator pod images one after another. To reduce the upgrade time, consider setting spec.components.imageUpdateMode to all so that Milvus replaces all pod images at the same time.

apiVersion: milvus.io/v1beta1
kind: Milvus
metadata:
  name: my-release
spec:
  components:
    enableRollingUpdate: true
    imageUpdateMode: all
    image: milvusdb/milvus:v2.4.13-hotfix

You can set spec.components.imageUpdateMode to rollingDowngrade so that Milvus replaces the coordinator pod images with a lower version.

apiVersion: milvus.io/v1beta1
kind: Milvus
metadata:
  name: my-release
spec:
  components:
    enableRollingUpdate: true
    imageUpdateMode: rollingDowngrade
    image: milvusdb/milvus:

Then save your configuration as milvusupgrade.yml and apply it to your Milvus instance:

kubectl apply -f milvusupgrade.yml

Upgrade Milvus by changing its image

In normal cases, you can simply update your Milvus to the latest version by changing its image. However, note that there will be some downtime when upgrading Milvus this way.

Create a configuration file as follows and save it as milvusupgrade.yaml:

apiVersion: milvus.io/v1beta1
kind: Milvus
metadata:
  name: my-release
labels:
  app: milvus
spec:
  # Omit other fields ...
  components:
    image: milvusdb/milvus:v2.4.13-hotfix


Then run the following to perform the upgrade:

    kubectl apply -f milvusupgrade.yaml
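
After applying, you can watch the pods roll over and confirm the new image. A minimal sketch (the release name my-release and the default namespace are assumptions):

# Watch the pods being replaced one by one
kubectl get pods -w

# Inspect the Milvus custom resource for the current status and image
kubectl describe milvus my-release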
Migrate the metadata

Since Milvus 2.2.0, the metadata is incompatible with that in previous releases. The following examples assume an upgrade from Milvus 2.1.4 to Milvus v2.4.13-hotfix.

1. Create a .yaml file for metadata migration

Create a metadata migration file. The following is an example. You need to specify name, sourceVersion, and targetVersion in the configuration file. The following example sets name to my-release-upgrade, sourceVersion to v2.1.4, and targetVersion to v2.4.13-hotfix. This means that your Milvus instance will be upgraded from v2.1.4 to v2.4.13-hotfix.

apiVersion: milvus.io/v1beta1
kind: MilvusUpgrade
metadata:
  name: my-release-upgrade
spec:
  milvus:
    namespace: default
    name: my-release
  sourceVersion: "v2.1.4"
  targetVersion: "v2.4.13-hotfix"
  # below are some omitted default values:
  # targetImage: "milvusdb/milvus:v2.4.13-hotfix"
  # toolImage: "milvusdb/meta-migration:v2.2.0"
  # operation: upgrade
  # rollbackIfFailed: true
  # backupPVC: ""
  # maxRetry: 3
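
2. Apply the new configuration

Run the following command to apply the new configuration:

$ kubectl apply -f https://raw.githubusercontent.com/zilliztech/milvus-operator/main/config/samples/milvusupgrade.yaml

3. Check the status of the metadata migration

Run the following command to check the status of your metadata migration:

kubectl describe milvus release-name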
    diff --git a/localization/v2.4.x/site/pt/embeddings/embed-with-cohere.json b/localization/v2.4.x/site/pt/embeddings/embed-with-cohere.json
    index 40401b602..3ce35a476 100644
    --- a/localization/v2.4.x/site/pt/embeddings/embed-with-cohere.json
    +++ b/localization/v2.4.x/site/pt/embeddings/embed-with-cohere.json
    @@ -1 +1 @@
    -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","cohere_ef = CohereEmbeddingFunction(\n    model_name=\"embed-english-light-v3.0\",\n    api_key=\"YOUR_COHERE_API_KEY\",\n    input_type=\"search_document\",\n    embedding_types=[\"float\"]\n)\n","docs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = cohere_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", cohere_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 3.43322754e-02,  1.16252899e-03, -5.25207520e-02,  1.32846832e-03,\n       -6.80541992e-02,  6.10961914e-02, -7.06176758e-02,  1.48925781e-01,\n        1.54174805e-01,  1.98516846e-02,  2.43835449e-02,  3.55224609e-02,\n        1.82952881e-02,  7.57446289e-02, -2.40783691e-02,  4.40063477e-02,\n...\n        0.06359863, -0.01971436, -0.02253723,  0.00354195,  0.00222015,\n        0.00184727,  0.03408813, -0.00777817,  0.04919434,  0.01519775,\n       -0.02862549,  0.04760742, -0.07891846,  0.0124054 ], dtype=float32)]\nDim: 384 (384,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = cohere_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", cohere_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-1.33361816e-02,  9.79423523e-04, -7.28759766e-02, -1.93786621e-02,\n       -9.71679688e-02,  4.34875488e-02, -9.81445312e-02,  1.16882324e-01,\n        5.89904785e-02, -4.19921875e-02,  4.95910645e-02,  5.83496094e-02,\n        3.47595215e-02, -5.87463379e-03, -7.30514526e-03,  2.92816162e-02,\n...\n        0.00749969, -0.01192474,  0.02719116,  0.03347778,  0.07696533,\n        0.01409149,  0.00964355, -0.01681519, -0.0073204 ,  0.00043154,\n       -0.04577637,  0.03591919, -0.02807617, -0.04812622], dtype=float32)]\nDim 384 (384,)\n"],"headingContent":"","anchorList":[{"label":"Cohere","href":"Cohere","type":1,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import CohereEmbeddingFunction\n\ncohere_ef = CohereEmbeddingFunction(\n    model_name=\"embed-english-light-v3.0\",\n    api_key=\"YOUR_COHERE_API_KEY\",\n    input_type=\"search_document\",\n    embedding_types=[\"float\"]\n)\n","docs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = cohere_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", cohere_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 3.43322754e-02,  1.16252899e-03, -5.25207520e-02,  1.32846832e-03,\n       -6.80541992e-02,  6.10961914e-02, -7.06176758e-02,  1.48925781e-01,\n        1.54174805e-01,  1.98516846e-02,  2.43835449e-02,  3.55224609e-02,\n        1.82952881e-02,  7.57446289e-02, -2.40783691e-02,  4.40063477e-02,\n...\n        0.06359863, -0.01971436, -0.02253723,  0.00354195,  0.00222015,\n        0.00184727,  0.03408813, -0.00777817,  0.04919434,  0.01519775,\n       -0.02862549,  0.04760742, -0.07891846,  0.0124054 ], dtype=float32)]\nDim: 384 (384,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = cohere_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", cohere_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-1.33361816e-02,  9.79423523e-04, -7.28759766e-02, -1.93786621e-02,\n       -9.71679688e-02,  4.34875488e-02, -9.81445312e-02,  1.16882324e-01,\n        5.89904785e-02, -4.19921875e-02,  4.95910645e-02,  5.83496094e-02,\n        3.47595215e-02, -5.87463379e-03, -7.30514526e-03,  2.92816162e-02,\n...\n        0.00749969, -0.01192474,  0.02719116,  0.03347778,  0.07696533,\n        0.01409149,  0.00964355, -0.01681519, -0.0073204 ,  0.00043154,\n       -0.04577637,  0.03591919, -0.02807617, -0.04812622], dtype=float32)]\nDim 384 (384,)\n"],"headingContent":"Cohere","anchorList":[{"label":"Cohere","href":"Cohere","type":1,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/pt/embeddings/embed-with-cohere.md b/localization/v2.4.x/site/pt/embeddings/embed-with-cohere.md
    index ce090f320..7cfdf97e5 100644
    --- a/localization/v2.4.x/site/pt/embeddings/embed-with-cohere.md
    +++ b/localization/v2.4.x/site/pt/embeddings/embed-with-cohere.md
    @@ -28,7 +28,9 @@ title: Incorporar o Cohere
     pip install "pymilvus[model]"
     

Then, instantiate the CohereEmbeddingFunction class:

from pymilvus.model.dense import CohereEmbeddingFunction

cohere_ef = CohereEmbeddingFunction(
    model_name="embed-english-light-v3.0",
    api_key="YOUR_COHERE_API_KEY",
    input_type="search_document",
    embedding_types=["float"]
)
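
To create embeddings for documents, use the encode_documents() method:

docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Alan Turing was the first person to conduct substantial research in AI.",
    "Born in Maida Vale, London, Turing was raised in southern England.",
]

docs_embeddings = cohere_ef.encode_documents(docs)

# Print embeddings
print("Embeddings:", docs_embeddings)
# Print dimension and shape of embeddings
print("Dim:", cohere_ef.dim, docs_embeddings[0].shape)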
    diff --git a/localization/v2.4.x/site/pt/embeddings/embed-with-jina.json b/localization/v2.4.x/site/pt/embeddings/embed-with-jina.json
    index 3ba04b590..4cee744ff 100644
    --- a/localization/v2.4.x/site/pt/embeddings/embed-with-jina.json
    +++ b/localization/v2.4.x/site/pt/embeddings/embed-with-jina.json
    @@ -1 +1 @@
    -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n    model_name=\"jina-embeddings-v2-base-en\", # Defaults to `jina-embeddings-v2-base-en`\n    api_key=JINAAI_API_KEY # Provide your Jina AI API key\n)\n","docs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = jina_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", jina_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([-4.88487840e-01, -4.28095880e-01,  4.90086500e-01, -1.63274320e-01,\n        3.43437800e-01,  3.21476880e-01,  2.83173790e-02, -3.10403670e-01,\n        4.76985040e-01, -1.77410420e-01, -3.84803180e-01, -2.19224200e-01,\n       -2.52898000e-01,  6.62411900e-02, -8.58173100e-01,  1.05221800e+00,\n...\n       -2.04462400e-01,  7.14229800e-01, -1.66823000e-01,  8.72551440e-01,\n        5.53560140e-01,  8.92506300e-01, -2.39408610e-01, -4.22413560e-01,\n       -3.19551350e-01,  5.59153850e-01,  2.44338100e-01, -8.60452100e-01])]\nDim: 768 (768,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = jina_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", jina_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-5.99164660e-01, -3.49827350e-01,  8.22405160e-01, -1.18632730e-01,\n        5.78107540e-01,  1.09789170e-01,  2.91604200e-01, -3.29306450e-01,\n        2.93779640e-01, -2.17880800e-01, -6.84535440e-01, -3.79752000e-01,\n       -3.47541800e-01,  9.20846100e-02, -6.13804400e-01,  6.31312800e-01,\n...\n       -1.84993740e-02,  9.38629150e-01,  2.74858470e-02,  1.09396360e+00,\n        3.96270750e-01,  7.44445800e-01, -1.95404050e-01, -6.08383200e-01,\n       -3.75076300e-01,  3.87512200e-01,  8.11889650e-01, -3.76407620e-01])]\nDim 768 (768,)\n"],"headingContent":"","anchorList":[{"label":"Jina AI","href":"Jina-AI","type":1,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n    model_name=\"jina-embeddings-v3\", # Defaults to `jina-embeddings-v3`\n    api_key=JINAAI_API_KEY, # Provide your Jina AI API key\n    task=\"retrieval.passage\", # Specify the task\n    dimensions=1024, # Defaults to 1024\n)\n","\n```python\ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = jina_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", jina_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([9.80641991e-02, -8.51697400e-02,  7.36531913e-02,  1.42558888e-02,\n       -2.23589484e-02,  1.68494112e-03, -3.50753777e-02, -3.11530549e-02,\n       -3.26012149e-02,  5.04568312e-03,  3.69836427e-02,  3.48948985e-02,\n        8.19722563e-03,  5.88679723e-02, -6.71099266e-03, -1.82369724e-02,\n...\n        2.48654783e-02,  3.43279652e-02, -1.66154150e-02, -9.90478322e-03,\n       -2.96043139e-03, -8.57473817e-03, -7.39028037e-04,  6.25024503e-03,\n       -1.08831357e-02, -4.00776342e-02,  3.25369164e-02, -1.42691191e-03])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = jina_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", jina_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([8.79201014e-03,  1.47551354e-02,  4.02722731e-02, -2.52991207e-02,\n        1.12719582e-02,  3.75947170e-02,  3.97946090e-02, -7.36681819e-02,\n       -2.17952449e-02, -1.16298944e-02, -6.83426252e-03, -5.12507409e-02,\n        5.26071340e-02,  6.75181448e-02,  3.92445624e-02, -1.40817231e-02,\n...\n        8.81703943e-03,  4.24629413e-02, -2.32944116e-02, -2.05193572e-02,\n       -3.22035812e-02,  2.81896023e-03,  3.85326855e-02,  3.64372656e-02,\n       -1.65050142e-02, -4.26847413e-02,  2.02664156e-02, -1.72684863e-02])]\nDim 1024 (1024,)\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n    model_name=\"jina-embeddings-v3\", # Defaults to `jina-embeddings-v3`\n    api_key=JINA_API_KEY, # Provide your Jina AI API key\n    task=\"text-matching\",\n    dimensions=1024, # Defaults to 1024\n)\n\ntexts = [\n    \"Follow the white rabbit.\",  # English\n    \"Sigue al conejo blanco.\",  # Spanish\n    \"Suis le lapin blanc.\",  # French\n    \"跟着白兔走。\",  # Chinese\n    \"اتبع الأرنب الأبيض.\",  # Arabic\n    \"Folge dem weißen Kaninchen.\",  # German\n]\n\nembeddings = jina_ef(texts)\n\n# Compute similarities\nprint(embeddings[0] @ embeddings[1].T)\n"],"headingContent":"Jina AI","anchorList":[{"label":"Jina AI","href":"Jina-AI","type":1,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/pt/embeddings/embed-with-jina.md b/localization/v2.4.x/site/pt/embeddings/embed-with-jina.md
    index 0b36b1705..39786ac97 100644
    --- a/localization/v2.4.x/site/pt/embeddings/embed-with-jina.md
    +++ b/localization/v2.4.x/site/pt/embeddings/embed-with-jina.md
    @@ -31,19 +31,36 @@ pip install "pymilvus[model]"
     
    from pymilvus.model.dense import JinaEmbeddingFunction
     
jina_ef = JinaEmbeddingFunction(
    model_name="jina-embeddings-v3", # Defaults to `jina-embeddings-v3`
    api_key=JINAAI_API_KEY, # Provide your Jina AI API key
    task="retrieval.passage", # Specify the task
    dimensions=1024, # Defaults to 1024
)
     

Parameters:

• model_name (string)

  The name of the Jina AI embedding model to use for encoding. You can specify any of the available Jina AI embedding model names, for example, jina-embeddings-v3, jina-embeddings-v2-base-en, etc. If you leave this parameter unspecified, jina-embeddings-v3 will be used. For a list of available models, refer to Jina Embeddings.

    • api_key (string)

The API key for accessing the Jina AI API.

• task (string)

  The type of input passed to the model. Required for embedding models v3 and higher.

  • "retrieval.passage": Used to encode large documents in retrieval tasks at indexing time.
  • "retrieval.query": Used to encode user queries or questions in retrieval tasks.
  • "classification": Used to encode text for text classification tasks.
  • "text-matching": Used to encode text for similarity matching, such as measuring the similarity between two sentences.
  • "clustering": Used for clustering or reranking tasks.
• dimensions (int)

  The number of dimensions the resulting output embeddings should have. Defaults to 1024. Only supported for embedding models v3 and higher.
• late_chunking (bool)

  This parameter controls whether to use the new chunking method Jina AI recently introduced for encoding a batch of sentences. Defaults to False. When set to True, the Jina AI API concatenates all sentences in the input field and feeds them as a single string to the model. Internally, the model embeds this long concatenated string and then performs late chunking, returning a list of embeddings that matches the size of the input list.
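
For instance, a minimal sketch of enabling it (reusing the instantiation above; late_chunking is the only change):

jina_ef = JinaEmbeddingFunction(
    model_name="jina-embeddings-v3",
    api_key=JINAAI_API_KEY,
    task="retrieval.passage",
    late_chunking=True,  # the batch is concatenated and chunked late inside the model
)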

To create embeddings for documents, use the encode_documents() method. This method is designed for document embedding in asymmetric retrieval tasks, such as indexing documents for search or recommendation tasks. It uses retrieval.passage as the task.

docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Alan Turing was the first person to conduct substantial research in AI.",
    "Born in Maida Vale, London, Turing was raised in southern England.",
]

docs_embeddings = jina_ef.encode_documents(docs)

# Print embeddings
print("Embeddings:", docs_embeddings)
# Print dimension and shape of embeddings
print("Dim:", jina_ef.dim, docs_embeddings[0].shape)
     

The expected output is similar to the following:

Embeddings: [array([9.80641991e-02, -8.51697400e-02,  7.36531913e-02,  1.42558888e-02,
       -2.23589484e-02,  1.68494112e-03, -3.50753777e-02, -3.11530549e-02,
       -3.26012149e-02,  5.04568312e-03,  3.69836427e-02,  3.48948985e-02,
        8.19722563e-03,  5.88679723e-02, -6.71099266e-03, -1.82369724e-02,
...
        2.48654783e-02,  3.43279652e-02, -1.66154150e-02, -9.90478322e-03,
       -2.96043139e-03, -8.57473817e-03, -7.39028037e-04,  6.25024503e-03,
       -1.08831357e-02, -4.00776342e-02,  3.25369164e-02, -1.42691191e-03])]
Dim: 1024 (1024,)
     
To create embeddings for queries, use the encode_queries() method. This method is designed for query embedding in asymmetric retrieval tasks, such as search queries or questions. It uses retrieval.query as the task.

queries = ["When was artificial intelligence founded", 
           "Where was Alan Turing born?"]

query_embeddings = jina_ef.encode_queries(queries)

print("Embeddings:", query_embeddings)
print("Dim", jina_ef.dim, query_embeddings[0].shape)
The expected output is similar to the following:

Embeddings: [array([8.79201014e-03,  1.47551354e-02,  4.02722731e-02, -2.52991207e-02,
        1.12719582e-02,  3.75947170e-02,  3.97946090e-02, -7.36681819e-02,
       -2.17952449e-02, -1.16298944e-02, -6.83426252e-03, -5.12507409e-02,
        5.26071340e-02,  6.75181448e-02,  3.92445624e-02, -1.40817231e-02,
...
        8.81703943e-03,  4.24629413e-02, -2.32944116e-02, -2.05193572e-02,
       -3.22035812e-02,  2.81896023e-03,  3.85326855e-02,  3.64372656e-02,
       -1.65050142e-02, -4.26847413e-02,  2.02664156e-02, -1.72684863e-02])]
Dim 1024 (1024,)

To create embeddings of inputs for similarity matching (such as STS or symmetric retrieval tasks), text classification, clustering, or reranking tasks, use the appropriate task parameter value when instantiating the JinaEmbeddingFunction class.

from pymilvus.model.dense import JinaEmbeddingFunction

jina_ef = JinaEmbeddingFunction(
    model_name="jina-embeddings-v3", # Defaults to `jina-embeddings-v3`
    api_key=JINA_API_KEY, # Provide your Jina AI API key
    task="text-matching",
    dimensions=1024, # Defaults to 1024
)

texts = [
    "Follow the white rabbit.",  # English
    "Sigue al conejo blanco.",  # Spanish
    "Suis le lapin blanc.",  # French
    "跟着白兔走。",  # Chinese
    "اتبع الأرنب الأبيض.",  # Arabic
    "Folge dem weißen Kaninchen.",  # German
]

embeddings = jina_ef(texts)

# Compute similarities
print(embeddings[0] @ embeddings[1].T)
     
    diff --git a/localization/v2.4.x/site/pt/embeddings/embed-with-voyage.json b/localization/v2.4.x/site/pt/embeddings/embed-with-voyage.json index c5fe18415..ba7547304 100644 --- a/localization/v2.4.x/site/pt/embeddings/embed-with-voyage.json +++ b/localization/v2.4.x/site/pt/embeddings/embed-with-voyage.json @@ -1 +1 @@ -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import VoyageEmbeddingFunction\n\nvoyage_ef = VoyageEmbeddingFunction(\n model_name=\"voyage-lite-02-instruct\", # Defaults to `voyage-2`\n api_key=VOYAGE_API_KEY # Provide your Voyage API key\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = voyage_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", voyage_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 0.02582654, -0.00907086, -0.04604037, ..., -0.01227521,\n 0.04420955, -0.00038829]), array([ 0.03844212, -0.01597065, -0.03728884, ..., -0.02118733,\n 0.03349845, 0.0065346 ]), array([ 0.05143557, -0.01096631, -0.02690451, ..., -0.02416254,\n 0.07658645, 0.03064499])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = voyage_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", voyage_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([ 0.01733501, -0.0230672 , -0.05208827, ..., -0.00957995,\n 0.04493361, 0.01485138]), array([ 0.05937521, -0.00729363, -0.02184347, ..., -0.02107683,\n 0.05706626, 0.0263358 ])]\nDim 1024 (1024,)\n"],"headingContent":"","anchorList":[{"label":"Voyage","href":"Voyage","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import VoyageEmbeddingFunction\n\nvoyage_ef = VoyageEmbeddingFunction(\n model_name=\"voyage-3\", # Defaults to `voyage-3`\n api_key=VOYAGE_API_KEY # Provide your Voyage API key\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = voyage_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", voyage_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 0.02582654, -0.00907086, -0.04604037, ..., -0.01227521,\n 0.04420955, -0.00038829]), array([ 0.03844212, -0.01597065, -0.03728884, ..., -0.02118733,\n 0.03349845, 0.0065346 ]), array([ 0.05143557, -0.01096631, -0.02690451, ..., -0.02416254,\n 0.07658645, 0.03064499])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = voyage_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", voyage_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([ 0.01733501, -0.0230672 , -0.05208827, ..., -0.00957995,\n 0.04493361, 0.01485138]), array([ 0.05937521, -0.00729363, -0.02184347, ..., -0.02107683,\n 0.05706626, 0.0263358 ])]\nDim 1024 
(1024,)\n"],"headingContent":"Voyage","anchorList":[{"label":"Voyage","href":"Voyage","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/embeddings/embed-with-voyage.md b/localization/v2.4.x/site/pt/embeddings/embed-with-voyage.md index 26a5ae77f..fca6d703e 100644 --- a/localization/v2.4.x/site/pt/embeddings/embed-with-voyage.md +++ b/localization/v2.4.x/site/pt/embeddings/embed-with-voyage.md @@ -30,13 +30,13 @@ pip install "pymilvus[model]"
from pymilvus.model.dense import VoyageEmbeddingFunction

voyage_ef = VoyageEmbeddingFunction(
    model_name="voyage-3", # Defaults to `voyage-3`
    api_key=VOYAGE_API_KEY # Provide your Voyage API key
)
     

Parameters:

• model_name (string) The name of the Voyage model to use for encoding. You can specify any of the available Voyage model names, for example, voyage-3-lite, voyage-finance-2, etc. If you leave this parameter unspecified, voyage-3 will be used. For a list of available models, refer to the official Voyage documentation.
• api_key (string) The API key for accessing the Voyage API. For information on how to create an API key, refer to API Key and Python Client.

To create embeddings for documents, use the encode_documents() method:
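
docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Alan Turing was the first person to conduct substantial research in AI.",
    "Born in Maida Vale, London, Turing was raised in southern England.",
]

docs_embeddings = voyage_ef.encode_documents(docs)

# Print embeddings
print("Embeddings:", docs_embeddings)
# Print dimension and shape of embeddings
print("Dim:", voyage_ef.dim, docs_embeddings[0].shape)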

    diff --git a/localization/v2.4.x/site/pt/embeddings/embeddings.json b/localization/v2.4.x/site/pt/embeddings/embeddings.json index 0f256950a..7da9dabb9 100644 --- a/localization/v2.4.x/site/pt/embeddings/embeddings.json +++ b/localization/v2.4.x/site/pt/embeddings/embeddings.json @@ -1 +1 @@ -{"codeList":["pip install \"pymilvus[model]\"\n","from pymilvus import model\n\n# This will download \"all-MiniLM-L6-v2\", a light weight model.\nef = model.DefaultEmbeddingFunction()\n\n# Data from which embeddings are to be generated \ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nembeddings = ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", ef.dim, embeddings[0].shape)\n","Embeddings: [array([-3.09392996e-02, -1.80662833e-02, 1.34775648e-02, 2.77156215e-02,\n -4.86349640e-03, -3.12581174e-02, -3.55921760e-02, 5.76934684e-03,\n 2.80773244e-03, 1.35783911e-01, 3.59678417e-02, 6.17732145e-02,\n...\n -4.61330153e-02, -4.85207550e-02, 3.13997865e-02, 7.82178566e-02,\n -4.75336798e-02, 5.21207601e-02, 9.04406682e-02, -5.36676683e-02],\n dtype=float32)]\nDim: 384 (384,)\n","from pymilvus.model.hybrid import BGEM3EmbeddingFunction\nfrom pymilvus import (\n utility,\n FieldSchema, CollectionSchema, DataType,\n Collection, AnnSearchRequest, RRFRanker, connections,\n)\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Who started AI research?\"\n\n# BGE-M3 model can embed texts as dense and sparse vectors.\n# It is included in the optional `model` module in pymilvus, to install it,\n# simply run \"pip install pymilvus[model]\".\n\nbge_m3_ef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\n\ndocs_embeddings = bge_m3_ef(docs)\nquery_embeddings = bge_m3_ef([query])\n","from pymilvus.model.sparse import BM25EmbeddingFunction\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Where was Turing born?\"\nbm25_ef = BM25EmbeddingFunction()\n\n# 2. fit the corpus to get BM25 model parameters on your documents.\nbm25_ef.fit(docs)\n\n# 3. store the fitted parameters to disk to expedite future processing.\nbm25_ef.save(\"bm25_params.json\")\n\n# 4. 
load the saved params\nnew_bm25_ef = BM25EmbeddingFunction()\nnew_bm25_ef.load(\"bm25_params.json\")\n\ndocs_embeddings = new_bm25_ef.encode_documents(docs)\nquery_embeddings = new_bm25_ef.encode_queries([query])\nprint(\"Dim:\", new_bm25_ef.dim, list(docs_embeddings)[0].shape)\n","Dim: 21 (1, 21)\n"],"headingContent":"","anchorList":[{"label":"Descrição geral da incorporação","href":"Embedding-Overview","type":1,"isActive":false},{"label":"Exemplo 1: Usar a função de embedding padrão para gerar vetores densos","href":"Example-1-Use-default-embedding-function-to-generate-dense-vectors","type":2,"isActive":false},{"label":"Exemplo 2: Gerar vectores densos e esparsos numa única chamada com o modelo BGE M3","href":"Example-2-Generate-dense-and-sparse-vectors-in-one-call-with-BGE-M3-model","type":2,"isActive":false},{"label":"Exemplo 3: Gerar vectores esparsos utilizando o modelo BM25","href":"Example-3-Generate--sparse-vectors-using-BM25-model","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["pip install \"pymilvus[model]\"\n","from pymilvus import model\n\n# This will download \"all-MiniLM-L6-v2\", a light weight model.\nef = model.DefaultEmbeddingFunction()\n\n# Data from which embeddings are to be generated \ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nembeddings = ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", ef.dim, embeddings[0].shape)\n","Embeddings: [array([-3.09392996e-02, -1.80662833e-02, 1.34775648e-02, 2.77156215e-02,\n -4.86349640e-03, -3.12581174e-02, -3.55921760e-02, 5.76934684e-03,\n 2.80773244e-03, 1.35783911e-01, 3.59678417e-02, 6.17732145e-02,\n...\n -4.61330153e-02, -4.85207550e-02, 3.13997865e-02, 7.82178566e-02,\n -4.75336798e-02, 5.21207601e-02, 9.04406682e-02, -5.36676683e-02],\n dtype=float32)]\nDim: 384 (384,)\n","from pymilvus.model.hybrid import BGEM3EmbeddingFunction\nfrom pymilvus import (\n utility,\n FieldSchema, CollectionSchema, DataType,\n Collection, AnnSearchRequest, RRFRanker, connections,\n)\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Who started AI research?\"\n\n# BGE-M3 model can embed texts as dense and sparse vectors.\n# It is included in the optional `model` module in pymilvus, to install it,\n# simply run \"pip install pymilvus[model]\".\n\nbge_m3_ef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\n\ndocs_embeddings = bge_m3_ef(docs)\nquery_embeddings = bge_m3_ef([query])\n","from pymilvus.model.sparse import BM25EmbeddingFunction\n","# 1. prepare a small corpus to search\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Where was Turing born?\"\nbm25_ef = BM25EmbeddingFunction()\n\n# 2. fit the corpus to get BM25 model parameters on your documents.\nbm25_ef.fit(docs)\n\n# 3. 
store the fitted parameters to disk to expedite future processing.\nbm25_ef.save(\"bm25_params.json\")\n\n# 4. load the saved params\nnew_bm25_ef = BM25EmbeddingFunction()\nnew_bm25_ef.load(\"bm25_params.json\")\n\ndocs_embeddings = new_bm25_ef.encode_documents(docs)\nquery_embeddings = new_bm25_ef.encode_queries([query])\nprint(\"Dim:\", new_bm25_ef.dim, list(docs_embeddings)[0].shape)\n","Dim: 21 (1, 21)\n"],"headingContent":"Embedding Overview","anchorList":[{"label":"Descrição geral da incorporação","href":"Embedding-Overview","type":1,"isActive":false},{"label":"Exemplo 1: Utilizar a função de incorporação predefinida para gerar vectores densos","href":"Example-1-Use-default-embedding-function-to-generate-dense-vectors","type":2,"isActive":false},{"label":"Exemplo 2: Gerar vectores densos e esparsos numa única chamada com o modelo BGE M3","href":"Example-2-Generate-dense-and-sparse-vectors-in-one-call-with-BGE-M3-model","type":2,"isActive":false},{"label":"Exemplo 3: Gerar vectores esparsos utilizando o modelo BM25","href":"Example-3-Generate--sparse-vectors-using-BM25-model","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/embeddings/embeddings.md b/localization/v2.4.x/site/pt/embeddings/embeddings.md index acbc9f5af..da5895209 100644 --- a/localization/v2.4.x/site/pt/embeddings/embeddings.md +++ b/localization/v2.4.x/site/pt/embeddings/embeddings.md @@ -2,7 +2,7 @@ id: embeddings.md order: 1 summary: Saiba como gerar embeddings para os seus dados. -title: Visão geral da incorporação +title: Descrição geral da incorporação ---

Embedding Overview

To use embedding functions with Milvus, first install the PyMilvus client library with the model subpackage that wraps all the utilities for embedding generation.

    pip install "pymilvus[model]"
     

The model subpackage supports various embedding models, from OpenAI, Sentence Transformers, BGE M3, BM25, to SPLADE pretrained models. For brevity, this example uses the DefaultEmbeddingFunction, which is the all-MiniLM-L6-v2 sentence transformer model; the model is about 70 MB and will be downloaded on first use:
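
from pymilvus import model

# This will download "all-MiniLM-L6-v2", a light weight model.
ef = model.DefaultEmbeddingFunction()

# Data from which embeddings are to be generated 
docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Alan Turing was the first person to conduct substantial research in AI.",
    "Born in Maida Vale, London, Turing was raised in southern England.",
]

embeddings = ef.encode_documents(docs)

# Print embeddings
print("Embeddings:", embeddings)
# Print dimension and shape of embeddings
print("Dim:", ef.dim, embeddings[0].shape)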

    diff --git a/localization/v2.4.x/site/pt/faq/operational_faq.json b/localization/v2.4.x/site/pt/faq/operational_faq.json index 7ea58d3c1..e18e149be 100644 --- a/localization/v2.4.x/site/pt/faq/operational_faq.json +++ b/localization/v2.4.x/site/pt/faq/operational_faq.json @@ -1 +1 @@ -{"codeList":["{\n \"registry-mirrors\": [\"https://registry.docker-cn.com\"]\n}\n","$ lscpu | grep -e sse4_2 -e avx -e avx2 -e avx512\n","pip install pymilvus>=2.4.2\n"],"headingContent":"","anchorList":[{"label":"FAQ operacional","href":"Operational-FAQ","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":["{\n \"registry-mirrors\": [\"https://registry.docker-cn.com\"]\n}\n","$ lscpu | grep -e sse4_2 -e avx -e avx2 -e avx512\n","pip install pymilvus>=2.4.2\n","# Python Example: result of len() str cannot be used as \"max-length\" in Milvus \n>>> s = \"你好,世界!\"\n>>> len(s) # Number of characters of s.\n6\n>>> len(bytes(s, \"utf-8\")) # Size in bytes of s, max-length in Milvus.\n18\n"],"headingContent":"Operational FAQ","anchorList":[{"label":"FAQ operacional","href":"Operational-FAQ","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/faq/operational_faq.md b/localization/v2.4.x/site/pt/faq/operational_faq.md index e0c9275d5..252e08bcc 100644 --- a/localization/v2.4.x/site/pt/faq/operational_faq.md +++ b/localization/v2.4.x/site/pt/faq/operational_faq.md @@ -53,7 +53,7 @@ title: FAQ operacional

See Run Milvus on Windows to learn how to install Milvus on Windows.

I got an error installing pymilvus on Windows. What should I do?

It is not recommended to install PyMilvus on Windows. However, if you have to install PyMilvus on Windows and got an error, try installing it in a Conda environment. See Install Milvus SDK for more information about how to install PyMilvus in the Conda environment.

Can I deploy Milvus when disconnected from the Internet?

Yes. You can install Milvus in an offline environment. See Install Milvus Offline for more information.

Where can I find the logs generated by Milvus?

By default, the Milvus log is printed to stdout (standard output) and stderr (standard error); however, we highly recommend redirecting your log to a persistent volume in production. To do so, update log.file.rootPath in milvus.yaml. And if you deploy Milvus with the milvus-helm chart, you also need to enable log persistence first via --set log.persistence.enabled=true.

If you did not change the configuration, using kubectl logs <pod-name> or docker logs CONTAINER can also help you find the log.
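
As a minimal sketch, the relevant pieces look like the following (the log path and release name below are assumptions; pick a directory backed by your persistent volume):

# milvus.yaml: redirect logs to a persistent path
log:
  file:
    rootPath: /var/lib/milvus/logs

# Helm deployments: enable log persistence first
helm upgrade my-release milvus/milvus --set log.persistence.enabled=true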

Can I create an index for a segment before inserting data into it?

Yes, you can. But we recommend inserting the data in batches, each of which should not exceed 256 MB, before indexing each segment.

Can I share an etcd instance among multiple Milvus instances?

Yes, you can share an etcd instance among multiple Milvus instances. To do so, you need to change etcd.rootPath to a separate value for each Milvus instance in its configuration file before starting them.
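
A minimal sketch of what this looks like in each instance's milvus.yaml (the rootPath values below are arbitrary examples):

# milvus.yaml of instance A
etcd:
  rootPath: by-dev-instance-a

# milvus.yaml of instance B
etcd:
  rootPath: by-dev-instance-b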

• Limited data: The collection may not have enough entities to meet the limit you requested. If the total number of entities in the collection is less than the limit, you will naturally receive fewer results.

• Duplicate primary keys: Milvus prioritizes specific entities when it encounters duplicate primary keys during a search. This behavior varies by search type:

• Query (exact match): Milvus selects the latest entity with the matching PK. ANN Search: Milvus selects the entity with the highest similarity score, even if the entities share the same PK. This prioritization can result in fewer unique results than the limit if your collection has many duplicate primary keys.

• Insufficient matches: The search's filtering expressions may be too strict, resulting in fewer entities meeting the similarity threshold. If the conditions set for the search are too restrictive, not enough entities will match, leading to fewer results than expected.


MilvusClient("milvus_demo.db") gives an error: ModuleNotFoundError: No module named 'milvus_lite'. What causes this error and how can it be solved?

This error occurs when you try to use Milvus Lite on a Windows platform. Milvus Lite is primarily designed for Linux environments and may not have native support for Windows.

The solution is to use a Linux environment:

• Use a Linux-based operating system or a virtual machine to run Milvus Lite.
• This approach will ensure compatibility with the library's dependencies and features.

What are the "length exceeds max length" errors in Milvus, and how can they be understood and addressed?

"Length exceeds max length" errors in Milvus occur when the size of a data element surpasses the maximum allowed size for a collection or field. Here are some examples and explanations:

• JSON field error: <MilvusException: (code=1100, message=the length (398324) of json field (metadata) exceeds max length (65536): expected=valid length json string, actual=length exceeds max length: invalid parameter)>

• String length error: <ParamError: (code=1, message=invalid input, length of string exceeds max length. length: 74238, max length: 60535)>

• VarChar field error: <MilvusException: (code=1100, message=the length (60540) of 0th VarChar paragraph exceeds max length (0)%!(EXTRA int64=60535): invalid parameter)>

To understand and address these errors:

• Understand that len(str) in Python represents the number of characters, not the size in bytes.
• For string-based data types like VARCHAR and JSON, use len(bytes(str, encoding='utf-8')) to determine the actual size in bytes, which is what Milvus uses for "max-length".

Example in Python:

# Python Example: result of len() str cannot be used as "max-length" in Milvus 
>>> s = "你好,世界!"
>>> len(s) # Number of characters of s.
6
>>> len(bytes(s, "utf-8")) # Size in bytes of s, max-length in Milvus.
18

Still have questions?

You can:

• Check out Milvus on GitHub. Feel free to ask questions, share ideas, and help others.
• Join our Milvus Forum or Slack channel to find support and engage with our open-source community.
    • diff --git a/localization/v2.4.x/site/pt/faq/performance_faq.json b/localization/v2.4.x/site/pt/faq/performance_faq.json index ec123f619..aa359b0cd 100644 --- a/localization/v2.4.x/site/pt/faq/performance_faq.json +++ b/localization/v2.4.x/site/pt/faq/performance_faq.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"","anchorList":[{"label":"FAQ sobre desempenho","href":"Performance-FAQ","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Performance FAQ","anchorList":[{"label":"FAQ sobre desempenho","href":"Performance-FAQ","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/faq/performance_faq.md b/localization/v2.4.x/site/pt/faq/performance_faq.md index 457a1f9c7..0c4c9e752 100644 --- a/localization/v2.4.x/site/pt/faq/performance_faq.md +++ b/localization/v2.4.x/site/pt/faq/performance_faq.md @@ -3,7 +3,7 @@ id: performance_faq.md summary: >- Encontre respostas a perguntas frequentes sobre o desempenho da pesquisa, melhorias de desempenho e outras questões relacionadas com o desempenho. -title: FAQ de desempenho +title: FAQ sobre desempenho ---

Performance FAQ

You can:

• Check out Milvus on GitHub. Feel free to ask questions, share ideas, and help others.
• Join our Slack channel to find support and engage with our open-source community.
      diff --git a/localization/v2.4.x/site/pt/faq/product_faq.json b/localization/v2.4.x/site/pt/faq/product_faq.json index 457f227c4..fc0c93efb 100644 --- a/localization/v2.4.x/site/pt/faq/product_faq.json +++ b/localization/v2.4.x/site/pt/faq/product_faq.json @@ -1 +1 @@ -{"codeList":["60 * 2 * 4 + 40 * 1 * 12 = 960\n"],"headingContent":"","anchorList":[{"label":"FAQ do produto","href":"Product-FAQ","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":["60 * 2 * 4 + 40 * 1 * 12 = 960\n"],"headingContent":"Product FAQ","anchorList":[{"label":"FAQ do produto","href":"Product-FAQ","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/faq/product_faq.md b/localization/v2.4.x/site/pt/faq/product_faq.md index fcf355ee8..a5d338950 100644 --- a/localization/v2.4.x/site/pt/faq/product_faq.md +++ b/localization/v2.4.x/site/pt/faq/product_faq.md @@ -64,9 +64,9 @@ title: FAQ do produto

To avoid such a situation, try setting nprobe larger, and nlist and k smaller.

See Vector Index for more information.
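
As a minimal pymilvus sketch of where these knobs live (the collection name, field name, and vector dimension are assumptions; nlist is fixed when the IVF index is built, while nprobe is set per search):

from pymilvus import Collection

collection = Collection("demo_collection")  # assumed existing collection with an IVF index

query_vector = [0.0] * 768  # assumed vector dimension

results = collection.search(
    data=[query_vector],
    anns_field="embedding",  # assumed vector field name
    param={"metric_type": "L2", "params": {"nprobe": 128}},  # larger nprobe probes more clusters
    limit=5,  # a smaller k
)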

What is the maximum vector dimension supported in Milvus?

By default, Milvus can manage vectors with up to 32,768 dimensions. You can increase the value of Proxy.maxDimension to allow a vector of larger dimension.

Does Milvus support Apple M1 CPU?

The current Milvus release does not directly support Apple M1 CPU. After Milvus 2.3, Milvus provides Docker images for the ARM64 architecture.

What data types does Milvus support on the primary key field?

In the current release, Milvus supports both INT64 and string.

Is Milvus scalable?

Yes. You can deploy a Milvus cluster with multiple nodes via Helm Chart on Kubernetes. Refer to the Scale Guide for more instructions.

Does the query perform in memory? What are incremental data and historical data?

Yes. When a query request comes, Milvus searches both incremental data and historical data by loading them into memory. Incremental data are in the growing segments, which are buffered in memory before they reach the threshold to be persisted in the storage engine, while historical data are from the sealed segments that are stored in object storage. Incremental data and historical data together constitute the whole dataset to search.

Yes. For queries on the same collection, Milvus concurrently searches the incremental and historical data. However, queries on different collections are conducted in series. Whereas the historical data can be an extremely huge dataset, searches on the historical data are relatively more time-consuming and essentially performed in series.

Why does data in MinIO remain after the corresponding collection is dropped?

Data in MinIO is designed to remain for a certain period of time for the convenience of data rollback.

      diff --git a/localization/v2.4.x/site/pt/getstarted/install_SDKs/install-java.json b/localization/v2.4.x/site/pt/getstarted/install_SDKs/install-java.json index 2e9f251ab..dff783778 100644 --- a/localization/v2.4.x/site/pt/getstarted/install_SDKs/install-java.json +++ b/localization/v2.4.x/site/pt/getstarted/install_SDKs/install-java.json @@ -1 +1 @@ -{"codeList":["\n io.milvus\n milvus-sdk-java\n 2.4.3\n\n","implementation 'io.milvus:milvus-sdk-java:2.4.3'\n"],"headingContent":"","anchorList":[{"label":"Instalar o Milvus Java SDK","href":"Install-Milvus-Java-SDK","type":1,"isActive":false},{"label":"Requisitos","href":"Requirement","type":2,"isActive":false},{"label":"Instalar o Milvus Java SDK","href":"Install-Milvus-Java-SDK","type":2,"isActive":false},{"label":"O que se segue","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["\n io.milvus\n milvus-sdk-java\n 2.4.5\n\n","implementation 'io.milvus:milvus-sdk-java:2.4.5'\n"],"headingContent":"Install Milvus Java SDK","anchorList":[{"label":"Instalar o Milvus Java SDK","href":"Install-Milvus-Java-SDK","type":1,"isActive":false},{"label":"Requisitos","href":"Requirement","type":2,"isActive":false},{"label":"Instalar o Milvus Java SDK","href":"Install-Milvus-Java-SDK","type":2,"isActive":false},{"label":"O que se segue","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/getstarted/install_SDKs/install-java.md b/localization/v2.4.x/site/pt/getstarted/install_SDKs/install-java.md index 5c2646e31..3147490e2 100644 --- a/localization/v2.4.x/site/pt/getstarted/install_SDKs/install-java.md +++ b/localization/v2.4.x/site/pt/getstarted/install_SDKs/install-java.md @@ -63,13 +63,13 @@ title: Instalar o Milvus Java SDK
      <dependency>
           <groupId>io.milvus</groupId>
           <artifactId>milvus-sdk-java</artifactId>
      -    <version>2.4.3</version>
      +    <version>2.4.5</version>
       </dependency>
       
      • Gradle/Grails
-implementation 'io.milvus:milvus-sdk-java:2.4.3'
+implementation 'io.milvus:milvus-sdk-java:2.4.5'
       

What's next

PyMilvus is available on the Python Package Index.

It is recommended that you install a PyMilvus version that matches the version of the Milvus server you installed. For more information, see the Release Notes.
-$ python3 -m pip install pymilvus==2.4.5
+$ python3 -m pip install pymilvus==2.4.8
       

Verify the installation

-Open In Colab
+Open In Colab GitHub Repository

Vectors, the output data format of neural network models, can effectively encode information and play a pivotal role in AI applications such as knowledge bases, semantic search, Retrieval Augmented Generation (RAG), and more.

Milvus is an open-source vector database that suits AI applications of every size, from running a demo chatbot in a Jupyter notebook to building web-scale search that serves billions of users. In this guide, we will walk you through how to set up Milvus locally within minutes and use the Python client library to generate, store, and search vectors.

Install Milvus

By default, scalar fields are not indexed. If you need to perform metadata-filtered search over a large dataset, consider using a fixed schema and also enabling an index to improve search performance.
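A minimal sketch of enabling a scalar index with pymilvus, assuming a collection "demo_collection" with a scalar field "subject" (both names hypothetical):

from pymilvus import MilvusClient

client = MilvusClient("milvus_demo.db")

# Build an inverted index on a scalar field to speed up filtered search
index_params = client.prepare_index_params()
index_params.add_index(field_name="subject", index_type="INVERTED")
client.create_index(collection_name="demo_collection", index_params=index_params)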

In addition to vector search, you can also perform other types of search:

Query

A query() is an operation that retrieves all entities matching a criterion, such as a filter expression or matching some ids.

For example, retrieve all entities whose scalar field has a particular value:

      res = client.query(
           collection_name="demo_collection",
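The snippet above is cut off at the hunk boundary; a completed call might look as follows, where the filter expression and output fields are assumptions for illustration:

res = client.query(
    collection_name="demo_collection",
    filter="subject == 'history'",  # hypothetical filter expression
    output_fields=["text", "subject"],
)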
      @@ -329,7 +331,7 @@ res = client.search(
                 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
               >
             
      -    

If you want to purge data, you can delete entities by specifying the primary key, or delete all entities that match a particular filter expression.

      # Delete entities by primary key
       res = client.delete(collection_name="demo_collection", ids=[0, 2])
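Deleting by filter expression uses the same call; a sketch with a hypothetical filter:

# Delete all entities matching a filter expression
res = client.delete(
    collection_name="demo_collection",
    filter="subject == 'biology'",  # hypothetical filter expression
)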
       
      @@ -361,7 +363,7 @@ res = client.delete(
                 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
               >
             
      -    

Since all the data of Milvus Lite is stored in a local file, you can load all the data into memory even after the program ends, by creating a MilvusClient with the existing file. For example, this will recover the collections from the "milvus_demo.db" file and continue writing data into it.

      from pymilvus import MilvusClient
       
       client = MilvusClient("milvus_demo.db")
      @@ -400,7 +402,7 @@ client.drop_collection(collection_name="demo_coll
                 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
               >
             
      -    

Milvus Lite is great for getting started with a local Python program. If you have large-scale data or would like to use Milvus in production, you can learn about deploying Milvus on Docker and Kubernetes. All deployment modes of Milvus share the same API, so your client-side code doesn't need to change much when switching to another deployment mode. Simply specify the URI and token of a Milvus server deployed anywhere:

      client = MilvusClient(uri="http://localhost:19530", token="root:Milvus")
       

Milvus provides REST and gRPC APIs, with client libraries in languages such as Python, Java, Go, C#, and Node.js.

      diff --git a/localization/v2.4.x/site/pt/getstarted/run-milvus-docker/install_standalone-docker-compose.json b/localization/v2.4.x/site/pt/getstarted/run-milvus-docker/install_standalone-docker-compose.json index 028de0449..b24070d77 100644 --- a/localization/v2.4.x/site/pt/getstarted/run-milvus-docker/install_standalone-docker-compose.json +++ b/localization/v2.4.x/site/pt/getstarted/run-milvus-docker/install_standalone-docker-compose.json @@ -1 +1 @@ -{"codeList":["# Download the configuration file\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml\n\n# Start Milvus\n$ sudo docker compose up -d\n\nCreating milvus-etcd ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker compose ps\n\n Name Command State Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd etcd -advertise-client-url ... Up 2379/tcp, 2380/tcp\nmilvus-minio /usr/bin/docker-entrypoint ... Up (healthy) 9000/tcp\nmilvus-standalone /tini -- milvus run standalone Up 0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","# Stop Milvus\n$ sudo docker compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n"],"headingContent":"","anchorList":[{"label":"Executar o Milvus com o Docker Compose","href":"Run-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"Pré-requisitos","href":"Prerequisites","type":2,"isActive":false},{"label":"Instalar o Milvus","href":"Install-Milvus","type":2,"isActive":false},{"label":"O que se segue","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["# Download the configuration file\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml\n\n# Start Milvus\n$ sudo docker-compose up -d\n\nCreating milvus-etcd ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker-compose ps\n\n Name Command State Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd etcd -advertise-client-url ... Up 2379/tcp, 2380/tcp\nmilvus-minio /usr/bin/docker-entrypoint ... Up (healthy) 9000/tcp\nmilvus-standalone /tini -- milvus run standalone Up 0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","# Stop Milvus\n$ sudo docker-compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n"],"headingContent":"Run Milvus with Docker Compose","anchorList":[{"label":"Executar o Milvus com o Docker Compose","href":"Run-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"Pré-requisitos","href":"Prerequisites","type":2,"isActive":false},{"label":"Instalar o Milvus","href":"Install-Milvus","type":2,"isActive":false},{"label":"O que se segue","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/getstarted/run-milvus-docker/install_standalone-docker-compose.md b/localization/v2.4.x/site/pt/getstarted/run-milvus-docker/install_standalone-docker-compose.md index 0a6ff5971..23908e34b 100644 --- a/localization/v2.4.x/site/pt/getstarted/run-milvus-docker/install_standalone-docker-compose.md +++ b/localization/v2.4.x/site/pt/getstarted/run-milvus-docker/install_standalone-docker-compose.md @@ -57,10 +57,10 @@ title: Executar o Milvus com o Docker Compose

Milvus provides a Docker Compose configuration file in the Milvus repository. To install Milvus using Docker Compose, simply run

      # Download the configuration file
      -$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml
      +$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml
       
       # Start Milvus
      -$ sudo docker compose up -d
      +$ sudo docker-compose up -d
       
       Creating milvus-etcd  ... done
       Creating milvus-minio ... done
      @@ -81,7 +81,7 @@ Creating milvus-standalone ... done
       

You can check whether the containers are up and running using the following command:

-$ sudo docker compose ps
+$ sudo docker-compose ps
     
           Name                     Command                  State                            Ports
     --------------------------------------------------------------------------------------------------------------------
    @@ -91,7 +91,7 @@ milvus-standalone   /tini -- milvus run standalone   Up             0.0.0.0:1953
     

You can stop and delete this container as follows

    # Stop Milvus
    -$ sudo docker compose down
    +$ sudo docker-compose down
     
     # Delete service data
     $ sudo rm -rf volumes
    @@ -125,14 +125,14 @@ $ sudo rm
     
     
• Upgrade Milvus using Helm Chart.

• Scale your Milvus cluster.

• Deploy your Milvus cluster on clouds:

• Explore Milvus Backup, an open-source tool for Milvus data backups.

• Explore Birdwatcher, an open-source tool for debugging Milvus and dynamic configuration updates.

• Explore Attu, an open-source GUI tool for intuitive Milvus management.

• Monitor Milvus with Prometheus.

  • diff --git a/localization/v2.4.x/site/pt/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json b/localization/v2.4.x/site/pt/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json index afd286509..9c87e5e76 100644 --- a/localization/v2.4.x/site/pt/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json +++ b/localization/v2.4.x/site/pt/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.json @@ -1 +1 @@ -{"codeList":["$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml\n","...\nstandalone:\n ...\n deploy:\n resources:\n reservations:\n devices:\n - driver: nvidia\n capabilities: [\"gpu\"]\n device_ids: [\"0\"]\n...\n","...\nstandalone:\n ...\n deploy:\n resources:\n reservations:\n devices:\n - driver: nvidia\n capabilities: [\"gpu\"]\n device_ids: ['0', '1']\n...\n","$ sudo docker compose up -d\n\nCreating milvus-etcd ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker compose ps\n\n Name Command State Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd etcd -advertise-client-url ... Up 2379/tcp, 2380/tcp\nmilvus-minio /usr/bin/docker-entrypoint ... Up (healthy) 9000/tcp\nmilvus-standalone /tini -- milvus run standalone Up 0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","$ CUDA_VISIBLE_DEVICES=0 ./milvus run standalone\n","$ CUDA_VISIBLE_DEVICES=0,1 ./milvus run standalone\n","# Stop Milvus\n$ sudo docker compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n","docker cp :/milvus/configs/milvus.yaml milvus.yaml\n","vim milvus.yaml\n","...\ngpu:\n initMemSize: 0\n maxMemSize: 0\n...\n","docker cp milvus.yaml :/milvus/configs/milvus.yaml\n","docker stop \ndocker start \n"],"headingContent":"","anchorList":[{"label":"Executar o Milvus com suporte a GPU usando o Docker Compose","href":"Run-Milvus-with-GPU-Support-Using-Docker-Compose","type":1,"isActive":false},{"label":"Pré-requisitos","href":"Prerequisites","type":2,"isActive":false},{"label":"Instalar o Milvus","href":"Install-Milvus","type":2,"isActive":false},{"label":"Configurar o pool de memória","href":"Configure-memory-pool","type":2,"isActive":false},{"label":"O que vem a seguir","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml\n","...\nstandalone:\n ...\n deploy:\n resources:\n reservations:\n devices:\n - driver: nvidia\n capabilities: [\"gpu\"]\n device_ids: [\"0\"]\n...\n","...\nstandalone:\n ...\n deploy:\n resources:\n reservations:\n devices:\n - driver: nvidia\n capabilities: [\"gpu\"]\n device_ids: ['0', '1']\n...\n","$ sudo docker compose up -d\n\nCreating milvus-etcd ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker compose ps\n\n Name Command State Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd etcd -advertise-client-url ... Up 2379/tcp, 2380/tcp\nmilvus-minio /usr/bin/docker-entrypoint ... 
Up (healthy) 9000/tcp\nmilvus-standalone /tini -- milvus run standalone Up 0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","$ CUDA_VISIBLE_DEVICES=0 ./milvus run standalone\n","$ CUDA_VISIBLE_DEVICES=0,1 ./milvus run standalone\n","# Stop Milvus\n$ sudo docker compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n","docker cp :/milvus/configs/milvus.yaml milvus.yaml\n","vim milvus.yaml\n","...\ngpu:\n initMemSize: 0\n maxMemSize: 0\n...\n","docker cp milvus.yaml :/milvus/configs/milvus.yaml\n","docker stop \ndocker start \n"],"headingContent":"Run Milvus with GPU Support Using Docker Compose","anchorList":[{"label":"Executar o Milvus com suporte a GPU usando o Docker Compose","href":"Run-Milvus-with-GPU-Support-Using-Docker-Compose","type":1,"isActive":false},{"label":"Pré-requisitos","href":"Prerequisites","type":2,"isActive":false},{"label":"Instalar o Milvus","href":"Install-Milvus","type":2,"isActive":false},{"label":"Configurar o pool de memória","href":"Configure-memory-pool","type":2,"isActive":false},{"label":"O que vem a seguir","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md b/localization/v2.4.x/site/pt/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md index 66fc09f39..209fc996e 100644 --- a/localization/v2.4.x/site/pt/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md +++ b/localization/v2.4.x/site/pt/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md @@ -59,12 +59,12 @@ title: Executar o Milvus com suporte a GPU usando o Docker Compose >

To install Milvus with GPU support using Docker Compose, follow these steps.

1. Download and configure the YAML file

Download milvus-standalone-docker-compose-gpu.yml and save it as docker-compose.yml manually, or with the following command.

-$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml
+$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml
     

You need to make some changes to the environment variables of the standalone service in the YAML file, as follows:

• To assign a specific GPU device to Milvus, locate the deploy.resources.reservations.devices[0].device_ids field in the definition of the standalone service and replace its value with the ID of the desired GPU. You can use the nvidia-smi tool, included with NVIDIA GPU display drivers, to determine the ID of a GPU device. Milvus supports multiple GPU devices.

Assign a single GPU device to Milvus:

    ...
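The snippet here is elided; for reference, the full fragment as given in the accompanying code listing reads:

...
standalone:
  ...
  deploy:
    resources:
      reservations:
        devices:
          - driver: nvidia
            capabilities: ["gpu"]
            device_ids: ["0"]
...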
    @@ -169,7 +169,7 @@ gpu:
     
• initMemSize: Initial size of the memory pool. Defaults to 1024.
• maxMemSize: Maximum size of the memory pool. Defaults to 2048.
• Use the following command to copy the modified milvus.yaml file back into the Milvus container. Replace <milvus_container_id> with your actual Milvus container ID.

    docker cp milvus.yaml <milvus_container_id>:/milvus/configs/milvus.yaml
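Then restart the container so the new configuration takes effect, as in the accompanying code listing (again replacing <milvus_container_id> with your container ID):

docker stop <milvus_container_id>
docker start <milvus_container_id>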
    diff --git a/localization/v2.4.x/site/pt/integrations/evaluation_with_deepeval.md b/localization/v2.4.x/site/pt/integrations/evaluation_with_deepeval.md
    index 8ce450ee1..834bd987d 100644
    --- a/localization/v2.4.x/site/pt/integrations/evaluation_with_deepeval.md
    +++ b/localization/v2.4.x/site/pt/integrations/evaluation_with_deepeval.md
    @@ -20,7 +20,8 @@ title: Avaliação com DeepEval
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

-Open In Colab
+Open In Colab GitHub Repository

This guide demonstrates how to use DeepEval to evaluate a Retrieval-Augmented Generation (RAG) pipeline built on top of Milvus.

The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

DeepEval is a framework that helps you evaluate RAG pipelines. There are tools and frameworks that help you build these pipelines, but evaluating them and quantifying pipeline performance can be hard. This is where DeepEval comes in.
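As a flavor of what such an evaluation looks like, here is a minimal sketch using DeepEval's test-case and metric APIs; the question, answer, and retrieval context are made-up placeholders, and the relevancy metric needs an LLM judge configured (e.g. an OpenAI key):

from deepeval import evaluate
from deepeval.metrics import AnswerRelevancyMetric
from deepeval.test_case import LLMTestCase

# One test case built from a single RAG question/answer pair (placeholders)
test_case = LLMTestCase(
    input="What is Milvus?",
    actual_output="Milvus is an open-source vector database.",
    retrieval_context=["Milvus is an open-source vector database built for AI applications."],
)

# Score how relevant the generated answer is to the question
metric = AnswerRelevancyMetric(threshold=0.5)
evaluate(test_cases=[test_case], metrics=[metric])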

diff --git a/localization/v2.4.x/site/pt/integrations/evaluation_with_phoenix.md b/localization/v2.4.x/site/pt/integrations/evaluation_with_phoenix.md index 58a4f289e..825642d9c 100644 --- a/localization/v2.4.x/site/pt/integrations/evaluation_with_phoenix.md +++ b/localization/v2.4.x/site/pt/integrations/evaluation_with_phoenix.md @@ -20,10 +20,11 @@ title: Avaliação com Arize Pheonix

-Open In Colab
+Open In Colab GitHub Repository

This guide demonstrates how to use Arize Phoenix to evaluate a Retrieval-Augmented Generation (RAG) pipeline built on top of Milvus.

The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

Arize Phoenix is a framework that helps you evaluate RAG pipelines. There are tools and frameworks that help you build these pipelines, but evaluating them and quantifying their performance can be hard. This is where Arize Phoenix comes in.

Prerequisites

We use Arize Phoenix to evaluate our retrieval-augmented generation (RAG) pipeline, focusing on two key metrics:

• Hallucination evaluation: Determines whether the content is factual or hallucinated (information not grounded in the context), ensuring data integrity.

• Hallucination explanation: Explains why a response is factual or not.
@@ -341,7 +342,7 @@ Answering questions: 100%|██████████| 3/3 [00:03<00:00,
• Application latency: Identify and optimize slow LLM invocations and component performance.
• Token usage: Break down token consumption for cost optimization.
• Runtime exceptions: Capture critical issues such as rate limiting.
• Retrieved documents: Analyze document retrieval, scoring, and ordering.

By using Phoenix tracing, you can identify bottlenecks, optimize resources, and ensure system reliability across various frameworks and languages.

      @@ -359,7 +360,7 @@ OpenAIInstrumentor().instrument()
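The hunk above references OpenAIInstrumentor().instrument(); a minimal sketch of that tracing setup, assuming the arize-phoenix and openinference-instrumentation-openai packages are installed:

import phoenix as px
from openinference.instrumentation.openai import OpenAIInstrumentor

# Start the local Phoenix UI to collect and display traces
px.launch_app()

# Automatically trace every OpenAI client call made by the RAG pipeline
OpenAIInstrumentor().instrument()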

[Image: Alt Text]

    import nest_asyncio
     
    diff --git a/localization/v2.4.x/site/pt/integrations/integrate_with_bentoml.json b/localization/v2.4.x/site/pt/integrations/integrate_with_bentoml.json
    index afb54ddba..40080408e 100644
    --- a/localization/v2.4.x/site/pt/integrations/integrate_with_bentoml.json
    +++ b/localization/v2.4.x/site/pt/integrations/integrate_with_bentoml.json
    @@ -1 +1 @@
    -{"codeList":["$ pip install -U pymilvus bentoml\n","import bentoml\n\nBENTO_EMBEDDING_MODEL_END_POINT = \"BENTO_EMBEDDING_MODEL_END_POINT\"\nBENTO_API_TOKEN = \"BENTO_API_TOKEN\"\n\nembedding_client = bentoml.SyncHTTPClient(\n    BENTO_EMBEDDING_MODEL_END_POINT, token=BENTO_API_TOKEN\n)\n","# naively chunk on newlines\ndef chunk_text(filename: str) -> list:\n    with open(filename, \"r\") as f:\n        text = f.read()\n    sentences = text.split(\"\\n\")\n    return sentences\n","import os\nimport requests\nimport urllib.request\n\n# set up the data source\nrepo = \"ytang07/bento_octo_milvus_RAG\"\ndirectory = \"data\"\nsave_dir = \"./city_data\"\napi_url = f\"https://api.github.com/repos/{repo}/contents/{directory}\"\n\n\nresponse = requests.get(api_url)\ndata = response.json()\n\nif not os.path.exists(save_dir):\n    os.makedirs(save_dir)\n\nfor item in data:\n    if item[\"type\"] == \"file\":\n        file_url = item[\"download_url\"]\n        file_path = os.path.join(save_dir, item[\"name\"])\n        urllib.request.urlretrieve(file_url, file_path)\n","# please upload your data directory under this file's folder\ncities = os.listdir(\"city_data\")\n# store chunked text for each of the cities in a list of dicts\ncity_chunks = []\nfor city in cities:\n    chunked = chunk_text(f\"city_data/{city}\")\n    cleaned = []\n    for chunk in chunked:\n        if len(chunk) > 7:\n            cleaned.append(chunk)\n    mapped = {\"city_name\": city.split(\".\")[0], \"chunks\": cleaned}\n    city_chunks.append(mapped)\n","def get_embeddings(texts: list) -> list:\n    if len(texts) > 25:\n        splits = [texts[x : x + 25] for x in range(0, len(texts), 25)]\n        embeddings = []\n        for split in splits:\n            embedding_split = embedding_client.encode(sentences=split)\n            embeddings += embedding_split\n        return embeddings\n    return embedding_client.encode(\n        sentences=texts,\n    )\n","entries = []\nfor city_dict in city_chunks:\n    # No need for the embeddings list if get_embeddings already returns a list of lists\n    embedding_list = get_embeddings(city_dict[\"chunks\"])  # returns a list of lists\n    # Now match texts with embeddings and city name\n    for i, embedding in enumerate(embedding_list):\n        entry = {\n            \"embedding\": embedding,\n            \"sentence\": city_dict[\"chunks\"][\n                i\n            ],  # Assume \"chunks\" has the corresponding texts for the embeddings\n            \"city\": city_dict[\"city_name\"],\n        }\n        entries.append(entry)\n    print(entries)\n","from pymilvus import MilvusClient\n\nCOLLECTION_NAME = \"Bento_Milvus_RAG\"  # random name for your collection\nDIMENSION = 384\n\n# Initialize a Milvus Lite client\nmilvus_client = MilvusClient(\"milvus_demo.db\")\n","from pymilvus import connections\n\nconnections.connect(uri=\"milvus_demo.db\")\n","from pymilvus import MilvusClient, DataType, Collection\n\n# Create schema\nschema = MilvusClient.create_schema(\n    auto_id=True,\n    enable_dynamic_field=True,\n)\n\n# 3.2. 
Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"embedding\", datatype=DataType.FLOAT_VECTOR, dim=DIMENSION)\n","# prepare index parameters\nindex_params = milvus_client.prepare_index_params()\n\n# add index\nindex_params.add_index(\n    field_name=\"embedding\",\n    index_type=\"AUTOINDEX\",  # use autoindex instead of other complex indexing method\n    metric_type=\"COSINE\",  # L2, COSINE, or IP\n)\n\n# create collection\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n    collection_name=COLLECTION_NAME, schema=schema, index_params=index_params\n)\n\n# Outside the loop, now you upsert all the entries at once\nmilvus_client.insert(collection_name=COLLECTION_NAME, data=entries)\n","BENTO_LLM_END_POINT = \"BENTO_LLM_END_POINT\"\n\nllm_client = bentoml.SyncHTTPClient(BENTO_LLM_END_POINT, token=BENTO_API_TOKEN)\n","def dorag(question: str, context: str):\n\n    prompt = (\n        f\"You are a helpful assistant. The user has a question. Answer the user question based only on the context: {context}. \\n\"\n        f\"The user question is {question}\"\n    )\n\n    results = llm_client.generate(\n        max_tokens=1024,\n        prompt=prompt,\n    )\n\n    res = \"\"\n    for result in results:\n        res += result\n\n    return res\n","question = \"What state is Cambridge in?\"\n\n\ndef ask_a_question(question):\n    embeddings = get_embeddings([question])\n    res = milvus_client.search(\n        collection_name=COLLECTION_NAME,\n        data=embeddings,  # search for the one (1) embedding returned as a list of lists\n        anns_field=\"embedding\",  # Search across embeddings\n        limit=5,  # get me the top 5 results\n        output_fields=[\"sentence\"],  # get the sentence/chunk and city\n    )\n\n    sentences = []\n    for hits in res:\n        for hit in hits:\n            print(hit)\n            sentences.append(hit[\"entity\"][\"sentence\"])\n    context = \". \".join(sentences)\n    return context\n\n\ncontext = ask_a_question(question=question)\nprint(context)\n","print(dorag(question=question, context=context))\n"],"headingContent":"","anchorList":[{"label":"Geração Aumentada por Recuperação (RAG) com Milvus e BentoML","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-BentoML","type":1,"isActive":false},{"label":"Introdução","href":"Introduction","type":2,"isActive":false},{"label":"Antes de começar","href":"Before-you-begin","type":2,"isActive":false},{"label":"Servir Embeddings com BentoML/BentoCloud","href":"Serving-Embeddings-with-BentoMLBentoCloud","type":2,"isActive":false},{"label":"Inserção de dados numa base de dados vetorial para recuperação","href":"Inserting-Data-into-a-Vector-Database-for-Retrieval","type":2,"isActive":false},{"label":"Criar a sua coleção Milvus Lite","href":"Creating-Your-Milvus-Lite-Collection","type":2,"isActive":false},{"label":"Configurar o LLM para o RAG","href":"Set-up-Your-LLM-for-RAG","type":2,"isActive":false},{"label":"Instruções do LLM","href":"LLM-Instructions","type":2,"isActive":false},{"label":"Um exemplo de RAG","href":"A-RAG-Example","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ pip install -U pymilvus bentoml\n","import bentoml\n\nBENTO_EMBEDDING_MODEL_END_POINT = \"BENTO_EMBEDDING_MODEL_END_POINT\"\nBENTO_API_TOKEN = \"BENTO_API_TOKEN\"\n\nembedding_client = bentoml.SyncHTTPClient(\n    BENTO_EMBEDDING_MODEL_END_POINT, token=BENTO_API_TOKEN\n)\n","# naively chunk on newlines\ndef chunk_text(filename: str) -> list:\n    with open(filename, \"r\") as f:\n        text = f.read()\n    sentences = text.split(\"\\n\")\n    return sentences\n","import os\nimport requests\nimport urllib.request\n\n# set up the data source\nrepo = \"ytang07/bento_octo_milvus_RAG\"\ndirectory = \"data\"\nsave_dir = \"./city_data\"\napi_url = f\"https://api.github.com/repos/{repo}/contents/{directory}\"\n\n\nresponse = requests.get(api_url)\ndata = response.json()\n\nif not os.path.exists(save_dir):\n    os.makedirs(save_dir)\n\nfor item in data:\n    if item[\"type\"] == \"file\":\n        file_url = item[\"download_url\"]\n        file_path = os.path.join(save_dir, item[\"name\"])\n        urllib.request.urlretrieve(file_url, file_path)\n","# please upload your data directory under this file's folder\ncities = os.listdir(\"city_data\")\n# store chunked text for each of the cities in a list of dicts\ncity_chunks = []\nfor city in cities:\n    chunked = chunk_text(f\"city_data/{city}\")\n    cleaned = []\n    for chunk in chunked:\n        if len(chunk) > 7:\n            cleaned.append(chunk)\n    mapped = {\"city_name\": city.split(\".\")[0], \"chunks\": cleaned}\n    city_chunks.append(mapped)\n","def get_embeddings(texts: list) -> list:\n    if len(texts) > 25:\n        splits = [texts[x : x + 25] for x in range(0, len(texts), 25)]\n        embeddings = []\n        for split in splits:\n            embedding_split = embedding_client.encode(sentences=split)\n            embeddings += embedding_split\n        return embeddings\n    return embedding_client.encode(\n        sentences=texts,\n    )\n","entries = []\nfor city_dict in city_chunks:\n    # No need for the embeddings list if get_embeddings already returns a list of lists\n    embedding_list = get_embeddings(city_dict[\"chunks\"])  # returns a list of lists\n    # Now match texts with embeddings and city name\n    for i, embedding in enumerate(embedding_list):\n        entry = {\n            \"embedding\": embedding,\n            \"sentence\": city_dict[\"chunks\"][\n                i\n            ],  # Assume \"chunks\" has the corresponding texts for the embeddings\n            \"city\": city_dict[\"city_name\"],\n        }\n        entries.append(entry)\n    print(entries)\n","from pymilvus import MilvusClient\n\nCOLLECTION_NAME = \"Bento_Milvus_RAG\"  # random name for your collection\nDIMENSION = 384\n\n# Initialize a Milvus Lite client\nmilvus_client = MilvusClient(\"milvus_demo.db\")\n","from pymilvus import connections\n\nconnections.connect(uri=\"milvus_demo.db\")\n","from pymilvus import MilvusClient, DataType, Collection\n\n# Create schema\nschema = MilvusClient.create_schema(\n    auto_id=True,\n    enable_dynamic_field=True,\n)\n\n# 3.2. 
Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"embedding\", datatype=DataType.FLOAT_VECTOR, dim=DIMENSION)\n","# prepare index parameters\nindex_params = milvus_client.prepare_index_params()\n\n# add index\nindex_params.add_index(\n    field_name=\"embedding\",\n    index_type=\"AUTOINDEX\",  # use autoindex instead of other complex indexing method\n    metric_type=\"COSINE\",  # L2, COSINE, or IP\n)\n\n# create collection\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n    collection_name=COLLECTION_NAME, schema=schema, index_params=index_params\n)\n\n# Outside the loop, now you upsert all the entries at once\nmilvus_client.insert(collection_name=COLLECTION_NAME, data=entries)\n","BENTO_LLM_END_POINT = \"BENTO_LLM_END_POINT\"\n\nllm_client = bentoml.SyncHTTPClient(BENTO_LLM_END_POINT, token=BENTO_API_TOKEN)\n","def dorag(question: str, context: str):\n\n    prompt = (\n        f\"You are a helpful assistant. The user has a question. Answer the user question based only on the context: {context}. \\n\"\n        f\"The user question is {question}\"\n    )\n\n    results = llm_client.generate(\n        max_tokens=1024,\n        prompt=prompt,\n    )\n\n    res = \"\"\n    for result in results:\n        res += result\n\n    return res\n","question = \"What state is Cambridge in?\"\n\n\ndef ask_a_question(question):\n    embeddings = get_embeddings([question])\n    res = milvus_client.search(\n        collection_name=COLLECTION_NAME,\n        data=embeddings,  # search for the one (1) embedding returned as a list of lists\n        anns_field=\"embedding\",  # Search across embeddings\n        limit=5,  # get me the top 5 results\n        output_fields=[\"sentence\"],  # get the sentence/chunk and city\n    )\n\n    sentences = []\n    for hits in res:\n        for hit in hits:\n            print(hit)\n            sentences.append(hit[\"entity\"][\"sentence\"])\n    context = \". \".join(sentences)\n    return context\n\n\ncontext = ask_a_question(question=question)\nprint(context)\n","print(dorag(question=question, context=context))\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and BentoML","anchorList":[{"label":"Geração Aumentada por Recuperação (RAG) com Milvus e BentoML","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-BentoML","type":1,"isActive":false},{"label":"Introdução","href":"Introduction","type":2,"isActive":false},{"label":"Antes de começar","href":"Before-you-begin","type":2,"isActive":false},{"label":"Servir Embeddings com BentoML/BentoCloud","href":"Serving-Embeddings-with-BentoMLBentoCloud","type":2,"isActive":false},{"label":"Inserção de dados numa base de dados vetorial para recuperação","href":"Inserting-Data-into-a-Vector-Database-for-Retrieval","type":2,"isActive":false},{"label":"Criar a sua coleção Milvus Lite","href":"Creating-Your-Milvus-Lite-Collection","type":2,"isActive":false},{"label":"Configurar o LLM para o RAG","href":"Set-up-Your-LLM-for-RAG","type":2,"isActive":false},{"label":"Instruções do LLM","href":"LLM-Instructions","type":2,"isActive":false},{"label":"Um exemplo de RAG","href":"A-RAG-Example","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/pt/integrations/integrate_with_bentoml.md b/localization/v2.4.x/site/pt/integrations/integrate_with_bentoml.md
    index 49c605cc9..6601e41a9 100644
    --- a/localization/v2.4.x/site/pt/integrations/integrate_with_bentoml.md
    +++ b/localization/v2.4.x/site/pt/integrations/integrate_with_bentoml.md
    @@ -21,7 +21,8 @@ title: Geração Aumentada por Recuperação (RAG) com Milvus e BentoML
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

-Open In Colab
+Open In Colab GitHub Repository

Introduction

-Open In Colab
+Open In Colab GitHub Repository

This guide demonstrates how to build a RAG (Retrieval-Augmented Generation) system using CAMEL and Milvus.

The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

CAMEL is a multi-agent framework. Milvus is the world's most advanced open-source vector database, built to power embedding similarity search and AI applications.

    @@ -116,7 +117,7 @@ vector_retriever = VectorRetriever( embedding_model=embedding_instance, storage=storage_instance )
We use the integrated Unstructured Module to split the content into small chunks. The content is split automatically by its chunk_by_title function, with a maximum of 500 characters per chunk, which is a suitable length for OpenAIEmbedding. All the text in the chunks is embedded and stored in the vector storage instance; this will take some time, so please wait.

    vector_retriever.process(content_input_path="local_data/camel paper.pdf")
     
    [nltk_data] Downloading package punkt to /root/nltk_data...
    diff --git a/localization/v2.4.x/site/pt/integrations/integrate_with_dspy.json b/localization/v2.4.x/site/pt/integrations/integrate_with_dspy.json
    index 488888618..85b578539 100644
    --- a/localization/v2.4.x/site/pt/integrations/integrate_with_dspy.json
    +++ b/localization/v2.4.x/site/pt/integrations/integrate_with_dspy.json
    @@ -1 +1 @@
    -{"codeList":["$ pip install \"dspy-ai[milvus]\"\n$ pip install -U pymilvus\n","from dspy.datasets import HotPotQA\n\n# Load the dataset.\ndataset = HotPotQA(\n    train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0\n)\n\n# Tell DSPy that the 'question' field is the input. Any other fields are labels and/or metadata.\ntrainset = [x.with_inputs(\"question\") for x in dataset.train]\ndevset = [x.with_inputs(\"question\") for x in dataset.dev]\n","import requests\nimport os\n\nos.environ[\"OPENAI_API_KEY\"] = \"\"\nMILVUS_URI = \"example.db\"\nMILVUS_TOKEN = \"\"\n\nfrom pymilvus import MilvusClient, DataType, Collection\nfrom dspy.retrieve.milvus_rm import openai_embedding_function\n\nclient = MilvusClient(uri=MILVUS_URI, token=MILVUS_TOKEN)\n\nif \"dspy_example\" not in client.list_collections():\n    client.create_collection(\n        collection_name=\"dspy_example\",\n        overwrite=True,\n        dimension=1536,\n        primary_field_name=\"id\",\n        vector_field_name=\"embedding\",\n        id_type=\"int\",\n        metric_type=\"IP\",\n        max_length=65535,\n        enable_dynamic=True,\n    )\ntext = requests.get(\n    \"https://raw.githubusercontent.com/wxywb/dspy_dataset_sample/master/sample_data.txt\"\n).text\n\nfor idx, passage in enumerate(text.split(\"\\n\")):\n    if len(passage) == 0:\n        continue\n    client.insert(\n        collection_name=\"dspy_example\",\n        data=[\n            {\n                \"id\": idx,\n                \"embedding\": openai_embedding_function(passage)[0],\n                \"text\": passage,\n            }\n        ],\n    )\n","from dspy.retrieve.milvus_rm import MilvusRM\nimport dspy\n\nretriever_model = MilvusRM(\n    collection_name=\"dspy_example\",\n    uri=MILVUS_URI,\n    token=MILVUS_TOKEN,  # ignore this if no token is required for Milvus connection\n    embedding_function=openai_embedding_function,\n)\nturbo = dspy.OpenAI(model=\"gpt-3.5-turbo\")\ndspy.settings.configure(lm=turbo)\n","class GenerateAnswer(dspy.Signature):\n    \"\"\"Answer questions with short factoid answers.\"\"\"\n\n    context = dspy.InputField(desc=\"may contain relevant facts\")\n    question = dspy.InputField()\n    answer = dspy.OutputField(desc=\"often between 1 and 5 words\")\n","class RAG(dspy.Module):\n    def __init__(self, rm):\n        super().__init__()\n        self.retrieve = rm\n\n        # This signature indicates the task imposed on the COT module.\n        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)\n\n    def forward(self, question):\n        # Use milvus_rm to retrieve context for the question.\n        context = self.retrieve(question).passages\n        # COT module takes \"context, query\" and output \"answer\".\n        prediction = self.generate_answer(context=context, question=question)\n        return dspy.Prediction(\n            context=[item.long_text for item in context], answer=prediction.answer\n        )\n","rag = RAG(retriever_model)\nprint(rag(\"who write At My Window\").answer)\n","from dspy.evaluate.evaluate import Evaluate\nfrom dspy.datasets import HotPotQA\n\nevaluate_on_hotpotqa = Evaluate(\n    devset=devset, num_threads=1, display_progress=False, display_table=5\n)\n\nmetric = dspy.evaluate.answer_exact_match\nscore = evaluate_on_hotpotqa(rag, metric=metric)\nprint(\"rag:\", score)\n","from dspy.teleprompt import BootstrapFewShot\n\n# Validation logic: check that the predicted answer is correct.# Also check that the retrieved context does contain that 
answer.\n\n\ndef validate_context_and_answer(example, pred, trace=None):\n    answer_EM = dspy.evaluate.answer_exact_match(example, pred)\n    answer_PM = dspy.evaluate.answer_passage_match(example, pred)\n    return answer_EM and answer_PM\n\n\n# Set up a basic teleprompter, which will compile our RAG program.\nteleprompter = BootstrapFewShot(metric=validate_context_and_answer)\n\n# Compile!\ncompiled_rag = teleprompter.compile(rag, trainset=trainset)\n\n# Now compiled_rag is optimized and ready to answer your new question!\n# Now, let’s evaluate the compiled RAG program.\nscore = evaluate_on_hotpotqa(compiled_rag, metric=metric)\nprint(score)\nprint(\"compile_rag:\", score)\n"],"headingContent":"","anchorList":[{"label":"Integrar Milvus com DSPy","href":"Integrate-Milvus-with-DSPy","type":1,"isActive":false},{"label":"O que é DSPy","href":"What-is-DSPy","type":2,"isActive":false},{"label":"Benefícios da utilização de DSPy","href":"Benefits-of-using-DSPy","type":2,"isActive":false},{"label":"Módulos","href":"Modules","type":2,"isActive":false},{"label":"Porquê Milvus em DSPy","href":"Why-Milvus-in-DSPy","type":2,"isActive":false},{"label":"Exemplos","href":"Examples","type":2,"isActive":false},{"label":"Resumo","href":"Summary","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ pip install \"dspy-ai[milvus]\"\n$ pip install -U pymilvus\n","from dspy.datasets import HotPotQA\n\n# Load the dataset.\ndataset = HotPotQA(\n    train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0\n)\n\n# Tell DSPy that the 'question' field is the input. Any other fields are labels and/or metadata.\ntrainset = [x.with_inputs(\"question\") for x in dataset.train]\ndevset = [x.with_inputs(\"question\") for x in dataset.dev]\n","import requests\nimport os\n\nos.environ[\"OPENAI_API_KEY\"] = \"\"\nMILVUS_URI = \"example.db\"\nMILVUS_TOKEN = \"\"\n\nfrom pymilvus import MilvusClient, DataType, Collection\nfrom dspy.retrieve.milvus_rm import openai_embedding_function\n\nclient = MilvusClient(uri=MILVUS_URI, token=MILVUS_TOKEN)\n\nif \"dspy_example\" not in client.list_collections():\n    client.create_collection(\n        collection_name=\"dspy_example\",\n        overwrite=True,\n        dimension=1536,\n        primary_field_name=\"id\",\n        vector_field_name=\"embedding\",\n        id_type=\"int\",\n        metric_type=\"IP\",\n        max_length=65535,\n        enable_dynamic=True,\n    )\ntext = requests.get(\n    \"https://raw.githubusercontent.com/wxywb/dspy_dataset_sample/master/sample_data.txt\"\n).text\n\nfor idx, passage in enumerate(text.split(\"\\n\")):\n    if len(passage) == 0:\n        continue\n    client.insert(\n        collection_name=\"dspy_example\",\n        data=[\n            {\n                \"id\": idx,\n                \"embedding\": openai_embedding_function(passage)[0],\n                \"text\": passage,\n            }\n        ],\n    )\n","from dspy.retrieve.milvus_rm import MilvusRM\nimport dspy\n\nretriever_model = MilvusRM(\n    collection_name=\"dspy_example\",\n    uri=MILVUS_URI,\n    token=MILVUS_TOKEN,  # ignore this if no token is required for Milvus connection\n    embedding_function=openai_embedding_function,\n)\nturbo = dspy.OpenAI(model=\"gpt-3.5-turbo\")\ndspy.settings.configure(lm=turbo)\n","class GenerateAnswer(dspy.Signature):\n    \"\"\"Answer questions with short factoid answers.\"\"\"\n\n    context = dspy.InputField(desc=\"may contain relevant facts\")\n    question = dspy.InputField()\n    answer = dspy.OutputField(desc=\"often between 1 and 5 words\")\n","class RAG(dspy.Module):\n    def __init__(self, rm):\n        super().__init__()\n        self.retrieve = rm\n\n        # This signature indicates the task imposed on the COT module.\n        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)\n\n    def forward(self, question):\n        # Use milvus_rm to retrieve context for the question.\n        context = self.retrieve(question).passages\n        # COT module takes \"context, query\" and output \"answer\".\n        prediction = self.generate_answer(context=context, question=question)\n        return dspy.Prediction(\n            context=[item.long_text for item in context], answer=prediction.answer\n        )\n","rag = RAG(retriever_model)\nprint(rag(\"who write At My Window\").answer)\n","from dspy.evaluate.evaluate import Evaluate\nfrom dspy.datasets import HotPotQA\n\nevaluate_on_hotpotqa = Evaluate(\n    devset=devset, num_threads=1, display_progress=False, display_table=5\n)\n\nmetric = dspy.evaluate.answer_exact_match\nscore = evaluate_on_hotpotqa(rag, metric=metric)\nprint(\"rag:\", score)\n","from dspy.teleprompt import BootstrapFewShot\n\n# Validation logic: check that the predicted answer is correct.# Also check that the retrieved context does contain that 
answer.\n\n\ndef validate_context_and_answer(example, pred, trace=None):\n    answer_EM = dspy.evaluate.answer_exact_match(example, pred)\n    answer_PM = dspy.evaluate.answer_passage_match(example, pred)\n    return answer_EM and answer_PM\n\n\n# Set up a basic teleprompter, which will compile our RAG program.\nteleprompter = BootstrapFewShot(metric=validate_context_and_answer)\n\n# Compile!\ncompiled_rag = teleprompter.compile(rag, trainset=trainset)\n\n# Now compiled_rag is optimized and ready to answer your new question!\n# Now, let’s evaluate the compiled RAG program.\nscore = evaluate_on_hotpotqa(compiled_rag, metric=metric)\nprint(score)\nprint(\"compile_rag:\", score)\n"],"headingContent":"Integrate Milvus with DSPy","anchorList":[{"label":"Integrar Milvus com DSPy","href":"Integrate-Milvus-with-DSPy","type":1,"isActive":false},{"label":"O que é DSPy","href":"What-is-DSPy","type":2,"isActive":false},{"label":"Benefícios da utilização de DSPy","href":"Benefits-of-using-DSPy","type":2,"isActive":false},{"label":"Módulos","href":"Modules","type":2,"isActive":false},{"label":"Porquê Milvus em DSPy","href":"Why-Milvus-in-DSPy","type":2,"isActive":false},{"label":"Exemplos","href":"Examples","type":2,"isActive":false},{"label":"Resumo","href":"Summary","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/pt/integrations/integrate_with_dspy.md b/localization/v2.4.x/site/pt/integrations/integrate_with_dspy.md
    index ec0024032..4f8fcd79e 100644
    --- a/localization/v2.4.x/site/pt/integrations/integrate_with_dspy.md
    +++ b/localization/v2.4.x/site/pt/integrations/integrate_with_dspy.md
    @@ -20,7 +20,8 @@ title: Integrar Milvus com DSPy
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

-Open In Colab
+Open In Colab GitHub Repository

What is DSPy

After defining this program, the next step is compilation. This process updates the parameters in each module to improve performance. The compilation process depends on three critical factors:

• Training set: We will use the 20 question-answer examples from our training dataset for this demonstration.
• Validation metric: We will set up a simple validate_context_and_answer metric. This metric verifies the accuracy of the predicted answer and ensures that the retrieved context includes the answer.
• Specific optimizer (Teleprompter): The DSPy compiler incorporates several teleprompters designed to optimize your programs effectively.
    from dspy.teleprompt import BootstrapFewShot
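The code here is cut off at the import by the hunk boundary; for context, the compilation step continues as in the accompanying code listing:

# Set up a basic teleprompter, which will compile our RAG program.
teleprompter = BootstrapFewShot(metric=validate_context_and_answer)

# Compile!
compiled_rag = teleprompter.compile(rag, trainset=trainset)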
    diff --git a/localization/v2.4.x/site/pt/integrations/integrate_with_haystack.json b/localization/v2.4.x/site/pt/integrations/integrate_with_haystack.json
    index 3244fbf90..9dc7eb4bf 100644
    --- a/localization/v2.4.x/site/pt/integrations/integrate_with_haystack.json
    +++ b/localization/v2.4.x/site/pt/integrations/integrate_with_haystack.json
    @@ -1 +1 @@
    -{"codeList":["! pip install --upgrade --quiet pymilvus milvus-haystack markdown-it-py mdit_plain\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","import os\nimport urllib.request\n\nurl = \"https://www.gutenberg.org/cache/epub/7785/pg7785.txt\"\nfile_path = \"./davinci.txt\"\n\nif not os.path.exists(file_path):\n    urllib.request.urlretrieve(url, file_path)\n","from haystack import Pipeline\nfrom haystack.components.converters import MarkdownToDocument\nfrom haystack.components.embedders import OpenAIDocumentEmbedder, OpenAITextEmbedder\nfrom haystack.components.preprocessors import DocumentSplitter\nfrom haystack.components.writers import DocumentWriter\nfrom haystack.utils import Secret\n\nfrom milvus_haystack import MilvusDocumentStore\nfrom milvus_haystack.milvus_embedding_retriever import MilvusEmbeddingRetriever\n\n\ndocument_store = MilvusDocumentStore(\n    connection_args={\"uri\": \"./milvus.db\"},\n    # connection_args={\"uri\": \"http://localhost:19530\"},\n    # connection_args={\"uri\": YOUR_ZILLIZ_CLOUD_URI, \"token\": Secret.from_env_var(\"ZILLIZ_CLOUD_API_KEY\")},\n    drop_old=True,\n)\n","indexing_pipeline = Pipeline()\nindexing_pipeline.add_component(\"converter\", MarkdownToDocument())\nindexing_pipeline.add_component(\n    \"splitter\", DocumentSplitter(split_by=\"sentence\", split_length=2)\n)\nindexing_pipeline.add_component(\"embedder\", OpenAIDocumentEmbedder())\nindexing_pipeline.add_component(\"writer\", DocumentWriter(document_store))\nindexing_pipeline.connect(\"converter\", \"splitter\")\nindexing_pipeline.connect(\"splitter\", \"embedder\")\nindexing_pipeline.connect(\"embedder\", \"writer\")\nindexing_pipeline.run({\"converter\": {\"sources\": [file_path]}})\n\nprint(\"Number of documents:\", document_store.count_documents())\n","question = 'Where is the painting \"Warrior\" currently stored?'\n\nretrieval_pipeline = Pipeline()\nretrieval_pipeline.add_component(\"embedder\", OpenAITextEmbedder())\nretrieval_pipeline.add_component(\n    \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nretrieval_pipeline.connect(\"embedder\", \"retriever\")\n\nretrieval_results = retrieval_pipeline.run({\"embedder\": {\"text\": question}})\n\nfor doc in retrieval_results[\"retriever\"][\"documents\"]:\n    print(doc.content)\n    print(\"-\" * 10)\n","from haystack.utils import Secret\nfrom haystack.components.builders import PromptBuilder\nfrom haystack.components.generators import OpenAIGenerator\n\nprompt_template = \"\"\"Answer the following query based on the provided context. 
If the context does\n                     not include an answer, reply with 'I don't know'.\\n\n                     Query: {{query}}\n                     Documents:\n                     {% for doc in documents %}\n                        {{ doc.content }}\n                     {% endfor %}\n                     Answer:\n                  \"\"\"\n\nrag_pipeline = Pipeline()\nrag_pipeline.add_component(\"text_embedder\", OpenAITextEmbedder())\nrag_pipeline.add_component(\n    \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nrag_pipeline.add_component(\"prompt_builder\", PromptBuilder(template=prompt_template))\nrag_pipeline.add_component(\n    \"generator\",\n    OpenAIGenerator(\n        api_key=Secret.from_token(os.getenv(\"OPENAI_API_KEY\")),\n        generation_kwargs={\"temperature\": 0},\n    ),\n)\nrag_pipeline.connect(\"text_embedder.embedding\", \"retriever.query_embedding\")\nrag_pipeline.connect(\"retriever.documents\", \"prompt_builder.documents\")\nrag_pipeline.connect(\"prompt_builder\", \"generator\")\n\nresults = rag_pipeline.run(\n    {\n        \"text_embedder\": {\"text\": question},\n        \"prompt_builder\": {\"query\": question},\n    }\n)\nprint(\"RAG answer:\", results[\"generator\"][\"replies\"][0])\n"],"headingContent":"","anchorList":[{"label":"Geração Aumentada por Recuperação (RAG) com Milvus e Haystack","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Haystack","type":1,"isActive":false},{"label":"Pré-requisitos","href":"Prerequisites","type":2,"isActive":false},{"label":"Preparar os dados","href":"Prepare-the-data","type":2,"isActive":false},{"label":"Criar o pipeline de indexação","href":"Create-the-indexing-Pipeline","type":2,"isActive":false},{"label":"Criar o pipeline de recuperação","href":"Create-the-retrieval-pipeline","type":2,"isActive":false},{"label":"Criar o pipeline RAG","href":"Create-the-RAG-pipeline","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["! pip install --upgrade --quiet pymilvus milvus-haystack markdown-it-py mdit_plain\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","import os\nimport urllib.request\n\nurl = \"https://www.gutenberg.org/cache/epub/7785/pg7785.txt\"\nfile_path = \"./davinci.txt\"\n\nif not os.path.exists(file_path):\n    urllib.request.urlretrieve(url, file_path)\n","from haystack import Pipeline\nfrom haystack.components.converters import MarkdownToDocument\nfrom haystack.components.embedders import OpenAIDocumentEmbedder, OpenAITextEmbedder\nfrom haystack.components.preprocessors import DocumentSplitter\nfrom haystack.components.writers import DocumentWriter\nfrom haystack.utils import Secret\n\nfrom milvus_haystack import MilvusDocumentStore\nfrom milvus_haystack.milvus_embedding_retriever import MilvusEmbeddingRetriever\n\n\ndocument_store = MilvusDocumentStore(\n    connection_args={\"uri\": \"./milvus.db\"},\n    # connection_args={\"uri\": \"http://localhost:19530\"},\n    # connection_args={\"uri\": YOUR_ZILLIZ_CLOUD_URI, \"token\": Secret.from_env_var(\"ZILLIZ_CLOUD_API_KEY\")},\n    drop_old=True,\n)\n","indexing_pipeline = Pipeline()\nindexing_pipeline.add_component(\"converter\", MarkdownToDocument())\nindexing_pipeline.add_component(\n    \"splitter\", DocumentSplitter(split_by=\"sentence\", split_length=2)\n)\nindexing_pipeline.add_component(\"embedder\", OpenAIDocumentEmbedder())\nindexing_pipeline.add_component(\"writer\", DocumentWriter(document_store))\nindexing_pipeline.connect(\"converter\", \"splitter\")\nindexing_pipeline.connect(\"splitter\", \"embedder\")\nindexing_pipeline.connect(\"embedder\", \"writer\")\nindexing_pipeline.run({\"converter\": {\"sources\": [file_path]}})\n\nprint(\"Number of documents:\", document_store.count_documents())\n","question = 'Where is the painting \"Warrior\" currently stored?'\n\nretrieval_pipeline = Pipeline()\nretrieval_pipeline.add_component(\"embedder\", OpenAITextEmbedder())\nretrieval_pipeline.add_component(\n    \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nretrieval_pipeline.connect(\"embedder\", \"retriever\")\n\nretrieval_results = retrieval_pipeline.run({\"embedder\": {\"text\": question}})\n\nfor doc in retrieval_results[\"retriever\"][\"documents\"]:\n    print(doc.content)\n    print(\"-\" * 10)\n","from haystack.utils import Secret\nfrom haystack.components.builders import PromptBuilder\nfrom haystack.components.generators import OpenAIGenerator\n\nprompt_template = \"\"\"Answer the following query based on the provided context. 
If the context does\n                     not include an answer, reply with 'I don't know'.\\n\n                     Query: {{query}}\n                     Documents:\n                     {% for doc in documents %}\n                        {{ doc.content }}\n                     {% endfor %}\n                     Answer:\n                  \"\"\"\n\nrag_pipeline = Pipeline()\nrag_pipeline.add_component(\"text_embedder\", OpenAITextEmbedder())\nrag_pipeline.add_component(\n    \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nrag_pipeline.add_component(\"prompt_builder\", PromptBuilder(template=prompt_template))\nrag_pipeline.add_component(\n    \"generator\",\n    OpenAIGenerator(\n        api_key=Secret.from_token(os.getenv(\"OPENAI_API_KEY\")),\n        generation_kwargs={\"temperature\": 0},\n    ),\n)\nrag_pipeline.connect(\"text_embedder.embedding\", \"retriever.query_embedding\")\nrag_pipeline.connect(\"retriever.documents\", \"prompt_builder.documents\")\nrag_pipeline.connect(\"prompt_builder\", \"generator\")\n\nresults = rag_pipeline.run(\n    {\n        \"text_embedder\": {\"text\": question},\n        \"prompt_builder\": {\"query\": question},\n    }\n)\nprint(\"RAG answer:\", results[\"generator\"][\"replies\"][0])\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and Haystack","anchorList":[{"label":"Geração Aumentada por Recuperação (RAG) com Milvus e Haystack","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Haystack","type":1,"isActive":false},{"label":"Pré-requisitos","href":"Prerequisites","type":2,"isActive":false},{"label":"Preparar os dados","href":"Prepare-the-data","type":2,"isActive":false},{"label":"Criar o pipeline de indexação","href":"Create-the-indexing-Pipeline","type":2,"isActive":false},{"label":"Criar o pipeline de recuperação","href":"Create-the-retrieval-pipeline","type":2,"isActive":false},{"label":"Criar o pipeline RAG","href":"Create-the-RAG-pipeline","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/pt/integrations/integrate_with_haystack.md b/localization/v2.4.x/site/pt/integrations/integrate_with_haystack.md
    index 6598987f3..c7648a89d 100644
    --- a/localization/v2.4.x/site/pt/integrations/integrate_with_haystack.md
    +++ b/localization/v2.4.x/site/pt/integrations/integrate_with_haystack.md
    @@ -20,7 +20,8 @@ title: Geração Aumentada por Recuperação (RAG) com Milvus e Haystack
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

    Open In Colab

    +

    Open In Colab +GitHub Repository

This guide demonstrates how to build a Retrieval-Augmented Generation (RAG) system using Haystack and Milvus.

The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

Haystack is the open-source Python framework by deepset for building custom applications with large language models (LLMs). Milvus is the world's most advanced open-source vector database, built to power embedding similarity search and AI applications.
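For orientation before the detailed steps, here is a minimal sketch of the Milvus side of such a pipeline, reusing the MilvusDocumentStore and MilvusEmbeddingRetriever calls that appear in this guide's code; the local milvus.db file is the Milvus Lite setup the guide assumes.

from milvus_haystack import MilvusDocumentStore
from milvus_haystack.milvus_embedding_retriever import MilvusEmbeddingRetriever

# A local Milvus Lite file; replace the URI with a server address in production.
document_store = MilvusDocumentStore(connection_args={"uri": "./milvus.db"}, drop_old=True)

# The retrieval half of RAG: fetch the documents most similar to a query embedding.
retriever = MilvusEmbeddingRetriever(document_store=document_store, top_k=3)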

@@ -39,11 +40,11 @@ title: Geração Aumentada por Recuperação (RAG) com Milvus e Haystack
-

Before running this notebook, make sure you have the following dependencies installed:

    +

Before running this notebook, make sure you have the following dependencies installed:

    ! pip install --upgrade --quiet pymilvus milvus-haystack markdown-it-py mdit_plain
     
    -

If you are using Google Colab, to enable the dependencies you have just installed, you may need to restart the runtime (click the "Runtime" menu at the top of the screen, and select "Restart session" from the dropdown menu).

    +

If you are using Google Colab, to enable the newly installed dependencies, you may need to restart the runtime (click the "Runtime" menu at the top of the screen, and select "Restart session" from the dropdown menu).

We will use the OpenAI models. You should prepare the OPENAI_API_KEY api key as an environment variable.

    import os
    @@ -65,7 +66,7 @@ os.environ["OP
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

We use online content about Leonardo Da Vinci as the private knowledge store for our RAG pipeline, which is a good data source for a simple RAG pipeline.

    +

We use online content about Leonardo Da Vinci as a private knowledge store for our RAG pipeline, which is a good data source for a simple RAG pipeline.

Download it and save it as a local text file.

    import os
     import urllib.request
    diff --git a/localization/v2.4.x/site/pt/integrations/integrate_with_hugging-face.json b/localization/v2.4.x/site/pt/integrations/integrate_with_hugging-face.json
    index 8c4b037c4..f798711f4 100644
    --- a/localization/v2.4.x/site/pt/integrations/integrate_with_hugging-face.json
    +++ b/localization/v2.4.x/site/pt/integrations/integrate_with_hugging-face.json
    @@ -1 +1 @@
    -{"codeList":["$ pip install --upgrade pymilvus transformers datasets torch\n","from datasets import load_dataset\n\n\nDATASET = \"squad\"  # Name of dataset from HuggingFace Datasets\nINSERT_RATIO = 0.001  # Ratio of example dataset to be inserted\n\ndata = load_dataset(DATASET, split=\"validation\")\n# Generates a fixed subset. To generate a random subset, remove the seed.\ndata = data.train_test_split(test_size=INSERT_RATIO, seed=42)[\"test\"]\n# Clean up the data structure in the dataset.\ndata = data.map(\n    lambda val: {\"answer\": val[\"answers\"][\"text\"][0]},\n    remove_columns=[\"id\", \"answers\", \"context\"],\n)\n\n# View summary of example data\nprint(data)\n","from transformers import AutoTokenizer, AutoModel\nimport torch\n\nMODEL = (\n    \"sentence-transformers/all-MiniLM-L6-v2\"  # Name of model from HuggingFace Models\n)\nINFERENCE_BATCH_SIZE = 64  # Batch size of model inference\n\n# Load tokenizer & model from HuggingFace Hub\ntokenizer = AutoTokenizer.from_pretrained(MODEL)\nmodel = AutoModel.from_pretrained(MODEL)\n\n\ndef encode_text(batch):\n    # Tokenize sentences\n    encoded_input = tokenizer(\n        batch[\"question\"], padding=True, truncation=True, return_tensors=\"pt\"\n    )\n\n    # Compute token embeddings\n    with torch.no_grad():\n        model_output = model(**encoded_input)\n\n    # Perform pooling\n    token_embeddings = model_output[0]\n    attention_mask = encoded_input[\"attention_mask\"]\n    input_mask_expanded = (\n        attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n    )\n    sentence_embeddings = torch.sum(\n        token_embeddings * input_mask_expanded, 1\n    ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n\n    # Normalize embeddings\n    batch[\"question_embedding\"] = torch.nn.functional.normalize(\n        sentence_embeddings, p=2, dim=1\n    )\n    return batch\n\n\ndata = data.map(encode_text, batched=True, batch_size=INFERENCE_BATCH_SIZE)\ndata_list = data.to_list()\n","from pymilvus import MilvusClient\n\n\nMILVUS_URI = \"./huggingface_milvus_test.db\"  # Connection URI\nCOLLECTION_NAME = \"huggingface_test\"  # Collection name\nDIMENSION = 384  # Embedding dimension depending on model\n\nmilvus_client = MilvusClient(MILVUS_URI)\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n    collection_name=COLLECTION_NAME,\n    dimension=DIMENSION,\n    auto_id=True,  # Enable auto id\n    enable_dynamic_field=True,  # Enable dynamic fields\n    vector_field_name=\"question_embedding\",  # Map vector field name and embedding column in dataset\n    consistency_level=\"Strong\",  # To enable search with latest data\n)\n","milvus_client.insert(collection_name=COLLECTION_NAME, data=data_list)\n","questions = {\n    \"question\": [\n        \"What is LGM?\",\n        \"When did Massachusetts first mandate that children be educated in schools?\",\n    ]\n}\n\n# Generate question embeddings\nquestion_embeddings = [v.tolist() for v in encode_text(questions)[\"question_embedding\"]]\n\n# Search across Milvus\nsearch_results = milvus_client.search(\n    collection_name=COLLECTION_NAME,\n    data=question_embeddings,\n    limit=3,  # How many search results to output\n    output_fields=[\"answer\", \"question\"],  # Include these fields in search results\n)\n\n# Print out results\nfor q, res in zip(questions[\"question\"], search_results):\n    print(\"Question:\", q)\n    for r in res:\n 
       print(\n            {\n                \"answer\": r[\"entity\"][\"answer\"],\n                \"score\": r[\"distance\"],\n                \"original question\": r[\"entity\"][\"question\"],\n            }\n        )\n    print(\"\\n\")\n"],"headingContent":"","anchorList":[{"label":"Resposta a perguntas utilizando Milvus e Hugging Face","href":"Question-Answering-Using-Milvus-and-Hugging-Face","type":1,"isActive":false},{"label":"Antes de começar","href":"Before-you-begin","type":2,"isActive":false},{"label":"Preparar dados","href":"Prepare-data","type":2,"isActive":false},{"label":"Inserir dados","href":"Insert-data","type":2,"isActive":false},{"label":"Fazer perguntas","href":"Ask-questions","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ pip install --upgrade pymilvus transformers datasets torch\n","from datasets import load_dataset\n\n\nDATASET = \"squad\"  # Name of dataset from HuggingFace Datasets\nINSERT_RATIO = 0.001  # Ratio of example dataset to be inserted\n\ndata = load_dataset(DATASET, split=\"validation\")\n# Generates a fixed subset. To generate a random subset, remove the seed.\ndata = data.train_test_split(test_size=INSERT_RATIO, seed=42)[\"test\"]\n# Clean up the data structure in the dataset.\ndata = data.map(\n    lambda val: {\"answer\": val[\"answers\"][\"text\"][0]},\n    remove_columns=[\"id\", \"answers\", \"context\"],\n)\n\n# View summary of example data\nprint(data)\n","from transformers import AutoTokenizer, AutoModel\nimport torch\n\nMODEL = (\n    \"sentence-transformers/all-MiniLM-L6-v2\"  # Name of model from HuggingFace Models\n)\nINFERENCE_BATCH_SIZE = 64  # Batch size of model inference\n\n# Load tokenizer & model from HuggingFace Hub\ntokenizer = AutoTokenizer.from_pretrained(MODEL)\nmodel = AutoModel.from_pretrained(MODEL)\n\n\ndef encode_text(batch):\n    # Tokenize sentences\n    encoded_input = tokenizer(\n        batch[\"question\"], padding=True, truncation=True, return_tensors=\"pt\"\n    )\n\n    # Compute token embeddings\n    with torch.no_grad():\n        model_output = model(**encoded_input)\n\n    # Perform pooling\n    token_embeddings = model_output[0]\n    attention_mask = encoded_input[\"attention_mask\"]\n    input_mask_expanded = (\n        attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n    )\n    sentence_embeddings = torch.sum(\n        token_embeddings * input_mask_expanded, 1\n    ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n\n    # Normalize embeddings\n    batch[\"question_embedding\"] = torch.nn.functional.normalize(\n        sentence_embeddings, p=2, dim=1\n    )\n    return batch\n\n\ndata = data.map(encode_text, batched=True, batch_size=INFERENCE_BATCH_SIZE)\ndata_list = data.to_list()\n","from pymilvus import MilvusClient\n\n\nMILVUS_URI = \"./huggingface_milvus_test.db\"  # Connection URI\nCOLLECTION_NAME = \"huggingface_test\"  # Collection name\nDIMENSION = 384  # Embedding dimension depending on model\n\nmilvus_client = MilvusClient(MILVUS_URI)\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n    collection_name=COLLECTION_NAME,\n    dimension=DIMENSION,\n    auto_id=True,  # Enable auto id\n    enable_dynamic_field=True,  # Enable dynamic fields\n    vector_field_name=\"question_embedding\",  # Map vector field name and embedding column in dataset\n    consistency_level=\"Strong\",  # To enable search with latest data\n)\n","milvus_client.insert(collection_name=COLLECTION_NAME, data=data_list)\n","questions = {\n    \"question\": [\n        \"What is LGM?\",\n        \"When did Massachusetts first mandate that children be educated in schools?\",\n    ]\n}\n\n# Generate question embeddings\nquestion_embeddings = [v.tolist() for v in encode_text(questions)[\"question_embedding\"]]\n\n# Search across Milvus\nsearch_results = milvus_client.search(\n    collection_name=COLLECTION_NAME,\n    data=question_embeddings,\n    limit=3,  # How many search results to output\n    output_fields=[\"answer\", \"question\"],  # Include these fields in search results\n)\n\n# Print out results\nfor q, res in zip(questions[\"question\"], search_results):\n    print(\"Question:\", q)\n    for r in res:\n 
       print(\n            {\n                \"answer\": r[\"entity\"][\"answer\"],\n                \"score\": r[\"distance\"],\n                \"original question\": r[\"entity\"][\"question\"],\n            }\n        )\n    print(\"\\n\")\n"],"headingContent":"Question Answering Using Milvus and Hugging Face","anchorList":[{"label":"Resposta a perguntas utilizando Milvus e Hugging Face","href":"Question-Answering-Using-Milvus-and-Hugging-Face","type":1,"isActive":false},{"label":"Antes de começar","href":"Before-you-begin","type":2,"isActive":false},{"label":"Preparar dados","href":"Prepare-data","type":2,"isActive":false},{"label":"Inserir dados","href":"Insert-data","type":2,"isActive":false},{"label":"Fazer perguntas","href":"Ask-questions","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/pt/integrations/integrate_with_hugging-face.md b/localization/v2.4.x/site/pt/integrations/integrate_with_hugging-face.md
    index 2604fde9a..dc05c87ea 100644
    --- a/localization/v2.4.x/site/pt/integrations/integrate_with_hugging-face.md
    +++ b/localization/v2.4.x/site/pt/integrations/integrate_with_hugging-face.md
    @@ -22,7 +22,8 @@ title: Resposta a perguntas utilizando Milvus e Hugging Face
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

    Open In Colab

    +

    Open In Colab +GitHub Repository

A question answering system based on semantic search works by finding, for a given query question, the most similar question in a dataset of question-answer pairs. Once the most similar question is identified, the corresponding answer from the dataset is taken as the answer to the query. This approach relies on semantic similarity measures to determine the similarity between questions and to retrieve relevant answers.

This tutorial shows how to build a question answering system using Hugging Face as the data loader and embedding generator for data processing, and Milvus as the vector database for semantic search.
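As a preview of the Milvus side, the sketch below creates the collection that will hold the question embeddings; the file name, collection name, and dimension are the values used in this tutorial's code, and this is a sketch rather than the full workflow.

from pymilvus import MilvusClient

milvus_client = MilvusClient("./huggingface_milvus_test.db")  # local Milvus Lite file
milvus_client.create_collection(
    collection_name="huggingface_test",
    dimension=384,  # output size of sentence-transformers/all-MiniLM-L6-v2
    auto_id=True,
    enable_dynamic_field=True,
    vector_field_name="question_embedding",  # maps the embedding column in the dataset
    consistency_level="Strong",  # so searches see the latest inserted data
)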

Before you begin

We now have question-answer pairs ready with question embeddings. The next step is to insert them into the vector database.

    +

We now have question-answer pairs ready with question embeddings. The next step is to insert them into the vector database.

First, we need to connect to the Milvus service and create a Milvus collection.

    from pymilvus import MilvusClient
     
    diff --git a/localization/v2.4.x/site/pt/integrations/integrate_with_jina.json b/localization/v2.4.x/site/pt/integrations/integrate_with_jina.json
    index a7abd50e3..37cda2cab 100644
    --- a/localization/v2.4.x/site/pt/integrations/integrate_with_jina.json
    +++ b/localization/v2.4.x/site/pt/integrations/integrate_with_jina.json
    @@ -1 +1 @@
    -{"codeList":["$ pip install -U pymilvus\n$ pip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_api_key = \"\"\nef = JinaEmbeddingFunction(\"jina-embeddings-v2-base-en\", jina_api_key)\n\nquery = \"what is information retrieval?\"\ndoc = \"Information retrieval is the process of finding relevant information from a large collection of data or documents.\"\n\nqvecs = ef.encode_queries([query])\ndvecs = ef.encode_documents([doc])\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_api_key = \"\"\nef = JinaEmbeddingFunction(\"jina-embeddings-v2-base-de\", jina_api_key)\n\nquery = \"what is information retrieval?\"\ndoc = \"Information Retrieval ist der Prozess, relevante Informationen aus einer großen Sammlung von Daten oder Dokumenten zu finden.\"\n\nqvecs = ef.encode_queries([query])\ndvecs = ef.encode_documents([doc])\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_api_key = \"\"\nef = JinaEmbeddingFunction(\"jina-embeddings-v2-base-code\", jina_api_key)\n\n# Case1: Enhanced Code Navigation\n# query: text description of the functionality\n# document: relevant code snippet\n\nquery = \"function to calculate average in Python.\"\ndoc = \"\"\"\ndef calculate_average(numbers):\n    total = sum(numbers)\n    count = len(numbers)\n    return total / count\n\"\"\"\n\n# Case2: Streamlined Code Review\n# query: text description of the programming concept\n# document: relevante code snippet or PR\n\nquery = \"pull quest related to Collection\"\ndoc = \"fix:[restful v2] parameters of create collection ...\"\n\n# Case3: Automatic Documentation Assistance\n# query: code snippet you need explanation\n# document: relevante document or DocsString\n\nquery = \"What is Collection in Milvus\"\ndoc = \"\"\"\nIn Milvus, you store your vector embeddings in collections. All vector embeddings within a collection share the same dimensionality and distance metric for measuring similarity.\nMilvus collections support dynamic fields (i.e., fields not pre-defined in the schema) and automatic incrementation of primary keys.\n\"\"\"\n\nqvecs = ef.encode_queries([query])\ndvecs = ef.encode_documents([doc])\n","from pymilvus.model.dense import JinaEmbeddingFunction\nfrom pymilvus import MilvusClient\n\njina_api_key = \"\"\nef = JinaEmbeddingFunction(\"jina-embeddings-v2-base-en\", jina_api_key)\nDIMENSION = 768  # size of jina-embeddings-v2-base-en\n\ndoc = [\n    \"In 1950, Alan Turing published his seminal paper, 'Computing Machinery and Intelligence,' proposing the Turing Test as a criterion of intelligence, a foundational concept in the philosophy and development of artificial intelligence.\",\n    \"The Dartmouth Conference in 1956 is considered the birthplace of artificial intelligence as a field; here, John McCarthy and others coined the term 'artificial intelligence' and laid out its basic goals.\",\n    \"In 1951, British mathematician and computer scientist Alan Turing also developed the first program designed to play chess, demonstrating an early example of AI in game strategy.\",\n    \"The invention of the Logic Theorist by Allen Newell, Herbert A. 
Simon, and Cliff Shaw in 1955 marked the creation of the first true AI program, which was capable of solving logic problems, akin to proving mathematical theorems.\",\n]\n\ndvecs = ef.encode_documents(doc)\n\ndata = [\n    {\"id\": i, \"vector\": dvecs[i], \"text\": doc[i], \"subject\": \"history\"}\n    for i in range(len(dvecs))\n]\n\nmilvus_client = MilvusClient(\"./milvus_jina_demo.db\")\nCOLLECTION_NAME = \"demo_collection\"  # Milvus collection name\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\nres = milvus_client.insert(collection_name=COLLECTION_NAME, data=data)\n\nprint(res[\"insert_count\"])\n","queries = \"What event in 1956 marked the official birth of artificial intelligence as a discipline?\"\nqvecs = ef.encode_queries([queries])\n\nres = milvus_client.search(\n    collection_name=COLLECTION_NAME,  # target collection\n    data=[qvecs[0]],  # query vectors\n    limit=3,  # number of returned entities\n    output_fields=[\"text\", \"subject\"],  # specifies fields to be returned\n)[0]\n\nfor result in res:\n    print(result)\n","from pymilvus.model.reranker import JinaRerankFunction\n\njina_api_key = \"\"\n\nrf = JinaRerankFunction(\"jina-reranker-v1-base-en\", jina_api_key)\n\nquery = \"What event in 1956 marked the official birth of artificial intelligence as a discipline?\"\n\ndocuments = [\n    \"In 1950, Alan Turing published his seminal paper, 'Computing Machinery and Intelligence,' proposing the Turing Test as a criterion of intelligence, a foundational concept in the philosophy and development of artificial intelligence.\",\n    \"The Dartmouth Conference in 1956 is considered the birthplace of artificial intelligence as a field; here, John McCarthy and others coined the term 'artificial intelligence' and laid out its basic goals.\",\n    \"In 1951, British mathematician and computer scientist Alan Turing also developed the first program designed to play chess, demonstrating an early example of AI in game strategy.\",\n    \"The invention of the Logic Theorist by Allen Newell, Herbert A. Simon, and Cliff Shaw in 1955 marked the creation of the first true AI program, which was capable of solving logic problems, akin to proving mathematical theorems.\",\n]\n\nrf(query, documents)\n"],"headingContent":"","anchorList":[{"label":"Integrar o Milvus com o Jina AI","href":"Integrate-Milvus-with-Jina-AI","type":1,"isActive":false},{"label":"Quem é a Jina AI","href":"Who-is-Jina-AI","type":2,"isActive":false},{"label":"Incorporação de Milvus e Jina AI","href":"Milvus-and-Jina-AIs-Embedding","type":2,"isActive":false},{"label":"Exemplos","href":"Examples","type":2,"isActive":false},{"label":"Incorporação de objetivo geral","href":"General-Purpose-Embedding","type":2,"isActive":false},{"label":"Embeddings bilingues","href":"Bilingual-Embeddings","type":2,"isActive":false},{"label":"Incorporação de código","href":"Code-Embeddings","type":2,"isActive":false},{"label":"Pesquisa semântica com Jina e Milvus","href":"Semantic-Search-with-Jina--Milvus","type":2,"isActive":false},{"label":"Jina Reranker","href":"Jina-Reranker","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ pip install -U pymilvus\n$ pip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_api_key = \"\"\nef = JinaEmbeddingFunction(\n    \"jina-embeddings-v3\", \n    jina_api_key,\n    task=\"retrieval.passage\",\n    dimensions=1024\n)\n\nquery = \"what is information retrieval?\"\ndoc = \"Information retrieval is the process of finding relevant information from a large collection of data or documents.\"\n\nqvecs = ef.encode_queries([query])  # This method uses `retrieval.query` as the task\ndvecs = ef.encode_documents([doc])  # This method uses `retrieval.passage` as the task\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_api_key = \"\"\nef = JinaEmbeddingFunction(\"jina-embeddings-v2-base-de\", jina_api_key)\n\nquery = \"what is information retrieval?\"\ndoc = \"Information Retrieval ist der Prozess, relevante Informationen aus einer großen Sammlung von Daten oder Dokumenten zu finden.\"\n\nqvecs = ef.encode_queries([query])\ndvecs = ef.encode_documents([doc])\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_api_key = \"\"\nef = JinaEmbeddingFunction(\"jina-embeddings-v2-base-code\", jina_api_key)\n\n# Case1: Enhanced Code Navigation\n# query: text description of the functionality\n# document: relevant code snippet\n\nquery = \"function to calculate average in Python.\"\ndoc = \"\"\"\ndef calculate_average(numbers):\n    total = sum(numbers)\n    count = len(numbers)\n    return total / count\n\"\"\"\n\n# Case2: Streamlined Code Review\n# query: text description of the programming concept\n# document: relevante code snippet or PR\n\nquery = \"pull quest related to Collection\"\ndoc = \"fix:[restful v2] parameters of create collection ...\"\n\n# Case3: Automatic Documentation Assistance\n# query: code snippet you need explanation\n# document: relevante document or DocsString\n\nquery = \"What is Collection in Milvus\"\ndoc = \"\"\"\nIn Milvus, you store your vector embeddings in collections. All vector embeddings within a collection share the same dimensionality and distance metric for measuring similarity.\nMilvus collections support dynamic fields (i.e., fields not pre-defined in the schema) and automatic incrementation of primary keys.\n\"\"\"\n\nqvecs = ef.encode_queries([query])\ndvecs = ef.encode_documents([doc])\n","from pymilvus.model.dense import JinaEmbeddingFunction\nfrom pymilvus import MilvusClient\n\njina_api_key = \"\"\nDIMENSION = 1024  # `jina-embeddings-v3` supports flexible embedding sizes (32, 64, 128, 256, 512, 768, 1024), allowing for truncating embeddings to fit your application. \nef = JinaEmbeddingFunction(\n    \"jina-embeddings-v3\", \n    jina_api_key,\n    task=\"retrieval.passage\",\n    dimensions=DIMENSION,\n)\n\n\ndoc = [\n    \"In 1950, Alan Turing published his seminal paper, 'Computing Machinery and Intelligence,' proposing the Turing Test as a criterion of intelligence, a foundational concept in the philosophy and development of artificial intelligence.\",\n    \"The Dartmouth Conference in 1956 is considered the birthplace of artificial intelligence as a field; here, John McCarthy and others coined the term 'artificial intelligence' and laid out its basic goals.\",\n    \"In 1951, British mathematician and computer scientist Alan Turing also developed the first program designed to play chess, demonstrating an early example of AI in game strategy.\",\n    \"The invention of the Logic Theorist by Allen Newell, Herbert A. 
Simon, and Cliff Shaw in 1955 marked the creation of the first true AI program, which was capable of solving logic problems, akin to proving mathematical theorems.\",\n]\n\ndvecs = ef.encode_documents(doc) # This method uses `retrieval.passage` as the task\n\ndata = [\n    {\"id\": i, \"vector\": dvecs[i], \"text\": doc[i], \"subject\": \"history\"}\n    for i in range(len(dvecs))\n]\n\nmilvus_client = MilvusClient(\"./milvus_jina_demo.db\")\nCOLLECTION_NAME = \"demo_collection\"  # Milvus collection name\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\nres = milvus_client.insert(collection_name=COLLECTION_NAME, data=data)\n\nprint(res[\"insert_count\"])\n","queries = \"What event in 1956 marked the official birth of artificial intelligence as a discipline?\"\nqvecs = ef.encode_queries([queries]) # This method uses `retrieval.query` as the task\n\nres = milvus_client.search(\n    collection_name=COLLECTION_NAME,  # target collection\n    data=[qvecs[0]],  # query vectors\n    limit=3,  # number of returned entities\n    output_fields=[\"text\", \"subject\"],  # specifies fields to be returned\n)[0]\n\nfor result in res:\n    print(result)\n","from pymilvus.model.reranker import JinaRerankFunction\n\njina_api_key = \"\"\n\nrf = JinaRerankFunction(\"jina-reranker-v1-base-en\", jina_api_key)\n\nquery = \"What event in 1956 marked the official birth of artificial intelligence as a discipline?\"\n\ndocuments = [\n    \"In 1950, Alan Turing published his seminal paper, 'Computing Machinery and Intelligence,' proposing the Turing Test as a criterion of intelligence, a foundational concept in the philosophy and development of artificial intelligence.\",\n    \"The Dartmouth Conference in 1956 is considered the birthplace of artificial intelligence as a field; here, John McCarthy and others coined the term 'artificial intelligence' and laid out its basic goals.\",\n    \"In 1951, British mathematician and computer scientist Alan Turing also developed the first program designed to play chess, demonstrating an early example of AI in game strategy.\",\n    \"The invention of the Logic Theorist by Allen Newell, Herbert A. Simon, and Cliff Shaw in 1955 marked the creation of the first true AI program, which was capable of solving logic problems, akin to proving mathematical theorems.\",\n]\n\nrf(query, documents)\n"],"headingContent":"Integrate Milvus with Jina AI","anchorList":[{"label":"Integrar o Milvus com o Jina AI","href":"Integrate-Milvus-with-Jina-AI","type":1,"isActive":false},{"label":"Quem é a Jina AI","href":"Who-is-Jina-AI","type":2,"isActive":false},{"label":"Incorporação de Milvus e Jina AI","href":"Milvus-and-Jina-AIs-Embedding","type":2,"isActive":false},{"label":"Exemplos","href":"Examples","type":2,"isActive":false},{"label":"Incorporação de objetivo geral","href":"General-Purpose-Embedding","type":2,"isActive":false},{"label":"Embeddings bilingues","href":"Bilingual-Embeddings","type":2,"isActive":false},{"label":"Incorporação de código","href":"Code-Embeddings","type":2,"isActive":false},{"label":"Pesquisa semântica com Jina e Milvus","href":"Semantic-Search-with-Jina--Milvus","type":2,"isActive":false},{"label":"Jina Reranker","href":"Jina-Reranker","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/pt/integrations/integrate_with_jina.md b/localization/v2.4.x/site/pt/integrations/integrate_with_jina.md
    index 9d017e281..6434858f6 100644
    --- a/localization/v2.4.x/site/pt/integrations/integrate_with_jina.md
    +++ b/localization/v2.4.x/site/pt/integrations/integrate_with_jina.md
    @@ -20,7 +20,8 @@ title: Integrar o Milvus com o Jina
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

    Open In Colab

    +

    Open In Colab +GitHub Repository

This guide demonstrates how to use Jina AI embeddings and Milvus to perform similarity search and retrieval tasks.

Who is Jina AI

Jina AI's core embedding model excels at detailed text understanding, making it ideal for semantic search and content classification, thereby supporting advanced sentiment analysis, text summarization, and personalized recommendation systems.

    -
    from pymilvus.model.dense import JinaEmbeddingFunction
    +
    from pymilvus.model.dense import JinaEmbeddingFunction
     
     jina_api_key = "<YOUR_JINA_API_KEY>"
    -ef = JinaEmbeddingFunction("jina-embeddings-v2-base-en", jina_api_key)
    +ef = JinaEmbeddingFunction(
    +    "jina-embeddings-v3", 
    +    jina_api_key,
    +    task="retrieval.passage",
    +    dimensions=1024
    +)
     
     query = "what is information retrieval?"
     doc = "Information retrieval is the process of finding relevant information from a large collection of data or documents."
     
    -qvecs = ef.encode_queries([query])
    -dvecs = ef.encode_documents([doc])
    +qvecs = ef.encode_queries([query])  # This method uses `retrieval.query` as the task
    +dvecs = ef.encode_documents([doc])  # This method uses `retrieval.passage` as the task
     

Bilingual Embeddings

    Open In Colab

    +

    Open In Colab +GitHub Repository

This guide demonstrates how to build a Retrieval-Augmented Generation (RAG) system using LlamaIndex and Milvus.

The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

LlamaIndex is a simple, flexible data framework for connecting custom data sources to large language models (LLMs). Milvus is the world's most advanced open-source vector database, built to power embedding similarity search and AI applications.
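A minimal sketch of wiring Milvus into LlamaIndex is shown below; it assumes the llama-index-vector-stores-milvus package, a local Milvus Lite file, and OpenAI's default 1536-dimensional embeddings, none of which are fixed by this guide, so adjust them to your setup.

from llama_index.core import SimpleDirectoryReader, StorageContext, VectorStoreIndex
from llama_index.vector_stores.milvus import MilvusVectorStore

# Load any local text file as documents (the path is illustrative).
documents = SimpleDirectoryReader(input_files=["./davinci.txt"]).load_data()

# Store embeddings in a local Milvus Lite file; dim must match the embedding model.
vector_store = MilvusVectorStore(uri="./milvus_llamaindex.db", dim=1536, overwrite=True)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)

print(index.as_query_engine().query("Who is Leonardo Da Vinci?"))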

    diff --git a/localization/v2.4.x/site/pt/integrations/integrate_with_openai.json b/localization/v2.4.x/site/pt/integrations/integrate_with_openai.json index 73c845d09..d0953611d 100644 --- a/localization/v2.4.x/site/pt/integrations/integrate_with_openai.json +++ b/localization/v2.4.x/site/pt/integrations/integrate_with_openai.json @@ -1 +1 @@ -{"codeList":["pip install --upgrade openai pymilvus\n","from openai import OpenAI\nfrom pymilvus import MilvusClient\n\nMODEL_NAME = \"text-embedding-3-small\" # Which model to use, please check https://platform.openai.com/docs/guides/embeddings for available models\nDIMENSION = 1536 # Dimension of vector embedding\n\n# Connect to OpenAI with API Key.\nopenai_client = OpenAI(api_key=\"\")\n\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = [\n vec.embedding\n for vec in openai_client.embeddings.create(input=docs, model=MODEL_NAME).data\n]\n\n# Prepare data to be stored in Milvus vector database.\n# We can store the id, vector representation, raw text and labels such as \"subject\" in this case in Milvus.\ndata = [\n {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n for i in range(len(docs))\n]\n\n\n# Connect to Milvus, all data is stored in a local file named \"milvus_openai_demo.db\"\n# in current directory. You can also connect to a remote Milvus server following this\n# instruction: https://milvus.io/docs/install_standalone-docker.md.\nmilvus_client = MilvusClient(uri=\"milvus_openai_demo.db\")\nCOLLECTION_NAME = \"demo_collection\" # Milvus collection name\n# Create a collection to store the vectors and text.\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\n# Insert all data into Milvus vector database.\nres = milvus_client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res[\"insert_count\"])\n","queries = [\"When was artificial intelligence founded?\"]\n\nquery_vectors = [\n vec.embedding\n for vec in openai_client.embeddings.create(input=queries, model=MODEL_NAME).data\n]\n\nres = milvus_client.search(\n collection_name=COLLECTION_NAME, # target collection\n data=query_vectors, # query vectors\n limit=2, # number of returned entities\n output_fields=[\"text\", \"subject\"], # specifies fields to be returned\n)\n\nfor q in queries:\n print(\"Query:\", q)\n for result in res:\n print(result)\n print(\"\\n\")\n","[\n {\n \"id\": 0,\n \"distance\": -0.772376537322998,\n \"entity\": {\n \"text\": \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"subject\": \"history\",\n },\n },\n {\n \"id\": 1,\n \"distance\": -0.58596271276474,\n \"entity\": {\n \"text\": \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"subject\": \"history\",\n },\n },\n]\n"],"headingContent":"","anchorList":[{"label":"Pesquisa semântica com Milvus e OpenAI","href":"Semantic-Search-with-Milvus-and-OpenAI","type":1,"isActive":false},{"label":"Introdução","href":"Getting-started","type":2,"isActive":false},{"label":"Pesquisa de títulos de livros com o OpenAI e o Milvus","href":"Searching-book-titles-with-OpenAI--Milvus","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["pip 
install --upgrade openai pymilvus\n","from openai import OpenAI\nfrom pymilvus import MilvusClient\n\nMODEL_NAME = \"text-embedding-3-small\" # Which model to use, please check https://platform.openai.com/docs/guides/embeddings for available models\nDIMENSION = 1536 # Dimension of vector embedding\n\n# Connect to OpenAI with API Key.\nopenai_client = OpenAI(api_key=\"\")\n\ndocs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = [\n vec.embedding\n for vec in openai_client.embeddings.create(input=docs, model=MODEL_NAME).data\n]\n\n# Prepare data to be stored in Milvus vector database.\n# We can store the id, vector representation, raw text and labels such as \"subject\" in this case in Milvus.\ndata = [\n {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n for i in range(len(docs))\n]\n\n\n# Connect to Milvus, all data is stored in a local file named \"milvus_openai_demo.db\"\n# in current directory. You can also connect to a remote Milvus server following this\n# instruction: https://milvus.io/docs/install_standalone-docker.md.\nmilvus_client = MilvusClient(uri=\"milvus_openai_demo.db\")\nCOLLECTION_NAME = \"demo_collection\" # Milvus collection name\n# Create a collection to store the vectors and text.\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\n# Insert all data into Milvus vector database.\nres = milvus_client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res[\"insert_count\"])\n","queries = [\"When was artificial intelligence founded?\"]\n\nquery_vectors = [\n vec.embedding\n for vec in openai_client.embeddings.create(input=queries, model=MODEL_NAME).data\n]\n\nres = milvus_client.search(\n collection_name=COLLECTION_NAME, # target collection\n data=query_vectors, # query vectors\n limit=2, # number of returned entities\n output_fields=[\"text\", \"subject\"], # specifies fields to be returned\n)\n\nfor q in queries:\n print(\"Query:\", q)\n for result in res:\n print(result)\n print(\"\\n\")\n","[\n {\n \"id\": 0,\n \"distance\": -0.772376537322998,\n \"entity\": {\n \"text\": \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"subject\": \"history\",\n },\n },\n {\n \"id\": 1,\n \"distance\": -0.58596271276474,\n \"entity\": {\n \"text\": \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"subject\": \"history\",\n },\n },\n]\n"],"headingContent":"Semantic Search with Milvus and OpenAI","anchorList":[{"label":"Pesquisa semântica com Milvus e OpenAI","href":"Semantic-Search-with-Milvus-and-OpenAI","type":1,"isActive":false},{"label":"Introdução","href":"Getting-started","type":2,"isActive":false},{"label":"Pesquisa de títulos de livros com o OpenAI e o Milvus","href":"Searching-book-titles-with-OpenAI--Milvus","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/integrations/integrate_with_openai.md b/localization/v2.4.x/site/pt/integrations/integrate_with_openai.md index cbfd589d6..82992252d 100644 --- a/localization/v2.4.x/site/pt/integrations/integrate_with_openai.md +++ b/localization/v2.4.x/site/pt/integrations/integrate_with_openai.md @@ -20,7 +20,8 @@ 
summary: >-
-

    Open In Colab

    +

    Open In Colab +GitHub Repository

This guide shows how OpenAI's embedding API can be used with the Milvus vector database to perform semantic search over text.
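The core of that flow is two calls: embed the query with OpenAI, then search Milvus with the resulting vector. The sketch below reuses the model, file, and collection names from this guide's code; reading the API key from the environment is an assumption.

from openai import OpenAI
from pymilvus import MilvusClient

openai_client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
milvus_client = MilvusClient(uri="milvus_openai_demo.db")

# Embed the query text with the same model used for the documents.
query_vector = openai_client.embeddings.create(
    input=["When was artificial intelligence founded?"],
    model="text-embedding-3-small",
).data[0].embedding

# Retrieve the two most similar entries with their text and subject fields.
res = milvus_client.search(
    collection_name="demo_collection",
    data=[query_vector],
    limit=2,
    output_fields=["text", "subject"],
)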

Getting started

    Open In Colab

    +

    Open In Colab +GitHub Repository

This guide demonstrates how to use Ragas to evaluate a Retrieval-Augmented Generation (RAG) pipeline built on Milvus.

The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

Ragas is a framework that helps you evaluate RAG pipelines. There are tools and frameworks that help you build these pipelines, but evaluating them and quantifying their performance can be hard. This is where Ragas (RAG Assessment) comes in.
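As a minimal sketch of what an evaluation call looks like, assuming the ragas and datasets packages and an OpenAI key for the default judge models; the sample record below is invented purely for illustration.

from datasets import Dataset
from ragas import evaluate
from ragas.metrics import answer_relevancy, context_precision, faithfulness

# One evaluation record per question, in the column layout Ragas expects.
eval_dataset = Dataset.from_dict(
    {
        "question": ["Where is the painting 'Warrior' currently stored?"],
        "answer": ["It is stored in the Malcolm collection."],
        "contexts": [["The drawing 'Warrior' is held in the Malcolm collection."]],
        "ground_truth": ["The Malcolm collection."],
    }
)

result = evaluate(eval_dataset, metrics=[faithfulness, answer_relevancy, context_precision])
print(result)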

    diff --git a/localization/v2.4.x/site/pt/integrations/integrate_with_vanna.json b/localization/v2.4.x/site/pt/integrations/integrate_with_vanna.json index 563139fc6..c21349e60 100644 --- a/localization/v2.4.x/site/pt/integrations/integrate_with_vanna.json +++ b/localization/v2.4.x/site/pt/integrations/integrate_with_vanna.json @@ -1 +1 @@ -{"codeList":["$ pip install \"vanna[milvus,openai]\"\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","from pymilvus import MilvusClient, model\nfrom vanna.milvus import Milvus_VectorStore\nfrom vanna.openai import OpenAI_Chat\n\n\nclass VannaMilvus(Milvus_VectorStore, OpenAI_Chat):\n def __init__(self, config=None):\n Milvus_VectorStore.__init__(self, config=config)\n OpenAI_Chat.__init__(self, config=config)\n","milvus_uri = \"./milvus_vanna.db\"\n\nmilvus_client = MilvusClient(uri=milvus_uri)\n\nvn_milvus = VannaMilvus(\n config={\n \"api_key\": os.getenv(\"OPENAI_API_KEY\"),\n \"model\": \"gpt-3.5-turbo\",\n \"milvus_client\": milvus_client,\n \"embedding_function\": model.DefaultEmbeddingFunction(),\n \"n_results\": 2, # The number of results to return from Milvus semantic search.\n }\n)\n","import sqlite3\n\nsqlite_path = \"./my-database.sqlite\"\nsql_connect = sqlite3.connect(sqlite_path)\nc = sql_connect.cursor()\n\ninit_sqls = \"\"\"\nCREATE TABLE IF NOT EXISTS Customer (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Name TEXT NOT NULL,\n Company TEXT NOT NULL,\n City TEXT NOT NULL,\n Phone TEXT NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS Company (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Name TEXT NOT NULL,\n Industry TEXT NOT NULL,\n Location TEXT NOT NULL,\n EmployeeCount INTEGER NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS User (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Username TEXT NOT NULL UNIQUE,\n Email TEXT NOT NULL UNIQUE\n);\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('John Doe', 'ABC Corp', 'New York', '123-456-7890');\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('Jane Smith', 'XYZ Inc', 'Los Angeles', '098-765-4321');\n\nINSERT INTO Company (Name, Industry, Location, EmployeeCount)\nVALUES ('ABC Corp', 'cutting-edge technology', 'New York', 100);\n\nINSERT INTO User (Username, Email)\nVALUES ('johndoe123', 'johndoe123@example.com');\n\"\"\"\n\nfor sql in init_sqls.split(\";\"):\n c.execute(sql)\n\nsql_connect.commit()\n\n# Connect to the SQLite database\nvn_milvus.connect_to_sqlite(sqlite_path)\n","# If there exists training data, we should remove it before training.\nexisting_training_data = vn_milvus.get_training_data()\nif len(existing_training_data) > 0:\n for _, training_data in existing_training_data.iterrows():\n vn_milvus.remove_training_data(training_data[\"id\"])\n\n# Get the DDL of the SQLite database\ndf_ddl = vn_milvus.run_sql(\"SELECT type, sql FROM sqlite_master WHERE sql is not null\")\n\n# Train the model on the DDL data\nfor ddl in df_ddl[\"sql\"].to_list():\n vn_milvus.train(ddl=ddl)\n","# Add documentation about your business terminology or definitions.\nvn_milvus.train(\n documentation=\"ABC Corp specializes in cutting-edge technology solutions and innovation.\"\n)\nvn_milvus.train(\n documentation=\"XYZ Inc is a global leader in manufacturing and supply chain management.\"\n)\n\n# You can also add SQL queries to your training data.\nvn_milvus.train(sql=\"SELECT * FROM Customer WHERE Name = 'John Doe'\")\n","training_data = vn_milvus.get_training_data()\ntraining_data\n","sql = vn_milvus.generate_sql(\"what is the phone number of John 
Doe?\")\nvn_milvus.run_sql(sql)\n","sql = vn_milvus.generate_sql(\"which customer works for a manufacturing corporation?\")\nvn_milvus.run_sql(sql)\n","sql_connect.close()\nmilvus_client.close()\n\nos.remove(sqlite_path)\nif os.path.exists(milvus_uri):\n os.remove(milvus_uri)\n"],"headingContent":"","anchorList":[{"label":"Escreva SQL com Vanna e Milvus","href":"Write-SQL-with-Vanna-and-Milvus","type":1,"isActive":false},{"label":"Pré-requisitos","href":"Prerequisites","type":2,"isActive":false},{"label":"Preparação dos dados","href":"Data-preparation","type":2,"isActive":false},{"label":"Treinar com dados","href":"Train-with-data","type":2,"isActive":false},{"label":"Gerar SQLs e executá-los","href":"Generate-SQLs-and-execute-them","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install \"vanna[milvus,openai]\"\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","from pymilvus import MilvusClient, model\nfrom vanna.milvus import Milvus_VectorStore\nfrom vanna.openai import OpenAI_Chat\n\n\nclass VannaMilvus(Milvus_VectorStore, OpenAI_Chat):\n def __init__(self, config=None):\n Milvus_VectorStore.__init__(self, config=config)\n OpenAI_Chat.__init__(self, config=config)\n","milvus_uri = \"./milvus_vanna.db\"\n\nmilvus_client = MilvusClient(uri=milvus_uri)\n\nvn_milvus = VannaMilvus(\n config={\n \"api_key\": os.getenv(\"OPENAI_API_KEY\"),\n \"model\": \"gpt-3.5-turbo\",\n \"milvus_client\": milvus_client,\n \"embedding_function\": model.DefaultEmbeddingFunction(),\n \"n_results\": 2, # The number of results to return from Milvus semantic search.\n }\n)\n","import sqlite3\n\nsqlite_path = \"./my-database.sqlite\"\nsql_connect = sqlite3.connect(sqlite_path)\nc = sql_connect.cursor()\n\ninit_sqls = \"\"\"\nCREATE TABLE IF NOT EXISTS Customer (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Name TEXT NOT NULL,\n Company TEXT NOT NULL,\n City TEXT NOT NULL,\n Phone TEXT NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS Company (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Name TEXT NOT NULL,\n Industry TEXT NOT NULL,\n Location TEXT NOT NULL,\n EmployeeCount INTEGER NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS User (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n Username TEXT NOT NULL UNIQUE,\n Email TEXT NOT NULL UNIQUE\n);\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('John Doe', 'ABC Corp', 'New York', '123-456-7890');\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('Jane Smith', 'XYZ Inc', 'Los Angeles', '098-765-4321');\n\nINSERT INTO Company (Name, Industry, Location, EmployeeCount)\nVALUES ('ABC Corp', 'cutting-edge technology', 'New York', 100);\n\nINSERT INTO User (Username, Email)\nVALUES ('johndoe123', 'johndoe123@example.com');\n\"\"\"\n\nfor sql in init_sqls.split(\";\"):\n c.execute(sql)\n\nsql_connect.commit()\n\n# Connect to the SQLite database\nvn_milvus.connect_to_sqlite(sqlite_path)\n","# If there exists training data, we should remove it before training.\nexisting_training_data = vn_milvus.get_training_data()\nif len(existing_training_data) > 0:\n for _, training_data in existing_training_data.iterrows():\n vn_milvus.remove_training_data(training_data[\"id\"])\n\n# Get the DDL of the SQLite database\ndf_ddl = vn_milvus.run_sql(\"SELECT type, sql FROM sqlite_master WHERE sql is not null\")\n\n# Train the model on the DDL data\nfor ddl in df_ddl[\"sql\"].to_list():\n vn_milvus.train(ddl=ddl)\n","# Add documentation about your business terminology or definitions.\nvn_milvus.train(\n documentation=\"ABC Corp 
specializes in cutting-edge technology solutions and innovation.\"\n)\nvn_milvus.train(\n documentation=\"XYZ Inc is a global leader in manufacturing and supply chain management.\"\n)\n\n# You can also add SQL queries to your training data.\nvn_milvus.train(sql=\"SELECT * FROM Customer WHERE Name = 'John Doe'\")\n","training_data = vn_milvus.get_training_data()\ntraining_data\n","sql = vn_milvus.generate_sql(\"what is the phone number of John Doe?\")\nvn_milvus.run_sql(sql)\n","sql = vn_milvus.generate_sql(\"which customer works for a manufacturing corporation?\")\nvn_milvus.run_sql(sql)\n","sql_connect.close()\nmilvus_client.close()\n\nos.remove(sqlite_path)\nif os.path.exists(milvus_uri):\n os.remove(milvus_uri)\n"],"headingContent":"Write SQL with Vanna and Milvus","anchorList":[{"label":"Escreva SQL com Vanna e Milvus","href":"Write-SQL-with-Vanna-and-Milvus","type":1,"isActive":false},{"label":"Pré-requisitos","href":"Prerequisites","type":2,"isActive":false},{"label":"Preparação dos dados","href":"Data-preparation","type":2,"isActive":false},{"label":"Treinar com dados","href":"Train-with-data","type":2,"isActive":false},{"label":"Gerar SQLs e executá-los","href":"Generate-SQLs-and-execute-them","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/integrations/integrate_with_vanna.md b/localization/v2.4.x/site/pt/integrations/integrate_with_vanna.md index ec0061fe4..5cc3bbb13 100644 --- a/localization/v2.4.x/site/pt/integrations/integrate_with_vanna.md +++ b/localization/v2.4.x/site/pt/integrations/integrate_with_vanna.md @@ -3,7 +3,7 @@ id: integrate_with_vanna.md summary: >- Este guia demonstra como utilizar o Vanna para gerar e executar consultas SQL com base nos seus dados armazenados numa base de dados. -title: Escrever SQL com Vanna e Milvus +title: Escreva SQL com Vanna e Milvus ---

Write SQL with Vanna and Milvus

Vanna is an open-source Python RAG (Retrieval-Augmented Generation) framework for SQL generation and related functionality. Milvus is the world's most advanced open-source vector database, built to power embedding similarity search and AI applications.

    +

    Open In Colab +GitHub Repository

    +

Vanna is an open-source Python RAG (Retrieval-Augmented Generation) framework for SQL generation and related functionality. Milvus is the world's most advanced open-source vector database, built to power embedding similarity search and AI applications.

Vanna works in two easy steps - train a RAG "model" on your data, and then ask questions that return SQL queries which can be configured to run on your database. This guide demonstrates how to use Vanna to generate and execute SQL queries based on your data stored in a database.
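In code, the two steps reduce to training calls followed by a question, reusing the vn_milvus instance this guide constructs later; the DDL string here is a shortened stand-in for the schema used below.

# Step 1: train the RAG "model" on schema (DDL) and free-form documentation.
vn_milvus.train(ddl="CREATE TABLE Customer (ID INTEGER PRIMARY KEY, Name TEXT, Phone TEXT)")
vn_milvus.train(documentation="ABC Corp specializes in cutting-edge technology solutions.")

# Step 2: ask a question; Vanna generates SQL, which you can then execute.
sql = vn_milvus.generate_sql("what is the phone number of John Doe?")
vn_milvus.run_sql(sql)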

Prerequisites

    Open In Colab

    +

    Open In Colab +GitHub Repository

This guide shows how the VoyageAI embedding API can be used with the Milvus vector database to perform semantic search over text.
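A minimal sketch of that pairing, assuming the voyageai package with VOYAGE_API_KEY set in the environment; the model name and file name are illustrative, not prescribed by this guide.

import voyageai
from pymilvus import MilvusClient

vo = voyageai.Client()  # reads VOYAGE_API_KEY from the environment

docs = ["Artificial intelligence was founded as an academic discipline in 1956."]
vectors = vo.embed(docs, model="voyage-2").embeddings  # model name is an assumption

# Store the embeddings in a local Milvus Lite file and make them searchable.
client = MilvusClient(uri="./milvus_voyage_demo.db")
client.create_collection(collection_name="demo_collection", dimension=len(vectors[0]))
client.insert(
    collection_name="demo_collection",
    data=[{"id": 0, "vector": vectors[0], "text": docs[0]}],
)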

Getting started

    Open In Colab

    +

    Open In Colab +GitHub Repository

This guide demonstrates how to build a Retrieval-Augmented Generation (RAG) system using LangChain and Milvus.

The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

LangChain is a framework for developing applications powered by large language models (LLMs). Milvus is the world's most advanced open-source vector database, built to power embedding similarity search and AI applications.
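A minimal sketch of the Milvus side of such a pipeline, assuming the langchain-milvus and langchain-openai packages and a local Milvus Lite file; this is one possible wiring, not the full guide.

from langchain_milvus import Milvus
from langchain_openai import OpenAIEmbeddings

# Index a few texts into Milvus; assumes OPENAI_API_KEY is set for the embeddings.
vectorstore = Milvus.from_texts(
    texts=["Milvus stores and searches embedding vectors."],
    embedding=OpenAIEmbeddings(),
    connection_args={"uri": "./milvus_langchain_demo.db"},
)

# The retrieval step of RAG: fetch the most similar stored text for a query.
docs = vectorstore.similarity_search("What does Milvus do?", k=1)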

    diff --git a/localization/v2.4.x/site/pt/reference/architecture/architecture_overview.json b/localization/v2.4.x/site/pt/reference/architecture/architecture_overview.json index 7dedefcb5..f952cb8a5 100644 --- a/localization/v2.4.x/site/pt/reference/architecture/architecture_overview.json +++ b/localization/v2.4.x/site/pt/reference/architecture/architecture_overview.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"","anchorList":[{"label":"Visão geral da arquitetura do Milvus","href":"Milvus-Architecture-Overview","type":1,"isActive":false},{"label":"O que vem a seguir","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Milvus Architecture Overview","anchorList":[{"label":"Visão geral da arquitetura do Milvus","href":"Milvus-Architecture-Overview","type":1,"isActive":false},{"label":"O que se segue","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/reference/architecture/architecture_overview.md b/localization/v2.4.x/site/pt/reference/architecture/architecture_overview.md index eadb7fc4c..4c9be433f 100644 --- a/localization/v2.4.x/site/pt/reference/architecture/architecture_overview.md +++ b/localization/v2.4.x/site/pt/reference/architecture/architecture_overview.md @@ -3,7 +3,7 @@ id: architecture_overview.md summary: >- O Milvus fornece uma base de dados vetorial rápida, fiável e estável, criada especificamente para a pesquisa de semelhanças e a inteligência artificial. -title: Descrição geral da arquitetura Milvus +title: Visão geral da arquitetura do Milvus ---

Milvus Architecture Overview

Built on top of popular vector search libraries including Faiss, HNSW, DiskANN, SCANN and others, Milvus was designed for similarity search on dense vector datasets containing millions, billions, or even trillions of vectors. Before proceeding, familiarize yourself with the basic principles of embedding retrieval.

    -

Milvus also supports data sharding, streaming data ingestion, dynamic schema, search combining vector and scalar data, multi-vector and hybrid search, sparse vectors, and many other advanced functions. The platform offers on-demand performance and can be optimized to suit any embedding retrieval scenario. We recommend deploying Milvus using Kubernetes for optimal availability and elasticity.

    +

Milvus also supports data sharding, streaming data ingestion, dynamic schema, search combining vector and scalar data, multi-vector and hybrid search, sparse vectors, and many other advanced functions. The platform offers on-demand performance and can be optimized to fit any embedding retrieval scenario. We recommend deploying Milvus using Kubernetes for optimal availability and elasticity.

    O Milvus adopta uma arquitetura de armazenamento partilhado com desagregação do armazenamento e da computação e escalabilidade horizontal para os seus nós de computação. Seguindo o princípio da desagregação do plano de dados e do plano de controlo, o Milvus é composto por quatro camadas: camada de acesso, serviço coordenador, nó de trabalho e armazenamento. Estas camadas são mutuamente independentes quando se trata de escalonamento ou de recuperação de desastres.

Architecture diagram


What's next

    diff --git a/localization/v2.4.x/site/pt/reference/disk_index.json b/localization/v2.4.x/site/pt/reference/disk_index.json index 71adc9835..49ccfb8bb 100644 --- a/localization/v2.4.x/site/pt/reference/disk_index.json +++ b/localization/v2.4.x/site/pt/reference/disk_index.json @@ -1 +1 @@ -{"codeList":["...\nDiskIndex:\n MaxDegree: 56\n SearchListSize: 100\n PQCodeBugetGBRatio: 0.125\n SearchCacheBudgetGBRatio: 0.125\n BeamWidthRatio: 4.0\n...\n"],"headingContent":"","anchorList":[{"label":"Índice no disco","href":"On-disk-Index","type":1,"isActive":false},{"label":"Pré-requisitos","href":"Prerequisites","type":2,"isActive":false},{"label":"Limites","href":"Limits","type":2,"isActive":false},{"label":"Definições de índice e pesquisa","href":"Index-and-search-settings","type":2,"isActive":false},{"label":"Configurações de Milvus relacionadas com DiskANN","href":"DiskANN-related-Milvus-configurations","type":2,"isActive":false},{"label":"Resolução de problemas","href":"Troubleshooting","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["...\nDiskIndex:\n MaxDegree: 56\n SearchListSize: 100\n PQCodeBugetGBRatio: 0.125\n SearchCacheBudgetGBRatio: 0.125\n BeamWidthRatio: 4.0\n...\n"],"headingContent":"On-disk Index","anchorList":[{"label":"Índice no disco","href":"On-disk-Index","type":1,"isActive":false},{"label":"Pré-requisitos","href":"Prerequisites","type":2,"isActive":false},{"label":"Limites","href":"Limits","type":2,"isActive":false},{"label":"Configurações de índice e pesquisa","href":"Index-and-search-settings","type":2,"isActive":false},{"label":"Configurações de Milvus relacionadas com DiskANN","href":"DiskANN-related-Milvus-configurations","type":2,"isActive":false},{"label":"Resolução de problemas","href":"Troubleshooting","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/reference/disk_index.md b/localization/v2.4.x/site/pt/reference/disk_index.md index ae675cefe..b7a8113f6 100644 --- a/localization/v2.4.x/site/pt/reference/disk_index.md +++ b/localization/v2.4.x/site/pt/reference/disk_index.md @@ -68,9 +68,9 @@ Atualmente, um campo vetorial apenas suporta um tipo de índice. O Milvus exclui

To use DiskANN, make sure that you:

• Use only float vectors with at least 1 dimension in your data.
• Use only Euclidean distance (L2), inner product (IP), or COSINE to measure the distance between vectors (a sketch follows below).
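As a hedged sketch (not from the original page): building a DISKANN index with pymilvus 2.4 under the constraints above; the endpoint, collection name, and field name are assumptions.

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")  # assumed cluster endpoint

index_params = client.prepare_index_params()
index_params.add_index(
    field_name="embeddings",   # assumed FLOAT_VECTOR field
    index_type="DISKANN",
    metric_type="COSINE",      # L2, IP, or COSINE, per the list above
)
client.create_index(collection_name="my_collection", index_params=index_params)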

Index and search settings

In-memory replicas are organized as replica groups. Each replica group contains shard replicas. Each shard replica has a streaming replica and a historical replica that correspond to the growing and sealed segments in the shard (i.e., DML channel).

An illustration of how in-memory replica works

Replica group

A replica group consists of multiple query nodes that are responsible for handling historical data and replicas.

Shard replica

A shard replica consists of a streaming replica and a historical replica, both belonging to the same shard. The number of shard replicas in a replica group is determined by the number of shards in a specified collection.
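As a hedged sketch (my illustration, not from the original page): requesting two in-memory replicas when loading a collection, assuming a cluster with enough query nodes and an existing collection named my_collection.

from pymilvus import Collection, connections

connections.connect(uri="http://localhost:19530")  # assumed endpoint
collection = Collection("my_collection")
# Milvus creates two replica groups and distributes the shard
# replicas across the query nodes in each group.
collection.load(replica_number=2)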

    @@ -88,4 +88,4 @@ title: Réplica na memória

Failover

The caches in the proxy are not always up to date. Some segments or channels may have been moved to other query nodes by the time a request arrives. In this case, the proxy receives an error response, updates the cache, and tries to assign the request to another query node.

A segment is ignored if the proxy still cannot find it after updating the cache. This can happen if the segment has been compacted.

If the cache is not accurate, the proxy may miss some segments. Query nodes with DML channels (growing segments) return search responses together with a list of reliable segments, against which the proxy can compare and update its cache.

Enhancement

The proxy cannot distribute search requests to query nodes perfectly evenly, and query nodes may have different resources available to serve search requests. To avoid a long-tail distribution of resources, the proxy assigns active segments on other query nodes to an idle query node that also holds those segments.

    diff --git a/localization/v2.4.x/site/pt/release_notes.json b/localization/v2.4.x/site/pt/release_notes.json index ffb78f026..5df33b9ef 100644 --- a/localization/v2.4.x/site/pt/release_notes.json +++ b/localization/v2.4.x/site/pt/release_notes.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"Release Notes","anchorList":[{"label":"Notas de lançamento","href":"Release-Notes","type":1,"isActive":false},{"label":"v2.4.11","href":"v2411","type":2,"isActive":false},{"label":"v2.4.10","href":"v2410","type":2,"isActive":false},{"label":"v2.4.9","href":"v249","type":2,"isActive":false},{"label":"v2.4.8","href":"v248","type":2,"isActive":false},{"label":"v2.4.6","href":"v246","type":2,"isActive":false},{"label":"v2.4.5","href":"v245","type":2,"isActive":false},{"label":"v2.4.4","href":"v244","type":2,"isActive":false},{"label":"v2.4.3","href":"v243","type":2,"isActive":false},{"label":"v2.4.1","href":"v241","type":2,"isActive":false},{"label":"v2.4.0","href":"v240","type":2,"isActive":false},{"label":"v2.4.0-rc.1","href":"v240-rc1","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Release Notes","anchorList":[{"label":"Notas de lançamento","href":"Release-Notes","type":1,"isActive":false},{"label":"v2.4.13-hotfix","href":"v2413-hotfix","type":2,"isActive":false},{"label":"[Depreciado] v2.4.13","href":"Deprecated-v2413","type":2,"isActive":false},{"label":"v2.4.12","href":"v2412","type":2,"isActive":false},{"label":"v2.4.11","href":"v2411","type":2,"isActive":false},{"label":"v2.4.10","href":"v2410","type":2,"isActive":false},{"label":"v2.4.9","href":"v249","type":2,"isActive":false},{"label":"v2.4.8","href":"v248","type":2,"isActive":false},{"label":"v2.4.6","href":"v246","type":2,"isActive":false},{"label":"v2.4.5","href":"v245","type":2,"isActive":false},{"label":"v2.4.4","href":"v244","type":2,"isActive":false},{"label":"v2.4.3","href":"v243","type":2,"isActive":false},{"label":"v2.4.1","href":"v241","type":2,"isActive":false},{"label":"v2.4.0","href":"v240","type":2,"isActive":false},{"label":"v2.4.0-rc.1","href":"v240-rc1","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/release_notes.md b/localization/v2.4.x/site/pt/release_notes.md index 6e94bf312..f6fe2a717 100644 --- a/localization/v2.4.x/site/pt/release_notes.md +++ b/localization/v2.4.x/site/pt/release_notes.md @@ -19,6 +19,163 @@ title: Notas de lançamento >

Find out what's new in Milvus! This page summarizes the new features, improvements, known issues, and bug fixes in each release. You can find the release notes for each version released after v2.4.0 in this section. We suggest visiting this page regularly to learn about updates.


    v2.4.13-hotfix

Release date: October 17, 2024

| Milvus version | Python SDK version | Java SDK version | Node.js SDK version |
|---|---|---|---|
| 2.4.13-hotfix | 2.4.8 | 2.4.5 | 2.4.9 |

Milvus v2.4.13-hotfix addresses a critical issue specific to v2.4.13, where Milvus may fail to recover collection information after a restart if all MetaKV snapshots were garbage-collected (#36933). Users currently running v2.4.13 are advised to upgrade to v2.4.13-hotfix at the earliest opportunity to avoid potential disruptions.


Critical fixes

• Load the original key if the timestamp is MaxTimestamp (#36935)

[Deprecated] v2.4.13

Release date: October 12, 2024

| Milvus version | Python SDK version | Java SDK version | Node.js SDK version |
|---|---|---|---|
| 2.4.13 | 2.4.8 | 2.4.5 | 2.4.9 |

Milvus 2.4.13 introduces dynamic replica loading, allowing users to adjust the number of replicas of a collection without having to release and reload the collection. This version also resolves several critical bugs related to bulk import, expression parsing, load balancing, and failure recovery. In addition, significant improvements were made to MMAP resource usage and import performance, enhancing overall system efficiency. We strongly recommend upgrading to this version for better performance and stability.
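A hedged sketch of what this enables (my illustration; the release notes do not show the API, and the assumption here is that the new replica count is applied by re-issuing load on an already loaded collection):

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")  # assumed endpoint
client.load_collection("my_collection", replica_number=1)
# With dynamic replica loading (#36417), the replica count can be
# raised without releasing the collection first (assumption).
client.load_collection("my_collection", replica_number=2)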


Features

• Dynamic replica adjustment for loaded collections (#36417)
• Sparse vector MMAP in growing segment types (#36565)

Bug fixes

• Fixed a flush performance issue (#36741)
• Fixed a bug with JSON expressions in "[]" (#36722)
• Removed neighbors if the compaction target is not indexed (#36694)
• Improved Rocksmq performance when the channel is full (#36618)
• Fixed an issue where errors during unpinning were not deferred (#36665)
• Resolved a memory leak for imported segments in the segment manager (#36631)
• Skipped unnecessary health checks for query nodes in the proxy (#36553)
• Fixed an overflow issue with term expressions (#36534)
• Recorded the node ID before assigning tasks to prevent incorrect task assignment (#36493)
• Resolved data race issues in clustering compaction (#36499)
• Added a check for the maximum string array length after type matching (#36497)
• Resolved race conditions in mix or standalone mode (#36459)
• Fixed segment imbalance after repeated load and release operations (#36543)
• Fixed a corner case where segments could not be moved from a stopping node (#36475)
• Updated segment information correctly even when some segments were missing (#36729)
• Prevented etcd transactions from exceeding the maximum limit in the KV snapshot (#36773)

Improvements

• Improved MMAP resource estimation:
  • Improved MMAP-related code in column.h (#36521)
  • Refined resource estimation when loading collections (#36728)
• Performance improvements:
  • Improved expression parsing efficiency by converting Unicode to ASCII (#36676)
  • Enabled parallel message production for multiple topics (#36462)
  • Reduced CPU overhead when calculating the index file size (#36580)
  • Retrieved the message type from the header to minimize unmarshaling (#36454)
  • Optimized the workload-based replica selection policy (#36384)
• Split delete task messages to fit within maximum message size limits (#36574)
• Added a new RESTful URL to describe import jobs (#36754)
• Optimized import scheduling and added a time-cost metric (#36684)
• Added balance report logging for the query coordinator balancer (#36749)
• Switched to using the common GC configuration (#36670)
• Added a streaming forward policy switch for the delegator (#36712)
• Enabled manual compaction for collections without indexes (#36581)
• Enabled load balancing on query nodes with varying memory capacities (#36625)
• Unified the casing of entry labels using metrics.label (#36616)
• Made transfer channel/segment operations idempotent (#36552)
• Added metrics to monitor import throughput and imported row count (#36588)
• Prevented the creation of multiple timer objects in targets (#36573)
• Updated the expression version and formatted the HTTP response for expressions (#36467)
• Improved garbage collection in the KV snapshot (#36793)
• Added support for executing methods with context parameters (#36798)

    v2.4.12

Release date: September 26, 2024

| Milvus version | Python SDK version | Java SDK version | Node.js SDK version |
|---|---|---|---|
| 2.4.12 | 2.4.7 | 2.4.4 | 2.4.9 |

Milvus 2.4.12 brings significant improvements and critical bug fixes. This version addresses data duplication issues and improves failure recovery speed, especially when handling large numbers of deletions. However, a known issue persists where failure recovery can be slow when deleting large amounts of data. We are actively working on resolving this issue.


Improvements

• Implemented graceful stop for the flowgraph manager (#36358)
• Disabled index checks for unloaded vector fields (#36280)
• Filtered out unhit delete records during delta loading (#36272)
• Improved error handling for std::stoi exceptions (#36296)
• Disallowed keywords as field names or dynamic field names (#36108)
• Added a metric for delete entries in L0 segments (#36227)
• Implemented the L0 forward policy to support remote loading (#36208)
• Added ANN field load checking in the proxy (#36194)
• Enabled support for empty sparse rows (#36061)
• Fixed a security vulnerability (#36156)
• Implemented stats handling for request/response size metrics (#36118)
• Fixed size estimation for encoded array data (#36379)

Bug fixes

• Resolved metric type errors for collections with two vector fields (#36473)
• Fixed long buffering issues that caused message queue reception failures (#36425)
• Implemented the correct compact-to-segments return after split support (#36429)
• Resolved data race issues with the node ID check goroutine (#36377)
• Removed the element type check (#36324)
• Resolved concurrent access issues for growing and sealed segments (#36288)
• Implemented future state locking (#36333)
• Corrected offset usage in HybridSearch (#36287, #36253)
• Resolved dirty segment/channel leaks in the QueryNode (#36259)
• Fixed primary key duplication handling (#36274)
• Enforced the metric type setting in search requests (#36279)
• Fixed the stored_index_files_size metric cleanup issue (#36161)
• Corrected readwrite privilege group behavior for global API access (#36145)

    v2.4.11

Release date: May 31, 2024

@@ -544,7 +701,7 @@ title: Notas de lançamento
• Support for sparse float vector bulk insert for binlog/json/parquet (#32649)

Improvements

• Implemented the RPC-based Datacoord/node watch channel (#32036)
• Optimized the bloom filter to accelerate delete filtering (#32642, #33329, #33284)
• Loaded raw data via mmap if the scalar index does not have raw data (#33317)
• Synchronized the Milvus configuration to milvus.yaml (#33322, #32920, #32857, #32946)
@@ -628,14 +785,14 @@ title: Notas de lançamento
• Made the compactor able to clear empty segments (#32821)
• Filled in the deltalog entry number and time range in L0 compactions (#33004)
• Fixed a proxy crash caused by a shard leader cache data race (#32971)
• Fixed the time unit for the load index metric (#32935)
• Fixed the issue where a segment on a stopping query node could not be released successfully (#32929)
• Fixed the index resource estimation (#32842)
• Set the channel checkpoint to the delta position (#32878)
• Made syncmgr lock the key before returning the future (#32865)
• Ensured that the inverted index had only one segment (#32858)
• Fixed the compaction trigger choosing two identical segments (#32800)
• Fixed the issue where the partition name could not be specified in binlog import (#32730, #33027)
• Made the dynamic column optional in parquet import (#32738)
• Skipped the auto ID check when inserting data (#32775)
• Validated the row number against the schema when inserting field data (#32770)
@@ -708,7 +865,7 @@ title: Notas de lançamento
• Fixed errors related to the RESTfulV2 interface, including an important fix that allows numeric parameters in requests to accept numeric input instead of the string type (#32485, #32355)
• Fixed the memory leak in the proxy when removing the watch config event in the rate limiter (#32313)
• Fixed the issue where the rate limiter incorrectly reported that the partition could not be found when partitionName was not specified (#32647)
• Added detection between the cases of a collection being in the recovering state and not being loaded to the error type (#32447)
• Fixed the negative queryable num entities metric (#32361)

    v2.4.0

Open In Colab | GitHub Repository

In this tutorial, we will show you how to build a RAG (Retrieval-Augmented Generation) pipeline with Milvus.

The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus and then uses a generative model to produce new text based on the retrieved documents.
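As a hedged sketch of the retrieval step (my illustration, not the tutorial's code; the collection name, field name, and embed function are assumptions):

from pymilvus import MilvusClient

client = MilvusClient(uri="./milvus.db")  # Milvus Lite file, as in similar tutorials

def retrieve(question, embed, top_k=3):
    # Search the corpus collection for the chunks closest to the question.
    hits = client.search(
        collection_name="rag_collection",   # assumed collection of embedded chunks
        data=[embed(question)],             # embed() is your embedding function
        limit=top_k,
        output_fields=["text"],
    )[0]
    return [hit["entity"]["text"] for hit in hits]

# The retrieved chunks are then placed into the generative model's prompt.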

    diff --git a/localization/v2.4.x/site/pt/tutorials/graph_rag_with_milvus.md b/localization/v2.4.x/site/pt/tutorials/graph_rag_with_milvus.md index 38fb62078..1835aa988 100644 --- a/localization/v2.4.x/site/pt/tutorials/graph_rag_with_milvus.md +++ b/localization/v2.4.x/site/pt/tutorials/graph_rag_with_milvus.md @@ -18,7 +18,8 @@ title: Grafo RAG com Milvus d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

Open In Colab | GitHub Repository

The widespread application of large language models highlights the importance of improving the accuracy and relevance of their responses. Retrieval-Augmented Generation (RAG) enhances models with external knowledge bases, providing more contextual information and mitigating problems such as hallucination and insufficient knowledge. However, relying solely on simple RAG paradigms has its limitations, especially when dealing with complex entity relationships and multi-hop questions, where the model often struggles to provide accurate answers.

Introducing knowledge graphs (KGs) into the RAG system offers a new solution. KGs present entities and their relationships in a structured way, providing more precise retrieval information and helping RAG better handle complex question-answering tasks. KG-RAG is still in its early stages, and there is no consensus on how to effectively retrieve entities and relationships from KGs or how to integrate vector similarity search with graph structures.

In this notebook, we introduce a simple but powerful approach to significantly improve performance in this scenario. It is a simple RAG paradigm with multi-way retrieval followed by reranking, but it implements Graph RAG logically and achieves state-of-the-art performance in handling multi-hop questions. Let's see how it is implemented.

    diff --git a/localization/v2.4.x/site/pt/tutorials/hybrid_search_with_milvus.json b/localization/v2.4.x/site/pt/tutorials/hybrid_search_with_milvus.json index 3cd7b915f..60425e3c1 100644 --- a/localization/v2.4.x/site/pt/tutorials/hybrid_search_with_milvus.json +++ b/localization/v2.4.x/site/pt/tutorials/hybrid_search_with_milvus.json @@ -1 +1 @@ -{"codeList":["$ pip install --upgrade pymilvus \"pymilvus[model]\"\n","# Run this cell to download the dataset\n$ wget http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv\n","import pandas as pd\n\nfile_path = \"quora_duplicate_questions.tsv\"\ndf = pd.read_csv(file_path, sep=\"\\t\")\nquestions = set()\nfor _, row in df.iterrows():\n obj = row.to_dict()\n questions.add(obj[\"question1\"][:512])\n questions.add(obj[\"question2\"][:512])\n if len(questions) > 500: # Skip this if you want to use the full dataset\n break\n\ndocs = list(questions)\n\n# example question\nprint(docs[0])\n","from milvus_model.hybrid import BGEM3EmbeddingFunction\n\nef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\ndense_dim = ef.dim[\"dense\"]\n\n# Generate embeddings using BGE-M3 model\ndocs_embeddings = ef(docs)\n","from pymilvus import (\n connections,\n utility,\n FieldSchema,\n CollectionSchema,\n DataType,\n Collection,\n)\n\n# Connect to Milvus given URI\nconnections.connect(uri=\"./milvus.db\")\n\n# Specify the data schema for the new Collection\nfields = [\n # Use auto generated id as primary key\n FieldSchema(\n name=\"pk\", dtype=DataType.VARCHAR, is_primary=True, auto_id=True, max_length=100\n ),\n # Store the original text to retrieve based on semantically distance\n FieldSchema(name=\"text\", dtype=DataType.VARCHAR, max_length=512),\n # Milvus now supports both sparse and dense vectors,\n # we can store each in a separate field to conduct hybrid search on both vectors\n FieldSchema(name=\"sparse_vector\", dtype=DataType.SPARSE_FLOAT_VECTOR),\n FieldSchema(name=\"dense_vector\", dtype=DataType.FLOAT_VECTOR, dim=dense_dim),\n]\nschema = CollectionSchema(fields)\n\n# Create collection (drop the old one if exists)\ncol_name = \"hybrid_demo\"\nif utility.has_collection(col_name):\n Collection(col_name).drop()\ncol = Collection(col_name, schema, consistency_level=\"Strong\")\n\n# To make vector search efficient, we need to create indices for the vector fields\nsparse_index = {\"index_type\": \"SPARSE_INVERTED_INDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"sparse_vector\", sparse_index)\ndense_index = {\"index_type\": \"AUTOINDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"dense_vector\", dense_index)\ncol.load()\n","# For efficiency, we insert 50 records in each small batch\nfor i in range(0, len(docs), 50):\n batched_entities = [\n docs[i : i + 50],\n docs_embeddings[\"sparse\"][i : i + 50],\n docs_embeddings[\"dense\"][i : i + 50],\n ]\n col.insert(batched_entities)\nprint(\"Number of entities inserted:\", col.num_entities)\n","# Enter your search query\nquery = input(\"Enter your search query: \")\nprint(query)\n\n# Generate embeddings for the query\nquery_embeddings = ef([query])\n# print(query_embeddings)\n","from pymilvus import (\n AnnSearchRequest,\n WeightedRanker,\n)\n\n\ndef dense_search(col, query_dense_embedding, limit=10):\n search_params = {\"metric_type\": \"IP\", \"params\": {}}\n res = col.search(\n [query_dense_embedding],\n anns_field=\"dense_vector\",\n limit=limit,\n output_fields=[\"text\"],\n param=search_params,\n )[0]\n return [hit.get(\"text\") for hit in res]\n\n\ndef sparse_search(col, 
query_sparse_embedding, limit=10):\n search_params = {\n \"metric_type\": \"IP\",\n \"params\": {},\n }\n res = col.search(\n [query_sparse_embedding],\n anns_field=\"sparse_vector\",\n limit=limit,\n output_fields=[\"text\"],\n param=search_params,\n )[0]\n return [hit.get(\"text\") for hit in res]\n\n\ndef hybrid_search(\n col,\n query_dense_embedding,\n query_sparse_embedding,\n sparse_weight=1.0,\n dense_weight=1.0,\n limit=10,\n):\n dense_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n dense_req = AnnSearchRequest(\n [query_dense_embedding], \"dense_vector\", dense_search_params, limit=limit\n )\n sparse_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n sparse_req = AnnSearchRequest(\n [query_sparse_embedding], \"sparse_vector\", sparse_search_params, limit=limit\n )\n rerank = WeightedRanker(sparse_weight, dense_weight)\n res = col.hybrid_search(\n [sparse_req, dense_req], rerank=rerank, limit=limit, output_fields=[\"text\"]\n )[0]\n return [hit.get(\"text\") for hit in res]\n","dense_results = dense_search(col, query_embeddings[\"dense\"][0])\nsparse_results = sparse_search(col, query_embeddings[\"sparse\"][0])\nhybrid_results = hybrid_search(\n col,\n query_embeddings[\"dense\"][0],\n query_embeddings[\"sparse\"][0],\n sparse_weight=0.7,\n dense_weight=1.0,\n)\n","def doc_text_formatting(ef, query, docs):\n tokenizer = ef.model.tokenizer\n query_tokens_ids = tokenizer.encode(query, return_offsets_mapping=True)\n query_tokens = tokenizer.convert_ids_to_tokens(query_tokens_ids)\n formatted_texts = []\n\n for doc in docs:\n ldx = 0\n landmarks = []\n encoding = tokenizer.encode_plus(doc, return_offsets_mapping=True)\n tokens = tokenizer.convert_ids_to_tokens(encoding[\"input_ids\"])[1:-1]\n offsets = encoding[\"offset_mapping\"][1:-1]\n for token, (start, end) in zip(tokens, offsets):\n if token in query_tokens:\n if len(landmarks) != 0 and start == landmarks[-1]:\n landmarks[-1] = end\n else:\n landmarks.append(start)\n landmarks.append(end)\n close = False\n formatted_text = \"\"\n for i, c in enumerate(doc):\n if ldx == len(landmarks):\n pass\n elif i == landmarks[ldx]:\n if close:\n formatted_text += \"\"\n else:\n formatted_text += \"\"\n close = not close\n ldx = ldx + 1\n formatted_text += c\n if close is True:\n formatted_text += \"\"\n formatted_texts.append(formatted_text)\n return formatted_texts\n","from IPython.display import Markdown, display\n\n# Dense search results\ndisplay(Markdown(\"**Dense Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, dense_results)\nfor result in dense_results:\n display(Markdown(result))\n\n# Sparse search results\ndisplay(Markdown(\"\\n**Sparse Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, sparse_results)\nfor result in formatted_results:\n display(Markdown(result))\n\n# Hybrid search results\ndisplay(Markdown(\"\\n**Hybrid Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, hybrid_results)\nfor result in formatted_results:\n display(Markdown(result))\n"],"headingContent":"","anchorList":[{"label":"Pesquisa híbrida com Milvus","href":"Hybrid-Search-with-Milvus","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install --upgrade pymilvus \"pymilvus[model]\"\n","# Run this cell to download the dataset\n$ wget http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv\n","import pandas as pd\n\nfile_path = \"quora_duplicate_questions.tsv\"\ndf = pd.read_csv(file_path, sep=\"\\t\")\nquestions = set()\nfor _, row in 
df.iterrows():\n obj = row.to_dict()\n questions.add(obj[\"question1\"][:512])\n questions.add(obj[\"question2\"][:512])\n if len(questions) > 500: # Skip this if you want to use the full dataset\n break\n\ndocs = list(questions)\n\n# example question\nprint(docs[0])\n","from milvus_model.hybrid import BGEM3EmbeddingFunction\n\nef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\ndense_dim = ef.dim[\"dense\"]\n\n# Generate embeddings using BGE-M3 model\ndocs_embeddings = ef(docs)\n","from pymilvus import (\n connections,\n utility,\n FieldSchema,\n CollectionSchema,\n DataType,\n Collection,\n)\n\n# Connect to Milvus given URI\nconnections.connect(uri=\"./milvus.db\")\n\n# Specify the data schema for the new Collection\nfields = [\n # Use auto generated id as primary key\n FieldSchema(\n name=\"pk\", dtype=DataType.VARCHAR, is_primary=True, auto_id=True, max_length=100\n ),\n # Store the original text to retrieve based on semantically distance\n FieldSchema(name=\"text\", dtype=DataType.VARCHAR, max_length=512),\n # Milvus now supports both sparse and dense vectors,\n # we can store each in a separate field to conduct hybrid search on both vectors\n FieldSchema(name=\"sparse_vector\", dtype=DataType.SPARSE_FLOAT_VECTOR),\n FieldSchema(name=\"dense_vector\", dtype=DataType.FLOAT_VECTOR, dim=dense_dim),\n]\nschema = CollectionSchema(fields)\n\n# Create collection (drop the old one if exists)\ncol_name = \"hybrid_demo\"\nif utility.has_collection(col_name):\n Collection(col_name).drop()\ncol = Collection(col_name, schema, consistency_level=\"Strong\")\n\n# To make vector search efficient, we need to create indices for the vector fields\nsparse_index = {\"index_type\": \"SPARSE_INVERTED_INDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"sparse_vector\", sparse_index)\ndense_index = {\"index_type\": \"AUTOINDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"dense_vector\", dense_index)\ncol.load()\n","# For efficiency, we insert 50 records in each small batch\nfor i in range(0, len(docs), 50):\n batched_entities = [\n docs[i : i + 50],\n docs_embeddings[\"sparse\"][i : i + 50],\n docs_embeddings[\"dense\"][i : i + 50],\n ]\n col.insert(batched_entities)\nprint(\"Number of entities inserted:\", col.num_entities)\n","# Enter your search query\nquery = input(\"Enter your search query: \")\nprint(query)\n\n# Generate embeddings for the query\nquery_embeddings = ef([query])\n# print(query_embeddings)\n","from pymilvus import (\n AnnSearchRequest,\n WeightedRanker,\n)\n\n\ndef dense_search(col, query_dense_embedding, limit=10):\n search_params = {\"metric_type\": \"IP\", \"params\": {}}\n res = col.search(\n [query_dense_embedding],\n anns_field=\"dense_vector\",\n limit=limit,\n output_fields=[\"text\"],\n param=search_params,\n )[0]\n return [hit.get(\"text\") for hit in res]\n\n\ndef sparse_search(col, query_sparse_embedding, limit=10):\n search_params = {\n \"metric_type\": \"IP\",\n \"params\": {},\n }\n res = col.search(\n [query_sparse_embedding],\n anns_field=\"sparse_vector\",\n limit=limit,\n output_fields=[\"text\"],\n param=search_params,\n )[0]\n return [hit.get(\"text\") for hit in res]\n\n\ndef hybrid_search(\n col,\n query_dense_embedding,\n query_sparse_embedding,\n sparse_weight=1.0,\n dense_weight=1.0,\n limit=10,\n):\n dense_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n dense_req = AnnSearchRequest(\n [query_dense_embedding], \"dense_vector\", dense_search_params, limit=limit\n )\n sparse_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n 
sparse_req = AnnSearchRequest(\n [query_sparse_embedding], \"sparse_vector\", sparse_search_params, limit=limit\n )\n rerank = WeightedRanker(sparse_weight, dense_weight)\n res = col.hybrid_search(\n [sparse_req, dense_req], rerank=rerank, limit=limit, output_fields=[\"text\"]\n )[0]\n return [hit.get(\"text\") for hit in res]\n","dense_results = dense_search(col, query_embeddings[\"dense\"][0])\nsparse_results = sparse_search(col, query_embeddings[\"sparse\"]._getrow(0))\nhybrid_results = hybrid_search(\n col,\n query_embeddings[\"dense\"][0],\n query_embeddings[\"sparse\"]._getrow(0),\n sparse_weight=0.7,\n dense_weight=1.0,\n)\n","def doc_text_formatting(ef, query, docs):\n tokenizer = ef.model.tokenizer\n query_tokens_ids = tokenizer.encode(query, return_offsets_mapping=True)\n query_tokens = tokenizer.convert_ids_to_tokens(query_tokens_ids)\n formatted_texts = []\n\n for doc in docs:\n ldx = 0\n landmarks = []\n encoding = tokenizer.encode_plus(doc, return_offsets_mapping=True)\n tokens = tokenizer.convert_ids_to_tokens(encoding[\"input_ids\"])[1:-1]\n offsets = encoding[\"offset_mapping\"][1:-1]\n for token, (start, end) in zip(tokens, offsets):\n if token in query_tokens:\n if len(landmarks) != 0 and start == landmarks[-1]:\n landmarks[-1] = end\n else:\n landmarks.append(start)\n landmarks.append(end)\n close = False\n formatted_text = \"\"\n for i, c in enumerate(doc):\n if ldx == len(landmarks):\n pass\n elif i == landmarks[ldx]:\n if close:\n formatted_text += \"\"\n else:\n formatted_text += \"\"\n close = not close\n ldx = ldx + 1\n formatted_text += c\n if close is True:\n formatted_text += \"\"\n formatted_texts.append(formatted_text)\n return formatted_texts\n","from IPython.display import Markdown, display\n\n# Dense search results\ndisplay(Markdown(\"**Dense Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, dense_results)\nfor result in dense_results:\n display(Markdown(result))\n\n# Sparse search results\ndisplay(Markdown(\"\\n**Sparse Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, sparse_results)\nfor result in formatted_results:\n display(Markdown(result))\n\n# Hybrid search results\ndisplay(Markdown(\"\\n**Hybrid Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, hybrid_results)\nfor result in formatted_results:\n display(Markdown(result))\n"],"headingContent":"Hybrid Search with Milvus","anchorList":[{"label":"Pesquisa híbrida com Milvus","href":"Hybrid-Search-with-Milvus","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/tutorials/hybrid_search_with_milvus.md b/localization/v2.4.x/site/pt/tutorials/hybrid_search_with_milvus.md index c287b8c0d..6899c0b27 100644 --- a/localization/v2.4.x/site/pt/tutorials/hybrid_search_with_milvus.md +++ b/localization/v2.4.x/site/pt/tutorials/hybrid_search_with_milvus.md @@ -18,7 +18,8 @@ title: Pesquisa híbrida com Milvus d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

Open In Colab | GitHub Repository

In this tutorial, we will demonstrate how to conduct hybrid search with Milvus and the BGE-M3 model. The BGE-M3 model can convert text into dense and sparse vectors. Milvus supports storing both types of vectors in one collection, allowing for hybrid search that enhances result relevance.
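Condensed from the tutorial's own code listing, this is how BGE-M3 produces both embedding types:

from milvus_model.hybrid import BGEM3EmbeddingFunction

ef = BGEM3EmbeddingFunction(use_fp16=False, device="cpu")
dense_dim = ef.dim["dense"]
# The returned dict holds "dense" and "sparse" embeddings for each input text.
docs_embeddings = ef(["What is the best way to start learning robotics?"])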

Milvus supports dense, sparse, and hybrid retrieval methods:

    @@ -201,11 +202,11 @@ def dense_search(col,

Let's run three different searches with the defined functions:

    dense_results = dense_search(col, query_embeddings["dense"][0])
    -sparse_results = sparse_search(col, query_embeddings["sparse"][0])
    +sparse_results = sparse_search(col, query_embeddings["sparse"]._getrow(0))
     hybrid_results = hybrid_search(
         col,
         query_embeddings["dense"][0],
    -    query_embeddings["sparse"][0],
    +    query_embeddings["sparse"]._getrow(0),
         sparse_weight=0.7,
         dense_weight=1.0,
     )
    @@ -294,13 +295,13 @@ formatted_results = doc_text_formatting(ef, query, hybrid_results)
     

Hybrid search results:

What is the best way to get started with robotics? Which is the best development board to start working on it?

What is Java programming? How to learn the Java programming language?

What is the best way to start learning robotics?

How do we prepare for UPSC?

How can physics be made easy to learn?

What are the best ways to learn French?

How can I learn to speak English fluently?

How can I learn computer security?

How can I start to learn computer security?

How do I learn a computer language like Java?

What is the alternative to machine learning?

How to create a new terminal and a new shell in Linux using C programming?

    diff --git a/localization/v2.4.x/site/pt/tutorials/image_similarity_search.json b/localization/v2.4.x/site/pt/tutorials/image_similarity_search.json index 51153da01..8b570c363 100644 --- a/localization/v2.4.x/site/pt/tutorials/image_similarity_search.json +++ b/localization/v2.4.x/site/pt/tutorials/image_similarity_search.json @@ -1 +1 @@ -{"codeList":["!wget https://github.com/milvus-io/pymilvus-assets/releases/download/imagedata/reverse_image_search.zip\n!unzip -q -o reverse_image_search.zip\n","$ pip install pymilvus --upgrade\n$ pip install timm\n","import torch\nfrom PIL import Image\nimport timm\nfrom sklearn.preprocessing import normalize\nfrom timm.data import resolve_data_config\nfrom timm.data.transforms_factory import create_transform\n\n\nclass FeatureExtractor:\n def __init__(self, modelname):\n # Load the pre-trained model\n self.model = timm.create_model(\n modelname, pretrained=True, num_classes=0, global_pool=\"avg\"\n )\n self.model.eval()\n\n # Get the input size required by the model\n self.input_size = self.model.default_cfg[\"input_size\"]\n\n config = resolve_data_config({}, model=modelname)\n # Get the preprocessing function provided by TIMM for the model\n self.preprocess = create_transform(**config)\n\n def __call__(self, imagepath):\n # Preprocess the input image\n input_image = Image.open(imagepath).convert(\"RGB\") # Convert to RGB if needed\n input_image = self.preprocess(input_image)\n\n # Convert the image to a PyTorch tensor and add a batch dimension\n input_tensor = input_image.unsqueeze(0)\n\n # Perform inference\n with torch.no_grad():\n output = self.model(input_tensor)\n\n # Extract the feature vector\n feature_vector = output.squeeze().numpy()\n\n return normalize(feature_vector.reshape(1, -1), norm=\"l2\").flatten()\n","from pymilvus import MilvusClient\n\n# Set up a Milvus client\nclient = MilvusClient(uri=\"example.db\")\n# Create a collection in quick setup mode\nif client.has_collection(collection_name=\"image_embeddings\"):\n client.drop_collection(collection_name=\"image_embeddings\")\nclient.create_collection(\n collection_name=\"image_embeddings\",\n vector_field_name=\"vector\",\n dimension=512,\n auto_id=True,\n enable_dynamic_field=True,\n metric_type=\"COSINE\",\n)\n","import os\n\nextractor = FeatureExtractor(\"resnet34\")\n\nroot = \"./train\"\ninsert = True\nif insert is True:\n for dirpath, foldername, filenames in os.walk(root):\n for filename in filenames:\n if filename.endswith(\".JPEG\"):\n filepath = dirpath + \"/\" + filename\n image_embedding = extractor(filepath)\n client.insert(\n \"image_embeddings\",\n {\"vector\": image_embedding, \"filename\": filepath},\n )\n","from IPython.display import display\n\nquery_image = \"./test/Afghan_hound/n02088094_4261.JPEG\"\n\nresults = client.search(\n \"image_embeddings\",\n data=[extractor(query_image)],\n output_fields=[\"filename\"],\n search_params={\"metric_type\": \"COSINE\"},\n)\nimages = []\nfor result in results:\n for hit in result[:10]:\n filename = hit[\"entity\"][\"filename\"]\n img = Image.open(filename)\n img = img.resize((150, 150))\n images.append(img)\n\nwidth = 150 * 5\nheight = 150 * 2\nconcatenated_image = Image.new(\"RGB\", (width, height))\n\nfor idx, img in enumerate(images):\n x = idx % 5\n y = idx // 5\n concatenated_image.paste(img, (x * 150, y * 150))\ndisplay(\"query\")\ndisplay(Image.open(query_image).resize((150, 150)))\ndisplay(\"results\")\ndisplay(concatenated_image)\n"],"headingContent":"","anchorList":[{"label":"Pesquisa de imagens com o 
Milvus","href":"Image-Search-with-Milvus","type":1,"isActive":false},{"label":"Preparação do conjunto de dados","href":"Dataset-Preparation","type":2,"isActive":false},{"label":"Pré-requisitos","href":"Prequisites","type":2,"isActive":false},{"label":"Definir o Extrator de caraterísticas","href":"Define-the-Feature-Extractor","type":2,"isActive":false},{"label":"Criar uma coleção Milvus","href":"Create-a-Milvus-Collection","type":2,"isActive":false},{"label":"Inserir os embeddings no Milvus","href":"Insert-the-Embeddings-to-Milvus","type":2,"isActive":false},{"label":"Implementação rápida","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["!wget https://github.com/milvus-io/pymilvus-assets/releases/download/imagedata/reverse_image_search.zip\n!unzip -q -o reverse_image_search.zip\n","$ pip install pymilvus --upgrade\n$ pip install timm\n","import torch\nfrom PIL import Image\nimport timm\nfrom sklearn.preprocessing import normalize\nfrom timm.data import resolve_data_config\nfrom timm.data.transforms_factory import create_transform\n\n\nclass FeatureExtractor:\n def __init__(self, modelname):\n # Load the pre-trained model\n self.model = timm.create_model(\n modelname, pretrained=True, num_classes=0, global_pool=\"avg\"\n )\n self.model.eval()\n\n # Get the input size required by the model\n self.input_size = self.model.default_cfg[\"input_size\"]\n\n config = resolve_data_config({}, model=modelname)\n # Get the preprocessing function provided by TIMM for the model\n self.preprocess = create_transform(**config)\n\n def __call__(self, imagepath):\n # Preprocess the input image\n input_image = Image.open(imagepath).convert(\"RGB\") # Convert to RGB if needed\n input_image = self.preprocess(input_image)\n\n # Convert the image to a PyTorch tensor and add a batch dimension\n input_tensor = input_image.unsqueeze(0)\n\n # Perform inference\n with torch.no_grad():\n output = self.model(input_tensor)\n\n # Extract the feature vector\n feature_vector = output.squeeze().numpy()\n\n return normalize(feature_vector.reshape(1, -1), norm=\"l2\").flatten()\n","from pymilvus import MilvusClient\n\n# Set up a Milvus client\nclient = MilvusClient(uri=\"example.db\")\n# Create a collection in quick setup mode\nif client.has_collection(collection_name=\"image_embeddings\"):\n client.drop_collection(collection_name=\"image_embeddings\")\nclient.create_collection(\n collection_name=\"image_embeddings\",\n vector_field_name=\"vector\",\n dimension=512,\n auto_id=True,\n enable_dynamic_field=True,\n metric_type=\"COSINE\",\n)\n","import os\n\nextractor = FeatureExtractor(\"resnet34\")\n\nroot = \"./train\"\ninsert = True\nif insert is True:\n for dirpath, foldername, filenames in os.walk(root):\n for filename in filenames:\n if filename.endswith(\".JPEG\"):\n filepath = dirpath + \"/\" + filename\n image_embedding = extractor(filepath)\n client.insert(\n \"image_embeddings\",\n {\"vector\": image_embedding, \"filename\": filepath},\n )\n","from IPython.display import display\n\nquery_image = \"./test/Afghan_hound/n02088094_4261.JPEG\"\n\nresults = client.search(\n \"image_embeddings\",\n data=[extractor(query_image)],\n output_fields=[\"filename\"],\n search_params={\"metric_type\": \"COSINE\"},\n)\nimages = []\nfor result in results:\n for hit in result[:10]:\n filename = hit[\"entity\"][\"filename\"]\n img = Image.open(filename)\n img = img.resize((150, 150))\n images.append(img)\n\nwidth = 150 * 5\nheight = 150 * 2\nconcatenated_image = Image.new(\"RGB\", (width, 
height))\n\nfor idx, img in enumerate(images):\n x = idx % 5\n y = idx // 5\n concatenated_image.paste(img, (x * 150, y * 150))\ndisplay(\"query\")\ndisplay(Image.open(query_image).resize((150, 150)))\ndisplay(\"results\")\ndisplay(concatenated_image)\n"],"headingContent":"Image Search with Milvus","anchorList":[{"label":"Pesquisa de imagens com o Milvus","href":"Image-Search-with-Milvus","type":1,"isActive":false},{"label":"Preparação do conjunto de dados","href":"Dataset-Preparation","type":2,"isActive":false},{"label":"Pré-requisitos","href":"Prequisites","type":2,"isActive":false},{"label":"Definir o Extrator de caraterísticas","href":"Define-the-Feature-Extractor","type":2,"isActive":false},{"label":"Criar uma coleção Milvus","href":"Create-a-Milvus-Collection","type":2,"isActive":false},{"label":"Inserir os embeddings no Milvus","href":"Insert-the-Embeddings-to-Milvus","type":2,"isActive":false},{"label":"Implementação rápida","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/tutorials/image_similarity_search.md b/localization/v2.4.x/site/pt/tutorials/image_similarity_search.md index 099abc904..44e4001fb 100644 --- a/localization/v2.4.x/site/pt/tutorials/image_similarity_search.md +++ b/localization/v2.4.x/site/pt/tutorials/image_similarity_search.md @@ -1,7 +1,7 @@ --- id: image_similarity_search.md summary: pesquisa de imagens com Milvus -title: Pesquisa de imagens com Milvus +title: Pesquisa de imagens com o Milvus ---

Image Search with Milvus

Open In Colab | GitHub Repository

In this notebook, we will show you how to use Milvus to search for similar images in a dataset. To demonstrate this, we will use a subset of the ImageNet dataset and search for an image of an Afghan hound.
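Condensed from the tutorial's full code listing: embed a query image with the tutorial's FeatureExtractor and search the image_embeddings collection by cosine similarity.

from pymilvus import MilvusClient

client = MilvusClient(uri="example.db")
results = client.search(
    "image_embeddings",
    data=[extractor("./test/Afghan_hound/n02088094_4261.JPEG")],  # FeatureExtractor defined in the tutorial
    output_fields=["filename"],
    search_params={"metric_type": "COSINE"},
)
for hit in results[0][:10]:
    print(hit["entity"]["filename"])  # paths of the ten most similar images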

Dataset Preparation

Open In Colab | GitHub Repository

This tutorial presents multimodal RAG with Milvus, the visualized BGE model, and GPT-4o. With this system, users can upload an image and edit text instructions, which are processed by BGE's composed retrieval model to search for candidate images. GPT-4o then acts as a reranker, selecting the most suitable image and providing the rationale behind the choice. This powerful combination enables an intuitive and seamless image search experience, leveraging Milvus for efficient retrieval, the BGE model for precise image processing and matching, and GPT-4o for advanced reranking.

Preparation

| Milvus version | Python SDK version | Java SDK version | Node.js SDK version |
|---|---|---|---|
| 2.4.4 | 2.4.3 | 2.4.1 | 2.4.2 |
    diff --git a/localization/v2.4.x/site/pt/userGuide/clustering-compaction.json b/localization/v2.4.x/site/pt/userGuide/clustering-compaction.json index 12a12024c..e43e265f0 100644 --- a/localization/v2.4.x/site/pt/userGuide/clustering-compaction.json +++ b/localization/v2.4.x/site/pt/userGuide/clustering-compaction.json @@ -1 +1 @@ -{"codeList":["dataCoord:\n compaction:\n clustering:\n enable: true \n autoEnable: false \n triggerInterval: 600 \n minInterval: 3600 \n maxInterval: 259200 \n newDataSizeThreshold: 512m \n timeout: 7200\n \nqueryNode:\n enableSegmentPrune: true \n\ndatanode:\n clusteringCompaction:\n memoryBufferRatio: 0.1 \n workPoolSize: 8 \ncommon:\n usePartitionKeyAsClusteringKey: true \n","default_fields = [\n FieldSchema(name=\"id\", dtype=DataType.INT64, is_primary=True),\n FieldSchema(name=\"key\", dtype=DataType.INT64, is_clustering_key=True),\n FieldSchema(name=\"var\", dtype=DataType.VARCHAR, max_length=1000, is_primary=False),\n FieldSchema(name=\"embeddings\", dtype=DataType.FLOAT_VECTOR, dim=dim)\n]\n\ndefault_schema = CollectionSchema(\n fields=default_fields, \n description=\"test clustering-key collection\"\n)\n\ncoll1 = Collection(name=\"clustering_test\", schema=default_schema)\n","coll1.compact(is_clustering=True)\ncoll1.get_compaction_state(is_clustering=True)\ncoll1.wait_for_compaction_completed(is_clustering=True)\n"],"headingContent":"","anchorList":[{"label":"Compactação de clusters","href":"Clustering-Compaction","type":1,"isActive":false},{"label":"Visão geral","href":"Overview","type":2,"isActive":false},{"label":"Usar compactação de clustering","href":"Use-Clustering-Compaction","type":2,"isActive":false},{"label":"Configuração da coleção","href":"Collection-Configuration","type":2,"isActive":false},{"label":"Acionar compactação de clustering","href":"Trigger-Clustering-Compaction","type":2,"isActive":false},{"label":"Melhores práticas","href":"Best-practices","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["dataCoord:\n compaction:\n clustering:\n enable: true \n autoEnable: false \n triggerInterval: 600 \n minInterval: 3600 \n maxInterval: 259200 \n newDataSizeThreshold: 512m \n timeout: 7200\n \nqueryNode:\n enableSegmentPrune: true \n\ndatanode:\n clusteringCompaction:\n memoryBufferRatio: 0.1 \n workPoolSize: 8 \ncommon:\n usePartitionKeyAsClusteringKey: true \n","default_fields = [\n FieldSchema(name=\"id\", dtype=DataType.INT64, is_primary=True),\n FieldSchema(name=\"key\", dtype=DataType.INT64, is_clustering_key=True),\n FieldSchema(name=\"var\", dtype=DataType.VARCHAR, max_length=1000, is_primary=False),\n FieldSchema(name=\"embeddings\", dtype=DataType.FLOAT_VECTOR, dim=dim)\n]\n\ndefault_schema = CollectionSchema(\n fields=default_fields, \n description=\"test clustering-key collection\"\n)\n\ncoll1 = Collection(name=\"clustering_test\", schema=default_schema)\n","coll1.compact(is_clustering=True)\ncoll1.get_compaction_state(is_clustering=True)\ncoll1.wait_for_compaction_completed(is_clustering=True)\n"],"headingContent":"Clustering Compaction","anchorList":[{"label":"Compactação de clusters","href":"Clustering-Compaction","type":1,"isActive":false},{"label":"Visão geral","href":"Overview","type":2,"isActive":false},{"label":"Usar compactação de clustering","href":"Use-Clustering-Compaction","type":2,"isActive":false},{"label":"Acionar compactação de clustering","href":"Trigger-Clustering-Compaction","type":2,"isActive":false},{"label":"Melhores práticas","href":"Best-practices","type":2,"isActive":false}]} \ No 
newline at end of file diff --git a/localization/v2.4.x/site/pt/userGuide/clustering-compaction.md b/localization/v2.4.x/site/pt/userGuide/clustering-compaction.md index 08b2cacb2..859809499 100644 --- a/localization/v2.4.x/site/pt/userGuide/clustering-compaction.md +++ b/localization/v2.4.x/site/pt/userGuide/clustering-compaction.md @@ -99,9 +99,9 @@ common:

| Configuration item | Description | Default value |
|---|---|---|
| enable | Specifies whether to enable clustering compaction. Set this to true if you need to enable this feature for every collection that has a clustering key. | false |
| autoEnable | Specifies whether automatically triggered compaction is enabled. Setting this to true indicates that Milvus compacts collections that have a clustering key at the specified intervals. | false |
| triggerInterval | Specifies the interval, in milliseconds, at which Milvus starts clustering compaction. Valid only when autoEnable is set to true. | - |
| minInterval | Specifies the minimum interval in seconds. Valid only when autoEnable is set to true. Setting this to an integer greater than triggerInterval helps avoid repeated compactions within a short period. | - |
| maxInterval | Specifies the maximum interval in seconds. Valid only when autoEnable is set to true. Once Milvus detects that a collection has not been clustering-compacted for longer than this value, it forces a clustering compaction. | - |
| newDataSizeThreshold | Specifies the upper threshold for triggering a clustering compaction. Valid only when autoEnable is set to true. Once Milvus detects that the data volume in a collection exceeds this value, it starts a clustering compaction process. | - |
| timeout | Specifies the timeout duration for a clustering compaction. A clustering compaction fails if its execution time exceeds this value. | - |

@@ -139,22 +139,7 @@ common:
• To apply the above changes to your Milvus cluster, follow the steps in Configure Milvus with Helm and Configure Milvus with Milvus Operator.

Collection Configuration

For clustering compaction on a specific collection, you must select a scalar field in the collection as the clustering key.

default_fields = [
    FieldSchema(name="id", dtype=DataType.INT64, is_primary=True),
    FieldSchema(name="key", dtype=DataType.INT64, is_clustering_key=True),
    FieldSchema(name="var", dtype=DataType.VARCHAR, max_length=1000, is_primary=False),
    FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=dim)
]

default_schema = CollectionSchema(
    fields=default_fields,
    description="test clustering-key collection"
)

coll1 = Collection(name="clustering_test", schema=default_schema)
    diff --git a/localization/v2.4.x/site/pt/userGuide/insert-update-delete.json b/localization/v2.4.x/site/pt/userGuide/insert-update-delete.json
    index b0ba0ae93..9f7e962d9 100644
    --- a/localization/v2.4.x/site/pt/userGuide/insert-update-delete.json
    +++ b/localization/v2.4.x/site/pt/userGuide/insert-update-delete.json
    @@ -1 +1 @@
    -{"codeList":["from pymilvus import MilvusClient\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n    uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n    collection_name=\"quick_setup\",\n    dimension=5,\n    metric_type=\"IP\"\n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n    .uri(CLUSTER_ENDPOINT)\n    .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n    .collectionName(\"quick_setup\")\n    .dimension(5)\n    .metricType(\"IP\")\n    .build();\n\nclient.createCollection(quickSetupReq);\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n    collection_name: \"quick_setup\",\n    dimension: 5,\n    metric_type: \"IP\"\n});  \n","# 3. Insert some data\ndata=[\n    {\"id\": 0, \"vector\": [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592], \"color\": \"pink_8682\"},\n    {\"id\": 1, \"vector\": [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104], \"color\": \"red_7025\"},\n    {\"id\": 2, \"vector\": [0.43742130801983836, -0.5597502546264526, 0.6457887650909682, 0.7894058910881185, 0.20785793220625592], \"color\": \"orange_6781\"},\n    {\"id\": 3, \"vector\": [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345], \"color\": \"pink_9298\"},\n    {\"id\": 4, \"vector\": [0.4452349528804562, -0.8757026943054742, 0.8220779437047674, 0.46406290649483184, 0.30337481143159106], \"color\": \"red_4794\"},\n    {\"id\": 5, \"vector\": [0.985825131989184, -0.8144651566660419, 0.6299267002202009, 0.1206906911183383, -0.1446277761879955], \"color\": \"yellow_4222\"},\n    {\"id\": 6, \"vector\": [0.8371977790571115, -0.015764369584852833, -0.31062937026679327, -0.562666951622192, -0.8984947637863987], \"color\": \"red_9392\"},\n    {\"id\": 7, \"vector\": [-0.33445148015177995, -0.2567135004164067, 0.8987539745369246, 0.9402995886420709, 0.5378064918413052], \"color\": \"grey_8510\"},\n    {\"id\": 8, \"vector\": [0.39524717779832685, 0.4000257286739164, -0.5890507376891594, -0.8650502298996872, -0.6140360785406336], \"color\": \"white_9381\"},\n    {\"id\": 9, \"vector\": [0.5718280481994695, 0.24070317428066512, -0.3737913482606834, -0.06726932177492717, -0.6980531615588608], \"color\": \"purple_4976\"}\n]\n\nres = client.insert(\n    collection_name=\"quick_setup\",\n    data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"insert_count\": 10,\n#     \"ids\": [\n#         0,\n#         1,\n#         2,\n#         3,\n#         4,\n#         5,\n#         6,\n#         7,\n#         8,\n#         9\n#     ]\n# }\n","import java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp;\n\n// 3. 
Insert some data\nList data = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 0L, \"vector\", Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f), \"color\", \"pink_8682\")),\n    new JSONObject(Map.of(\"id\", 1L, \"vector\", Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f), \"color\", \"red_7025\")),\n    new JSONObject(Map.of(\"id\", 2L, \"vector\", Arrays.asList(0.43742130801983836f, -0.5597502546264526f, 0.6457887650909682f, 0.7894058910881185f, 0.20785793220625592f), \"color\", \"orange_6781\")),\n    new JSONObject(Map.of(\"id\", 3L, \"vector\", Arrays.asList(0.3172005263489739f, 0.9719044792798428f, -0.36981146090600725f, -0.4860894583077995f, 0.95791889146345f), \"color\", \"pink_9298\")),\n    new JSONObject(Map.of(\"id\", 4L, \"vector\", Arrays.asList(0.4452349528804562f, -0.8757026943054742f, 0.8220779437047674f, 0.46406290649483184f, 0.30337481143159106f), \"color\", \"red_4794\")),\n    new JSONObject(Map.of(\"id\", 5L, \"vector\", Arrays.asList(0.985825131989184f, -0.8144651566660419f, 0.6299267002202009f, 0.1206906911183383f, -0.1446277761879955f), \"color\", \"yellow_4222\")),\n    new JSONObject(Map.of(\"id\", 6L, \"vector\", Arrays.asList(0.8371977790571115f, -0.015764369584852833f, -0.31062937026679327f, -0.562666951622192f, -0.8984947637863987f), \"color\", \"red_9392\")),\n    new JSONObject(Map.of(\"id\", 7L, \"vector\", Arrays.asList(-0.33445148015177995f, -0.2567135004164067f, 0.8987539745369246f, 0.9402995886420709f, 0.5378064918413052f), \"color\", \"grey_8510\")),\n    new JSONObject(Map.of(\"id\", 8L, \"vector\", Arrays.asList(0.39524717779832685f, 0.4000257286739164f, -0.5890507376891594f, -0.8650502298996872f, -0.6140360785406336f), \"color\", \"white_9381\")),\n    new JSONObject(Map.of(\"id\", 9L, \"vector\", Arrays.asList(0.5718280481994695f, 0.24070317428066512f, -0.3737913482606834f, -0.06726932177492717f, -0.6980531615588608f), \"color\", \"purple_4976\"))\n);\n\nInsertReq insertReq = InsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 10}\n","// 3. 
Insert some data\n\nvar data = [\n    {id: 0, vector: [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592], color: \"pink_8682\"},\n    {id: 1, vector: [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104], color: \"red_7025\"},\n    {id: 2, vector: [0.43742130801983836, -0.5597502546264526, 0.6457887650909682, 0.7894058910881185, 0.20785793220625592], color: \"orange_6781\"},\n    {id: 3, vector: [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345], color: \"pink_9298\"},\n    {id: 4, vector: [0.4452349528804562, -0.8757026943054742, 0.8220779437047674, 0.46406290649483184, 0.30337481143159106], color: \"red_4794\"},\n    {id: 5, vector: [0.985825131989184, -0.8144651566660419, 0.6299267002202009, 0.1206906911183383, -0.1446277761879955], color: \"yellow_4222\"},\n    {id: 6, vector: [0.8371977790571115, -0.015764369584852833, -0.31062937026679327, -0.562666951622192, -0.8984947637863987], color: \"red_9392\"},\n    {id: 7, vector: [-0.33445148015177995, -0.2567135004164067, 0.8987539745369246, 0.9402995886420709, 0.5378064918413052], color: \"grey_8510\"},\n    {id: 8, vector: [0.39524717779832685, 0.4000257286739164, -0.5890507376891594, -0.8650502298996872, -0.6140360785406336], color: \"white_9381\"},\n    {id: 9, vector: [0.5718280481994695, 0.24070317428066512, -0.3737913482606834, -0.06726932177492717, -0.6980531615588608], color: \"purple_4976\"}        \n]\n\nvar res = await client.insert({\n    collection_name: \"quick_setup\",\n    data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 10\n// \n","# 4. Insert some more data into a specific partition\ndata=[\n    {\"id\": 10, \"vector\": [-0.5570353903748935, -0.8997887893201304, -0.7123782431855732, -0.6298990746450119, 0.6699215060604258], \"color\": \"red_1202\"},\n    {\"id\": 11, \"vector\": [0.6319019033373907, 0.6821488267878275, 0.8552303045704168, 0.36929791364943054, -0.14152860714878068], \"color\": \"blue_4150\"},\n    {\"id\": 12, \"vector\": [0.9483947484855766, -0.32294203351925344, 0.9759290319978025, 0.8262982148666174, -0.8351194181285713], \"color\": \"orange_4590\"},\n    {\"id\": 13, \"vector\": [-0.5449109892498731, 0.043511240563786524, -0.25105249484790804, -0.012030655265886425, -0.0010987671273892108], \"color\": \"pink_9619\"},\n    {\"id\": 14, \"vector\": [0.6603339372951424, -0.10866551787442225, -0.9435597754324891, 0.8230244263466688, -0.7986720938400362], \"color\": \"orange_4863\"},\n    {\"id\": 15, \"vector\": [-0.8825129181091456, -0.9204557711667729, -0.935350065513425, 0.5484069690287079, 0.24448151140671204], \"color\": \"orange_7984\"},\n    {\"id\": 16, \"vector\": [0.6285586391568163, 0.5389064528263487, -0.3163366239905099, 0.22036279378888013, 0.15077052220816167], \"color\": \"blue_9010\"},\n    {\"id\": 17, \"vector\": [-0.20151825016059233, -0.905239387635804, 0.6749305353372479, -0.7324272081377843, -0.33007998971889263], \"color\": \"blue_4521\"},\n    {\"id\": 18, \"vector\": [0.2432286610792349, 0.01785636564206139, -0.651356982731391, -0.35848148851027895, -0.7387383128324057], \"color\": \"orange_2529\"},\n    {\"id\": 19, \"vector\": [0.055512329053363674, 0.7100266349039421, 0.4956956543575197, 0.24541352586717702, 0.4209030729923515], \"color\": \"red_9437\"}\n]\n\nclient.create_partition(\n    collection_name=\"quick_setup\",\n    partition_name=\"partitionA\"\n)\n\nres = client.insert(\n    
collection_name=\"quick_setup\",\n    data=data,\n    partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"insert_count\": 10,\n#     \"ids\": [\n#         10,\n#         11,\n#         12,\n#         13,\n#         14,\n#         15,\n#         16,\n#         17,\n#         18,\n#         19\n#     ]\n# }\n","// 4. Insert some more data into a specific partition\ndata = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 10L, \"vector\", Arrays.asList(-0.5570353903748935f, -0.8997887893201304f, -0.7123782431855732f, -0.6298990746450119f, 0.6699215060604258f), \"color\", \"red_1202\")),\n    new JSONObject(Map.of(\"id\", 11L, \"vector\", Arrays.asList(0.6319019033373907f, 0.6821488267878275f, 0.8552303045704168f, 0.36929791364943054f, -0.14152860714878068f), \"color\", \"blue_4150\")),\n    new JSONObject(Map.of(\"id\", 12L, \"vector\", Arrays.asList(0.9483947484855766f, -0.32294203351925344f, 0.9759290319978025f, 0.8262982148666174f, -0.8351194181285713f), \"color\", \"orange_4590\")),\n    new JSONObject(Map.of(\"id\", 13L, \"vector\", Arrays.asList(-0.5449109892498731f, 0.043511240563786524f, -0.25105249484790804f, -0.012030655265886425f, -0.0010987671273892108f), \"color\", \"pink_9619\")),\n    new JSONObject(Map.of(\"id\", 14L, \"vector\", Arrays.asList(0.6603339372951424f, -0.10866551787442225f, -0.9435597754324891f, 0.8230244263466688f, -0.7986720938400362f), \"color\", \"orange_4863\")),\n    new JSONObject(Map.of(\"id\", 15L, \"vector\", Arrays.asList(-0.8825129181091456f, -0.9204557711667729f, -0.935350065513425f, 0.5484069690287079f, 0.24448151140671204f), \"color\", \"orange_7984\")),\n    new JSONObject(Map.of(\"id\", 16L, \"vector\", Arrays.asList(0.6285586391568163f, 0.5389064528263487f, -0.3163366239905099f, 0.22036279378888013f, 0.15077052220816167f), \"color\", \"blue_9010\")),\n    new JSONObject(Map.of(\"id\", 17L, \"vector\", Arrays.asList(-0.20151825016059233f, -0.905239387635804f, 0.6749305353372479f, -0.7324272081377843f, -0.33007998971889263f), \"color\", \"blue_4521\")),\n    new JSONObject(Map.of(\"id\", 18L, \"vector\", Arrays.asList(0.2432286610792349f, 0.01785636564206139f, -0.651356982731391f, -0.35848148851027895f, -0.7387383128324057f), \"color\", \"orange_2529\")),\n    new JSONObject(Map.of(\"id\", 19L, \"vector\", Arrays.asList(0.055512329053363674f, 0.7100266349039421f, 0.4956956543575197f, 0.24541352586717702f, 0.4209030729923515f), \"color\", \"red_9437\"))\n);\n\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n    .collectionName(\"quick_setup\")\n    .partitionName(\"partitionA\")\n    .build();\n\nclient.createPartition(createPartitionReq);\n\ninsertReq = InsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .partitionName(\"partitionA\")\n    .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 10}\n","// 4. 
Insert some more data into a specific partition\ndata = [\n    {id: 10, vector: [-0.5570353903748935, -0.8997887893201304, -0.7123782431855732, -0.6298990746450119, 0.6699215060604258], color: \"red_1202\"},\n    {id: 11, vector: [0.6319019033373907, 0.6821488267878275, 0.8552303045704168, 0.36929791364943054, -0.14152860714878068], color: \"blue_4150\"},\n    {id: 12, vector: [0.9483947484855766, -0.32294203351925344, 0.9759290319978025, 0.8262982148666174, -0.8351194181285713], color: \"orange_4590\"},\n    {id: 13, vector: [-0.5449109892498731, 0.043511240563786524, -0.25105249484790804, -0.012030655265886425, -0.0010987671273892108], color: \"pink_9619\"},\n    {id: 14, vector: [0.6603339372951424, -0.10866551787442225, -0.9435597754324891, 0.8230244263466688, -0.7986720938400362], color: \"orange_4863\"},\n    {id: 15, vector: [-0.8825129181091456, -0.9204557711667729, -0.935350065513425, 0.5484069690287079, 0.24448151140671204], color: \"orange_7984\"},\n    {id: 16, vector: [0.6285586391568163, 0.5389064528263487, -0.3163366239905099, 0.22036279378888013, 0.15077052220816167], color: \"blue_9010\"},\n    {id: 17, vector: [-0.20151825016059233, -0.905239387635804, 0.6749305353372479, -0.7324272081377843, -0.33007998971889263], color: \"blue_4521\"},\n    {id: 18, vector: [0.2432286610792349, 0.01785636564206139, -0.651356982731391, -0.35848148851027895, -0.7387383128324057], color: \"orange_2529\"},\n    {id: 19, vector: [0.055512329053363674, 0.7100266349039421, 0.4956956543575197, 0.24541352586717702, 0.4209030729923515], color: \"red_9437\"}\n]\n\nawait client.createPartition({\n    collection_name: \"quick_setup\",\n    partition_name: \"partitionA\"\n})\n\nres = await client.insert({\n    collection_name: \"quick_setup\",\n    data: data,\n    partition_name: \"partitionA\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 10\n// \n","# 5. 
Upsert some data\ndata=[\n    {\"id\": 0, \"vector\": [-0.619954382375778, 0.4479436794798608, -0.17493894838751745, -0.4248030059917294, -0.8648452746018911], \"color\": \"black_9898\"},\n    {\"id\": 1, \"vector\": [0.4762662251462588, -0.6942502138717026, -0.4490002642657902, -0.628696575798281, 0.9660395877041965], \"color\": \"red_7319\"},\n    {\"id\": 2, \"vector\": [-0.8864122635045097, 0.9260170474445351, 0.801326976181461, 0.6383943392381306, 0.7563037341572827], \"color\": \"white_6465\"},\n    {\"id\": 3, \"vector\": [0.14594326235891586, -0.3775407299900644, -0.3765479013078812, 0.20612075380355122, 0.4902678929632145], \"color\": \"orange_7580\"},\n    {\"id\": 4, \"vector\": [0.4548498669607359, -0.887610217681605, 0.5655081329910452, 0.19220509387904117, 0.016513983433433577], \"color\": \"red_3314\"},\n    {\"id\": 5, \"vector\": [0.11755001847051827, -0.7295149788999611, 0.2608115847524266, -0.1719167007897875, 0.7417611743754855], \"color\": \"black_9955\"},\n    {\"id\": 6, \"vector\": [0.9363032158314308, 0.030699901477745373, 0.8365910312319647, 0.7823840208444011, 0.2625222076909237], \"color\": \"yellow_2461\"},\n    {\"id\": 7, \"vector\": [0.0754823906014721, -0.6390658668265143, 0.5610517334334937, -0.8986261118798251, 0.9372056764266794], \"color\": \"white_5015\"},\n    {\"id\": 8, \"vector\": [-0.3038434006935904, 0.1279149203380523, 0.503958664270957, -0.2622661156746988, 0.7407627307791929], \"color\": \"purple_6414\"},\n    {\"id\": 9, \"vector\": [-0.7125086947677588, -0.8050968321012257, -0.32608864121785786, 0.3255654958645424, 0.26227968923834233], \"color\": \"brown_7231\"}\n]\n\nres = client.upsert(\n    collection_name='quick_setup',\n    data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"upsert_count\": 10\n# }\n","// 5. 
Upsert some data\ndata = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 0L, \"vector\", Arrays.asList(-0.619954382375778f, 0.4479436794798608f, -0.17493894838751745f, -0.4248030059917294f, -0.8648452746018911f), \"color\", \"black_9898\")),\n    new JSONObject(Map.of(\"id\", 1L, \"vector\", Arrays.asList(0.4762662251462588f, -0.6942502138717026f, -0.4490002642657902f, -0.628696575798281f, 0.9660395877041965f), \"color\", \"red_7319\")),\n    new JSONObject(Map.of(\"id\", 2L, \"vector\", Arrays.asList(-0.8864122635045097f, 0.9260170474445351f, 0.801326976181461f, 0.6383943392381306f, 0.7563037341572827f), \"color\", \"white_6465\")),\n    new JSONObject(Map.of(\"id\", 3L, \"vector\", Arrays.asList(0.14594326235891586f, -0.3775407299900644f, -0.3765479013078812f, 0.20612075380355122f, 0.4902678929632145f), \"color\", \"orange_7580\")),\n    new JSONObject(Map.of(\"id\", 4L, \"vector\", Arrays.asList(0.4548498669607359f, -0.887610217681605f, 0.5655081329910452f, 0.19220509387904117f, 0.016513983433433577f), \"color\", \"red_3314\")),\n    new JSONObject(Map.of(\"id\", 5L, \"vector\", Arrays.asList(0.11755001847051827f, -0.7295149788999611f, 0.2608115847524266f, -0.1719167007897875f, 0.7417611743754855f), \"color\", \"black_9955\")),\n    new JSONObject(Map.of(\"id\", 6L, \"vector\", Arrays.asList(0.9363032158314308f, 0.030699901477745373f, 0.8365910312319647f, 0.7823840208444011f, 0.2625222076909237f), \"color\", \"yellow_2461\")),\n    new JSONObject(Map.of(\"id\", 7L, \"vector\", Arrays.asList(0.0754823906014721f, -0.6390658668265143f, 0.5610517334334937f, -0.8986261118798251f, 0.9372056764266794f), \"color\", \"white_5015\")),\n    new JSONObject(Map.of(\"id\", 8L, \"vector\", Arrays.asList(-0.3038434006935904f, 0.1279149203380523f, 0.503958664270957f, -0.2622661156746988f, 0.7407627307791929f), \"color\", \"purple_6414\")),\n    new JSONObject(Map.of(\"id\", 9L, \"vector\", Arrays.asList(-0.7125086947677588f, -0.8050968321012257f, -0.32608864121785786f, 0.3255654958645424f, 0.26227968923834233f), \"color\", \"brown_7231\"))\n);\n\nUpsertReq upsertReq = UpsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .build();\n\nUpsertResp upsertResp = client.upsert(upsertReq);\n\nSystem.out.println(JSONObject.toJSON(upsertResp));\n\n// Output:\n// {\"upsertCnt\": 10}\n","// 5. 
Upsert some data\ndata = [\n    {id: 0, vector: [-0.619954382375778, 0.4479436794798608, -0.17493894838751745, -0.4248030059917294, -0.8648452746018911], color: \"black_9898\"},\n    {id: 1, vector: [0.4762662251462588, -0.6942502138717026, -0.4490002642657902, -0.628696575798281, 0.9660395877041965], color: \"red_7319\"},\n    {id: 2, vector: [-0.8864122635045097, 0.9260170474445351, 0.801326976181461, 0.6383943392381306, 0.7563037341572827], color: \"white_6465\"},\n    {id: 3, vector: [0.14594326235891586, -0.3775407299900644, -0.3765479013078812, 0.20612075380355122, 0.4902678929632145], color: \"orange_7580\"},\n    {id: 4, vector: [0.4548498669607359, -0.887610217681605, 0.5655081329910452, 0.19220509387904117, 0.016513983433433577], color: \"red_3314\"},\n    {id: 5, vector: [0.11755001847051827, -0.7295149788999611, 0.2608115847524266, -0.1719167007897875, 0.7417611743754855], color: \"black_9955\"},\n    {id: 6, vector: [0.9363032158314308, 0.030699901477745373, 0.8365910312319647, 0.7823840208444011, 0.2625222076909237], color: \"yellow_2461\"},\n    {id: 7, vector: [0.0754823906014721, -0.6390658668265143, 0.5610517334334937, -0.8986261118798251, 0.9372056764266794], color: \"white_5015\"},\n    {id: 8, vector: [-0.3038434006935904, 0.1279149203380523, 0.503958664270957, -0.2622661156746988, 0.7407627307791929], color: \"purple_6414\"},\n    {id: 9, vector: [-0.7125086947677588, -0.8050968321012257, -0.32608864121785786, 0.3255654958645424, 0.26227968923834233], color: \"brown_7231\"}\n]\n\nres = await client.upsert({\n    collection_name: \"quick_setup\",\n    data: data,\n})\n\nconsole.log(res.upsert_cnt)\n\n// Output\n// \n// 10\n// \n","# 6. Upsert data in partitions\ndata=[\n    {\"id\": 10, \"vector\": [0.06998888224297328, 0.8582816610326578, -0.9657938677934292, 0.6527905683627726, -0.8668460657158576], \"color\": \"black_3651\"},\n    {\"id\": 11, \"vector\": [0.6060703043917468, -0.3765080534566074, -0.7710758854987239, 0.36993888322346136, 0.5507513364206531], \"color\": \"grey_2049\"},\n    {\"id\": 12, \"vector\": [-0.9041813104515337, -0.9610546012461163, 0.20033003106083358, 0.11842506351635174, 0.8327356724591011], \"color\": \"blue_6168\"},\n    {\"id\": 13, \"vector\": [0.3202914977909075, -0.7279137773695252, -0.04747830871620273, 0.8266053056909548, 0.8277957187455489], \"color\": \"blue_1672\"},\n    {\"id\": 14, \"vector\": [0.2975811497890859, 0.2946936202691086, 0.5399463833894609, 0.8385334966677529, -0.4450543984655133], \"color\": \"pink_1601\"},\n    {\"id\": 15, \"vector\": [-0.04697464305600074, -0.08509022265734134, 0.9067184632552001, -0.2281912685064822, -0.9747503428652762], \"color\": \"yellow_9925\"},\n    {\"id\": 16, \"vector\": [-0.9363075919673911, -0.8153981031085669, 0.7943039120490902, -0.2093886809842529, 0.0771191335807897], \"color\": \"orange_9872\"},\n    {\"id\": 17, \"vector\": [-0.050451522820639916, 0.18931572752321935, 0.7522886192190488, -0.9071793089474034, 0.6032647330692296], \"color\": \"red_6450\"},\n    {\"id\": 18, \"vector\": [-0.9181544231141592, 0.6700755998126806, -0.014174674636136642, 0.6325780463623432, -0.49662222164032976], \"color\": \"purple_7392\"},\n    {\"id\": 19, \"vector\": [0.11426945899602536, 0.6089190684002581, -0.5842735738352236, 0.057050610092692855, -0.035163433018196244], \"color\": \"pink_4996\"}\n]\n\nres = client.upsert(\n    collection_name=\"quick_setup\",\n    data=data,\n    partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"upsert_count\": 10\n# }\n","import 
io.milvus.v2.service.vector.request.UpsertReq;\nimport io.milvus.v2.service.vector.response.UpsertResp;\n\n// 6. Upsert data in parition\n\ndata = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 10L, \"vector\", Arrays.asList(0.06998888224297328f, 0.8582816610326578f, -0.9657938677934292f, 0.6527905683627726f, -0.8668460657158576f), \"color\", \"black_3651\")),\n    new JSONObject(Map.of(\"id\", 11L, \"vector\", Arrays.asList(0.6060703043917468f, -0.3765080534566074f, -0.7710758854987239f, 0.36993888322346136f, 0.5507513364206531f), \"color\", \"grey_2049\")),\n    new JSONObject(Map.of(\"id\", 12L, \"vector\", Arrays.asList(-0.9041813104515337f, -0.9610546012461163f, 0.20033003106083358f, 0.11842506351635174f, 0.8327356724591011f), \"color\", \"blue_6168\")),\n    new JSONObject(Map.of(\"id\", 13L, \"vector\", Arrays.asList(0.3202914977909075f, -0.7279137773695252f, -0.04747830871620273f, 0.8266053056909548f, 0.8277957187455489f), \"color\", \"blue_1672\")),\n    new JSONObject(Map.of(\"id\", 14L, \"vector\", Arrays.asList(0.2975811497890859f, 0.2946936202691086f, 0.5399463833894609f, 0.8385334966677529f, -0.4450543984655133f), \"color\", \"pink_1601\")),\n    new JSONObject(Map.of(\"id\", 15L, \"vector\", Arrays.asList(-0.04697464305600074f, -0.08509022265734134f, 0.9067184632552001f, -0.2281912685064822f, -0.9747503428652762f), \"color\", \"yellow_9925\")),\n    new JSONObject(Map.of(\"id\", 16L, \"vector\", Arrays.asList(-0.9363075919673911f, -0.8153981031085669f, 0.7943039120490902f, -0.2093886809842529f, 0.0771191335807897f), \"color\", \"orange_9872\")),\n    new JSONObject(Map.of(\"id\", 17L, \"vector\", Arrays.asList(-0.050451522820639916f, 0.18931572752321935f, 0.7522886192190488f, -0.9071793089474034f, 0.6032647330692296f), \"color\", \"red_6450\")),\n    new JSONObject(Map.of(\"id\", 18L, \"vector\", Arrays.asList(-0.9181544231141592f, 0.6700755998126806f, -0.014174674636136642f, 0.6325780463623432f, -0.49662222164032976f), \"color\", \"purple_7392\")),\n    new JSONObject(Map.of(\"id\", 19L, \"vector\", Arrays.asList(0.11426945899602536f, 0.6089190684002581f, -0.5842735738352236f, 0.057050610092692855f, -0.035163433018196244f), \"color\", \"pink_4996\"))\n);\n\nupsertReq = UpsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .partitionName(\"partitionA\")\n    .build();\n\nupsertResp = client.upsert(upsertReq);\n\nSystem.out.println(JSONObject.toJSON(upsertResp));\n\n// Output:\n// {\"upsertCnt\": 10}\n","// 6. 
Upsert data in partitions\ndata = [\n    {id: 10, vector: [0.06998888224297328, 0.8582816610326578, -0.9657938677934292, 0.6527905683627726, -0.8668460657158576], color: \"black_3651\"},\n    {id: 11, vector: [0.6060703043917468, -0.3765080534566074, -0.7710758854987239, 0.36993888322346136, 0.5507513364206531], color: \"grey_2049\"},\n    {id: 12, vector: [-0.9041813104515337, -0.9610546012461163, 0.20033003106083358, 0.11842506351635174, 0.8327356724591011], color: \"blue_6168\"},\n    {id: 13, vector: [0.3202914977909075, -0.7279137773695252, -0.04747830871620273, 0.8266053056909548, 0.8277957187455489], color: \"blue_1672\"},\n    {id: 14, vector: [0.2975811497890859, 0.2946936202691086, 0.5399463833894609, 0.8385334966677529, -0.4450543984655133], color: \"pink_1601\"},\n    {id: 15, vector: [-0.04697464305600074, -0.08509022265734134, 0.9067184632552001, -0.2281912685064822, -0.9747503428652762], color: \"yellow_9925\"},\n    {id: 16, vector: [-0.9363075919673911, -0.8153981031085669, 0.7943039120490902, -0.2093886809842529, 0.0771191335807897], color: \"orange_9872\"},\n    {id: 17, vector: [-0.050451522820639916, 0.18931572752321935, 0.7522886192190488, -0.9071793089474034, 0.6032647330692296], color: \"red_6450\"},\n    {id: 18, vector: [-0.9181544231141592, 0.6700755998126806, -0.014174674636136642, 0.6325780463623432, -0.49662222164032976], color: \"purple_7392\"},\n    {id: 19, vector: [0.11426945899602536, 0.6089190684002581, -0.5842735738352236, 0.057050610092692855, -0.035163433018196244], color: \"pink_4996\"}\n]\n\nres = await client.upsert({\n    collection_name: \"quick_setup\",\n    data: data,\n    partition_name: \"partitionA\"\n})\n\nconsole.log(res.upsert_cnt)\n\n// Output\n// \n// 10\n// \n","# 7. Delete entities\nres = client.delete(\n    collection_name=\"quick_setup\",\n    filter=\"id in [4,5,6]\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"delete_count\": 3\n# }\n","import io.milvus.v2.service.vector.request.DeleteReq;\nimport io.milvus.v2.service.vector.response.DeleteResp;\n\n\n// 7. Delete entities\n\nDeleteReq deleteReq = DeleteReq.builder()\n    .collectionName(\"quick_setup\")\n    .filter(\"id in [4, 5, 6]\")\n    .build();\n\nDeleteResp deleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 3}\n","// 7. 
Delete entities\nres = await client.delete({\n    collection_name: \"quick_setup\",\n    filter: \"id in [4,5,6]\"\n})\n\nconsole.log(res.delete_cnt)\n\n// Output\n// \n// 3\n// \n","res = client.delete(\n    collection_name=\"quick_setup\",\n    ids=[18, 19],\n    partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"delete_count\": 2\n# }\n","deleteReq = DeleteReq.builder()\n    .collectionName(\"quick_setup\")\n    .ids(Arrays.asList(18L, 19L))\n    .partitionName(\"partitionA\")\n    .build();\n\ndeleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 2}\n","res = await client.delete({\n    collection_name: \"quick_setup\",\n    ids: [18, 19],\n    partition_name: \"partitionA\"\n})\n\nconsole.log(res.delete_cnt)\n\n// Output\n// \n// 2\n// \n","res = client.delete(\ncollection_name='quick_setup',\npartition_name='partitionA',\nfilter='color like \"blue%\"'\n)\n\nprint(\"Entities deleted from partitionA: \", res['delete_count'])\n\n# Output:\n# Entities deleted from partitionA:  3\n","deleteReq = DeleteReq.builder()\n    .collectionName(\"quick_setup\")\n    .filter('color like \"blue%\"')\n    .partitionName(\"partitionA\")\n    .build();\n\ndeleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 3}\n","res = await client.delete({\ncollection_name: \"quick_setup\",\npartition_name: \"partitionA\",\nfilter: 'color like \"blue%\"'\n})\n\nconsole.log(\"Entities deleted from partitionA: \" + res.delete_cnt)\n\n// Output:\n// Entities deleted from partitionA: 3\n"],"headingContent":"","anchorList":[{"label":"Inserção, inserção ascendente e eliminação","href":"Insert-Upsert--Delete","type":1,"isActive":false},{"label":"Antes de começar","href":"Before-you-start","type":2,"isActive":false},{"label":"Síntese","href":"Overview","type":2,"isActive":false},{"label":"Preparações","href":"Preparations","type":2,"isActive":false},{"label":"Inserir entidades","href":"Insert-entities","type":2,"isActive":false},{"label":"Upsert entidades","href":"Upsert-entities","type":2,"isActive":false},{"label":"Excluir entidades","href":"Delete-entities","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["from pymilvus import MilvusClient\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n    uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n    collection_name=\"quick_setup\",\n    dimension=5,\n    metric_type=\"IP\"\n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n    .uri(CLUSTER_ENDPOINT)\n    .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n    .collectionName(\"quick_setup\")\n    .dimension(5)\n    .metricType(\"IP\")\n    .build();\n\nclient.createCollection(quickSetupReq);\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n    collection_name: \"quick_setup\",\n    dimension: 5,\n    metric_type: \"IP\"\n});  \n","# 3. Insert some data\ndata=[\n    {\"id\": 0, \"vector\": [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592], \"color\": \"pink_8682\"},\n    {\"id\": 1, \"vector\": [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104], \"color\": \"red_7025\"},\n    {\"id\": 2, \"vector\": [0.43742130801983836, -0.5597502546264526, 0.6457887650909682, 0.7894058910881185, 0.20785793220625592], \"color\": \"orange_6781\"},\n    {\"id\": 3, \"vector\": [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345], \"color\": \"pink_9298\"},\n    {\"id\": 4, \"vector\": [0.4452349528804562, -0.8757026943054742, 0.8220779437047674, 0.46406290649483184, 0.30337481143159106], \"color\": \"red_4794\"},\n    {\"id\": 5, \"vector\": [0.985825131989184, -0.8144651566660419, 0.6299267002202009, 0.1206906911183383, -0.1446277761879955], \"color\": \"yellow_4222\"},\n    {\"id\": 6, \"vector\": [0.8371977790571115, -0.015764369584852833, -0.31062937026679327, -0.562666951622192, -0.8984947637863987], \"color\": \"red_9392\"},\n    {\"id\": 7, \"vector\": [-0.33445148015177995, -0.2567135004164067, 0.8987539745369246, 0.9402995886420709, 0.5378064918413052], \"color\": \"grey_8510\"},\n    {\"id\": 8, \"vector\": [0.39524717779832685, 0.4000257286739164, -0.5890507376891594, -0.8650502298996872, -0.6140360785406336], \"color\": \"white_9381\"},\n    {\"id\": 9, \"vector\": [0.5718280481994695, 0.24070317428066512, -0.3737913482606834, -0.06726932177492717, -0.6980531615588608], \"color\": \"purple_4976\"}\n]\n\nres = client.insert(\n    collection_name=\"quick_setup\",\n    data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"insert_count\": 10,\n#     \"ids\": [\n#         0,\n#         1,\n#         2,\n#         3,\n#         4,\n#         5,\n#         6,\n#         7,\n#         8,\n#         9\n#     ]\n# }\n","import java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp;\n\n// 3. 
Insert some data\nList data = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 0L, \"vector\", Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f), \"color\", \"pink_8682\")),\n    new JSONObject(Map.of(\"id\", 1L, \"vector\", Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f), \"color\", \"red_7025\")),\n    new JSONObject(Map.of(\"id\", 2L, \"vector\", Arrays.asList(0.43742130801983836f, -0.5597502546264526f, 0.6457887650909682f, 0.7894058910881185f, 0.20785793220625592f), \"color\", \"orange_6781\")),\n    new JSONObject(Map.of(\"id\", 3L, \"vector\", Arrays.asList(0.3172005263489739f, 0.9719044792798428f, -0.36981146090600725f, -0.4860894583077995f, 0.95791889146345f), \"color\", \"pink_9298\")),\n    new JSONObject(Map.of(\"id\", 4L, \"vector\", Arrays.asList(0.4452349528804562f, -0.8757026943054742f, 0.8220779437047674f, 0.46406290649483184f, 0.30337481143159106f), \"color\", \"red_4794\")),\n    new JSONObject(Map.of(\"id\", 5L, \"vector\", Arrays.asList(0.985825131989184f, -0.8144651566660419f, 0.6299267002202009f, 0.1206906911183383f, -0.1446277761879955f), \"color\", \"yellow_4222\")),\n    new JSONObject(Map.of(\"id\", 6L, \"vector\", Arrays.asList(0.8371977790571115f, -0.015764369584852833f, -0.31062937026679327f, -0.562666951622192f, -0.8984947637863987f), \"color\", \"red_9392\")),\n    new JSONObject(Map.of(\"id\", 7L, \"vector\", Arrays.asList(-0.33445148015177995f, -0.2567135004164067f, 0.8987539745369246f, 0.9402995886420709f, 0.5378064918413052f), \"color\", \"grey_8510\")),\n    new JSONObject(Map.of(\"id\", 8L, \"vector\", Arrays.asList(0.39524717779832685f, 0.4000257286739164f, -0.5890507376891594f, -0.8650502298996872f, -0.6140360785406336f), \"color\", \"white_9381\")),\n    new JSONObject(Map.of(\"id\", 9L, \"vector\", Arrays.asList(0.5718280481994695f, 0.24070317428066512f, -0.3737913482606834f, -0.06726932177492717f, -0.6980531615588608f), \"color\", \"purple_4976\"))\n);\n\nInsertReq insertReq = InsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 10}\n","// 3. 
Insert some data\n\nvar data = [\n    {id: 0, vector: [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592], color: \"pink_8682\"},\n    {id: 1, vector: [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104], color: \"red_7025\"},\n    {id: 2, vector: [0.43742130801983836, -0.5597502546264526, 0.6457887650909682, 0.7894058910881185, 0.20785793220625592], color: \"orange_6781\"},\n    {id: 3, vector: [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345], color: \"pink_9298\"},\n    {id: 4, vector: [0.4452349528804562, -0.8757026943054742, 0.8220779437047674, 0.46406290649483184, 0.30337481143159106], color: \"red_4794\"},\n    {id: 5, vector: [0.985825131989184, -0.8144651566660419, 0.6299267002202009, 0.1206906911183383, -0.1446277761879955], color: \"yellow_4222\"},\n    {id: 6, vector: [0.8371977790571115, -0.015764369584852833, -0.31062937026679327, -0.562666951622192, -0.8984947637863987], color: \"red_9392\"},\n    {id: 7, vector: [-0.33445148015177995, -0.2567135004164067, 0.8987539745369246, 0.9402995886420709, 0.5378064918413052], color: \"grey_8510\"},\n    {id: 8, vector: [0.39524717779832685, 0.4000257286739164, -0.5890507376891594, -0.8650502298996872, -0.6140360785406336], color: \"white_9381\"},\n    {id: 9, vector: [0.5718280481994695, 0.24070317428066512, -0.3737913482606834, -0.06726932177492717, -0.6980531615588608], color: \"purple_4976\"}        \n]\n\nvar res = await client.insert({\n    collection_name: \"quick_setup\",\n    data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 10\n// \n","# 4. Insert some more data into a specific partition\ndata=[\n    {\"id\": 10, \"vector\": [-0.5570353903748935, -0.8997887893201304, -0.7123782431855732, -0.6298990746450119, 0.6699215060604258], \"color\": \"red_1202\"},\n    {\"id\": 11, \"vector\": [0.6319019033373907, 0.6821488267878275, 0.8552303045704168, 0.36929791364943054, -0.14152860714878068], \"color\": \"blue_4150\"},\n    {\"id\": 12, \"vector\": [0.9483947484855766, -0.32294203351925344, 0.9759290319978025, 0.8262982148666174, -0.8351194181285713], \"color\": \"orange_4590\"},\n    {\"id\": 13, \"vector\": [-0.5449109892498731, 0.043511240563786524, -0.25105249484790804, -0.012030655265886425, -0.0010987671273892108], \"color\": \"pink_9619\"},\n    {\"id\": 14, \"vector\": [0.6603339372951424, -0.10866551787442225, -0.9435597754324891, 0.8230244263466688, -0.7986720938400362], \"color\": \"orange_4863\"},\n    {\"id\": 15, \"vector\": [-0.8825129181091456, -0.9204557711667729, -0.935350065513425, 0.5484069690287079, 0.24448151140671204], \"color\": \"orange_7984\"},\n    {\"id\": 16, \"vector\": [0.6285586391568163, 0.5389064528263487, -0.3163366239905099, 0.22036279378888013, 0.15077052220816167], \"color\": \"blue_9010\"},\n    {\"id\": 17, \"vector\": [-0.20151825016059233, -0.905239387635804, 0.6749305353372479, -0.7324272081377843, -0.33007998971889263], \"color\": \"blue_4521\"},\n    {\"id\": 18, \"vector\": [0.2432286610792349, 0.01785636564206139, -0.651356982731391, -0.35848148851027895, -0.7387383128324057], \"color\": \"orange_2529\"},\n    {\"id\": 19, \"vector\": [0.055512329053363674, 0.7100266349039421, 0.4956956543575197, 0.24541352586717702, 0.4209030729923515], \"color\": \"red_9437\"}\n]\n\nclient.create_partition(\n    collection_name=\"quick_setup\",\n    partition_name=\"partitionA\"\n)\n\nres = client.insert(\n    
collection_name=\"quick_setup\",\n    data=data,\n    partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"insert_count\": 10,\n#     \"ids\": [\n#         10,\n#         11,\n#         12,\n#         13,\n#         14,\n#         15,\n#         16,\n#         17,\n#         18,\n#         19\n#     ]\n# }\n","// 4. Insert some more data into a specific partition\ndata = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 10L, \"vector\", Arrays.asList(-0.5570353903748935f, -0.8997887893201304f, -0.7123782431855732f, -0.6298990746450119f, 0.6699215060604258f), \"color\", \"red_1202\")),\n    new JSONObject(Map.of(\"id\", 11L, \"vector\", Arrays.asList(0.6319019033373907f, 0.6821488267878275f, 0.8552303045704168f, 0.36929791364943054f, -0.14152860714878068f), \"color\", \"blue_4150\")),\n    new JSONObject(Map.of(\"id\", 12L, \"vector\", Arrays.asList(0.9483947484855766f, -0.32294203351925344f, 0.9759290319978025f, 0.8262982148666174f, -0.8351194181285713f), \"color\", \"orange_4590\")),\n    new JSONObject(Map.of(\"id\", 13L, \"vector\", Arrays.asList(-0.5449109892498731f, 0.043511240563786524f, -0.25105249484790804f, -0.012030655265886425f, -0.0010987671273892108f), \"color\", \"pink_9619\")),\n    new JSONObject(Map.of(\"id\", 14L, \"vector\", Arrays.asList(0.6603339372951424f, -0.10866551787442225f, -0.9435597754324891f, 0.8230244263466688f, -0.7986720938400362f), \"color\", \"orange_4863\")),\n    new JSONObject(Map.of(\"id\", 15L, \"vector\", Arrays.asList(-0.8825129181091456f, -0.9204557711667729f, -0.935350065513425f, 0.5484069690287079f, 0.24448151140671204f), \"color\", \"orange_7984\")),\n    new JSONObject(Map.of(\"id\", 16L, \"vector\", Arrays.asList(0.6285586391568163f, 0.5389064528263487f, -0.3163366239905099f, 0.22036279378888013f, 0.15077052220816167f), \"color\", \"blue_9010\")),\n    new JSONObject(Map.of(\"id\", 17L, \"vector\", Arrays.asList(-0.20151825016059233f, -0.905239387635804f, 0.6749305353372479f, -0.7324272081377843f, -0.33007998971889263f), \"color\", \"blue_4521\")),\n    new JSONObject(Map.of(\"id\", 18L, \"vector\", Arrays.asList(0.2432286610792349f, 0.01785636564206139f, -0.651356982731391f, -0.35848148851027895f, -0.7387383128324057f), \"color\", \"orange_2529\")),\n    new JSONObject(Map.of(\"id\", 19L, \"vector\", Arrays.asList(0.055512329053363674f, 0.7100266349039421f, 0.4956956543575197f, 0.24541352586717702f, 0.4209030729923515f), \"color\", \"red_9437\"))\n);\n\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n    .collectionName(\"quick_setup\")\n    .partitionName(\"partitionA\")\n    .build();\n\nclient.createPartition(createPartitionReq);\n\ninsertReq = InsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .partitionName(\"partitionA\")\n    .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 10}\n","// 4. 
Insert some more data into a specific partition\ndata = [\n    {id: 10, vector: [-0.5570353903748935, -0.8997887893201304, -0.7123782431855732, -0.6298990746450119, 0.6699215060604258], color: \"red_1202\"},\n    {id: 11, vector: [0.6319019033373907, 0.6821488267878275, 0.8552303045704168, 0.36929791364943054, -0.14152860714878068], color: \"blue_4150\"},\n    {id: 12, vector: [0.9483947484855766, -0.32294203351925344, 0.9759290319978025, 0.8262982148666174, -0.8351194181285713], color: \"orange_4590\"},\n    {id: 13, vector: [-0.5449109892498731, 0.043511240563786524, -0.25105249484790804, -0.012030655265886425, -0.0010987671273892108], color: \"pink_9619\"},\n    {id: 14, vector: [0.6603339372951424, -0.10866551787442225, -0.9435597754324891, 0.8230244263466688, -0.7986720938400362], color: \"orange_4863\"},\n    {id: 15, vector: [-0.8825129181091456, -0.9204557711667729, -0.935350065513425, 0.5484069690287079, 0.24448151140671204], color: \"orange_7984\"},\n    {id: 16, vector: [0.6285586391568163, 0.5389064528263487, -0.3163366239905099, 0.22036279378888013, 0.15077052220816167], color: \"blue_9010\"},\n    {id: 17, vector: [-0.20151825016059233, -0.905239387635804, 0.6749305353372479, -0.7324272081377843, -0.33007998971889263], color: \"blue_4521\"},\n    {id: 18, vector: [0.2432286610792349, 0.01785636564206139, -0.651356982731391, -0.35848148851027895, -0.7387383128324057], color: \"orange_2529\"},\n    {id: 19, vector: [0.055512329053363674, 0.7100266349039421, 0.4956956543575197, 0.24541352586717702, 0.4209030729923515], color: \"red_9437\"}\n]\n\nawait client.createPartition({\n    collection_name: \"quick_setup\",\n    partition_name: \"partitionA\"\n})\n\nres = await client.insert({\n    collection_name: \"quick_setup\",\n    data: data,\n    partition_name: \"partitionA\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 10\n// \n","# 5. 
Upsert some data\ndata=[\n    {\"id\": 0, \"vector\": [-0.619954382375778, 0.4479436794798608, -0.17493894838751745, -0.4248030059917294, -0.8648452746018911], \"color\": \"black_9898\"},\n    {\"id\": 1, \"vector\": [0.4762662251462588, -0.6942502138717026, -0.4490002642657902, -0.628696575798281, 0.9660395877041965], \"color\": \"red_7319\"},\n    {\"id\": 2, \"vector\": [-0.8864122635045097, 0.9260170474445351, 0.801326976181461, 0.6383943392381306, 0.7563037341572827], \"color\": \"white_6465\"},\n    {\"id\": 3, \"vector\": [0.14594326235891586, -0.3775407299900644, -0.3765479013078812, 0.20612075380355122, 0.4902678929632145], \"color\": \"orange_7580\"},\n    {\"id\": 4, \"vector\": [0.4548498669607359, -0.887610217681605, 0.5655081329910452, 0.19220509387904117, 0.016513983433433577], \"color\": \"red_3314\"},\n    {\"id\": 5, \"vector\": [0.11755001847051827, -0.7295149788999611, 0.2608115847524266, -0.1719167007897875, 0.7417611743754855], \"color\": \"black_9955\"},\n    {\"id\": 6, \"vector\": [0.9363032158314308, 0.030699901477745373, 0.8365910312319647, 0.7823840208444011, 0.2625222076909237], \"color\": \"yellow_2461\"},\n    {\"id\": 7, \"vector\": [0.0754823906014721, -0.6390658668265143, 0.5610517334334937, -0.8986261118798251, 0.9372056764266794], \"color\": \"white_5015\"},\n    {\"id\": 8, \"vector\": [-0.3038434006935904, 0.1279149203380523, 0.503958664270957, -0.2622661156746988, 0.7407627307791929], \"color\": \"purple_6414\"},\n    {\"id\": 9, \"vector\": [-0.7125086947677588, -0.8050968321012257, -0.32608864121785786, 0.3255654958645424, 0.26227968923834233], \"color\": \"brown_7231\"}\n]\n\nres = client.upsert(\n    collection_name='quick_setup',\n    data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"upsert_count\": 10\n# }\n","// 5. 
Upsert some data\ndata = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 0L, \"vector\", Arrays.asList(-0.619954382375778f, 0.4479436794798608f, -0.17493894838751745f, -0.4248030059917294f, -0.8648452746018911f), \"color\", \"black_9898\")),\n    new JSONObject(Map.of(\"id\", 1L, \"vector\", Arrays.asList(0.4762662251462588f, -0.6942502138717026f, -0.4490002642657902f, -0.628696575798281f, 0.9660395877041965f), \"color\", \"red_7319\")),\n    new JSONObject(Map.of(\"id\", 2L, \"vector\", Arrays.asList(-0.8864122635045097f, 0.9260170474445351f, 0.801326976181461f, 0.6383943392381306f, 0.7563037341572827f), \"color\", \"white_6465\")),\n    new JSONObject(Map.of(\"id\", 3L, \"vector\", Arrays.asList(0.14594326235891586f, -0.3775407299900644f, -0.3765479013078812f, 0.20612075380355122f, 0.4902678929632145f), \"color\", \"orange_7580\")),\n    new JSONObject(Map.of(\"id\", 4L, \"vector\", Arrays.asList(0.4548498669607359f, -0.887610217681605f, 0.5655081329910452f, 0.19220509387904117f, 0.016513983433433577f), \"color\", \"red_3314\")),\n    new JSONObject(Map.of(\"id\", 5L, \"vector\", Arrays.asList(0.11755001847051827f, -0.7295149788999611f, 0.2608115847524266f, -0.1719167007897875f, 0.7417611743754855f), \"color\", \"black_9955\")),\n    new JSONObject(Map.of(\"id\", 6L, \"vector\", Arrays.asList(0.9363032158314308f, 0.030699901477745373f, 0.8365910312319647f, 0.7823840208444011f, 0.2625222076909237f), \"color\", \"yellow_2461\")),\n    new JSONObject(Map.of(\"id\", 7L, \"vector\", Arrays.asList(0.0754823906014721f, -0.6390658668265143f, 0.5610517334334937f, -0.8986261118798251f, 0.9372056764266794f), \"color\", \"white_5015\")),\n    new JSONObject(Map.of(\"id\", 8L, \"vector\", Arrays.asList(-0.3038434006935904f, 0.1279149203380523f, 0.503958664270957f, -0.2622661156746988f, 0.7407627307791929f), \"color\", \"purple_6414\")),\n    new JSONObject(Map.of(\"id\", 9L, \"vector\", Arrays.asList(-0.7125086947677588f, -0.8050968321012257f, -0.32608864121785786f, 0.3255654958645424f, 0.26227968923834233f), \"color\", \"brown_7231\"))\n);\n\nUpsertReq upsertReq = UpsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .build();\n\nUpsertResp upsertResp = client.upsert(upsertReq);\n\nSystem.out.println(JSONObject.toJSON(upsertResp));\n\n// Output:\n// {\"upsertCnt\": 10}\n","// 5. 
Upsert some data\ndata = [\n    {id: 0, vector: [-0.619954382375778, 0.4479436794798608, -0.17493894838751745, -0.4248030059917294, -0.8648452746018911], color: \"black_9898\"},\n    {id: 1, vector: [0.4762662251462588, -0.6942502138717026, -0.4490002642657902, -0.628696575798281, 0.9660395877041965], color: \"red_7319\"},\n    {id: 2, vector: [-0.8864122635045097, 0.9260170474445351, 0.801326976181461, 0.6383943392381306, 0.7563037341572827], color: \"white_6465\"},\n    {id: 3, vector: [0.14594326235891586, -0.3775407299900644, -0.3765479013078812, 0.20612075380355122, 0.4902678929632145], color: \"orange_7580\"},\n    {id: 4, vector: [0.4548498669607359, -0.887610217681605, 0.5655081329910452, 0.19220509387904117, 0.016513983433433577], color: \"red_3314\"},\n    {id: 5, vector: [0.11755001847051827, -0.7295149788999611, 0.2608115847524266, -0.1719167007897875, 0.7417611743754855], color: \"black_9955\"},\n    {id: 6, vector: [0.9363032158314308, 0.030699901477745373, 0.8365910312319647, 0.7823840208444011, 0.2625222076909237], color: \"yellow_2461\"},\n    {id: 7, vector: [0.0754823906014721, -0.6390658668265143, 0.5610517334334937, -0.8986261118798251, 0.9372056764266794], color: \"white_5015\"},\n    {id: 8, vector: [-0.3038434006935904, 0.1279149203380523, 0.503958664270957, -0.2622661156746988, 0.7407627307791929], color: \"purple_6414\"},\n    {id: 9, vector: [-0.7125086947677588, -0.8050968321012257, -0.32608864121785786, 0.3255654958645424, 0.26227968923834233], color: \"brown_7231\"}\n]\n\nres = await client.upsert({\n    collection_name: \"quick_setup\",\n    data: data,\n})\n\nconsole.log(res.upsert_cnt)\n\n// Output\n// \n// 10\n// \n","# 6. Upsert data in partitions\ndata=[\n    {\"id\": 10, \"vector\": [0.06998888224297328, 0.8582816610326578, -0.9657938677934292, 0.6527905683627726, -0.8668460657158576], \"color\": \"black_3651\"},\n    {\"id\": 11, \"vector\": [0.6060703043917468, -0.3765080534566074, -0.7710758854987239, 0.36993888322346136, 0.5507513364206531], \"color\": \"grey_2049\"},\n    {\"id\": 12, \"vector\": [-0.9041813104515337, -0.9610546012461163, 0.20033003106083358, 0.11842506351635174, 0.8327356724591011], \"color\": \"blue_6168\"},\n    {\"id\": 13, \"vector\": [0.3202914977909075, -0.7279137773695252, -0.04747830871620273, 0.8266053056909548, 0.8277957187455489], \"color\": \"blue_1672\"},\n    {\"id\": 14, \"vector\": [0.2975811497890859, 0.2946936202691086, 0.5399463833894609, 0.8385334966677529, -0.4450543984655133], \"color\": \"pink_1601\"},\n    {\"id\": 15, \"vector\": [-0.04697464305600074, -0.08509022265734134, 0.9067184632552001, -0.2281912685064822, -0.9747503428652762], \"color\": \"yellow_9925\"},\n    {\"id\": 16, \"vector\": [-0.9363075919673911, -0.8153981031085669, 0.7943039120490902, -0.2093886809842529, 0.0771191335807897], \"color\": \"orange_9872\"},\n    {\"id\": 17, \"vector\": [-0.050451522820639916, 0.18931572752321935, 0.7522886192190488, -0.9071793089474034, 0.6032647330692296], \"color\": \"red_6450\"},\n    {\"id\": 18, \"vector\": [-0.9181544231141592, 0.6700755998126806, -0.014174674636136642, 0.6325780463623432, -0.49662222164032976], \"color\": \"purple_7392\"},\n    {\"id\": 19, \"vector\": [0.11426945899602536, 0.6089190684002581, -0.5842735738352236, 0.057050610092692855, -0.035163433018196244], \"color\": \"pink_4996\"}\n]\n\nres = client.upsert(\n    collection_name=\"quick_setup\",\n    data=data,\n    partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"upsert_count\": 10\n# }\n","import 
io.milvus.v2.service.vector.request.UpsertReq;\nimport io.milvus.v2.service.vector.response.UpsertResp;\n\n// 6. Upsert data in parition\n\ndata = Arrays.asList(\n    new JSONObject(Map.of(\"id\", 10L, \"vector\", Arrays.asList(0.06998888224297328f, 0.8582816610326578f, -0.9657938677934292f, 0.6527905683627726f, -0.8668460657158576f), \"color\", \"black_3651\")),\n    new JSONObject(Map.of(\"id\", 11L, \"vector\", Arrays.asList(0.6060703043917468f, -0.3765080534566074f, -0.7710758854987239f, 0.36993888322346136f, 0.5507513364206531f), \"color\", \"grey_2049\")),\n    new JSONObject(Map.of(\"id\", 12L, \"vector\", Arrays.asList(-0.9041813104515337f, -0.9610546012461163f, 0.20033003106083358f, 0.11842506351635174f, 0.8327356724591011f), \"color\", \"blue_6168\")),\n    new JSONObject(Map.of(\"id\", 13L, \"vector\", Arrays.asList(0.3202914977909075f, -0.7279137773695252f, -0.04747830871620273f, 0.8266053056909548f, 0.8277957187455489f), \"color\", \"blue_1672\")),\n    new JSONObject(Map.of(\"id\", 14L, \"vector\", Arrays.asList(0.2975811497890859f, 0.2946936202691086f, 0.5399463833894609f, 0.8385334966677529f, -0.4450543984655133f), \"color\", \"pink_1601\")),\n    new JSONObject(Map.of(\"id\", 15L, \"vector\", Arrays.asList(-0.04697464305600074f, -0.08509022265734134f, 0.9067184632552001f, -0.2281912685064822f, -0.9747503428652762f), \"color\", \"yellow_9925\")),\n    new JSONObject(Map.of(\"id\", 16L, \"vector\", Arrays.asList(-0.9363075919673911f, -0.8153981031085669f, 0.7943039120490902f, -0.2093886809842529f, 0.0771191335807897f), \"color\", \"orange_9872\")),\n    new JSONObject(Map.of(\"id\", 17L, \"vector\", Arrays.asList(-0.050451522820639916f, 0.18931572752321935f, 0.7522886192190488f, -0.9071793089474034f, 0.6032647330692296f), \"color\", \"red_6450\")),\n    new JSONObject(Map.of(\"id\", 18L, \"vector\", Arrays.asList(-0.9181544231141592f, 0.6700755998126806f, -0.014174674636136642f, 0.6325780463623432f, -0.49662222164032976f), \"color\", \"purple_7392\")),\n    new JSONObject(Map.of(\"id\", 19L, \"vector\", Arrays.asList(0.11426945899602536f, 0.6089190684002581f, -0.5842735738352236f, 0.057050610092692855f, -0.035163433018196244f), \"color\", \"pink_4996\"))\n);\n\nupsertReq = UpsertReq.builder()\n    .collectionName(\"quick_setup\")\n    .data(data)\n    .partitionName(\"partitionA\")\n    .build();\n\nupsertResp = client.upsert(upsertReq);\n\nSystem.out.println(JSONObject.toJSON(upsertResp));\n\n// Output:\n// {\"upsertCnt\": 10}\n","// 6. 
Upsert data in partitions\ndata = [\n    {id: 10, vector: [0.06998888224297328, 0.8582816610326578, -0.9657938677934292, 0.6527905683627726, -0.8668460657158576], color: \"black_3651\"},\n    {id: 11, vector: [0.6060703043917468, -0.3765080534566074, -0.7710758854987239, 0.36993888322346136, 0.5507513364206531], color: \"grey_2049\"},\n    {id: 12, vector: [-0.9041813104515337, -0.9610546012461163, 0.20033003106083358, 0.11842506351635174, 0.8327356724591011], color: \"blue_6168\"},\n    {id: 13, vector: [0.3202914977909075, -0.7279137773695252, -0.04747830871620273, 0.8266053056909548, 0.8277957187455489], color: \"blue_1672\"},\n    {id: 14, vector: [0.2975811497890859, 0.2946936202691086, 0.5399463833894609, 0.8385334966677529, -0.4450543984655133], color: \"pink_1601\"},\n    {id: 15, vector: [-0.04697464305600074, -0.08509022265734134, 0.9067184632552001, -0.2281912685064822, -0.9747503428652762], color: \"yellow_9925\"},\n    {id: 16, vector: [-0.9363075919673911, -0.8153981031085669, 0.7943039120490902, -0.2093886809842529, 0.0771191335807897], color: \"orange_9872\"},\n    {id: 17, vector: [-0.050451522820639916, 0.18931572752321935, 0.7522886192190488, -0.9071793089474034, 0.6032647330692296], color: \"red_6450\"},\n    {id: 18, vector: [-0.9181544231141592, 0.6700755998126806, -0.014174674636136642, 0.6325780463623432, -0.49662222164032976], color: \"purple_7392\"},\n    {id: 19, vector: [0.11426945899602536, 0.6089190684002581, -0.5842735738352236, 0.057050610092692855, -0.035163433018196244], color: \"pink_4996\"}\n]\n\nres = await client.upsert({\n    collection_name: \"quick_setup\",\n    data: data,\n    partition_name: \"partitionA\"\n})\n\nconsole.log(res.upsert_cnt)\n\n// Output\n// \n// 10\n// \n","# 7. Delete entities\nres = client.delete(\n    collection_name=\"quick_setup\",\n    filter=\"id in [4,5,6]\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"delete_count\": 3\n# }\n","import io.milvus.v2.service.vector.request.DeleteReq;\nimport io.milvus.v2.service.vector.response.DeleteResp;\n\n\n// 7. Delete entities\n\nDeleteReq deleteReq = DeleteReq.builder()\n    .collectionName(\"quick_setup\")\n    .filter(\"id in [4, 5, 6]\")\n    .build();\n\nDeleteResp deleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 3}\n","// 7. 
Delete entities\nres = await client.delete({\n    collection_name: \"quick_setup\",\n    filter: \"id in [4,5,6]\"\n})\n\nconsole.log(res.delete_cnt)\n\n// Output\n// \n// 3\n// \n","res = client.delete(\n    collection_name=\"quick_setup\",\n    ids=[18, 19],\n    partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"delete_count\": 2\n# }\n","deleteReq = DeleteReq.builder()\n    .collectionName(\"quick_setup\")\n    .ids(Arrays.asList(18L, 19L))\n    .partitionName(\"partitionA\")\n    .build();\n\ndeleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 2}\n","res = await client.delete({\n    collection_name: \"quick_setup\",\n    ids: [18, 19],\n    partition_name: \"partitionA\"\n})\n\nconsole.log(res.delete_cnt)\n\n// Output\n// \n// 2\n// \n","res = client.delete(\ncollection_name='quick_setup',\npartition_name='partitionA',\nfilter='color like \"blue%\"'\n)\n\nprint(\"Entities deleted from partitionA: \", res['delete_count'])\n\n# Output:\n# Entities deleted from partitionA:  3\n","deleteReq = DeleteReq.builder()\n    .collectionName(\"quick_setup\")\n    .filter('color like \"blue%\"')\n    .partitionName(\"partitionA\")\n    .build();\n\ndeleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 3}\n","res = await client.delete({\ncollection_name: \"quick_setup\",\npartition_name: \"partitionA\",\nfilter: 'color like \"blue%\"'\n})\n\nconsole.log(\"Entities deleted from partitionA: \" + res.delete_cnt)\n\n// Output:\n// Entities deleted from partitionA: 3\n"],"headingContent":"Insert, Upsert & Delete","anchorList":[{"label":"Inserção, inserção ascendente e eliminação","href":"Insert-Upsert--Delete","type":1,"isActive":false},{"label":"Antes de começar","href":"Before-you-start","type":2,"isActive":false},{"label":"Síntese","href":"Overview","type":2,"isActive":false},{"label":"Preparações","href":"Preparations","type":2,"isActive":false},{"label":"Inserir entidades","href":"Insert-entities","type":2,"isActive":false},{"label":"Upsert entidades","href":"Upsert-entities","type":2,"isActive":false},{"label":"Excluir entidades","href":"Delete-entities","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/pt/userGuide/insert-update-delete.md b/localization/v2.4.x/site/pt/userGuide/insert-update-delete.md
    index a3e6bbb15..81c21b525 100644
    --- a/localization/v2.4.x/site/pt/userGuide/insert-update-delete.md
    +++ b/localization/v2.4.x/site/pt/userGuide/insert-update-delete.md
    @@ -3,7 +3,7 @@ id: insert-update-delete.md
     summary: >-
       Este guia orienta-o nas operações de manipulação de dados numa coleção,
       incluindo a inserção, a inserção ascendente e a eliminação.
    -title: 'Inserir, Upsert e Apagar'
    +title: 'Inserção, inserção ascendente e eliminação'
     ---
     

Insert, Upsert & Delete

You can create a collection in either of the following ways:

• Quick setup

  In this way, you can create a collection by simply giving it a name and specifying the number of dimensions of the vector embeddings to be stored in it. For details, refer to Quick setup.

• Custom setup

  Instead of letting Milvus decide almost everything for your collection, you can define the collection's schema and index parameters yourself. For details, refer to Custom setup.

@@ -222,7 +222,7 @@ $ curl -X POST "http://

• auto_id

  Enabling this setting ensures that the primary key auto-increments; there is no need to supply primary keys manually during data insertion.

  • enable_dynamic_field

    When enabled, all fields in the data to be inserted, excluding id and vector, are treated as dynamic fields. These additional fields are saved as key-value pairs in a special field named $meta. This feature allows extra fields to be included during data insertion, as sketched below.
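To make these two settings concrete, here is a minimal Python sketch; the collection name my_dynamic_setup is a hypothetical placeholder, and in quick setup mode the dynamic field is enabled by default, so the extra color key ends up in $meta:

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")

# Quick setup with auto-generated primary keys.
client.create_collection(
    collection_name="my_dynamic_setup",  # hypothetical name
    dimension=5,
    metric_type="IP",
    auto_id=True
)

# No "id" is supplied because auto_id is on; "color" is not part of
# the schema, so it is saved as a key-value pair in $meta.
res = client.insert(
    collection_name="my_dynamic_setup",
    data=[{"vector": [0.1, 0.2, 0.3, 0.4, 0.5], "color": "pink_8682"}]
)

print(res)  # expected shape: {"insert_count": 1, "ids": [...]}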

The collection, automatically indexed and loaded by the code above, is ready for immediate data insertion.

Custom setup

Instead of letting Milvus decide almost everything for your collection, you can determine the schema and index parameters of the collection yourself.

    @@ -433,7 +433,7 @@ schema.addField(AddFieldReq.builder()

To set up the index parameters, use prepare_index_params() to prepare them and add_index() to add the index.

To set up the index parameters, use IndexParam.

To set up the index parameters, use createIndex().
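In Python, that flow looks roughly as follows, assuming the customized_setup collection created earlier and reusing the same client:

# Prepare the index parameters and add an index on the vector field
index_params = MilvusClient.prepare_index_params()

index_params.add_index(
    field_name="vector",
    metric_type="COSINE",
    index_type="IVF_FLAT",
    index_name="vector_index",
    params={"nlist": 128}
)

# Create the index file
client.create_index(
    collection_name="customized_setup",
    index_params=index_params
)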

@@ -515,7 +515,7 @@ indexParams.add(indexParamForVectorField);

field_name: The name of the target field to which this object applies.

index_type

@@ -971,7 +971,7 @@ $ curl -X POST "http://fields.isPrimary

fields.isPrimary: Whether the current field is the primary field. Setting this to True makes the current field the primary field.

fields.elementTypeParams

@@ -1466,7 +1466,7 @@ System.out.println(listCollectionsRes.getCollectionNames());
Load & release a collection

Load a collection partially (Public Preview)

This feature is currently in public preview. The API and functionality may change in the future.

Upon receiving your load request, Milvus loads all the indexes of the vector fields and all the data of the scalar fields into memory. If some fields are not involved in searches and queries, you can exclude them from loading to reduce memory usage and improve search performance.
# 7. Load the collection
client.load_collection(
    collection_name="customized_setup_2",
    load_fields=["my_id", "my_vector"], # Load only the specified fields
    skip_load_dynamic_field=True # Skip loading the dynamic field
)

res = client.get_load_state(
    collection_name="customized_setup_2"
)

print(res)

# Output
#
# {
#     "state": "<LoadState: Loaded>"
# }

Note that only the fields listed in load_fields can be used as filtering conditions and output fields in searches and queries. You should always include the primary key in the list. Field names excluded from loading will not be available for filtering or output.

You can use skip_load_dynamic_field=True to skip loading the dynamic field. Milvus treats the dynamic field as a single field, so all the keys in the dynamic field will be included or excluded together.

Release a collection

To release a collection, use the release_collection() method, specifying the collection name.
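A minimal sketch, reusing the customized_setup_2 collection loaded above:

# Release the collection to free memory
client.release_collection(collection_name="customized_setup_2")

# Verify the load state
res = client.get_load_state(collection_name="customized_setup_2")
print(res)
# Expected: {'state': '<LoadState: NotLoad>'}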

@@ -2042,7 +2069,7 @@ res = await client.

Reassign aliases

To reassign aliases to other collections, use the alter_alias() method, specifying the collection name and the alias.
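A minimal sketch, where the alias name "bob" is a hypothetical placeholder:

# Point the existing alias "bob" at another collection
client.alter_alias(
    collection_name="customized_setup_2",
    alias="bob"
)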

@@ -2323,7 +2350,7 @@ collection.set_properties(

Set MMAP

Configure the memory mapping (MMAP) property for the collection, which determines whether the data is memory-mapped to improve query performance. For more information, refer to Configure memory mapping.

Before setting the MMAP property, release the collection first. Otherwise, an error will occur.
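A minimal sketch using the ORM-style Collection API, assuming a local instance and an existing quick_setup collection; the property key mmap.enabled follows the memory-mapping documentation:

from pymilvus import connections, Collection

connections.connect(uri="http://localhost:19530")

collection = Collection("quick_setup")

# The collection must be released before the MMAP property can be changed
collection.release()

# Enable memory mapping for this collection's data
collection.set_properties(properties={"mmap.enabled": True})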

    diff --git a/localization/v2.4.x/site/pt/userGuide/manage-indexes/index-vector-fields.json b/localization/v2.4.x/site/pt/userGuide/manage-indexes/index-vector-fields.json index 28be09f64..d2981c8c4 100644 --- a/localization/v2.4.x/site/pt/userGuide/manage-indexes/index-vector-fields.json +++ b/localization/v2.4.x/site/pt/userGuide/manage-indexes/index-vector-fields.json @@ -1 +1 @@ -{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create schema\n# 2.1. Create schema\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n)\n\n# 2.2. Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\n\n# 3. Create collection\nclient.create_collection(\n collection_name=\"customized_setup\", \n schema=schema, \n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder().fieldName(\"id\").dataType(DataType.Int64).isPrimaryKey(true).autoID(false).build());\nschema.addField(AddFieldReq.builder().fieldName(\"vector\").dataType(DataType.FloatVector).dimension(5).build());\n\n// 3 Create a collection without schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n.collectionName(\"customized_setup\")\n.collectionSchema(schema)\n.build();\n\nclient.createCollection(customizedSetupReq);\n","// 1. Set up a Milvus Client\nclient = new MilvusClient({address, token});\n\n// 2. Define fields for the collection\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n autoID: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n]\n\n// 3. Create a collection\nres = await client.createCollection({\n collection_name: \"customized_setup\",\n fields: fields,\n})\n\nconsole.log(res.error_code) \n\n// Output\n// \n// Success\n// \n","# 4.1. Set up the index parameters\nindex_params = MilvusClient.prepare_index_params()\n\n# 4.2. Add an index on the vector field.\nindex_params.add_index(\n field_name=\"vector\",\n metric_type=\"COSINE\",\n index_type=\"IVF_FLAT\",\n index_name=\"vector_index\",\n params={ \"nlist\": 128 }\n)\n\n# 4.3. 
Create an index file\nclient.create_index(\n collection_name=\"customized_setup\",\n index_params=index_params\n)\n","import io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.index.request.CreateIndexReq;\n\n// 4 Prepare index parameters\n\n// 4.2 Add an index for the vector field \"vector\"\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexName(\"vector_index\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.COSINE)\n .extraParams(Map.of(\"nlist\", 128))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n\n// 4.3 Crate an index file\nCreateIndexReq createIndexReq = CreateIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexParams(indexParams)\n .build();\n\nclient.createIndex(createIndexReq);\n","// 4. Set up index for the collection\n// 4.1. Set up the index parameters\nres = await client.createIndex({\n collection_name: \"customized_setup\",\n field_name: \"vector\",\n index_type: \"AUTOINDEX\",\n metric_type: \"COSINE\", \n index_name: \"vector_index\",\n params: { \"nlist\": 128 }\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","# 5. Describe index\nres = client.list_indexes(\n collection_name=\"customized_setup\"\n)\n\nprint(res)\n\n# Output\n#\n# [\n# \"vector_index\",\n# ]\n\nres = client.describe_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"index_type\": ,\n# \"metric_type\": \"COSINE\",\n# \"field_name\": \"vector\",\n# \"index_name\": \"vector_index\"\n# }\n","import io.milvus.v2.service.index.request.DescribeIndexReq;\nimport io.milvus.v2.service.index.response.DescribeIndexResp;\n\n// 5. Describe index\n// 5.1 List the index names\nListIndexesReq listIndexesReq = ListIndexesReq.builder()\n .collectionName(\"customized_setup\")\n .build();\n\nList indexNames = client.listIndexes(listIndexesReq);\n\nSystem.out.println(indexNames);\n\n// Output:\n// [\n// \"vector_index\"\n// ]\n\n// 5.2 Describe an index\nDescribeIndexReq describeIndexReq = DescribeIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nDescribeIndexResp describeIndexResp = client.describeIndex(describeIndexReq);\n\nSystem.out.println(JSONObject.toJSON(describeIndexResp));\n\n// Output:\n// {\n// \"metricType\": \"COSINE\",\n// \"indexType\": \"AUTOINDEX\",\n// \"fieldName\": \"vector\",\n// \"indexName\": \"vector_index\"\n// }\n","// 5. Describe the index\nres = await client.describeIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(JSON.stringify(res.index_descriptions, null, 2))\n\n// Output\n// \n// [\n// {\n// \"params\": [\n// {\n// \"key\": \"index_type\",\n// \"value\": \"AUTOINDEX\"\n// },\n// {\n// \"key\": \"metric_type\",\n// \"value\": \"COSINE\"\n// }\n// ],\n// \"index_name\": \"vector_index\",\n// \"indexID\": \"449007919953063141\",\n// \"field_name\": \"vector\",\n// \"indexed_rows\": \"0\",\n// \"total_rows\": \"0\",\n// \"state\": \"Finished\",\n// \"index_state_fail_reason\": \"\",\n// \"pending_index_rows\": \"0\"\n// }\n// ]\n// \n","# 6. Drop index\nclient.drop_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n","// 6. Drop index\n\nDropIndexReq dropIndexReq = DropIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nclient.dropIndex(dropIndexReq);\n","// 6. 
Drop the index\nres = await client.dropIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n"],"headingContent":"","anchorList":[{"label":"Indexar campos vectoriais","href":"Index-Vector-Fields","type":1,"isActive":false},{"label":"Visão geral","href":"Overview","type":2,"isActive":false},{"label":"Preparações","href":"Preparations","type":2,"isActive":false},{"label":"Indexar uma coleção","href":"Index-a-Collection","type":2,"isActive":false},{"label":"Verificar detalhes do índice","href":"Check-Index-Details","type":2,"isActive":false},{"label":"Eliminar um índice","href":"Drop-an-Index","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create schema\n# 2.1. Create schema\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n)\n\n# 2.2. Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\n\n# 3. Create collection\nclient.create_collection(\n collection_name=\"customized_setup\", \n schema=schema, \n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder().fieldName(\"id\").dataType(DataType.Int64).isPrimaryKey(true).autoID(false).build());\nschema.addField(AddFieldReq.builder().fieldName(\"vector\").dataType(DataType.FloatVector).dimension(5).build());\n\n// 3 Create a collection without schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n.collectionName(\"customized_setup\")\n.collectionSchema(schema)\n.build();\n\nclient.createCollection(customizedSetupReq);\n","// 1. Set up a Milvus Client\nclient = new MilvusClient({address, token});\n\n// 2. Define fields for the collection\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n autoID: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n]\n\n// 3. Create a collection\nres = await client.createCollection({\n collection_name: \"customized_setup\",\n fields: fields,\n})\n\nconsole.log(res.error_code) \n\n// Output\n// \n// Success\n// \n","# 4.1. Set up the index parameters\nindex_params = MilvusClient.prepare_index_params()\n\n# 4.2. Add an index on the vector field.\nindex_params.add_index(\n field_name=\"vector\",\n metric_type=\"COSINE\",\n index_type=\"IVF_FLAT\",\n index_name=\"vector_index\",\n params={ \"nlist\": 128 }\n)\n\n# 4.3. Create an index file\nclient.create_index(\n collection_name=\"customized_setup\",\n index_params=index_params,\n sync=False # Whether to wait for index creation to complete before returning. 
Defaults to True.\n)\n","import io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.index.request.CreateIndexReq;\n\n// 4 Prepare index parameters\n\n// 4.2 Add an index for the vector field \"vector\"\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexName(\"vector_index\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.COSINE)\n .extraParams(Map.of(\"nlist\", 128))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n\n// 4.3 Crate an index file\nCreateIndexReq createIndexReq = CreateIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexParams(indexParams)\n .build();\n\nclient.createIndex(createIndexReq);\n","// 4. Set up index for the collection\n// 4.1. Set up the index parameters\nres = await client.createIndex({\n collection_name: \"customized_setup\",\n field_name: \"vector\",\n index_type: \"AUTOINDEX\",\n metric_type: \"COSINE\", \n index_name: \"vector_index\",\n params: { \"nlist\": 128 }\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","# 5. Describe index\nres = client.list_indexes(\n collection_name=\"customized_setup\"\n)\n\nprint(res)\n\n# Output\n#\n# [\n# \"vector_index\",\n# ]\n\nres = client.describe_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"index_type\": ,\n# \"metric_type\": \"COSINE\",\n# \"field_name\": \"vector\",\n# \"index_name\": \"vector_index\"\n# }\n","import io.milvus.v2.service.index.request.DescribeIndexReq;\nimport io.milvus.v2.service.index.response.DescribeIndexResp;\n\n// 5. Describe index\n// 5.1 List the index names\nListIndexesReq listIndexesReq = ListIndexesReq.builder()\n .collectionName(\"customized_setup\")\n .build();\n\nList indexNames = client.listIndexes(listIndexesReq);\n\nSystem.out.println(indexNames);\n\n// Output:\n// [\n// \"vector_index\"\n// ]\n\n// 5.2 Describe an index\nDescribeIndexReq describeIndexReq = DescribeIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nDescribeIndexResp describeIndexResp = client.describeIndex(describeIndexReq);\n\nSystem.out.println(JSONObject.toJSON(describeIndexResp));\n\n// Output:\n// {\n// \"metricType\": \"COSINE\",\n// \"indexType\": \"AUTOINDEX\",\n// \"fieldName\": \"vector\",\n// \"indexName\": \"vector_index\"\n// }\n","// 5. Describe the index\nres = await client.describeIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(JSON.stringify(res.index_descriptions, null, 2))\n\n// Output\n// \n// [\n// {\n// \"params\": [\n// {\n// \"key\": \"index_type\",\n// \"value\": \"AUTOINDEX\"\n// },\n// {\n// \"key\": \"metric_type\",\n// \"value\": \"COSINE\"\n// }\n// ],\n// \"index_name\": \"vector_index\",\n// \"indexID\": \"449007919953063141\",\n// \"field_name\": \"vector\",\n// \"indexed_rows\": \"0\",\n// \"total_rows\": \"0\",\n// \"state\": \"Finished\",\n// \"index_state_fail_reason\": \"\",\n// \"pending_index_rows\": \"0\"\n// }\n// ]\n// \n","# 6. Drop index\nclient.drop_index(\n collection_name=\"customized_setup\",\n index_name=\"vector_index\"\n)\n","// 6. Drop index\n\nDropIndexReq dropIndexReq = DropIndexReq.builder()\n .collectionName(\"customized_setup\")\n .indexName(\"vector_index\")\n .build();\n\nclient.dropIndex(dropIndexReq);\n","// 6. 
Drop the index\nres = await client.dropIndex({\n collection_name: \"customized_setup\",\n index_name: \"vector_index\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n"],"headingContent":"Index Vector Fields","anchorList":[{"label":"Indexar campos vectoriais","href":"Index-Vector-Fields","type":1,"isActive":false},{"label":"Visão geral","href":"Overview","type":2,"isActive":false},{"label":"Preparações","href":"Preparations","type":2,"isActive":false},{"label":"Indexar uma coleção","href":"Index-a-Collection","type":2,"isActive":false},{"label":"Verificar detalhes do índice","href":"Check-Index-Details","type":2,"isActive":false},{"label":"Eliminar um índice","href":"Drop-an-Index","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/userGuide/manage-indexes/index-vector-fields.md b/localization/v2.4.x/site/pt/userGuide/manage-indexes/index-vector-fields.md index fbd25971a..09815d02f 100644 --- a/localization/v2.4.x/site/pt/userGuide/manage-indexes/index-vector-fields.md +++ b/localization/v2.4.x/site/pt/userGuide/manage-indexes/index-vector-fields.md @@ -4,7 +4,7 @@ order: 1 summary: >- Este guia orienta-o nas operações básicas de criação e gestão de índices em campos vectoriais de uma coleção. -title: Campos de vectores de índice +title: Indexar campos vectoriais ---

Index Vector Fields

    import io.milvus.v2.common.IndexParam;
@@ -331,6 +332,10 @@ res = await client.

index_params
An IndexParams object containing a list of IndexParam objects.

sync
Controls how the index is built in relation to the client's request. Valid values:
• True (default): The client waits until the index is fully built before returning. This means you will not receive a response until the process is complete.
• False: The client returns immediately after the request is received, and the index is built in the background. To find out whether index creation has completed, use the describe_index() method.

@@ -343,7 +348,7 @@ res = await client.fieldName
@@ -416,7 +421,7 @@ res = await client.
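A short sketch of the asynchronous path, shown in isolation and reusing the index_params prepared earlier:

# Return immediately and build the index in the background
client.create_index(
    collection_name="customized_setup",
    index_params=index_params,
    sync=False
)

# Later, inspect the index to see whether the build has completed
res = client.describe_index(
    collection_name="customized_setup",
    index_name="vector_index"
)
print(res)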

After you have created an index, you can check its details.

To check the details of an index, use list_indexes() to list the index names and describe_index() to get the index details.
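For example, reusing the client from the sketches above:

# List the index names, then fetch the details of one index
res = client.list_indexes(collection_name="customized_setup")
print(res)
# e.g. ['vector_index']

res = client.describe_index(
    collection_name="customized_setup",
    index_name="vector_index"
)
print(res)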

    diff --git a/localization/v2.4.x/site/pt/userGuide/manage-partitions.json b/localization/v2.4.x/site/pt/userGuide/manage-partitions.json index 3ad81fedf..89ba1b1aa 100644 --- a/localization/v2.4.x/site/pt/userGuide/manage-partitions.json +++ b/localization/v2.4.x/site/pt/userGuide/manage-partitions.json @@ -1 +1 @@ -{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n)\n\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .build();\n\nclient.createCollection(quickSetupReq);\n","const address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n}); \n","# 3. List partitions\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\"]\n","import io.milvus.v2.service.partition.request.ListPartitionsReq;\n\n// 3. List all partitions in the collection\nListPartitionsReq listPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nList partitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\"_default\"]\n","// 3. List partitions\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default' ]\n// \n","# 4. Create more partitions\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\", \"partitionB\"]\n","import io.milvus.v2.service.partition.request.CreatePartitionReq;\n\n// 4. Create more partitions\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\ncreatePartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\nlistPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\npartitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\n// \"_default\",\n// \"partitionA\",\n// \"partitionB\"\n// ]\n","// 4. 
Create more partitions\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default', 'partitionA', 'partitionB' ]\n// \n","# 5. Check whether a partition exists\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\nprint(res)\n\n# Output\n#\n# True\n\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionC\"\n)\nprint(res)\n\n# Output\n#\n# False\n","import io.milvus.v2.service.partition.request.HasPartitionReq;\n\n// 5. Check whether a partition exists\nHasPartitionReq hasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nboolean exists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// true\n\nhasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionC\")\n .build();\n\nexists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// false\n","// 5. Check whether a partition exists\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// true\n// \n\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionC\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// false\n// \n","# Release the collection\nclient.release_collection(collection_name=\"quick_setup\")\n\n# Check the load status\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionB\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.collection.request.ReleaseCollectionReq;\nimport io.milvus.v2.service.partition.request.LoadPartitionsReq;\nimport io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 6. 
Load a partition independantly\n// 6.1 Release the collection\nReleaseCollectionReq releaseCollectionReq = ReleaseCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nclient.releaseCollection(releaseCollectionReq);\n\n// 6.2 Load partitionA\nLoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\nThread.sleep(3000);\n\n// 6.3 Check the load status of the collection and its partitions\nGetLoadStateReq getLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 6. Load a partition indenpendantly\nawait client.releaseCollection({\n collection_name: \"quick_setup\"\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n\nawait client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nawait sleep(3000)\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n//\n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n 
.collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\", \"partitionB\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n","# 7. Release a partition\nclient.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 7. Release a partition\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 7. Release a partition\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n","client.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"_default\", \"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","# 8. 
Drop a partition\nclient.drop_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\"]\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"_default\", \"partitionA\", \"partitionB\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"_default\", \"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// {\n// status: {\n// error_code: 'Success',\n// reason: '',\n// code: 0,\n// retriable: false,\n// detail: ''\n// },\n// state: 'LoadStateNotLoad'\n// }\n// \n"],"headingContent":"","anchorList":[{"label":"Gerir partições","href":"Manage-Partitions","type":1,"isActive":false},{"label":"Visão geral","href":"Overview","type":2,"isActive":false},{"label":"Preparações","href":"Preparations","type":2,"isActive":false},{"label":"Listar partições","href":"List-Partitions","type":2,"isActive":false},{"label":"Criar partições","href":"Create-Partitions","type":2,"isActive":false},{"label":"Verificar a existência de uma partição específica","href":"Check-for-a-Specific-Partition","type":2,"isActive":false},{"label":"Carregar e liberar partições","href":"Load--Release-Partitions","type":2,"isActive":false},{"label":"Soltar partições","href":"Drop-Partitions","type":2,"isActive":false},{"label":"FAQ","href":"FAQ","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n)\n\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .build();\n\nclient.createCollection(quickSetupReq);\n","const address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n}); \n","# 3. List partitions\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\"]\n","import io.milvus.v2.service.partition.request.ListPartitionsReq;\n\n// 3. 
List all partitions in the collection\nListPartitionsReq listPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nList partitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\"_default\"]\n","// 3. List partitions\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default' ]\n// \n","# 4. Create more partitions\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\", \"partitionB\"]\n","import io.milvus.v2.service.partition.request.CreatePartitionReq;\n\n// 4. Create more partitions\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\ncreatePartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\nlistPartitionsReq = ListPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\npartitionNames = client.listPartitions(listPartitionsReq);\n\nSystem.out.println(partitionNames);\n\n// Output:\n// [\n// \"_default\",\n// \"partitionA\",\n// \"partitionB\"\n// ]\n","// 4. Create more partitions\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nres = await client.listPartitions({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.partition_names)\n\n// Output\n// \n// [ '_default', 'partitionA', 'partitionB' ]\n// \n","# 5. Check whether a partition exists\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\nprint(res)\n\n# Output\n#\n# True\n\nres = client.has_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionC\"\n)\nprint(res)\n\n# Output\n#\n# False\n","import io.milvus.v2.service.partition.request.HasPartitionReq;\n\n// 5. Check whether a partition exists\nHasPartitionReq hasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nboolean exists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// true\n\nhasPartitionReq = HasPartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionC\")\n .build();\n\nexists = client.hasPartition(hasPartitionReq);\n\nSystem.out.println(exists);\n\n// Output:\n// false\n","// 5. 
Check whether a partition exists\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// true\n// \n\nres = await client.hasPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionC\"\n})\n\nconsole.log(res.value)\n\n// Output\n// \n// false\n// \n","# Release the collection\nclient.release_collection(collection_name=\"quick_setup\")\n\n# Check the load status\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionB\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.collection.request.ReleaseCollectionReq;\nimport io.milvus.v2.service.partition.request.LoadPartitionsReq;\nimport io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 6. Load a partition independantly\n// 6.1 Release the collection\nReleaseCollectionReq releaseCollectionReq = ReleaseCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nclient.releaseCollection(releaseCollectionReq);\n\n// 6.2 Load partitionA\nLoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\nThread.sleep(3000);\n\n// 6.3 Check the load status of the collection and its partitions\nGetLoadStateReq getLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 6. 
Load a partition indenpendantly\nawait client.releaseCollection({\n collection_name: \"quick_setup\"\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n\nawait client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nawait sleep(3000)\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n//\n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\", \"partitionB\"))\n .build();\n\nclient.loadPartitions(loadPartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionB\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n","await client.loadPartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n\nres = await client.getLoadState({\n collection_name: \"quick_setup\",\n partition_name: \"partitionB\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// LoadStateLoaded\n// \n","client.load_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"],\n load_fields=[\"id\", \"vector\"],\n 
skip_load_dynamic_field=True\n)\n","# 7. Release a partition\nclient.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"partitionA\"]\n)\n\nres = client.get_load_state(\n collection_name=\"quick_setup\", \n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\n// 7. Release a partition\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"partitionA\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","// 7. Release a partition\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"partitionA\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n","client.release_partitions(\n collection_name=\"quick_setup\",\n partition_names=[\"_default\", \"partitionA\", \"partitionB\"]\n)\n\nres = client.get_load_status(\n collection_name=\"quick_setup\",\n)\n\n# Output\n#\n# {\n# \"state\": \"\"\n# }\n","# 8. Drop a partition\nclient.drop_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionB\"\n)\n\nres = client.list_partitions(collection_name=\"quick_setup\")\nprint(res)\n\n# Output\n#\n# [\"_default\", \"partitionA\"]\n","import io.milvus.v2.service.partition.request.ReleasePartitionsReq;\n\nReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()\n .collectionName(\"quick_setup\")\n .partitionNames(List.of(\"_default\", \"partitionA\", \"partitionB\"))\n .build();\n\nclient.releasePartitions(releasePartitionsReq);\n\ngetLoadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nstate = client.getLoadState(getLoadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// false\n","\nawait client.releasePartitions({\n collection_name: \"quick_setup\",\n partition_names: [\"_default\", \"partitionA\", \"partitionB\"]\n})\n\nres = await client.getLoadState({\n collection_name: \"quick_setup\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// {\n// status: {\n// error_code: 'Success',\n// reason: '',\n// code: 0,\n// retriable: false,\n// detail: ''\n// },\n// state: 'LoadStateNotLoad'\n// }\n// \n"],"headingContent":"Manage Partitions","anchorList":[{"label":"Gerir partições","href":"Manage-Partitions","type":1,"isActive":false},{"label":"Visão geral","href":"Overview","type":2,"isActive":false},{"label":"Preparações","href":"Preparations","type":2,"isActive":false},{"label":"Listar partições","href":"List-Partitions","type":2,"isActive":false},{"label":"Criar partições","href":"Create-Partitions","type":2,"isActive":false},{"label":"Verificar a existência de uma partição específica","href":"Check-for-a-Specific-Partition","type":2,"isActive":false},{"label":"Carregar e liberar partições","href":"Load--Release-Partitions","type":2,"isActive":false},{"label":"Soltar partições","href":"Drop-Partitions","type":2,"isActive":false},{"label":"FAQ","href":"FAQ","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/userGuide/manage-partitions.md 
b/localization/v2.4.x/site/pt/userGuide/manage-partitions.md index 5907464aa..034c7031d 100644 --- a/localization/v2.4.x/site/pt/userGuide/manage-partitions.md +++ b/localization/v2.4.x/site/pt/userGuide/manage-partitions.md @@ -1,7 +1,6 @@ --- id: manage-partitions.md title: Gerir partições -summary: '' ---

Manage Partitions

To load specified fields in one or more partitions, do as follows:

client.load_partitions(
    collection_name="quick_setup",
    partition_names=["partitionA"],
    load_fields=["id", "vector"],
    skip_load_dynamic_field=True
)

Note that only the fields listed in load_fields can be used as filtering conditions and output fields in searches and queries. You should always include the primary key in the list. Field names excluded from loading will not be available for filtering or output.

You can use skip_load_dynamic_field=True to skip loading the dynamic field. Milvus treats the dynamic field as a single field, so all the keys in the dynamic field will be included or excluded together.

Release partitions

To release all partitions of a collection, simply call release_collection(). To release specific partitions of a collection, use release_partitions().

To release all partitions of a collection, simply call releaseCollection(). To release specific partitions of a collection, use releasePartitions().
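In Python, a minimal sketch that releases only partitionA and then checks its load state:

client.release_partitions(
    collection_name="quick_setup",
    partition_names=["partitionA"]
)

res = client.get_load_state(
    collection_name="quick_setup",
    partition_name="partitionA"
)
print(res)
# Expected: {'state': '<LoadState: NotLoad>'}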

@@ -930,7 +939,7 @@ res = await client.

By default, Milvus allows a maximum of 1,024 partitions to be created. You can adjust the maximum number of partitions by configuring rootCoord.maxPartitionNum. For details, refer to System Configurations.

• How can I differentiate between partitions and partition keys?

  Partitions are physical storage units, whereas partition keys are logical concepts that automatically assign data to specific partitions based on a designated column.

  For example, in Milvus, if you have a collection with its partition key set to the color field, the system automatically assigns incoming data to partitions based on the hashed values of the color field of each entity. This automated process relieves the user of the responsibility to manually specify the partition when inserting or searching data.
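A minimal sketch of defining a partition key, where the collection name partition_key_demo and the max_length value are illustrative assumptions:

from pymilvus import MilvusClient, DataType

client = MilvusClient(uri="http://localhost:19530")

schema = MilvusClient.create_schema(auto_id=False)
schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
schema.add_field(field_name="vector", datatype=DataType.FLOAT_VECTOR, dim=5)

# Mark "color" as the partition key; Milvus hashes its values to route
# each entity to a partition automatically.
schema.add_field(
    field_name="color",
    datatype=DataType.VARCHAR,
    max_length=64,
    is_partition_key=True
)

client.create_collection(
    collection_name="partition_key_demo",
    schema=schema
)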

    diff --git a/localization/v2.4.x/site/pt/userGuide/search-query-get/single-vector-search.json b/localization/v2.4.x/site/pt/userGuide/search-query-get/single-vector-search.json index 6b1549fe8..d7cf4cd69 100644 --- a/localization/v2.4.x/site/pt/userGuide/search-query-get/single-vector-search.json +++ b/localization/v2.4.x/site/pt/userGuide/search-query-get/single-vector-search.json @@ -1 +1 @@ -{"codeList":["# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=CLUSTER_ENDPOINT,\n token=TOKEN \n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n metric_type=\"IP\"\n)\n\n# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"color_tag\": f\"{current_color}_{str(random.randint(1000, 9999))}\"\n })\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n\n# 6.1 Create partitions \nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"red\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"blue\"\n)\n\n# 6.1 Insert data into partitions\nred_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"red\", \"color_tag\": f\"red_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\nblue_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"blue\", \"color_tag\": f\"blue_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=red_data,\n partition_name=\"red\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=blue_data,\n partition_name=\"blue\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Random;\n\nimport com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp; \n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig); \n\n// 2. 
Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .metricType(\"IP\")\n .build();\n\nclient.createCollection(quickSetupReq);\n\nGetLoadStateReq loadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(loadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\n// 3. Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n\n// 6.1. Create a partition\nCreatePartitionReq partitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"red\")\n .build();\n\nclient.createPartition(partitionReq);\n\npartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"blue\")\n .build();\n\nclient.createPartition(partitionReq);\n\n// 6.2 Insert data into the partition\ndata = new ArrayList<>();\n\nfor (int i=1000; i<1500; i++) {\n Random rand = new Random();\n String current_color = \"red\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n} \n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"red\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n\ndata = new ArrayList<>();\n\nfor (int i=1500; i<2000; i++) {\n Random rand = new Random();\n String current_color = \"blue\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"blue\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. 
Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n metric_type: \"IP\"\n}); \n\n// 3. Insert randomly generated vectors\nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor (let i = 0; i < 1000; i++) {\n current_color = colors[Math.floor(Math.random() * colors.length)]\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n color_tag: `${current_color}_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nvar res = await client.insert({\n collection_name: \"quick_setup\",\n data: data\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"red\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"blue\"\n})\n\n// 6.1 Insert data into partitions\nvar red_data = []\nvar blue_data = []\n\nfor (let i = 1000; i < 1500; i++) {\n red_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"red\",\n color_tag: `red_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nfor (let i = 1500; i < 2000; i++) {\n blue_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"blue\",\n color_tag: `blue_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: red_data,\n partition_name: \"red\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: blue_data,\n partition_name: \"blue\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n","# Single vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n # Replace with your query vector\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\n# Convert the output to a formatted JSON string\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 4. Single vector search\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(3) // The number of results to return\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 4. 
Single vector search\nvar query_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 3, // The number of results to return\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {}\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {}\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {}\n },\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {}\n },\n {\n \"id\": 2,\n \"distance\": 0.5928734540939331,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [[\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\n \"vector\": [\n 0.87928146,\n 0.05324632,\n 0.6312755,\n 0.28005534,\n 0.9542448\n ],\n \"id\": 455\n }\n }\n]]}\n","[\n { score: 1.7463608980178833, id: '854' },\n { score: 1.744946002960205, id: '425' },\n { score: 1.7258622646331787, id: '718' }\n]\n","# Bulk-vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104],\n [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345]\n ], # Replace with your query vectors\n limit=2, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 5. Batch vector search\nquery_vectors = Arrays.asList(\n Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f),\n Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f)\n);\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(2)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 5. 
Batch vector search\nvar query_vectors = [\n [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104]\n]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: query_vectors,\n limit: 2,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 1,\n \"distance\": 1.3017789125442505,\n \"entity\": {}\n },\n {\n \"id\": 7,\n \"distance\": 1.2419954538345337,\n \"entity\": {}\n }\n ], # Result set 1\n [\n {\n \"id\": 3,\n \"distance\": 2.3358664512634277,\n \"entity\": {}\n },\n {\n \"id\": 8,\n \"distance\": 0.5642921924591064,\n \"entity\": {}\n }\n ] # Result set 2\n]\n","// Two sets of vectors are returned as expected\n\n{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n }\n ],\n [\n {\n \"score\": 1.8654699,\n \"fields\": {\n \"vector\": [\n 0.4671427,\n 0.8378432,\n 0.98844475,\n 0.82763994,\n 0.9729997\n ],\n \"id\": 638\n }\n },\n {\n \"score\": 1.8581753,\n \"fields\": {\n \"vector\": [\n 0.735541,\n 0.60140246,\n 0.86730254,\n 0.93152493,\n 0.98603314\n ],\n \"id\": 855\n }\n }\n ]\n]}\n","[\n [\n { score: 2.3590476512908936, id: '854' },\n { score: 2.2896690368652344, id: '59' }\n [\n { score: 2.664059638977051, id: '59' },\n { score: 2.59483003616333, id: '854' }\n ]\n]\n","# 6.2 Search within a partition\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"red\"]\n)\n\nprint(res)\n","// 6.3 Search within partitions\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"red\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 6.2 Search within partitions\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"red\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 16,\n \"distance\": 0.9200337529182434,\n \"entity\": {}\n },\n {\n \"id\": 14,\n \"distance\": 0.4505271911621094,\n \"entity\": {}\n },\n {\n \"id\": 15,\n \"distance\": 0.19924677908420563,\n \"entity\": {}\n },\n {\n \"id\": 17,\n \"distance\": 0.0075093843042850494,\n \"entity\": {}\n },\n {\n \"id\": 13,\n \"distance\": -0.14609718322753906,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1677284,\n \"fields\": {\n \"vector\": [\n 0.9986977,\n 0.17964739,\n 0.49086612,\n 0.23155272,\n 0.98438674\n ],\n \"id\": 1435\n }\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\n \"vector\": [\n 0.6952647,\n 0.13417172,\n 0.91045254,\n 0.119336545,\n 0.9338931\n ],\n \"id\": 1291\n }\n },\n {\n \"score\": 
1.0969629,\n \"fields\": {\n \"vector\": [\n 0.3363194,\n 0.028906643,\n 0.6675426,\n 0.030419827,\n 0.9735209\n ],\n \"id\": 1168\n }\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\n \"vector\": [\n 0.9980543,\n 0.36063594,\n 0.66427994,\n 0.17359233,\n 0.94954175\n ],\n \"id\": 1164\n }\n },\n {\n \"score\": 1.0584627,\n \"fields\": {\n \"vector\": [\n 0.7187005,\n 0.12674773,\n 0.987718,\n 0.3110777,\n 0.86093885\n ],\n \"id\": 1085\n }\n }\n ],\n [\n {\n \"score\": 1.8030131,\n \"fields\": {\n \"vector\": [\n 0.59726167,\n 0.7054632,\n 0.9573117,\n 0.94529945,\n 0.8664103\n ],\n \"id\": 1203\n }\n },\n {\n \"score\": 1.7728865,\n \"fields\": {\n \"vector\": [\n 0.6672442,\n 0.60448086,\n 0.9325822,\n 0.80272985,\n 0.8861626\n ],\n \"id\": 1448\n }\n },\n {\n \"score\": 1.7536311,\n \"fields\": {\n \"vector\": [\n 0.59663296,\n 0.77831805,\n 0.8578314,\n 0.88818026,\n 0.9030075\n ],\n \"id\": 1010\n }\n },\n {\n \"score\": 1.7520742,\n \"fields\": {\n \"vector\": [\n 0.854198,\n 0.72294194,\n 0.9245805,\n 0.86126596,\n 0.7969224\n ],\n \"id\": 1219\n }\n },\n {\n \"score\": 1.7452049,\n \"fields\": {\n \"vector\": [\n 0.96419,\n 0.943535,\n 0.87611496,\n 0.8268136,\n 0.79786557\n ],\n \"id\": 1149\n }\n }\n ]\n]}\n","[\n { score: 3.0258803367614746, id: '1201' },\n { score: 3.004319190979004, id: '1458' },\n { score: 2.880324363708496, id: '1187' },\n { score: 2.8246407508850098, id: '1347' },\n { score: 2.797295093536377, id: '1406' }\n]\n","res = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"blue\"]\n)\n\nprint(res)\n","searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"blue\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","res = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"blue\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 20,\n \"distance\": 2.363696813583374,\n \"entity\": {}\n },\n {\n \"id\": 26,\n \"distance\": 1.0665391683578491,\n \"entity\": {}\n },\n {\n \"id\": 23,\n \"distance\": 1.066049575805664,\n \"entity\": {}\n },\n {\n \"id\": 29,\n \"distance\": 0.8353596925735474,\n \"entity\": {}\n },\n {\n \"id\": 28,\n \"distance\": 0.7484277486801147,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1628494,\n \"fields\": {\n \"vector\": [\n 0.7442872,\n 0.046407282,\n 0.71031404,\n 0.3544345,\n 0.9819991\n ],\n \"id\": 1992\n }\n },\n {\n \"score\": 1.1470042,\n \"fields\": {\n \"vector\": [\n 0.5505825,\n 0.04367262,\n 0.9985836,\n 0.18922359,\n 0.93255126\n ],\n \"id\": 1977\n }\n },\n {\n \"score\": 1.1450152,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.1439825,\n \"fields\": {\n \"vector\": [\n 0.9253267,\n 0.15890503,\n 0.7999555,\n 0.19126713,\n 0.898583\n ],\n \"id\": 1552\n }\n },\n {\n \"score\": 1.1029172,\n \"fields\": {\n \"vector\": [\n 0.95661926,\n 0.18777144,\n 0.38115507,\n 0.14323527,\n 0.93137646\n ],\n \"id\": 1823\n }\n }\n ],\n [\n {\n \"score\": 1.8005109,\n \"fields\": {\n \"vector\": [\n 0.5953582,\n 0.7794224,\n 0.9388869,\n 0.79825854,\n 0.9197286\n ],\n \"id\": 1888\n }\n },\n {\n \"score\": 1.7714822,\n \"fields\": {\n \"vector\": [\n 0.56805456,\n 
0.89422905,\n 0.88187534,\n 0.914824,\n 0.8944365\n ],\n \"id\": 1648\n }\n },\n {\n \"score\": 1.7561421,\n \"fields\": {\n \"vector\": [\n 0.83421993,\n 0.39865613,\n 0.92319834,\n 0.42695504,\n 0.96633124\n ],\n \"id\": 1688\n }\n },\n {\n \"score\": 1.7553532,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.7543385,\n \"fields\": {\n \"vector\": [\n 0.16542226,\n 0.38248396,\n 0.9888778,\n 0.80913955,\n 0.9501492\n ],\n \"id\": 1544\n }\n }\n ]\n]}\n","[\n { score: 2.8421106338500977, id: '1745' },\n { score: 2.838560104370117, id: '1782' },\n { score: 2.8134000301361084, id: '1511' },\n { score: 2.718268871307373, id: '1679' },\n { score: 2.7014894485473633, id: '1597' }\n]\n","# Search with output fields\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"] # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 7. Search with output fields\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 7. Search with output fields\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n output_fields: [\"color\"],\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {\n \"color\": \"pink_8682\"\n }\n },\n {\n \"id\": 16,\n \"distance\": 1.0159327983856201,\n \"entity\": {\n \"color\": \"yellow_1496\"\n }\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {}\n }\n ]\n]}\n","\n[\n { score: 3.036271572113037, id: '59', color: 'orange' },\n { score: 3.0267879962921143, id: '1745', color: 'blue' },\n { score: 3.0069446563720703, id: '854', color: 'black' },\n { score: 2.984386682510376, id: '718', color: 'black' },\n { score: 2.916019916534424, id: '425', color: 'purple' }\n]\n","# Search with filter\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. 
number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"red%\"'\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color_tag like \\\"red%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"red%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n },\n {\n \"id\": 6,\n \"distance\": -0.4113418459892273,\n \"entity\": {\n \"color\": \"red_9392\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1677284,\n \"fields\": {\"color_tag\": \"red_9030\"}\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\"color_tag\": \"red_3744\"}\n },\n {\n \"score\": 1.0969629,\n \"fields\": {\"color_tag\": \"red_4168\"}\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\"color_tag\": \"red_9678\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'red_8904' },\n { score: 2.491129159927368, id: '425', color_tag: 'purple_8212' },\n { score: 2.4889798164367676, id: '1458', color_tag: 'red_6891' },\n { score: 2.42964243888855, id: '724', color_tag: 'black_9885' },\n { score: 2.4004223346710205, id: '854', color_tag: 'black_5990' }\n]\n","# Infix match on color field\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"%ll%\"' # Filter on color field, infix match on \"ll\"\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color like \\\"%ll%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. 
Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"%ll%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {\n \"color\": \"yellow_4222\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"yellow_4222\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'yellow_4222' }\n]\n","# Conduct a range search\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\n \"radius\": 0.8, # Radius of the search circle\n \"range_filter\": 1.0 # Range filter to filter out vectors that are not within the search circle\n }\n}\n\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=3, # Max. number of search results to return\n search_params=search_params, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 9. Range search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .searchParams(Map.of(\"radius\", 0.1, \"range\", 1.0))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 9. 
Range search\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n params: {\n radius: 0.1,\n range: 1.0\n },\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\"color_tag\": \"green_2052\"}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\"color_tag\": \"purple_3709\"}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {\"color_tag\": \"black_1646\"}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {\"color_tag\": \"green_4853\"}\n }\n ]\n]}\n","[\n { score: 2.3387961387634277, id: '718', color_tag: 'black_7154' },\n { score: 2.3352415561676025, id: '1745', color_tag: 'blue_8741' },\n { score: 2.290485382080078, id: '1408', color_tag: 'red_2324' },\n { score: 2.285870313644409, id: '854', color_tag: 'black_5990' },\n { score: 2.2593345642089844, id: '1309', color_tag: 'red_8458' }\n]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Group search results\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=[[0.14529211512077012, 0.9147257273453546, 0.7965055218724449, 0.7009258593102812, 0.5605206522382088]], # Query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=10, # Max. number of search results to return\n group_by_field=\"doc_id\", # Group results by document ID\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\n\nprint(doc_ids)\n","[5, 10, 1, 7, 9, 6, 3, 4, 8, 2]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Search without `group_by_field`\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=query_passage_vector, # Replace with your query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=10, # Max. 
number of search results to return\n # group_by_field=\"doc_id\", # Group results by document ID\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\n\nprint(doc_ids)\n","[1, 10, 3, 10, 1, 9, 4, 4, 8, 6]\n","# In normal cases, you do not need to set search parameters manually\n# Except for range searches.\nsearch_parameters = {\n 'metric_type': 'L2',\n 'params': {\n 'nprobe': 10,\n 'level': 1,\n 'radius': 1.0\n 'range_filter': 0.8\n }\n}\n"],"headingContent":"","anchorList":[{"label":"Pesquisa num único vetor","href":"Single-Vector-Search","type":1,"isActive":false},{"label":"Visão geral","href":"Overview","type":2,"isActive":false},{"label":"Preparações","href":"Preparations","type":2,"isActive":false},{"label":"Pesquisa básica","href":"Basic-search","type":2,"isActive":false},{"label":"Pesquisa filtrada","href":"Filtered-search","type":2,"isActive":false},{"label":"Pesquisa de intervalo","href":"Range-search","type":2,"isActive":false},{"label":"Pesquisa de agrupamento","href":"Grouping-search","type":2,"isActive":false},{"label":"Parâmetros de pesquisa","href":"Search-parameters","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=CLUSTER_ENDPOINT,\n token=TOKEN \n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n metric_type=\"IP\"\n)\n\n# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"color_tag\": f\"{current_color}_{str(random.randint(1000, 9999))}\"\n })\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n\n# 6.1 Create partitions \nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"red\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"blue\"\n)\n\n# 6.1 Insert data into partitions\nred_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"red\", \"color_tag\": f\"red_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\nblue_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"blue\", \"color_tag\": f\"blue_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=red_data,\n partition_name=\"red\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=blue_data,\n partition_name=\"blue\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Random;\n\nimport com.alibaba.fastjson.JSONObject;\n\nimport 
io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp; \n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig); \n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .metricType(\"IP\")\n .build();\n\nclient.createCollection(quickSetupReq);\n\nGetLoadStateReq loadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(loadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\n// 3. Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n\n// 6.1. 
Create a partition\nCreatePartitionReq partitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"red\")\n .build();\n\nclient.createPartition(partitionReq);\n\npartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"blue\")\n .build();\n\nclient.createPartition(partitionReq);\n\n// 6.2 Insert data into the partition\ndata = new ArrayList<>();\n\nfor (int i=1000; i<1500; i++) {\n Random rand = new Random();\n String current_color = \"red\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n} \n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"red\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n\ndata = new ArrayList<>();\n\nfor (int i=1500; i<2000; i++) {\n Random rand = new Random();\n String current_color = \"blue\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"blue\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n metric_type: \"IP\"\n}); \n\n// 3. 
Insert randomly generated vectors\nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor (let i = 0; i < 1000; i++) {\n current_color = colors[Math.floor(Math.random() * colors.length)]\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n color_tag: `${current_color}_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nvar res = await client.insert({\n collection_name: \"quick_setup\",\n data: data\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"red\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"blue\"\n})\n\n// 6.1 Insert data into partitions\nvar red_data = []\nvar blue_data = []\n\nfor (let i = 1000; i < 1500; i++) {\n red_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"red\",\n color_tag: `red_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nfor (let i = 1500; i < 2000; i++) {\n blue_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"blue\",\n color_tag: `blue_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: red_data,\n partition_name: \"red\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: blue_data,\n partition_name: \"blue\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n","# Single vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n # Replace with your query vector\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\n# Convert the output to a formatted JSON string\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 4. Single vector search\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(3) // The number of results to return\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 4. 
Single vector search\nvar query_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 3, // The number of results to return\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {}\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {}\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {}\n },\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {}\n },\n {\n \"id\": 2,\n \"distance\": 0.5928734540939331,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [[\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\n \"vector\": [\n 0.87928146,\n 0.05324632,\n 0.6312755,\n 0.28005534,\n 0.9542448\n ],\n \"id\": 455\n }\n }\n]]}\n","[\n { score: 1.7463608980178833, id: '854' },\n { score: 1.744946002960205, id: '425' },\n { score: 1.7258622646331787, id: '718' }\n]\n","# Bulk-vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104],\n [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345]\n ], # Replace with your query vectors\n limit=2, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 5. Batch vector search\nquery_vectors = Arrays.asList(\n Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f),\n Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f)\n);\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(2)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 5. 
Batch vector search\nvar query_vectors = [\n [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104]\n]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: query_vectors,\n limit: 2,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 1,\n \"distance\": 1.3017789125442505,\n \"entity\": {}\n },\n {\n \"id\": 7,\n \"distance\": 1.2419954538345337,\n \"entity\": {}\n }\n ], # Result set 1\n [\n {\n \"id\": 3,\n \"distance\": 2.3358664512634277,\n \"entity\": {}\n },\n {\n \"id\": 8,\n \"distance\": 0.5642921924591064,\n \"entity\": {}\n }\n ] # Result set 2\n]\n","// Two sets of vectors are returned as expected\n\n{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n }\n ],\n [\n {\n \"score\": 1.8654699,\n \"fields\": {\n \"vector\": [\n 0.4671427,\n 0.8378432,\n 0.98844475,\n 0.82763994,\n 0.9729997\n ],\n \"id\": 638\n }\n },\n {\n \"score\": 1.8581753,\n \"fields\": {\n \"vector\": [\n 0.735541,\n 0.60140246,\n 0.86730254,\n 0.93152493,\n 0.98603314\n ],\n \"id\": 855\n }\n }\n ]\n]}\n","[\n [\n { score: 2.3590476512908936, id: '854' },\n { score: 2.2896690368652344, id: '59' }\n [\n { score: 2.664059638977051, id: '59' },\n { score: 2.59483003616333, id: '854' }\n ]\n]\n","# 6.2 Search within a partition\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"red\"]\n)\n\nprint(res)\n","// 6.3 Search within partitions\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"red\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 6.2 Search within partitions\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"red\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 16,\n \"distance\": 0.9200337529182434,\n \"entity\": {}\n },\n {\n \"id\": 14,\n \"distance\": 0.4505271911621094,\n \"entity\": {}\n },\n {\n \"id\": 15,\n \"distance\": 0.19924677908420563,\n \"entity\": {}\n },\n {\n \"id\": 17,\n \"distance\": 0.0075093843042850494,\n \"entity\": {}\n },\n {\n \"id\": 13,\n \"distance\": -0.14609718322753906,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1677284,\n \"fields\": {\n \"vector\": [\n 0.9986977,\n 0.17964739,\n 0.49086612,\n 0.23155272,\n 0.98438674\n ],\n \"id\": 1435\n }\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\n \"vector\": [\n 0.6952647,\n 0.13417172,\n 0.91045254,\n 0.119336545,\n 0.9338931\n ],\n \"id\": 1291\n }\n },\n {\n \"score\": 
1.0969629,\n \"fields\": {\n \"vector\": [\n 0.3363194,\n 0.028906643,\n 0.6675426,\n 0.030419827,\n 0.9735209\n ],\n \"id\": 1168\n }\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\n \"vector\": [\n 0.9980543,\n 0.36063594,\n 0.66427994,\n 0.17359233,\n 0.94954175\n ],\n \"id\": 1164\n }\n },\n {\n \"score\": 1.0584627,\n \"fields\": {\n \"vector\": [\n 0.7187005,\n 0.12674773,\n 0.987718,\n 0.3110777,\n 0.86093885\n ],\n \"id\": 1085\n }\n }\n ],\n [\n {\n \"score\": 1.8030131,\n \"fields\": {\n \"vector\": [\n 0.59726167,\n 0.7054632,\n 0.9573117,\n 0.94529945,\n 0.8664103\n ],\n \"id\": 1203\n }\n },\n {\n \"score\": 1.7728865,\n \"fields\": {\n \"vector\": [\n 0.6672442,\n 0.60448086,\n 0.9325822,\n 0.80272985,\n 0.8861626\n ],\n \"id\": 1448\n }\n },\n {\n \"score\": 1.7536311,\n \"fields\": {\n \"vector\": [\n 0.59663296,\n 0.77831805,\n 0.8578314,\n 0.88818026,\n 0.9030075\n ],\n \"id\": 1010\n }\n },\n {\n \"score\": 1.7520742,\n \"fields\": {\n \"vector\": [\n 0.854198,\n 0.72294194,\n 0.9245805,\n 0.86126596,\n 0.7969224\n ],\n \"id\": 1219\n }\n },\n {\n \"score\": 1.7452049,\n \"fields\": {\n \"vector\": [\n 0.96419,\n 0.943535,\n 0.87611496,\n 0.8268136,\n 0.79786557\n ],\n \"id\": 1149\n }\n }\n ]\n]}\n","[\n { score: 3.0258803367614746, id: '1201' },\n { score: 3.004319190979004, id: '1458' },\n { score: 2.880324363708496, id: '1187' },\n { score: 2.8246407508850098, id: '1347' },\n { score: 2.797295093536377, id: '1406' }\n]\n","res = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"blue\"]\n)\n\nprint(res)\n","searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"blue\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","res = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"blue\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 20,\n \"distance\": 2.363696813583374,\n \"entity\": {}\n },\n {\n \"id\": 26,\n \"distance\": 1.0665391683578491,\n \"entity\": {}\n },\n {\n \"id\": 23,\n \"distance\": 1.066049575805664,\n \"entity\": {}\n },\n {\n \"id\": 29,\n \"distance\": 0.8353596925735474,\n \"entity\": {}\n },\n {\n \"id\": 28,\n \"distance\": 0.7484277486801147,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1628494,\n \"fields\": {\n \"vector\": [\n 0.7442872,\n 0.046407282,\n 0.71031404,\n 0.3544345,\n 0.9819991\n ],\n \"id\": 1992\n }\n },\n {\n \"score\": 1.1470042,\n \"fields\": {\n \"vector\": [\n 0.5505825,\n 0.04367262,\n 0.9985836,\n 0.18922359,\n 0.93255126\n ],\n \"id\": 1977\n }\n },\n {\n \"score\": 1.1450152,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.1439825,\n \"fields\": {\n \"vector\": [\n 0.9253267,\n 0.15890503,\n 0.7999555,\n 0.19126713,\n 0.898583\n ],\n \"id\": 1552\n }\n },\n {\n \"score\": 1.1029172,\n \"fields\": {\n \"vector\": [\n 0.95661926,\n 0.18777144,\n 0.38115507,\n 0.14323527,\n 0.93137646\n ],\n \"id\": 1823\n }\n }\n ],\n [\n {\n \"score\": 1.8005109,\n \"fields\": {\n \"vector\": [\n 0.5953582,\n 0.7794224,\n 0.9388869,\n 0.79825854,\n 0.9197286\n ],\n \"id\": 1888\n }\n },\n {\n \"score\": 1.7714822,\n \"fields\": {\n \"vector\": [\n 0.56805456,\n 
0.89422905,\n 0.88187534,\n 0.914824,\n 0.8944365\n ],\n \"id\": 1648\n }\n },\n {\n \"score\": 1.7561421,\n \"fields\": {\n \"vector\": [\n 0.83421993,\n 0.39865613,\n 0.92319834,\n 0.42695504,\n 0.96633124\n ],\n \"id\": 1688\n }\n },\n {\n \"score\": 1.7553532,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.7543385,\n \"fields\": {\n \"vector\": [\n 0.16542226,\n 0.38248396,\n 0.9888778,\n 0.80913955,\n 0.9501492\n ],\n \"id\": 1544\n }\n }\n ]\n]}\n","[\n { score: 2.8421106338500977, id: '1745' },\n { score: 2.838560104370117, id: '1782' },\n { score: 2.8134000301361084, id: '1511' },\n { score: 2.718268871307373, id: '1679' },\n { score: 2.7014894485473633, id: '1597' }\n]\n","# Search with output fields\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"] # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 7. Search with output fields\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 7. Search with output fields\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n output_fields: [\"color\"],\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {\n \"color\": \"pink_8682\"\n }\n },\n {\n \"id\": 16,\n \"distance\": 1.0159327983856201,\n \"entity\": {\n \"color\": \"yellow_1496\"\n }\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {}\n }\n ]\n]}\n","\n[\n { score: 3.036271572113037, id: '59', color: 'orange' },\n { score: 3.0267879962921143, id: '1745', color: 'blue' },\n { score: 3.0069446563720703, id: '854', color: 'black' },\n { score: 2.984386682510376, id: '718', color: 'black' },\n { score: 2.916019916534424, id: '425', color: 'purple' }\n]\n","# Search with filter\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. 
number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"red%\"'\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color_tag like \\\"red%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"red%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n },\n {\n \"id\": 6,\n \"distance\": -0.4113418459892273,\n \"entity\": {\n \"color\": \"red_9392\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1677284,\n \"fields\": {\"color_tag\": \"red_9030\"}\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\"color_tag\": \"red_3744\"}\n },\n {\n \"score\": 1.0969629,\n \"fields\": {\"color_tag\": \"red_4168\"}\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\"color_tag\": \"red_9678\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'red_8904' },\n { score: 2.491129159927368, id: '425', color_tag: 'purple_8212' },\n { score: 2.4889798164367676, id: '1458', color_tag: 'red_6891' },\n { score: 2.42964243888855, id: '724', color_tag: 'black_9885' },\n { score: 2.4004223346710205, id: '854', color_tag: 'black_5990' }\n]\n","# Infix match on color field\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"%ll%\"' # Filter on color field, infix match on \"ll\"\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color like \\\"%ll%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. 
Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"%ll%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {\n \"color\": \"yellow_4222\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"yellow_4222\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'yellow_4222' }\n]\n","# Conduct a range search\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\n \"radius\": 0.8, # Radius of the search circle\n \"range_filter\": 1.0 # Range filter to filter out vectors that are not within the search circle\n }\n}\n\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=3, # Max. number of search results to return\n search_params=search_params, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 9. Range search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .searchParams(Map.of(\"radius\", 0.1, \"range\", 1.0))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 9. 
Range search\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n params: {\n radius: 0.1,\n range: 1.0\n },\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\"color_tag\": \"green_2052\"}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\"color_tag\": \"purple_3709\"}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {\"color_tag\": \"black_1646\"}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {\"color_tag\": \"green_4853\"}\n }\n ]\n]}\n","[\n { score: 2.3387961387634277, id: '718', color_tag: 'black_7154' },\n { score: 2.3352415561676025, id: '1745', color_tag: 'blue_8741' },\n { score: 2.290485382080078, id: '1408', color_tag: 'red_2324' },\n { score: 2.285870313644409, id: '854', color_tag: 'black_5990' },\n { score: 2.2593345642089844, id: '1309', color_tag: 'red_8458' }\n]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Group search results\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=[[0.14529211512077012, 0.9147257273453546, 0.7965055218724449, 0.7009258593102812, 0.5605206522382088]], # Query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=5, # Max. number of groups to return\n group_by_field=\"doc_id\", # Group results by document ID\n group_size=2, # returned at most 2 passages per document, the default value is 1\n group_strict_size=True, # ensure every group contains exactly 3 passages\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\npassage_ids = [result['entity']['passage_id'] for result in res[0]]\n\nprint(doc_ids)\nprint(passage_ids)\n","[\"doc_11\", \"doc_11\", \"doc_7\", \"doc_7\", \"doc_3\", \"doc_3\", \"doc_2\", \"doc_2\", \"doc_8\", \"doc_8\"]\n[5, 10, 11, 10, 9, 6, 5, 4, 9, 2]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Search without `group_by_field`\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=query_passage_vector, # Replace with your query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=5, # Max. 
number of search results to return\n # group_by_field=\"doc_id\", # Group results by document ID\n # group_size=2, \n # group_strict_size=True,\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\npassage_ids = [result['entity']['passage_id'] for result in res[0]]\n\nprint(doc_ids)\nprint(passage_ids)\n","[\"doc_11\", \"doc_11\", \"doc_11\", \"doc_11\", \"doc_11\"]\n[1, 10, 3, 12, 9]\n","# In normal cases, you do not need to set search parameters manually\n# Except for range searches.\nsearch_parameters = {\n 'metric_type': 'L2',\n 'params': {\n 'nprobe': 10,\n 'level': 1,\n 'radius': 1.0\n 'range_filter': 0.8\n }\n}\n"],"headingContent":"Single-Vector Search","anchorList":[{"label":"Pesquisa num único vetor","href":"Single-Vector-Search","type":1,"isActive":false},{"label":"Visão geral","href":"Overview","type":2,"isActive":false},{"label":"Preparações","href":"Preparations","type":2,"isActive":false},{"label":"Pesquisa básica","href":"Basic-search","type":2,"isActive":false},{"label":"Pesquisa filtrada","href":"Filtered-search","type":2,"isActive":false},{"label":"Pesquisa de intervalo","href":"Range-search","type":2,"isActive":false},{"label":"Pesquisa de agrupamento","href":"Grouping-search","type":2,"isActive":false},{"label":"Parâmetros de pesquisa","href":"Search-parameters","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/userGuide/search-query-get/single-vector-search.md b/localization/v2.4.x/site/pt/userGuide/search-query-get/single-vector-search.md index 239ad37aa..cc8a25515 100644 --- a/localization/v2.4.x/site/pt/userGuide/search-query-get/single-vector-search.md +++ b/localization/v2.4.x/site/pt/userGuide/search-query-get/single-vector-search.md @@ -4,7 +4,7 @@ order: 1 summary: >- Este artigo descreve como procurar vectores numa coleção Milvus utilizando um único vetor de consulta. -title: Pesquisa de vetor único +title: Pesquisa num único vetor ---

Single-Vector Search

Once you have inserted your data, the next step is to perform similarity searches on your collection in Milvus.

Milvus lets you conduct two types of searches, depending on the number of vector fields in your collection:

• Single-vector search: If your collection has only one vector field, use the search() method to find the most similar entities. This method compares your query vector with the existing vectors in your collection and returns the IDs of the closest matches along with the distances between them. Optionally, it can also return the vector values and metadata of the results.
• Hybrid search: For collections with two or more vector fields, use the hybrid_search() method. This method performs multiple Approximate Nearest Neighbor (ANN) search requests and combines the results to return the most relevant matches after reranking.
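A minimal pymilvus sketch of the first case may help; it is a hedged illustration, assuming a local server URI, a collection named quick_setup with 5-dimensional vectors, and a scalar field color (all assumptions, not part of this diff):

from pymilvus import MilvusClient

# The URI and the collection name are assumptions for illustration.
client = MilvusClient(uri="http://localhost:19530")

# Single-vector search: one 5-d query vector in, the top-5 closest entities out.
res = client.search(
    collection_name="quick_setup",
    data=[[0.35, -0.6, 0.18, -0.26, 0.9]],  # one query vector
    limit=5,                                # number of matches to return
    output_fields=["color"]                 # optionally return scalar metadata
)
print(res)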
@@ -554,7 +554,7 @@ res = await client.
@@ -1717,9 +1717,10 @@ res = await client.

-

In Milvus, grouping the search by a specific field can avoid redundancy of the same field item in the results. You can get a varied set of results for the specific field.

    -

Consider a collection of documents, each document split into various passages. Each passage is represented by one vector embedding and belongs to one document. To find relevant documents instead of similar passages, you can include the group_by_field argument in the search() operation to group results by the document ID. This helps return the most relevant and unique documents, rather than separate passages from the same document.

    -

Here is the example code to group search results by field:

    +

In Milvus, grouping search is designed to improve the comprehensiveness and accuracy of search results.

    +

Consider a scenario in RAG, where loads of documents are split into various passages, and each passage is represented by one vector embedding. Users want to find the most relevant passages so that the LLMs can be prompted accurately. The ordinary Milvus search function can meet this requirement, but it may yield highly skewed and biased results: most of the passages come from only a few documents, and the comprehensiveness of the search results is very poor. This can seriously impair the accuracy or even the correctness of the results given by the LLM and influence the LLM users' experience negatively.

    +

Grouping search can effectively solve this problem. By passing a group_by_field and a group_size, Milvus users can group the search results into several groups and ensure that the number of entities from each group does not exceed a specific group_size. This feature can significantly increase the comprehensiveness and fairness of search results, noticeably improving the quality of LLM output.

    +

Here is the example code to group search results by field:

    # Connect to Milvus
     client = MilvusClient(uri='http://localhost:19530') # Milvus server address
     
    @@ -1734,21 +1735,26 @@ res = client.search(
         "metric_type": "L2",
         "params": {"nprobe": 10},
         }, # Search parameters
    -    limit=10, # Max. number of search results to return
    +    limit=5, # Max. number of groups to return
         group_by_field="doc_id", # Group results by document ID
    +    group_size=2, # return at most 2 passages per document; the default value is 1
    +    group_strict_size=True, # ensure every group contains exactly 2 passages
         output_fields=["doc_id", "passage_id"]
     )
     
     # Retrieve the values in the `doc_id` column
     doc_ids = [result['entity']['doc_id'] for result in res[0]]
    +passage_ids = [result['entity']['passage_id'] for result in res[0]]
     
     print(doc_ids)
    +print(passage_ids)
     

The output is similar to the following:

    -
    [5, 10, 1, 7, 9, 6, 3, 4, 8, 2]
    +
    ["doc_11", "doc_11", "doc_7", "doc_7", "doc_3", "doc_3", "doc_2", "doc_2", "doc_8", "doc_8"]
    +[5, 10, 11, 10, 9, 6, 5, 4, 9, 2]
     
    -

In the given output, it can be observed that the returned entities contain no duplicate doc_id values.

    -

For comparison, let's comment out the group_by_field and conduct a regular search:

    +

In the given output, it can be observed that for each document, exactly two passages are retrieved, and a total of 5 documents collectively make up the results.

    +

For comparison, let's comment out the group-related parameters and conduct a regular search:

    # Connect to Milvus
     client = MilvusClient(uri='http://localhost:19530') # Milvus server address
     
    @@ -1763,27 +1769,33 @@ res = client.search(
         "metric_type": "L2",
         "params": {"nprobe": 10},
         }, # Search parameters
    -    limit=10, # Max. number of search results to return
    +    limit=5, # Max. number of search results to return
         # group_by_field="doc_id", # Group results by document ID
    +    # group_size=2, 
    +    # group_strict_size=True,
         output_fields=["doc_id", "passage_id"]
     )
     
     # Retrieve the values in the `doc_id` column
     doc_ids = [result['entity']['doc_id'] for result in res[0]]
    +passage_ids = [result['entity']['passage_id'] for result in res[0]]
     
     print(doc_ids)
    +print(passage_ids)
     

The output is similar to the following:

    -
    [1, 10, 3, 10, 1, 9, 4, 4, 8, 6]
    +
    ["doc_11", "doc_11", "doc_11", "doc_11", "doc_11"]
    +[1, 10, 3, 12, 9]
     
    -

In the given output, it can be observed that the returned entities contain duplicate doc_id values.

    +

In the given output, it can be observed that "doc_11" completely dominated the search results, overshadowing the high-quality paragraphs from other documents, which can be a bad prompt to the LLM.

    +

One more thing to note: by default, grouping search returns results instantly when it has enough groups, which may lead to the number of results in each group falling short of group_size. If you care about the number of results for each group, set group_strict_size=True as shown in the code above. This will make Milvus strive to get enough results for each group, at a slight cost to performance.

Limitations

• Indexing: This grouping feature works only for collections that are indexed with the HNSW, IVF_FLAT, or FLAT type. For more information, refer to In-memory Index.

• Vector: Currently, grouping search does not support a vector field of the BINARY_VECTOR type. For more information on data types, refer to Supported data types.

• Field: Currently, grouping search allows only a single column. You cannot specify multiple field names in the group_by_field config. Additionally, grouping search is incompatible with the JSON, FLOAT, DOUBLE, ARRAY, or vector-field data types.

• Performance impact: Keep in mind that performance degrades with increasing query vector counts. Using a cluster with 2 CPU cores and 8 GB of memory as an example, the execution time of grouping search increases proportionally with the number of input query vectors.

-
• Functionality: Currently, grouping search is not supported by range search, search iterators, or hybrid search.

+
• Functionality: Currently, grouping search is not supported by range search or search iterators.

Search parameters

params.nprobe: Number of units to query during the search. The value falls in the range [1, nlist[1]].
params.level: Search precision level. Possible values are 1, 2, and 3, and defaults to 1. Higher values yield more accurate results but slower performance.
params.radius: Defines the outer boundary of your search space. Only vectors that are within this distance from the query vector are considered potential matches. The value range is determined by the metric_type parameter. For instance, if metric_type is set to L2, the valid value range is [0, ∞]. If metric_type is set to COSINE, the valid value range is [-1, 1]. For more information, refer to Similarity Metrics.
params.range_filter: While radius sets the outer limit of the search, range_filter can optionally be used to define an inner boundary, creating a distance range within which vectors must fall to be considered matches. The value range is determined by the metric_type parameter. For instance, if metric_type is set to L2, the valid value range is [0, ∞]. If metric_type is set to COSINE, the valid value range is [-1, 1]. For more information, refer to Similarity Metrics.
limit: Number of entities to return in a search[2].

notes

[1] Number of clustering units after indexing. When indexing a collection, Milvus subdivides the vector data into multiple clustering units, the number of which varies with the current indexing settings.

[2] Number of entities to return in a search.
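To make the parameter table concrete, here is a hedged pymilvus sketch of a range search mirroring the radius and range_filter values used elsewhere on this page (the server URI and the quick_setup collection with 5-dimensional vectors are assumptions for illustration):

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")  # assumed address

res = client.search(
    collection_name="quick_setup",          # assumed collection
    data=[[0.35, -0.6, 0.18, -0.26, 0.9]],  # one query vector
    limit=5,
    search_params={
        "metric_type": "L2",                # must match the index's metric type
        "params": {
            "radius": 1.0,                  # outer boundary of the search space
            "range_filter": 0.8             # optional inner boundary
        }
    }
)
print(res)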

    diff --git a/localization/v2.4.x/site/pt/userGuide/search-query-get/with-iterators.json b/localization/v2.4.x/site/pt/userGuide/search-query-get/with-iterators.json index 76038431c..97979f19c 100644 --- a/localization/v2.4.x/site/pt/userGuide/search-query-get/with-iterators.json +++ b/localization/v2.4.x/site/pt/userGuide/search-query-get/with-iterators.json @@ -1 +1 @@ -{"codeList":["from pymilvus import MilvusClient\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n)\n","import io.milvus.client.MilvusServiceClient;\nimport io.milvus.param.ConnectParam;\nimport io.milvus.param.highlevel.collection.CreateSimpleCollectionParam;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectParam connectParam = ConnectParam.newBuilder()\n .withUri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusServiceClient client = new MilvusServiceClient(connectParam);\n\n// 2. Create a collection\nCreateSimpleCollectionParam createCollectionParam = CreateSimpleCollectionParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withDimension(5)\n .build();\n\nclient.createCollection(createCollectionParam);\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(10000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n\n# Output\n#\n# {\n# \"id\": 0,\n# \"vector\": [\n# -0.5705990742218152,\n# 0.39844925120642083,\n# -0.8791287928610869,\n# 0.024163154953680932,\n# 0.6837669917169638\n# ],\n# \"color\": \"purple\",\n# \"tag\": 7774,\n# \"color_tag\": \"purple_7774\"\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data,\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 10000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(9990 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Random;\n\nimport com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.param.R;\nimport io.milvus.param.dml.InsertParam;\nimport io.milvus.response.MutationResultWrapper;\nimport io.milvus.grpc.MutationResult;\n\n\n// 3. 
Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<10000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertParam insertParam = InsertParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withRows(data)\n .build();\n\nR insertRes = client.insert(insertParam);\n\nif (insertRes.getStatus() != R.Status.Success.getCode()) {\n System.err.println(insertRes.getMessage());\n}\n\nMutationResultWrapper wrapper = new MutationResultWrapper(insertRes.getData());\nSystem.out.println(wrapper.getInsertCount());\n","from pymilvus import Collection\n\n# 4. Search with iterator\nconnections.connect(host=\"127.0.0.1\", port=19530)\ncollection = Collection(\"quick_setup\")\n\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\"nprobe\": 10}\n}\n\niterator = collection.search_iterator(\n data=query_vectors,\n anns_field=\"vector\",\n batch_size=10,\n param=search_params,\n output_fields=[\"color_tag\"],\n limit=3\n)\n\nresults = []\n\nwhile True:\n result = iterator.next()\n if not result:\n iterator.close()\n break\n \n results.extend(result)\n \n for hit in result:\n results.append(hit.to_dict())\n\nprint(results)\n\n# Output\n#\n# [\n# {\n# \"id\": 1756,\n# \"distance\": 2.0642056465148926,\n# \"entity\": {\n# \"color_tag\": \"black_9109\"\n# }\n# },\n# {\n# \"id\": 6488,\n# \"distance\": 1.9437453746795654,\n# \"entity\": {\n# \"color_tag\": \"purple_8164\"\n# }\n# },\n# {\n# \"id\": 3338,\n# \"distance\": 1.9107104539871216,\n# \"entity\": {\n# \"color_tag\": \"brown_8121\"\n# }\n# }\n# ]\n","import io.milvus.param.dml.QueryIteratorParam;\nimport io.milvus.param.dml.SearchIteratorParam;\nimport io.milvus.response.QueryResultsWrapper;\nimport io.milvus.orm.iterator.SearchIterator;\n\n// 4. Search with iterators\nSearchIteratorParam iteratorParam = SearchIteratorParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withVectorFieldName(\"vector\")\n // Use withFloatVectors() in clusters compatible with Milvus 2.4.x\n .withVectors(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f))\n .withBatchSize(10L)\n .withParams(\"{\\\"metric_type\\\": \\\"COSINE\\\", \\\"params\\\": {\\\"level\\\": 1}}\")\n .build();\n \n\nR searchIteratorRes = client.searchIterator(iteratorParam);\n\nif (searchIteratorRes.getStatus() != R.Status.Success.getCode()) {\n System.err.println(searchIteratorRes.getMessage());\n}\n\nSearchIterator searchIterator = searchIteratorRes.getData();\nList results = new ArrayList<>();\n\nwhile (true) {\n List batchResults = searchIterator.next();\n if (batchResults.isEmpty()) {\n searchIterator.close();\n break;\n }\n for (QueryResultsWrapper.RowRecord rowRecord : batchResults) {\n results.add(rowRecord);\n }\n}\n\nSystem.out.println(results.size());\n","# 6. 
Query with iterator\niterator = collection.query_iterator(\n batch_size=10, # Controls the size of the return each time you call next()\n expr=\"color_tag like \\\"brown_8\\\"\",\n output_fields=[\"color_tag\"]\n)\n\nresults = []\n\nwhile True:\n result = iterator.next()\n if not result:\n iterator.close()\n break\n \n results.extend(result)\n \n# 8. Check the search results\nprint(len(results))\n\nprint(results[:3])\n\n# Output\n#\n# [\n# {\n# \"color_tag\": \"brown_8785\",\n# \"id\": 94\n# },\n# {\n# \"color_tag\": \"brown_8568\",\n# \"id\": 176\n# },\n# {\n# \"color_tag\": \"brown_8721\",\n# \"id\": 289\n# }\n# ]\n","import io.milvus.param.dml.QueryIteratorParam;\nimport io.milvus.orm.iterator.QueryIterator;\n\n// 5. Query with iterators\n\ntry {\n Files.write(Path.of(\"results.json\"), JSON.toJSONString(new ArrayList<>()).getBytes(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);\n} catch (Exception e) {\n // TODO: handle exception\n e.printStackTrace();\n}\n\nQueryIteratorParam queryIteratorParam = QueryIteratorParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withExpr(\"color_tag like \\\"brown_8%\\\"\")\n .withBatchSize(50L)\n .addOutField(\"vector\")\n .addOutField(\"color_tag\")\n .build();\n\nR queryIteratRes = client.queryIterator(queryIteratorParam);\n\nif (queryIteratRes.getStatus() != R.Status.Success.getCode()) {\n System.err.println(queryIteratRes.getMessage());\n}\n\nQueryIterator queryIterator = queryIteratRes.getData();\n\nwhile (true) {\n List batchResults = queryIterator.next();\n if (batchResults.isEmpty()) {\n queryIterator.close();\n break;\n }\n\n String jsonString = \"\";\n List jsonObject = new ArrayList<>();\n try {\n jsonString = Files.readString(Path.of(\"results.json\"));\n jsonObject = JSON.parseArray(jsonString).toJavaList(null);\n } catch (IOException e) {\n // TODO Auto-generated catch block\n e.printStackTrace();\n }\n\n for (QueryResultsWrapper.RowRecord queryResult : batchResults) {\n JSONObject row = new JSONObject();\n row.put(\"id\", queryResult.get(\"id\"));\n row.put(\"vector\", queryResult.get(\"vector\"));\n row.put(\"color_tag\", queryResult.get(\"color_tag\"));\n jsonObject.add(row);\n }\n\n try {\n Files.write(Path.of(\"results.json\"), JSON.toJSONString(jsonObject).getBytes(), StandardOpenOption.WRITE);\n } catch (IOException e) {\n // TODO Auto-generated catch block\n e.printStackTrace();\n }\n}\n"],"headingContent":"","anchorList":[{"label":"Com Iteradores","href":"With-Iterators","type":1,"isActive":false},{"label":"Visão geral","href":"Overview","type":2,"isActive":false},{"label":"Preparações","href":"Preparations","type":2,"isActive":false},{"label":"Pesquisa com iterador","href":"Search-with-iterator","type":2,"isActive":false},{"label":"Consultar com um iterador","href":"Query-with-an-iterator","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["from pymilvus import MilvusClient\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n)\n","import io.milvus.client.MilvusServiceClient;\nimport io.milvus.param.ConnectParam;\nimport io.milvus.param.highlevel.collection.CreateSimpleCollectionParam;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. 
Connect to Milvus server\nConnectParam connectParam = ConnectParam.newBuilder()\n .withUri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusServiceClient client = new MilvusServiceClient(connectParam);\n\n// 2. Create a collection\nCreateSimpleCollectionParam createCollectionParam = CreateSimpleCollectionParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withDimension(5)\n .build();\n\nclient.createCollection(createCollectionParam);\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(10000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n\n# Output\n#\n# {\n# \"id\": 0,\n# \"vector\": [\n# -0.5705990742218152,\n# 0.39844925120642083,\n# -0.8791287928610869,\n# 0.024163154953680932,\n# 0.6837669917169638\n# ],\n# \"color\": \"purple\",\n# \"tag\": 7774,\n# \"color_tag\": \"purple_7774\"\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data,\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 10000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(9990 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Random;\n\nimport com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.param.R;\nimport io.milvus.param.dml.InsertParam;\nimport io.milvus.response.MutationResultWrapper;\nimport io.milvus.grpc.MutationResult;\n\n\n// 3. Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<10000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertParam insertParam = InsertParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withRows(data)\n .build();\n\nR insertRes = client.insert(insertParam);\n\nif (insertRes.getStatus() != R.Status.Success.getCode()) {\n System.err.println(insertRes.getMessage());\n}\n\nMutationResultWrapper wrapper = new MutationResultWrapper(insertRes.getData());\nSystem.out.println(wrapper.getInsertCount());\n","from pymilvus import Collection\n\n# 4. 
Search with iterator\nconnections.connect(host=\"127.0.0.1\", port=19530)\ncollection = Collection(\"quick_setup\")\n\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\"nprobe\": 10}\n}\n\niterator = collection.search_iterator(\n data=query_vectors,\n anns_field=\"vector\",\n batch_size=10,\n param=search_params,\n output_fields=[\"color_tag\"],\n limit=300\n)\n# search 300 entities totally with 10 entities per page\n\nresults = []\n\nwhile True:\n result = iterator.next()\n if not result:\n iterator.close()\n break\n \n results.extend(result)\n \n for hit in result:\n results.append(hit.to_dict())\n\nprint(results)\n\n# Output\n#\n# [\n# {\n# \"id\": 1756,\n# \"distance\": 2.0642056465148926,\n# \"entity\": {\n# \"color_tag\": \"black_9109\"\n# }\n# },\n# {\n# \"id\": 6488,\n# \"distance\": 1.9437453746795654,\n# \"entity\": {\n# \"color_tag\": \"purple_8164\"\n# }\n# },\n# {\n# \"id\": 3338,\n# \"distance\": 1.9107104539871216,\n# \"entity\": {\n# \"color_tag\": \"brown_8121\"\n# }\n# }\n# ]\n","import io.milvus.param.dml.QueryIteratorParam;\nimport io.milvus.param.dml.SearchIteratorParam;\nimport io.milvus.response.QueryResultsWrapper;\nimport io.milvus.orm.iterator.SearchIterator;\n\n// 4. Search with iterators\nSearchIteratorParam iteratorParam = SearchIteratorParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withVectorFieldName(\"vector\")\n // Use withFloatVectors() in clusters compatible with Milvus 2.4.x\n .withVectors(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f))\n .withBatchSize(10L)\n .withParams(\"{\\\"metric_type\\\": \\\"COSINE\\\", \\\"params\\\": {\\\"level\\\": 1}}\")\n .build();\n \n\nR searchIteratorRes = client.searchIterator(iteratorParam);\n\nif (searchIteratorRes.getStatus() != R.Status.Success.getCode()) {\n System.err.println(searchIteratorRes.getMessage());\n}\n\nSearchIterator searchIterator = searchIteratorRes.getData();\nList results = new ArrayList<>();\n\nwhile (true) {\n List batchResults = searchIterator.next();\n if (batchResults.isEmpty()) {\n searchIterator.close();\n break;\n }\n for (QueryResultsWrapper.RowRecord rowRecord : batchResults) {\n results.add(rowRecord);\n }\n}\n\nSystem.out.println(results.size());\n","# 6. Query with iterator\niterator = collection.query_iterator(\n batch_size=10, # Controls the size of the return each time you call next()\n expr=\"color_tag like \\\"brown_8\\\"\",\n output_fields=[\"color_tag\"]\n)\n\nresults = []\n\nwhile True:\n result = iterator.next()\n if not result:\n iterator.close()\n break\n \n results.extend(result)\n \n# 8. Check the search results\nprint(len(results))\n\nprint(results[:3])\n\n# Output\n#\n# [\n# {\n# \"color_tag\": \"brown_8785\",\n# \"id\": 94\n# },\n# {\n# \"color_tag\": \"brown_8568\",\n# \"id\": 176\n# },\n# {\n# \"color_tag\": \"brown_8721\",\n# \"id\": 289\n# }\n# ]\n","import io.milvus.param.dml.QueryIteratorParam;\nimport io.milvus.orm.iterator.QueryIterator;\n\n// 5. 
Query with iterators\n\ntry {\n Files.write(Path.of(\"results.json\"), JSON.toJSONString(new ArrayList<>()).getBytes(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);\n} catch (Exception e) {\n // TODO: handle exception\n e.printStackTrace();\n}\n\nQueryIteratorParam queryIteratorParam = QueryIteratorParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withExpr(\"color_tag like \\\"brown_8%\\\"\")\n .withBatchSize(50L)\n .addOutField(\"vector\")\n .addOutField(\"color_tag\")\n .build();\n\nR queryIteratRes = client.queryIterator(queryIteratorParam);\n\nif (queryIteratRes.getStatus() != R.Status.Success.getCode()) {\n System.err.println(queryIteratRes.getMessage());\n}\n\nQueryIterator queryIterator = queryIteratRes.getData();\n\nwhile (true) {\n List batchResults = queryIterator.next();\n if (batchResults.isEmpty()) {\n queryIterator.close();\n break;\n }\n\n String jsonString = \"\";\n List jsonObject = new ArrayList<>();\n try {\n jsonString = Files.readString(Path.of(\"results.json\"));\n jsonObject = JSON.parseArray(jsonString).toJavaList(null);\n } catch (IOException e) {\n // TODO Auto-generated catch block\n e.printStackTrace();\n }\n\n for (QueryResultsWrapper.RowRecord queryResult : batchResults) {\n JSONObject row = new JSONObject();\n row.put(\"id\", queryResult.get(\"id\"));\n row.put(\"vector\", queryResult.get(\"vector\"));\n row.put(\"color_tag\", queryResult.get(\"color_tag\"));\n jsonObject.add(row);\n }\n\n try {\n Files.write(Path.of(\"results.json\"), JSON.toJSONString(jsonObject).getBytes(), StandardOpenOption.WRITE);\n } catch (IOException e) {\n // TODO Auto-generated catch block\n e.printStackTrace();\n }\n}\n"],"headingContent":"With Iterators","anchorList":[{"label":"Com Iteradores","href":"With-Iterators","type":1,"isActive":false},{"label":"Visão geral","href":"Overview","type":2,"isActive":false},{"label":"Preparações","href":"Preparations","type":2,"isActive":false},{"label":"Pesquisa com iterador","href":"Search-with-iterator","type":2,"isActive":false},{"label":"Consultar com um iterador","href":"Query-with-an-iterator","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/userGuide/search-query-get/with-iterators.md b/localization/v2.4.x/site/pt/userGuide/search-query-get/with-iterators.md index 2a83797f4..29cddde03 100644 --- a/localization/v2.4.x/site/pt/userGuide/search-query-get/with-iterators.md +++ b/localization/v2.4.x/site/pt/userGuide/search-query-get/with-iterators.md @@ -21,7 +21,7 @@ title: Com Iteradores d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

Milvus provides search and query iterators for iterating through a large volume of entities. Since Milvus limits TopK to 16,384, users can use iterators to return large numbers or even whole entities in a collection in batch mode.

Overview

Iterators are powerful tools that help you iterate through a large volume of data or all the data within a collection using primary key values and boolean expressions. This can significantly improve the way you retrieve data. Unlike the traditional use of offset and limit parameters, which may become less efficient over time, iterators offer a more scalable solution.

    +

Iterators are an efficient tool for scanning an entire collection or iterating through a large volume of entities by specifying primary key values or a filter expression. Compared to a search or query call with offset and limit parameters, using iterators is more efficient and scalable; a minimal sketch follows the list below.

Benefits of using iterators

• Simplicity: Eliminates complex offset and limit settings.
• Efficiency: Provides scalable data retrieval by fetching only the data in need.

• Consistency: Ensures a consistent dataset size with boolean filters.
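As a minimal sketch of this pattern (assuming a local Milvus instance and a quick_setup collection with a 5-dimensional vector field, as in the preparation code of this page), a search iterator pages through results batch by batch instead of relying on offset and limit:

from pymilvus import connections, Collection

connections.connect(host="127.0.0.1", port=19530)  # assumed address
collection = Collection("quick_setup")

iterator = collection.search_iterator(
    data=[[0.35, -0.6, 0.18, -0.26, 0.9]],  # one query vector
    anns_field="vector",
    param={"metric_type": "IP", "params": {"nprobe": 10}},
    batch_size=10,   # entities fetched per next() call
    limit=300        # total entities to retrieve across all batches
)

results = []
while True:
    batch = iterator.next()
    if not batch:
        iterator.close()  # release server-side resources when done
        break
    results.extend(hit.to_dict() for hit in batch)

print(len(results))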

@@ -64,9 +64,9 @@ title: Com Iteradores

-

The following steps repurpose the code to connect to Milvus, quickly set up a collection, and insert over 10,000 randomly generated entities into the collection.

    +

The following preparation step connects to Milvus and inserts randomly generated entities into a collection.

Step 1: Create a collection

Use MilvusClient to connect to the Milvus server and create_collection() to create a collection.

Use MilvusClientV2 to connect to the Milvus server and createCollection() to create a collection.
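For reference, a minimal pymilvus sketch of this preparation step (the server address is an assumption for illustration; the collection matches the quick_setup example used throughout this page):

from pymilvus import MilvusClient

# 1. Set up a Milvus client
client = MilvusClient(uri="http://localhost:19530")

# 2. Create a collection with a 5-dimensional vector field
client.create_collection(
    collection_name="quick_setup",
    dimension=5
)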

    @@ -266,8 +266,9 @@ iterator = collection.search_iterator( batch_size=10, param=search_params, output_fields=["color_tag"], - limit=3 + limit=300 ) +# search 300 entities totally with 10 entities per page results = [] diff --git a/localization/v2.4.x/site/pt/userGuide/tools/cli_commands.json b/localization/v2.4.x/site/pt/userGuide/tools/cli_commands.json index 92dd25e3b..e499fc3fe 100644 --- a/localization/v2.4.x/site/pt/userGuide/tools/cli_commands.json +++ b/localization/v2.4.x/site/pt/userGuide/tools/cli_commands.json @@ -1 +1 @@ -{"codeList":["clear\n","connect [-uri (text)] [-t (text)]\n","milvus_cli > connect -uri http://127.0.0.1:19530 \n","create database -db (text) \n","milvus_cli > create database -db testdb\n","use database -db (text) \n","milvus_cli > use database -db testdb\n","list databases \n","milvus_cli > list databases\n","delete database -db (text) \n","milvus_cli > delete database -db testdb\n","create user -u (text) -p (text)\n","milvus_cli > create user -u zilliz -p zilliz\n","create alias -c (text) -a (text) [-A] \n","milvus_cli > create alias -c car -a carAlias1\n","milvus_cli > create alias -c car2 -A -a carAlias1\n","create collection -c (text) -f (text) -p (text) [-a] [-d (text)]\n","## For array field: --schema-field support :::(:if Varchar)\n\nmilvus_cli > create collection -c car -f id:INT64:primary_field -f vector:FLOAT_VECTOR:128 -f color:INT64:color -f brand:ARRAY:64:VARCHAR:128 -p id -A -d 'car_collection'\n","create partition -c (text) -p (text) [-d (text)]\n","milvus_cli > create partition -c car -p new_partition -d test_add_partition\n","create index\n","milvus_cli > create index\n\nCollection name (car, car2): car2\n\nThe name of the field to create an index for (vector): vector\n\nIndex name: vectorIndex\n\n# Default is ''\nIndex type FLAT, IVF_FLAT, IVF_SQ8, IVF_PQ, RNSG, HNSW, ANNOY, AUTOINDEX, DISKANN, GPU_IVF_FLAT, GPU_IVF_PQ, SPARSE_INVERTED_INDEX, SPARSE_WAND, SCANN, STL_SORT, Trie, INVERTED, ) []: IVF_FLAT \n\n# Default is ''\nIndex metric type (L2, IP, HAMMING, TANIMOTO, COSINE, ) []: \n\nTimeout []:\n","delete user -u (text)\n","milvus_cli > delete user -u zilliz\n","delete alias -a (text) \n","delete collection -c (text) \n","milvus_cli > delete collection -c car\n","delete entities -c (text) -p (text) \n","milvus_cli > delete entities -c car\n\nThe expression to specify entities to be deleted, such as \"film_id in [ 0, 1 ]\": film_id in [ 0, 1 ]\n\nYou are trying to delete the entities of collection. This action cannot be undone!\n\nDo you want to continue? [y/N]: y\n","delete partition -c (text) -p (text)\n","milvus_cli > delete partition -c car -p new_partition\n","delete index -c (text) -in (text)\n","milvus_cli > delete index -c car -in indexName\n","show collection -c (text)\n","milvus_cli > show collection -c test_collection_insert\n","show partition -c (text) -p (text)\n","milvus_cli > show partition -c test_collection_insert -p _default\n","show index -c (text) -in (text)\n","milvus_cli > show index -c test_collection -in index_name\n","exit\n","help \n","import -c (text)[-p (text)] \n","milvus_cli > import -c car 'examples/import_csv/vectors.csv'\n\nReading csv file... 
[####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","milvus_cli > import -c car 'https://raw.githubusercontent.com/milvus-\nio/milvus_cli/main/examples/import_csv/vectors.csv'\n\nReading file from remote URL.\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","list users\n","list collections\n","list indexes -c (text)\n","list partitions -c (text)\n","load -c (text) [-p (text)]\n","query\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id in [ 428960801420883491, 428960801420883492,\n428960801420883493 ]\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: color, brand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id > 428960801420883491\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: id, color,\nbrand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","release -c (text) [-p (text)]\n","search\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file\nout headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. 
If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers):\n [[0.71, 0.76, 0.17, 0.13, 0.42, 0.07, 0.15, 0.67, 0.58, 0.02, 0.39, 0.47, 0.58, 0.88, 0.73, 0.31, 0.23, 0.57, 0.33, 0.2, 0.03, 0.43, 0.78, 0.49, 0.17, 0.56, 0.76, 0.54, 0.45, 0.46, 0.05, 0.1, 0.43, 0.63, 0.29, 0.44, 0.65, 0.01, 0.35, 0.46, 0.66, 0.7, 0.88, 0.07, 0.49, 0.92, 0.57, 0.5, 0.16, 0.77, 0.98, 0.1, 0.44, 0.88, 0.82, 0.16, 0.67, 0.63, 0.57, 0.55, 0.95, 0.13, 0.64, 0.43, 0.71, 0.81, 0.43, 0.65, 0.76, 0.7, 0.05, 0.24, 0.03, 0.9, 0.46, 0.28, 0.92, 0.25, 0.97, 0.79, 0.73, 0.97, 0.49, 0.28, 0.64, 0.19, 0.23, 0.51, 0.09, 0.1, 0.53, 0.03, 0.23, 0.94, 0.87, 0.14, 0.42, 0.82, 0.91, 0.11, 0.91, 0.37, 0.26, 0.6, 0.89, 0.6, 0.32, 0.11, 0.98, 0.67, 0.12, 0.66, 0.47, 0.02, 0.15, 0.6, 0.64, 0.57, 0.14, 0.81, 0.75, 0.11, 0.49, 0.78, 0.16, 0.63, 0.57, 0.18]]\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, car2): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []:\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []:\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. 
If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","list connections \n","show index_progress -c (text) [-i (text)]\n","show loading_progress -c (text) [-p (text)]\n","version\n","$ milvus_cli --version\nMilvus_CLI v0.4.0\n"],"headingContent":"","anchorList":[{"label":"Referência do Comando Milvus_CLI","href":"MilvusCLI-Command-Reference","type":1,"isActive":false},{"label":"limpar","href":"clear","type":2,"isActive":false},{"label":"ligar","href":"connect","type":2,"isActive":false},{"label":"criar base de dados","href":"create-Database","type":2,"isActive":false},{"label":"usar base de dados","href":"use-Database","type":2,"isActive":false},{"label":"Listar bases de dados","href":"List-Databases","type":2,"isActive":false},{"label":"Eliminar base de dados","href":"Delete-Database","type":2,"isActive":false},{"label":"criar utilizador","href":"create-user","type":2,"isActive":false},{"label":"criar alias","href":"create-alias","type":2,"isActive":false},{"label":"criar coleção","href":"create-collection","type":2,"isActive":false},{"label":"criar partição","href":"create-partition","type":2,"isActive":false},{"label":"criar índice","href":"create-index","type":2,"isActive":false},{"label":"delete user","href":"delete-user","type":2,"isActive":false},{"label":"delete alias","href":"delete-alias","type":2,"isActive":false},{"label":"delete collection","href":"delete-collection","type":2,"isActive":false},{"label":"delete entities","href":"delete-entities","type":2,"isActive":false},{"label":"delete partição","href":"delete-partition","type":2,"isActive":false},{"label":"delete index","href":"delete-index","type":2,"isActive":false},{"label":"mostrar coleção","href":"show-collection","type":2,"isActive":false},{"label":"mostrar partição","href":"show-partition","type":2,"isActive":false},{"label":"mostrar índice","href":"show-index","type":2,"isActive":false},{"label":"exit","href":"exit","type":2,"isActive":false},{"label":"help","href":"help","type":2,"isActive":false},{"label":"importar","href":"import","type":2,"isActive":false},{"label":"listar utilizadores","href":"list-users","type":2,"isActive":false},{"label":"listar coleções","href":"list-collections","type":2,"isActive":false},{"label":"listar índices","href":"list-indexes","type":2,"isActive":false},{"label":"listar partições","href":"list-partitions","type":2,"isActive":false},{"label":"load","href":"load","type":2,"isActive":false},{"label":"consulta","href":"query","type":2,"isActive":false},{"label":"libertação","href":"release","type":2,"isActive":false},{"label":"pesquisa","href":"search","type":2,"isActive":false},{"label":"Listar ligações","href":"List-connection","type":2,"isActive":false},{"label":"show index_progress","href":"show-indexprogress","type":2,"isActive":false},{"label":"show loading_progress","href":"show-loadingprogress","type":2,"isActive":false},{"label":"version","href":"version","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["clear\n","connect [-uri (text)] [-t (text)]\n","milvus_cli > connect -uri http://127.0.0.1:19530 \n","create database -db (text) \n","milvus_cli > create database -db testdb\n","use database -db (text) \n","milvus_cli > use database -db testdb\n","list databases \n","milvus_cli > list databases\n","delete database -db (text) \n","milvus_cli > delete database -db testdb\n","create user -u (text) -p (text)\n","milvus_cli > create user -u zilliz -p zilliz\n","create alias -c (text) -a (text) [-A] 
\n","milvus_cli > create alias -c car -a carAlias1\n","milvus_cli > create alias -c car2 -A -a carAlias1\n","create collection -c (text) -f (text) -p (text) [-a] [-d (text)]\n","## For array field: --schema-field support :::(:if Varchar)\n\nmilvus_cli > create collection -c car -f id:INT64:primary_field -f vector:FLOAT_VECTOR:128 -f color:INT64:color -f brand:ARRAY:64:VARCHAR:128 -p id -A -d 'car_collection'\n","create partition -c (text) -p (text) [-d (text)]\n","milvus_cli > create partition -c car -p new_partition -d test_add_partition\n","create index\n","milvus_cli > create index\n\nCollection name (car, car2): car2\n\nThe name of the field to create an index for (vector): vector\n\nIndex name: vectorIndex\n\n# Default is ''\nIndex type FLAT, IVF_FLAT, IVF_SQ8, IVF_PQ, RNSG, HNSW, ANNOY, AUTOINDEX, DISKANN, GPU_IVF_FLAT, GPU_IVF_PQ, SPARSE_INVERTED_INDEX, SPARSE_WAND, SCANN, STL_SORT, Trie, INVERTED, ) []: IVF_FLAT \n\n# Default is ''\nIndex metric type (L2, IP, HAMMING, TANIMOTO, COSINE, ) []: \n\nTimeout []:\n","delete user -u (text)\n","milvus_cli > delete user -u zilliz\n","delete alias -a (text) \n","delete collection -c (text) \n","milvus_cli > delete collection -c car\n","delete entities -c (text) -p (text) \n","milvus_cli > delete entities -c car\n\nThe expression to specify entities to be deleted, such as \"film_id in [ 0, 1 ]\": film_id in [ 0, 1 ]\n\nYou are trying to delete the entities of collection. This action cannot be undone!\n\nDo you want to continue? [y/N]: y\n","delete partition -c (text) -p (text)\n","milvus_cli > delete partition -c car -p new_partition\n","delete index -c (text) -in (text)\n","milvus_cli > delete index -c car -in indexName\n","show collection -c (text)\n","milvus_cli > show collection -c test_collection_insert\n","show partition -c (text) -p (text)\n","milvus_cli > show partition -c test_collection_insert -p _default\n","show index -c (text) -in (text)\n","milvus_cli > show index -c test_collection -in index_name\n","exit\n","help \n","import -c (text)[-p (text)] \n","milvus_cli > import -c car 'examples/import_csv/vectors.csv'\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","milvus_cli > import -c car 'https://raw.githubusercontent.com/milvus-\nio/milvus_cli/main/examples/import_csv/vectors.csv'\n\nReading file from remote URL.\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","list users\n","list collections\n","list indexes -c (text)\n","list partitions -c (text)\n","load -c (text) [-p (text)]\n","query\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id in [ 428960801420883491, 428960801420883492,\n428960801420883493 ]\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: color, brand\n\ntimeout []:\n\nGuarantee timestamp. 
This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id > 428960801420883491\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: id, color,\nbrand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","release -c (text) [-p (text)]\n","search\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file\nout headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers):\n [[0.71, 0.76, 0.17, 0.13, 0.42, 0.07, 0.15, 0.67, 0.58, 0.02, 0.39, 0.47, 0.58, 0.88, 0.73, 0.31, 0.23, 0.57, 0.33, 0.2, 0.03, 0.43, 0.78, 0.49, 0.17, 0.56, 0.76, 0.54, 0.45, 0.46, 0.05, 0.1, 0.43, 0.63, 0.29, 0.44, 0.65, 0.01, 0.35, 0.46, 0.66, 0.7, 0.88, 0.07, 0.49, 0.92, 0.57, 0.5, 0.16, 0.77, 0.98, 0.1, 0.44, 0.88, 0.82, 0.16, 0.67, 0.63, 0.57, 0.55, 0.95, 0.13, 0.64, 0.43, 0.71, 0.81, 0.43, 0.65, 0.76, 0.7, 0.05, 0.24, 0.03, 0.9, 0.46, 0.28, 0.92, 0.25, 0.97, 0.79, 0.73, 0.97, 0.49, 0.28, 0.64, 0.19, 0.23, 0.51, 0.09, 0.1, 0.53, 0.03, 0.23, 0.94, 0.87, 0.14, 0.42, 0.82, 0.91, 0.11, 0.91, 0.37, 0.26, 0.6, 0.89, 0.6, 0.32, 0.11, 0.98, 0.67, 0.12, 0.66, 0.47, 0.02, 0.15, 0.6, 0.64, 0.57, 0.14, 0.81, 0.75, 0.11, 0.49, 0.78, 0.16, 0.63, 0.57, 0.18]]\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. 
If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, car2): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []:\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []:\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","list connections \n","show index_progress -c (text) [-i (text)]\n","show loading_progress -c (text) [-p (text)]\n","version\n","$ milvus_cli --version\nMilvus_CLI v0.4.0\n"],"headingContent":"Milvus_CLI Command Reference","anchorList":[{"label":"Referência do Comando Milvus_CLI","href":"MilvusCLI-Command-Reference","type":1,"isActive":false},{"label":"limpar","href":"clear","type":2,"isActive":false},{"label":"ligar","href":"connect","type":2,"isActive":false},{"label":"criar base de dados","href":"create-Database","type":2,"isActive":false},{"label":"usar base de dados","href":"use-Database","type":2,"isActive":false},{"label":"listar bases de dados","href":"list-Databases","type":2,"isActive":false},{"label":"apagar base de dados","href":"delete-Database","type":2,"isActive":false},{"label":"criar utilizador","href":"create-user","type":2,"isActive":false},{"label":"criar alias","href":"create-alias","type":2,"isActive":false},{"label":"criar coleção","href":"create-collection","type":2,"isActive":false},{"label":"criar partição","href":"create-partition","type":2,"isActive":false},{"label":"criar índice","href":"create-index","type":2,"isActive":false},{"label":"delete user","href":"delete-user","type":2,"isActive":false},{"label":"delete alias","href":"delete-alias","type":2,"isActive":false},{"label":"delete collection","href":"delete-collection","type":2,"isActive":false},{"label":"delete entities","href":"delete-entities","type":2,"isActive":false},{"label":"delete partição","href":"delete-partition","type":2,"isActive":false},{"label":"delete index","href":"delete-index","type":2,"isActive":false},{"label":"mostrar coleção","href":"show-collection","type":2,"isActive":false},{"label":"mostrar partição","href":"show-partition","type":2,"isActive":false},{"label":"mostrar índice","href":"show-index","type":2,"isActive":false},{"label":"exit","href":"exit","type":2,"isActive":false},{"label":"help","href":"help","type":2,"isActive":false},{"label":"importar","href":"import","type":2,"isActive":false},{"label":"listar utilizadores","href":"list-users","type":2,"isActive":false},{"label":"listar coleções","href":"list-collections","type":2,"isActive":false},{"label":"listar índices","href":"list-indexes","type":2,"isActive":false},{"label":"listar 
partições","href":"list-partitions","type":2,"isActive":false},{"label":"load","href":"load","type":2,"isActive":false},{"label":"consulta","href":"query","type":2,"isActive":false},{"label":"libertação","href":"release","type":2,"isActive":false},{"label":"pesquisa","href":"search","type":2,"isActive":false},{"label":"listar ligações","href":"list-connection","type":2,"isActive":false},{"label":"show index_progress","href":"show-indexprogress","type":2,"isActive":false},{"label":"show loading_progress","href":"show-loadingprogress","type":2,"isActive":false},{"label":"version","href":"version","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/userGuide/tools/cli_commands.md b/localization/v2.4.x/site/pt/userGuide/tools/cli_commands.md index fea0c33e2..414ead66a 100644 --- a/localization/v2.4.x/site/pt/userGuide/tools/cli_commands.md +++ b/localization/v2.4.x/site/pt/userGuide/tools/cli_commands.md @@ -1,7 +1,7 @@ --- id: cli_commands.md summary: Interagir com Milvus através de comandos. -title: Referência do comando Milvus_CLI +title: Referência do Comando Milvus_CLI ---

Milvus_CLI Command Reference

    -

List databases

Example 1

The following example lists the databases in Milvus.

    milvus_cli > list databases
     
    -

Delete database

    -

List connections

Shows the progress of entity loading.

    +

Displays the loading progress of a collection.

Syntax

    show loading_progress -c (text) [-p (text)]
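For instance, a hypothetical invocation for a collection named test_collection (the collection name is an assumption; only the documented -c flag is used):

milvus_cli > show loading_progress -c test_collection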
     
    diff --git a/localization/v2.4.x/site/pt/userGuide/tools/milvus_backup_overview.json b/localization/v2.4.x/site/pt/userGuide/tools/milvus_backup_overview.json index 17a8f56e2..3ac557a95 100644 --- a/localization/v2.4.x/site/pt/userGuide/tools/milvus_backup_overview.json +++ b/localization/v2.4.x/site/pt/userGuide/tools/milvus_backup_overview.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"","anchorList":[{"label":"Cópia de segurança do Milvus","href":"Milvus-Backup","type":1,"isActive":false},{"label":"Pré-requisitos","href":"Prerequisites","type":2,"isActive":false},{"label":"Arquitetura","href":"Architecture","type":2,"isActive":false},{"label":"Versão mais recente","href":"Latest-release","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Milvus Backup","anchorList":[{"label":"Cópia de segurança do Milvus","href":"Milvus-Backup","type":1,"isActive":false},{"label":"Pré-requisitos","href":"Prerequisites","type":2,"isActive":false},{"label":"Arquitetura","href":"Architecture","type":2,"isActive":false},{"label":"Versão mais recente","href":"Latest-release","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/userGuide/tools/milvus_backup_overview.md b/localization/v2.4.x/site/pt/userGuide/tools/milvus_backup_overview.md index 9c184f24e..34cc63553 100644 --- a/localization/v2.4.x/site/pt/userGuide/tools/milvus_backup_overview.md +++ b/localization/v2.4.x/site/pt/userGuide/tools/milvus_backup_overview.md @@ -3,7 +3,7 @@ id: milvus_backup_overview.md summary: >- O Milvus-Backup é uma ferramenta que permite aos utilizadores fazer cópias de segurança e restaurar os dados do Milvus. -title: Backup de Milvus +title: Cópia de segurança do Milvus ---

    Cópia de segurança do Milvus

    diff --git a/localization/v2.4.x/site/pt/userGuide/use-partition-key.json b/localization/v2.4.x/site/pt/userGuide/use-partition-key.json index e7630a3f7..78549d3cc 100644 --- a/localization/v2.4.x/site/pt/userGuide/use-partition-key.json +++ b/localization/v2.4.x/site/pt/userGuide/use-partition-key.json @@ -1 +1 @@ -{"codeList":["import random, time\nfrom pymilvus import connections, MilvusClient, DataType\n\nSERVER_ADDR = \"http://localhost:19530\"\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=SERVER_ADDR\n)\n\n# 2. Create a collection\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n partition_key_field=\"color\",\n num_partitions=16 # Number of partitions. Defaults to 16.\n)\n\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\nschema.add_field(field_name=\"color\", datatype=DataType.VARCHAR, max_length=512)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.collection.request.AddFieldReq;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in customized setup mode\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder()\n .fieldName(\"id\")\n .dataType(DataType.Int64)\n .isPrimaryKey(true)\n .autoID(false)\n .build());\n\nschema.addField(AddFieldReq.builder()\n .fieldName(\"vector\")\n .dataType(DataType.FloatVector)\n .dimension(5)\n .build());\n \nschema.addField(AddFieldReq.builder()\n .fieldName(\"color\")\n .dataType(DataType.VarChar)\n .maxLength(512)\n .isPartitionKey(true)\n .build());\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\nasync function main() {\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address}); \n\n// 2. 
Create a collection\n// 2.1 Define fields\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n auto_id: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n {\n name: \"color\",\n data_type: DataType.VarChar,\n max_length: 512,\n is_partition_key: true\n }\n]\n","index_params = MilvusClient.prepare_index_params()\n\nindex_params.add_index(\n field_name=\"id\",\n index_type=\"STL_SORT\"\n)\n\nindex_params.add_index(\n field_name=\"color\",\n index_type=\"Trie\"\n)\n\nindex_params.add_index(\n field_name=\"vector\",\n index_type=\"IVF_FLAT\",\n metric_type=\"L2\",\n params={\"nlist\": 1024}\n)\n","// 2.3 Prepare index parameters\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.IP)\n .extraParams(Map.of(\"nlist\", 1024))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n","// 2.2 Prepare index parameters\nconst index_params = [{\n field_name: \"color\",\n index_type: \"Trie\"\n},{\n field_name: \"id\",\n index_type: \"STL_SORT\"\n},{\n field_name: \"vector\",\n index_type: \"IVF_FLAT\",\n metric_type: \"IP\",\n params: { nlist: 1024}\n}]\n","client.create_collection(\n collection_name=\"test_collection\",\n schema=schema,\n index_params=index_params\n)\n","// 2.4 Create a collection with schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n .collectionName(\"test_collection\")\n .collectionSchema(schema)\n .indexParams(indexParams) \n .build();\n\nclient.createCollection(customizedSetupReq);\n","// 2.3 Create a collection with fields and index parameters\nres = await client.createCollection({\n collection_name: \"test_collection\",\n fields: fields, \n index_params: index_params,\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n//\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n","// 3. Insert randomly generated vectors\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n int current_tag = rand.nextInt(8999) + 1000;\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"tag\", current_tag);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nSystem.out.println(JSONObject.toJSON(data.get(0))); \n","// 3. 
Insert randomly generated vectors \nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\nvar data = []\n\nfor (let i = 0; i < 1000; i++) {\n const current_color = colors[Math.floor(Math.random() * colors.length)]\n const current_tag = Math.floor(Math.random() * 8999 + 1000)\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n tag: current_tag,\n color_tag: `${current_color}_${current_tag}`\n })\n}\n\nconsole.log(data[0])\n","{\n id: 0,\n vector: [\n 0.1275656405044483,\n 0.47417858592773277,\n 0.13858264437643286,\n 0.2390904907020377,\n 0.8447862593689635\n ],\n color: 'blue',\n tag: 2064,\n color_tag: 'blue_2064'\n}\n","res = client.insert(\n collection_name=\"test_collection\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n","// 3.1 Insert data into the collection\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"test_collection\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n","res = await client.insert({\n collection_name: \"test_collection\",\n data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n","# 4. Search with partition key\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\n\nres = client.search(\n collection_name=\"test_collection\",\n data=query_vectors,\n filter=\"color == 'green'\",\n search_params={\"metric_type\": \"L2\", \"params\": {\"nprobe\": 10}},\n output_fields=[\"id\", \"color_tag\"],\n limit=3\n)\n\nprint(res)\n\n# Output\n#\n# [\n# [\n# {\n# \"id\": 970,\n# \"distance\": 0.5770174264907837,\n# \"entity\": {\n# \"id\": 970,\n# \"color_tag\": \"green_9828\"\n# }\n# },\n# {\n# \"id\": 115,\n# \"distance\": 0.6898155808448792,\n# \"entity\": {\n# \"id\": 115,\n# \"color_tag\": \"green_4073\"\n# }\n# },\n# {\n# \"id\": 899,\n# \"distance\": 0.7028976678848267,\n# \"entity\": {\n# \"id\": 899,\n# \"color_tag\": \"green_9897\"\n# }\n# }\n# ]\n# ]\n","// 4. Search with partition key\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"test_collection\")\n .data(query_vectors)\n .filter(\"color == \\\"green\\\"\")\n .topK(3)\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp)); \n\n// Output:\n// {\"searchResults\": [[\n// {\n// \"distance\": 1.0586997,\n// \"id\": 414,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.981384,\n// \"id\": 293,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.9548756,\n// \"id\": 325,\n// \"entity\": {}\n// }\n// ]]}\n","// 4. 
Search with partition key\nconst query_vectors = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"test_collection\",\n data: query_vectors,\n filter: \"color == 'green'\",\n output_fields: [\"color_tag\"],\n limit: 3\n})\n\nconsole.log(res.results)\n\n// Output\n// \n// [\n// { score: 2.402090549468994, id: '135', color_tag: 'green_2694' },\n// { score: 2.3938629627227783, id: '326', color_tag: 'green_7104' },\n// { score: 2.3235254287719727, id: '801', color_tag: 'green_3162' }\n// ]\n// \n"],"headingContent":"","anchorList":[{"label":"Use Partition Key","href":"Use-Partition-Key","type":1,"isActive":false},{"label":"Overview","href":"Overview","type":2,"isActive":false},{"label":"Enable partition key","href":"Enable-partition-key","type":2,"isActive":false},{"label":"List partitions","href":"List-partitions","type":2,"isActive":false},{"label":"Insert data","href":"Insert-data","type":2,"isActive":false},{"label":"Use partition key","href":"Use-partition-key","type":2,"isActive":false},{"label":"Typical use cases","href":"Typical-use-cases","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["import random, time\nfrom pymilvus import connections, MilvusClient, DataType\n\nSERVER_ADDR = \"http://localhost:19530\"\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=SERVER_ADDR\n)\n\n# 2. Create a collection\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n partition_key_field=\"color\",\n num_partitions=64 # Number of partitions. Defaults to 64.\n)\n\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\nschema.add_field(field_name=\"color\", datatype=DataType.VARCHAR, max_length=512)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.collection.request.AddFieldReq;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in customized setup mode\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder()\n .fieldName(\"id\")\n .dataType(DataType.Int64)\n .isPrimaryKey(true)\n .autoID(false)\n .build());\n\nschema.addField(AddFieldReq.builder()\n .fieldName(\"vector\")\n .dataType(DataType.FloatVector)\n .dimension(5)\n .build());\n \nschema.addField(AddFieldReq.builder()\n .fieldName(\"color\")\n .dataType(DataType.VarChar)\n .maxLength(512)\n .isPartitionKey(true)\n .build());\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\nasync function main() {\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address}); \n\n// 2. 
Create a collection\n// 2.1 Define fields\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n auto_id: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n {\n name: \"color\",\n data_type: DataType.VarChar,\n max_length: 512,\n is_partition_key: true\n }\n]\n","index_params = MilvusClient.prepare_index_params()\n\nindex_params.add_index(\n field_name=\"id\",\n index_type=\"STL_SORT\"\n)\n\nindex_params.add_index(\n field_name=\"color\",\n index_type=\"Trie\"\n)\n\nindex_params.add_index(\n field_name=\"vector\",\n index_type=\"IVF_FLAT\",\n metric_type=\"L2\",\n params={\"nlist\": 1024}\n)\n","// 2.3 Prepare index parameters\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.IP)\n .extraParams(Map.of(\"nlist\", 1024))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n","// 2.2 Prepare index parameters\nconst index_params = [{\n field_name: \"color\",\n index_type: \"Trie\"\n},{\n field_name: \"id\",\n index_type: \"STL_SORT\"\n},{\n field_name: \"vector\",\n index_type: \"IVF_FLAT\",\n metric_type: \"IP\",\n params: { nlist: 1024}\n}]\n","client.create_collection(\n collection_name=\"test_collection\",\n schema=schema,\n index_params=index_params\n)\n","// 2.4 Create a collection with schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n .collectionName(\"test_collection\")\n .collectionSchema(schema)\n .indexParams(indexParams) \n .build();\n\nclient.createCollection(customizedSetupReq);\n","// 2.3 Create a collection with fields and index parameters\nres = await client.createCollection({\n collection_name: \"test_collection\",\n fields: fields, \n index_params: index_params,\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n//\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n","// 3. Insert randomly generated vectors\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n int current_tag = rand.nextInt(8999) + 1000;\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"tag\", current_tag);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nSystem.out.println(JSONObject.toJSON(data.get(0))); \n","// 3. 
Insert randomly generated vectors \nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\nvar data = []\n\nfor (let i = 0; i < 1000; i++) {\n const current_color = colors[Math.floor(Math.random() * colors.length)]\n const current_tag = Math.floor(Math.random() * 8999 + 1000)\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n tag: current_tag,\n color_tag: `${current_color}_${current_tag}`\n })\n}\n\nconsole.log(data[0])\n","{\n id: 0,\n vector: [\n 0.1275656405044483,\n 0.47417858592773277,\n 0.13858264437643286,\n 0.2390904907020377,\n 0.8447862593689635\n ],\n color: 'blue',\n tag: 2064,\n color_tag: 'blue_2064'\n}\n","res = client.insert(\n collection_name=\"test_collection\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n","// 3.1 Insert data into the collection\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"test_collection\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n","res = await client.insert({\n collection_name: \"test_collection\",\n data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n","# 4. Search with partition key\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\n\nres = client.search(\n collection_name=\"test_collection\",\n data=query_vectors,\n filter=\"color == 'green'\",\n search_params={\"metric_type\": \"L2\", \"params\": {\"nprobe\": 10}},\n output_fields=[\"id\", \"color_tag\"],\n limit=3\n)\n\nprint(res)\n\n# Output\n#\n# [\n# [\n# {\n# \"id\": 970,\n# \"distance\": 0.5770174264907837,\n# \"entity\": {\n# \"id\": 970,\n# \"color_tag\": \"green_9828\"\n# }\n# },\n# {\n# \"id\": 115,\n# \"distance\": 0.6898155808448792,\n# \"entity\": {\n# \"id\": 115,\n# \"color_tag\": \"green_4073\"\n# }\n# },\n# {\n# \"id\": 899,\n# \"distance\": 0.7028976678848267,\n# \"entity\": {\n# \"id\": 899,\n# \"color_tag\": \"green_9897\"\n# }\n# }\n# ]\n# ]\n","// 4. Search with partition key\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"test_collection\")\n .data(query_vectors)\n .filter(\"color == \\\"green\\\"\")\n .topK(3)\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp)); \n\n// Output:\n// {\"searchResults\": [[\n// {\n// \"distance\": 1.0586997,\n// \"id\": 414,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.981384,\n// \"id\": 293,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.9548756,\n// \"id\": 325,\n// \"entity\": {}\n// }\n// ]]}\n","// 4. 
Search with partition key\nconst query_vectors = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"test_collection\",\n data: query_vectors,\n filter: \"color == 'green'\",\n output_fields: [\"color_tag\"],\n limit: 3\n})\n\nconsole.log(res.results)\n\n// Output\n// \n// [\n// { score: 2.402090549468994, id: '135', color_tag: 'green_2694' },\n// { score: 2.3938629627227783, id: '326', color_tag: 'green_7104' },\n// { score: 2.3235254287719727, id: '801', color_tag: 'green_3162' }\n// ]\n// \n"],"headingContent":"Use Partition Key","anchorList":[{"label":"Utilizar a chave de partição","href":"Use-Partition-Key","type":1,"isActive":false},{"label":"Visão geral","href":"Overview","type":2,"isActive":false},{"label":"Ativar a chave de partição","href":"Enable-partition-key","type":2,"isActive":false},{"label":"Listar partições","href":"List-partitions","type":2,"isActive":false},{"label":"Inserir dados","href":"Insert-data","type":2,"isActive":false},{"label":"Utilizar a chave de partição","href":"Use-partition-key","type":2,"isActive":false},{"label":"Casos de uso típicos","href":"Typical-use-cases","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/pt/userGuide/use-partition-key.md b/localization/v2.4.x/site/pt/userGuide/use-partition-key.md index 990c59b03..58249d34b 100644 --- a/localization/v2.4.x/site/pt/userGuide/use-partition-key.md +++ b/localization/v2.4.x/site/pt/userGuide/use-partition-key.md @@ -1,9 +1,8 @@ --- id: use-partition-key.md -title: Use Partition Key -summary: '' +title: Utilizar a chave de partição --- -

    Use Partition Key

    This guide walks you through using the partition key to accelerate data retrieval from your collection.

    -

    Overview

    Este guia orienta-o na utilização da chave de partição para acelerar a recuperação de dados da sua coleção.

    +

    Visão geral

    You can set a particular field in a collection as the partition key so that Milvus distributes incoming entities into different partitions according to their respective partition values in this field. This allows entities with the same key value to be grouped in a partition, accelerating search performance by avoiding the need to scan irrelevant partitions when filtering by the key field. When compared to traditional filtering methods, the partition key can greatly enhance query performance.

    -

You can use the partition key to implement multi-tenancy. For details, read Multi-tenancy.

    -

    Enable partition key

    É possível definir um determinado campo numa coleção como chave de partição para que o Milvus distribua as entidades recebidas em diferentes partições de acordo com os respectivos valores de partição neste campo. Isto permite que entidades com o mesmo valor chave sejam agrupadas numa partição, acelerando o desempenho da pesquisa ao evitar a necessidade de pesquisar partições irrelevantes ao filtrar pelo campo chave. Quando comparada com os métodos de filtragem tradicionais, a chave de partição pode melhorar significativamente o desempenho da consulta.

    +

    Pode utilizar a chave de partição para implementar o multi-tenancy. Para obter detalhes sobre multilocação, leia Multilocação para saber mais.
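To make the pruning effect concrete, here is a minimal sketch (collection name and fields follow the example below; the client URI is an assumption). A filter on the partition-key field only touches the partitions that can hold matching keys, while a filter on an ordinary scalar field must scan all partitions:

    from pymilvus import MilvusClient

    client = MilvusClient(uri="http://localhost:19530")  # assumed address
    query = [[0.1, 0.2, 0.3, 0.4, 0.5]]

    # Filter on the partition-key field ("color"): Milvus only scans
    # the partitions that can contain entities with color == 'green'.
    client.search(
        collection_name="test_collection",
        data=query,
        filter="color == 'green'",
        limit=3,
    )

    # Filter on a non-key field ("tag"): every partition is scanned.
    client.search(
        collection_name="test_collection",
        data=query,
        filter="tag > 5000",
        limit=3,
    )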

    +

    Ativar a chave de partição

    To set a field as the partition key, specify partition_key_field when creating a collection schema.

    -

    In the example code below, num_partitions determines the number of partitions that will be created. By default, it is set to 16. We recommend you retain the default value.

    +

    Para definir um campo como chave de partição, especifique partition_key_field ao criar um esquema de coleção.

    +

    No código de exemplo abaixo, num_partitions determina o número de partições que serão criadas. Por predefinição, está definido para 64. Recomendamos que mantenha o valor predefinido.

    -

    For more information on parameters, refer to MilvusClient, create_schema(), and add_field() in the SDK reference.

    +

    Para obter mais informações sobre os parâmetros, consulte MilvusClient, create_schema(), e add_field() na referência do SDK.

    -

    For more information on parameters, refer to MilvusClientV2, createSchema(), and addField() in the SDK reference.

    +

    Para obter mais informações sobre os parâmetros, consulte MilvusClientV2, createSchema(), e addField() na referência do SDK.

    -

    For more information on parameters, refer to MilvusClient and createCollection() in the SDK reference.

    +

    Para mais informações sobre os parâmetros, consulte MilvusClient e createCollection() na referência do SDK.

    + Python Java Node.js
    import random, time
     from pymilvus import connections, MilvusClient, DataType
     
    @@ -82,7 +78,7 @@ schema = MilvusClient.create_schema(
         auto_id=False,
         enable_dynamic_field=True,
         partition_key_field="color",
    -    num_partitions=16 # Number of partitions. Defaults to 16.
    +    num_partitions=64 # Number of partitions. Defaults to 64.
     )
     
     schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
    @@ -161,12 +157,9 @@ client = new M
         }
     ]
     
    -

    After you have defined the fields, set up the index parameters.

    +

    Depois de ter definido os campos, configure os parâmetros de índice.

    + Python Java Node.js
    index_params = MilvusClient.prepare_index_params()
     
     index_params.add_index(
    @@ -211,12 +204,9 @@ indexParams.add(indexParamForVectorFie
         params: { nlist: 1024}
     }]
     
    -

    Finally, you can create a collection.

    +

    Por fim, pode criar uma coleção.

    + Python Java Node.js
    client.create_collection(
         collection_name="test_collection",
         schema=schema,
    @@ -246,7 +236,7 @@ res = await client.// Success
     //
     
    -

    List partitions

    Once a field of a collection is used as the partition key, Milvus creates the specified number of partitions and manages them on your behalf. Therefore, you cannot manipulate the partitions in this collection anymore.

    -

The following snippet demonstrates that there are 64 partitions in a collection once one of its fields is used as the partition key.

    -

    Insert data

    Quando um campo de uma coleção é utilizado como chave de partição, o Milvus cria o número especificado de partições e gere-as em seu nome. Por conseguinte, já não pode manipular as partições desta coleção.

    +

    O seguinte excerto demonstra a existência de 64 partições numa coleção quando um dos seus campos é utilizado como chave de partição.
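A minimal sketch of that check with pymilvus, assuming the `test_collection` created above and a local server:

    from pymilvus import MilvusClient

    client = MilvusClient(uri="http://localhost:19530")  # assumed address

    # Milvus created and manages the partitions itself, so listing them
    # should show the configured number (64 here).
    partitions = client.list_partitions(collection_name="test_collection")
    print(len(partitions))  # 64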

    +

    Inserir dados

    Once the collection is ready, start inserting data as follows:

    -

    Prepare data

    +

    Quando a coleção estiver pronta, comece a inserir os dados da seguinte forma:

    +

    Preparar dados

    # 3. Insert randomly generated vectors 
     colors = ["green", "blue", "yellow", "red", "black", "white", "purple", "pink", "orange", "brown", "grey"]
     data = []
    @@ -338,7 +325,7 @@ data = []
     
     console.log(data[0])
     
    -

    You can view the structure of the generated data by checking its first entry.

    +

    Pode ver a estrutura dos dados gerados verificando a sua primeira entrada.

    {
         id: 0,
         vector: [
    @@ -353,20 +340,17 @@ data = []
         color_tag: 'blue_2064'
     }
     
    -

    Insert data

    -

    Use the insert() method to insert the data into the collection.

    +

    Inserir dados

    +

    Utilize o método insert() para inserir os dados na coleção.

    -

    Use the insert() method to insert the data into the collection.

    +

    Utilize o método insert() para inserir os dados na coleção.

    -

    Use the insert() method to insert the data into the collection.

    +

    Utilizar o método insert() para inserir os dados na coleção.

    + Python Java Node.js
    res = client.insert(
         collection_name="test_collection",
         data=data
    @@ -418,7 +402,7 @@ data = []
     // 1000
     // 
     
    -

    Use partition key

    Once you have indexed and loaded the collection as well as inserted data, you can conduct a similarity search using the partition key.

    +

    Depois de ter indexado e carregado a coleção, bem como os dados inseridos, pode realizar uma pesquisa de semelhança utilizando a chave de partição.

    -

    For more information on parameters, refer to search() in the SDK reference.

    +

    Para mais informações sobre parâmetros, consulte search() na referência do SDK.

    -

    For more information on parameters, refer to search() in the SDK reference.

    +

    Para obter mais informações sobre os parâmetros, consulte search() na referência do SDK.

    -

    For more information on parameters, refer to search() in the SDK reference.

    +

    Para obter mais informações sobre os parâmetros, consultar search() na referência do SDK.

    -

    notes

    -

    To conduct a similarity search using the partition key, you should include either of the following in the boolean expression of the search request:

    +

    notas

    +

    Para efetuar uma pesquisa de similaridade utilizando a chave de partição, deve incluir um dos seguintes itens na expressão booleana do pedido de pesquisa:

    • expr='<partition_key>=="xxxx"'

    • expr='<partition_key> in ["xxx", "xxx"]'

    -

Replace <partition_key> with the name of the field that is designated as the partition key.

    +

    Substituir <partition_key> pelo nome do campo que é designado como a chave de partição.

    + Python Java Node.js
    # 4. Search with partition key
     query_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]
     
    @@ -557,7 +538,7 @@ res = await client.// ]
     // 
     
    -

    Typical use cases

    You can utilize the partition key feature to achieve better search performance and enable multi-tenancy. This can be done by assigning a tenant-specific value as the partition key field for each entity. When searching or querying the collection, you can filter entities by the tenant-specific value by including the partition key field in the boolean expression. This approach ensures data isolation by tenants and avoids scanning unnecessary partitions.

    +

    Pode utilizar a funcionalidade de chave de partição para obter um melhor desempenho de pesquisa e permitir o multilocatário. Isso pode ser feito atribuindo um valor específico do locatário como o campo de chave de partição para cada entidade. Ao pesquisar ou consultar a coleção, é possível filtrar as entidades pelo valor específico do locatário, incluindo o campo da chave de partição na expressão booleana. Essa abordagem garante o isolamento de dados por locatários e evita a varredura de partições desnecessárias.
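A sketch of that multi-tenancy pattern, assuming a hypothetical collection whose partition-key field is `tenant_id` (the collection name, tenant IDs, and vectors are illustrative):

    from pymilvus import MilvusClient

    client = MilvusClient(uri="http://localhost:19530")  # assumed address

    # Each entity carries its owner's ID in the partition-key field,
    # e.g. {"id": 1, "vector": [...], "tenant_id": "tenant_42"}.
    res = client.search(
        collection_name="multi_tenant_collection",  # hypothetical name
        data=[[0.12, -0.45, 0.33, 0.08, 0.91]],
        # Scoping the filter to one tenant keeps the search inside the
        # partitions holding that tenant's data, isolating tenants.
        filter='tenant_id == "tenant_42"',
        output_fields=["tenant_id"],
        limit=3,
    )
    print(res)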

    diff --git a/localization/v2.4.x/site/zh/adminGuide/clouds/aws/s3.json b/localization/v2.4.x/site/zh/adminGuide/clouds/aws/s3.json index 3851cc820..8fc8f8af0 100644 --- a/localization/v2.4.x/site/zh/adminGuide/clouds/aws/s3.json +++ b/localization/v2.4.x/site/zh/adminGuide/clouds/aws/s3.json @@ -1,54 +1 @@ -{ - "codeList": [ - "milvus_bucket_name=\"milvus-bucket-$(openssl rand -hex 12)\"\n\naws s3api create-bucket --bucket \"$milvus_bucket_name\" --region 'us-east-2' --acl private --object-ownership ObjectWriter --create-bucket-configuration LocationConstraint='us-east-2'\n\n\n# Output\n#\n# \"Location\": \"http://milvus-bucket-039dd013c0712f085d60e21f.s3.amazonaws.com/\"\n", - "echo '{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n}' > milvus-s3-policy.json\n\naws iam create-policy --policy-name MilvusS3ReadWrite --policy-document file://milvus-s3-policy.json\n\n\n# Get the ARN from the command output as follows:\n# {\n# \"Policy\": {\n# \"PolicyName\": \"MilvusS3ReadWrite\",\n# \"PolicyId\": \"AN5QQVVPM1BVTFlBNkdZT\",\n# \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n# \"Path\": \"/\",\n# \"DefaultVersionId\": \"v1\",\n# \"AttachmentCount\": 0,\n# \"PermissionsBoundaryUsageCount\": 0,\n# \"IsAttachable\": true,\n# \"CreateDate\": \"2023-11-16T06:00:01+00:00\",\n# \"UpdateDate\": \"2023-11-16T06:00:01+00:00\"\n# }\n# } \n", - "eksctl create iamserviceaccount --name milvus-s3-access-sa --namespace milvus --cluster milvus-eks-cluster --role-name milvus-s3-access-sa \\\n --attach-policy-arn arn:aws:iam:::policy/MilvusS3ReadWrite --approve\n", - "aws iam get-role --role-name milvus-s3-access-sa --query Role.AssumeRolePolicyDocument\n# An example output is as follows\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Federated\": \"arn:aws:iam::111122223333:oidc-provider/oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE\"\n },\n \"Action\": \"sts:AssumeRoleWithWebIdentity\",\n \"Condition\": {\n \"StringEquals\": {\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:sub\": \"system:serviceaccount:default:my-service-account\",\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:aud\": \"sts.amazonaws.com\"\n }\n }\n }\n ]\n}\n", - "aws iam list-attached-role-policies --role-name milvus-s3-access-sa --query 'AttachedPolicies[].PolicyArn' --output text\n# An example output is as follows\narn:aws:iam::12345678901:policy/MilvusS3ReadWrite\n", - "export policy_arn='arn:aws:iam::12345678901:policy/MilvusS3ReadWrite'\naws iam get-policy --policy-arn $policy_arn\n# An example output is as follows\n{\n \"Policy\": {\n \"PolicyName\": \"MilvusS3ReadWrite\",\n \"PolicyId\": \"EXAMPLEBIOWGLDEXAMPLE\",\n \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n \"Path\": \"/\",\n \"DefaultVersionId\": \"v2\",\n [...]\n }\n}\n", - "aws iam get-policy-version --policy-arn $policy_arn --version-id v2\n# An example output is as follows\n{\n \"PolicyVersion\": {\n \"Document\": {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n },\n [...]\n }\n}\n", - 
"kubectl describe serviceaccount milvus-s3-access-sa -n milvus\n# An example output is as follows\nName: milvus-s3-access-sa\nNamespace: milvus\nLabels: app.kubernetes.io/managed-by=eksctl\nAnnotations: eks.amazonaws.com/role-arn: arn:aws:iam::12345678901:role/milvus-s3-access-sa\n[...]\n", - "helm repo add milvus https://zilliztech.github.io/milvus-helm/\nhelm repo update\n", - "cluster:\n enabled: true\n\nservice:\n type: LoadBalancer\n port: 19530\n annotations: \n service.beta.kubernetes.io/aws-load-balancer-type: external\n service.beta.kubernetes.io/aws-load-balancer-name: milvus-service\n service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing\n service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip\n\nserviceAccount:\n create: false\n name: milvus-s3-access-sa\n\nminio:\n enabled: false\n\nexternalS3:\n enabled: true\n host: \"s3.us-east-2.amazonaws.com\"\n port: \"443\"\n useSSL: true\n bucketName: \"\"\n useIAM: true\n cloudProvider: \"aws\"\n iamEndpoint: \"\"\n\nrootCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: 1\n memory: 2Gi\n\nindexCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nqueryCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\ndataCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nproxy:\n replicas: 2\n resources: \n limits:\n cpu: 1\n memory: 2Gi \n", - "helm upgrade --install milvus-demo milvus/milvus -n milvus -f milvus.yaml\n" - ], - "headingContent": "", - "anchorList": [ - { - "label": "按 IAM 角色配置 S3 访问权限", - "href": "Configure-S3-Access-by-IAM-Role", - "type": 1, - "isActive": false - }, - { - "label": "开始之前", - "href": "Before-you-start", - "type": 2, - "isActive": false - }, - { - "label": "将 IAM 角色与 Kubernetes 服务账户关联起来", - "href": "Associate-an-IAM-role-with-a-Kubernetes-service-account", - "type": 2, - "isActive": false - }, - { - "label": "验证角色和服务帐户设置", - "href": "Verify-the-role-and-service-account-setup", - "type": 2, - "isActive": false - }, - { - "label": "部署Milvus", - "href": "Deploy-Milvus", - "type": 2, - "isActive": false - }, - { - "label": "验证安装", - "href": "Verify-the-installation", - "type": 2, - "isActive": false - } - ] -} +{"codeList":["milvus_bucket_name=\"milvus-bucket-$(openssl rand -hex 12)\"\n\naws s3api create-bucket --bucket \"$milvus_bucket_name\" --region 'us-east-2' --acl private --object-ownership ObjectWriter --create-bucket-configuration LocationConstraint='us-east-2'\n\n\n# Output\n#\n# \"Location\": \"http://milvus-bucket-039dd013c0712f085d60e21f.s3.amazonaws.com/\"\n","echo '{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:ListBucket\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\"\n ]\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:DeleteObject\",\n \"s3:GetObject\",\n \"s3:PutObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n}' > milvus-s3-policy.json\n\naws iam create-policy --policy-name MilvusS3ReadWrite --policy-document file://milvus-s3-policy.json\n\n\n# Get the ARN from the command output as follows:\n# {\n# \"Policy\": {\n# \"PolicyName\": \"MilvusS3ReadWrite\",\n# \"PolicyId\": \"AN5QQVVPM1BVTFlBNkdZT\",\n# \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n# \"Path\": \"/\",\n# \"DefaultVersionId\": \"v1\",\n# \"AttachmentCount\": 0,\n# 
\"PermissionsBoundaryUsageCount\": 0,\n# \"IsAttachable\": true,\n# \"CreateDate\": \"2023-11-16T06:00:01+00:00\",\n# \"UpdateDate\": \"2023-11-16T06:00:01+00:00\"\n# }\n# } \n","eksctl create iamserviceaccount --name milvus-s3-access-sa --namespace milvus --cluster milvus-eks-cluster --role-name milvus-s3-access-sa \\\n --attach-policy-arn arn:aws:iam:::policy/MilvusS3ReadWrite --approve\n","aws iam get-role --role-name milvus-s3-access-sa --query Role.AssumeRolePolicyDocument\n# An example output is as follows\n{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Federated\": \"arn:aws:iam::111122223333:oidc-provider/oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE\"\n },\n \"Action\": \"sts:AssumeRoleWithWebIdentity\",\n \"Condition\": {\n \"StringEquals\": {\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:sub\": \"system:serviceaccount:default:my-service-account\",\n \"oidc.eks.region-code.amazonaws.com/id/EXAMPLED539D4633E53DE1B71EXAMPLE:aud\": \"sts.amazonaws.com\"\n }\n }\n }\n ]\n}\n","aws iam list-attached-role-policies --role-name milvus-s3-access-sa --query 'AttachedPolicies[].PolicyArn' --output text\n# An example output is as follows\narn:aws:iam::12345678901:policy/MilvusS3ReadWrite\n","export policy_arn='arn:aws:iam::12345678901:policy/MilvusS3ReadWrite'\naws iam get-policy --policy-arn $policy_arn\n# An example output is as follows\n{\n \"Policy\": {\n \"PolicyName\": \"MilvusS3ReadWrite\",\n \"PolicyId\": \"EXAMPLEBIOWGLDEXAMPLE\",\n \"Arn\": \"arn:aws:iam::12345678901:policy/MilvusS3ReadWrite\",\n \"Path\": \"/\",\n \"DefaultVersionId\": \"v2\",\n [...]\n }\n}\n","aws iam get-policy-version --policy-arn $policy_arn --version-id v2\n# An example output is as follows\n{\n \"PolicyVersion\": {\n \"Document\": {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:GetObject\",\n \"s3:PutObject\",\n \"s3:ListBucket\",\n \"s3:DeleteObject\"\n ],\n \"Resource\": [\n \"arn:aws:s3:::\",\n \"arn:aws:s3:::/*\"\n ]\n }\n ]\n },\n [...]\n }\n}\n","kubectl describe serviceaccount milvus-s3-access-sa -n milvus\n# An example output is as follows\nName: milvus-s3-access-sa\nNamespace: milvus\nLabels: app.kubernetes.io/managed-by=eksctl\nAnnotations: eks.amazonaws.com/role-arn: arn:aws:iam::12345678901:role/milvus-s3-access-sa\n[...]\n","helm repo add milvus https://zilliztech.github.io/milvus-helm/\nhelm repo update\n","cluster:\n enabled: true\n\nservice:\n type: LoadBalancer\n port: 19530\n annotations: \n service.beta.kubernetes.io/aws-load-balancer-type: external\n service.beta.kubernetes.io/aws-load-balancer-name: milvus-service\n service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing\n service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip\n\nserviceAccount:\n create: false\n name: milvus-s3-access-sa\n\nminio:\n enabled: false\n\nexternalS3:\n enabled: true\n host: \"s3.us-east-2.amazonaws.com\"\n port: \"443\"\n useSSL: true\n bucketName: \"\"\n useIAM: true\n cloudProvider: \"aws\"\n iamEndpoint: \"\"\n\nrootCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: 1\n memory: 2Gi\n\nindexCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nqueryCoordinator:\n replicas: 2\n activeStandby:\n enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\ndataCoordinator:\n replicas: 2\n activeStandby:\n 
enabled: true\n resources: \n limits:\n cpu: \"0.5\"\n memory: 0.5Gi\n\nproxy:\n replicas: 2\n resources: \n limits:\n cpu: 1\n memory: 2Gi \n","helm upgrade --install milvus-demo milvus/milvus -n milvus -f milvus.yaml\n"],"headingContent":"Configure S3 Access by IAM Role","anchorList":[{"label":"按 IAM 角色配置 S3 访问权限","href":"Configure-S3-Access-by-IAM-Role","type":1,"isActive":false},{"label":"开始之前","href":"Before-you-start","type":2,"isActive":false},{"label":"将 IAM 角色与 Kubernetes 服务账户关联","href":"Associate-an-IAM-role-with-a-Kubernetes-service-account","type":2,"isActive":false},{"label":"验证角色和服务账户设置","href":"Verify-the-role-and-service-account-setup","type":2,"isActive":false},{"label":"部署 Milvus","href":"Deploy-Milvus","type":2,"isActive":false},{"label":"验证安装","href":"Verify-the-installation","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/adminGuide/clouds/aws/s3.md b/localization/v2.4.x/site/zh/adminGuide/clouds/aws/s3.md index 0d651416a..83ed5b1d1 100644 --- a/localization/v2.4.x/site/zh/adminGuide/clouds/aws/s3.md +++ b/localization/v2.4.x/site/zh/adminGuide/clouds/aws/s3.md @@ -19,7 +19,7 @@ summary: 了解如何使用 IAM 角色配置 s3。 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

    本主题介绍如何在使用 helm 安装 Milvus 时按 IAM 角色配置 s3 访问。 更多详情,请参阅IAM 角色

    +

    本主题介绍如何在使用 Helm 安装 Milvus 时,按 IAM 角色配置 s3 访问。 更多详情,请参阅IAM 角色

    开始之前

    将 IAM 角色与 Kubernetes 服务账户关联

    • 创建 AWS S3 存储桶。

      -

      阅读 "桶命名规则",并在命名 AWS S3 桶时遵守命名规则。

      +

      阅读 "桶命名规则",并在命名 AWS S3 桶时遵守命名规则。

      milvus_bucket_name="milvus-bucket-$(openssl rand -hex 12)"
       
       aws s3api create-bucket --bucket "$milvus_bucket_name" --region 'us-east-2' --acl private  --object-ownership ObjectWriter --create-bucket-configuration LocationConstraint='us-east-2'
      @@ -73,13 +73,20 @@ aws s3api create-bucket --bucket "
        -
• 准备好 Milvus 配置文件milvus.yaml ,并将<bucket-name> 替换为上文创建的存储桶名称。
      • +
• 准备好 Milvus 配置文件milvus.yaml ,并将<bucket-name> 替换为上面创建的存储桶名称。
      cluster:
         enabled: true
      diff --git a/localization/v2.4.x/site/zh/adminGuide/clouds/openshift/openshift.json b/localization/v2.4.x/site/zh/adminGuide/clouds/openshift/openshift.json
      index dc2fa7eb6..e152655a9 100644
      --- a/localization/v2.4.x/site/zh/adminGuide/clouds/openshift/openshift.json
      +++ b/localization/v2.4.x/site/zh/adminGuide/clouds/openshift/openshift.json
      @@ -1,53 +1 @@
      -{
      -	"codeList": [
      -		"# milvus-operator-certificate.yaml\napiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\nname: milvus-operator-serving-cert\nnamespace: milvus-operator\nspec:\ndnsNames:\n- milvus-operator-webhook-service.milvus-operator.svc\n- milvus-operator-webhook-service.milvus-operator.svc.cluster.local\nissuerRef:\n    kind: Issuer\n    name: milvus-operator-selfsigned-issuer\nsecretName: milvus-operator-webhook-cert\n---\napiVersion: cert-manager.io/v1\nkind: Issuer\nmetadata:\nname: milvus-operator-selfsigned-issuer\nnamespace: milvus-operator\nspec:\nselfSigned: {}\n",
      -		"kubectl apply -f milvus-operator-certificate.yaml\n",
      -		"helm repo add milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update milvus-operator\n",
      -		"helm -n milvus-operator upgrade --install --create-namespace milvus-operator milvus-operator/milvus-operator\n"
      -	],
      -	"headingContent": "",
      -	"anchorList": [
      -		{
      -			"label": "在 OpenShift 上部署 Milvus 集群",
      -			"href": "Deploy-a-Milvus-Cluster-on-OpenShift",
      -			"type": 1,
      -			"isActive": false
      -		},
      -		{
      -			"label": "先决条件",
      -			"href": "Prerequisites",
      -			"type": 2,
      -			"isActive": false
      -		},
      -		{
      -			"label": "步骤 1:安装证书管理器",
      -			"href": "Step-1-Install-Cert-Manager",
      -			"type": 2,
      -			"isActive": false
      -		},
      -		{
      -			"label": "步骤 2:为 Milvus Operator签发自签名证书",
      -			"href": "Step-2-Issue-a-Self-Signed-Certificate-for-Milvus-Operator",
      -			"type": 2,
      -			"isActive": false
      -		},
      -		{
      -			"label": "步骤 3:安装 Milvus 操作器",
      -			"href": "Step-3-Install-Milvus-Operator",
      -			"type": 2,
      -			"isActive": false
      -		},
      -		{
      -			"label": "步骤 4:部署 Milvus",
      -			"href": "Step-4-Deploy-Milvus",
      -			"type": 2,
      -			"isActive": false
      -		},
      -		{
      -			"label": "下一步行动",
      -			"href": "Whats-Next",
      -			"type": 2,
      -			"isActive": false
      -		}
      -	]
      -}
      +{"codeList":["# milvus-operator-certificate.yaml\napiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\n  name: milvus-operator-serving-cert\n  namespace: milvus-operator\nspec:\n  dnsNames:\n  - milvus-operator-webhook-service.milvus-operator.svc\n  - milvus-operator-webhook-service.milvus-operator.svc.cluster.local\n  issuerRef:\n    kind: Issuer\n    name: milvus-operator-selfsigned-issuer\n  secretName: milvus-operator-webhook-cert\n---\napiVersion: cert-manager.io/v1\nkind: Issuer\nmetadata:\n  name: milvus-operator-selfsigned-issuer\n  namespace: milvus-operator\nspec:\n  selfSigned: {}\n","kubectl apply -f milvus-operator-certificate.yaml\n","helm repo add milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update milvus-operator\n","helm -n milvus-operator upgrade --install --create-namespace milvus-operator milvus-operator/milvus-operator\n"],"headingContent":"Deploy a Milvus Cluster on OpenShift","anchorList":[{"label":"在 OpenShift 上部署 Milvus 群集","href":"Deploy-a-Milvus-Cluster-on-OpenShift","type":1,"isActive":false},{"label":"先决条件","href":"Prerequisites","type":2,"isActive":false},{"label":"第 1 步:安装 Cert Manager","href":"Step-1-Install-Cert-Manager","type":2,"isActive":false},{"label":"步骤 2:为 Milvus Operator 签发自签名证书","href":"Step-2-Issue-a-Self-Signed-Certificate-for-Milvus-Operator","type":2,"isActive":false},{"label":"第 3 步:安装 Milvus 操作符","href":"Step-3-Install-Milvus-Operator","type":2,"isActive":false},{"label":"第 4 步:部署 Milvus","href":"Step-4-Deploy-Milvus","type":2,"isActive":false},{"label":"下一步","href":"Whats-Next","type":2,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/zh/adminGuide/clouds/openshift/openshift.md b/localization/v2.4.x/site/zh/adminGuide/clouds/openshift/openshift.md
      index 4f547ecfd..fd6527efd 100644
      --- a/localization/v2.4.x/site/zh/adminGuide/clouds/openshift/openshift.md
      +++ b/localization/v2.4.x/site/zh/adminGuide/clouds/openshift/openshift.md
      @@ -1,6 +1,6 @@
       ---
       id: openshift.md
      -title: 在 OpenShift 上部署 Milvus 集群
      +title: 在 OpenShift 上部署 Milvus 群集
       related_key: cluster
       summary: 了解如何在 OpenShift 上部署 Milvus 集群。
       ---
      @@ -19,7 +19,7 @@ summary: 了解如何在 OpenShift 上部署 Milvus 集群。
                 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
               >
             
      -    

      本主题逐步介绍如何在 OpenShift 上部署 Milvus。

      +

      本主题将逐步介绍如何在 OpenShift 上部署 Milvus。

      先决条件

      在开始部署流程之前,请确保您拥有

      +

      在开始部署过程之前,请确保您拥有

      • 运行中的 OpenShift 群集。
      • 具有足够权限的 OpenShift 群集访问权限(cluster-admin 角色或同等权限)。
      • @@ -74,7 +74,7 @@ summary: 了解如何在 OpenShift 上部署 Milvus 集群。 证书管理器-2

        -

        第 2 步:为 Milvus Operator 签发自签名证书

    • 应用该文件:

      kubectl apply -f milvus-operator-certificate.yaml
       
    • -

      第 3 步:安装 Milvus Operator

      本主题介绍如何使用 Docker Compose 配置 Milvus 组件及其第三方依赖项。

      -在当前版本中,所有参数仅在 Milvus 重新启动后生效。
      +在当前版本中,所有参数只有在 Milvus 重新启动后才会生效。

      下载配置文件

      直接下载 milvus.yaml 或使用以下命令下载

      -
      $ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.9/configs/milvus.yaml
      +    

      直接下载 milvus.yaml 或使用以下命令下载

      +
      $ wget https://raw.githubusercontent.com/milvus-io/milvus/v2.4.13-hotfix/configs/milvus.yaml
       

      修改配置文件

      通过调整milvus.yaml 中的相应参数,配置 Milvus 实例,使其适合你的应用场景。

      +

      通过调整milvus.yaml 中的相应参数,配置你的 Milvus 实例,以适应你的应用场景。

      有关各参数的详细信息,请查看以下链接。

      排序方式

@@ -211,10 +211,10 @@ title: 使用 Docker Compose 配置 Milvus

    -

      下载 Milvus单机版的安装文件,并将其保存为docker-compose.yml

      +

下载 Milvus Standalone 的安装文件,并将其保存为docker-compose.yml 。

      也可以直接运行以下命令。

      # For Milvus standalone
      -$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml
      +$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml
       

      修改安装文件

      -

      在上述示例中,我们假设 NVMe 磁盘为/dev/nvme1n1 。您需要根据具体配置修改脚本。

@@ -124,7 +122,6 @@ mkfs.xfs /dev/md0
 echo '/dev/md0 /var/lib/kubelet xfs defaults 0 0' >> /etc/fstab
 mount -a
    -

    在上述示例中,我们假设 NVMe 磁盘为/dev/nvme0n1/dev/nvme1n1 。您需要修改脚本以匹配您的特定配置。

@@ -137,7 +134,7 @@ mkfs.xfs /dev/nvme0n1
 mount -a

 mkdir -p /mnt/data/kubelet /mnt/data/containerd /mnt/data/log/pods
-mkdir -p /var/lib/kubelet /var/lib/containerd /var/log/pods
+mkdir -p /var/lib/kubelet /var/lib/containerd /var/log/pods

 echo '/mnt/data/kubelet /var/lib/kubelet none defaults,bind 0 0' >> /etc/fstab
 echo '/mnt/data/containerd /var/lib/containerd none defaults,bind 0 0' >> /etc/fstab
@@ -146,7 +143,6 @@ mount -a
 echo "nvme init end..."
    -

    在上述示例中,我们假设 NVMe 磁盘为/dev/nvme0n1 。您需要修改脚本以符合您的具体配置。

@@ -219,13 +215,12 @@ apt-get install fio -y
 cd /data
 # write 10GB
-fio -direct=1-iodepth=128 -rw=randwrite -ioengine=libaio -bs=4K -size=10G -numjobs=10 -runtime=600 -group_reporting -filename=test -name=Rand_Write_IOPS_Test
+fio -direct=1 -iodepth=128 -rw=randwrite -ioengine=libaio -bs=4K -size=10G -numjobs=10 -runtime=600 -group_reporting -filename=test -name=Rand_Write_IOPS_Test

 # verify the read speed
 # compare with the disk performance indicators provided by various cloud providers.
-fio --filename=test --direct=1 --rw=randread --bs=4k --ioengine=libaio --iodepth=64 --runtime=120 --numjobs=128 --time_based --group_reporting --name=iops-test-job --eta-newline=1 --readonly
+fio --filename=test --direct=1 --rw=randread --bs=4k --ioengine=libaio --iodepth=64 --runtime=120 --numjobs=128 --time_based --group_reporting --name=iops-test-job --eta-newline=1 --readonly
    -

    输出结果应如下所示:

    Jobs: 128 (f=128): [r(128)][100.0%][r=1458MiB/s][r=373k IOPS][eta 00m:00s]
     iops-test-job: (groupid=0, jobs=128): err= 0: pid=768: Mon Jun 24 09:35:06 2024
    @@ -269,7 +264,7 @@ IO depths    : 1=0.1<
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

    一旦验证结果令人满意,就可以按以下步骤部署 Milvus Distributed:

    +

    验证结果令人满意后,就可以按以下步骤部署 Milvus Distributed:

    使用 Helm 部署 Milvus Distributed 的提示

    QueryNode pod 默认使用 NVMe 磁盘作为 EmptyDir 卷。建议在 QueryNode pod 中将 NVMe 磁盘挂载到/var/lib/milvus/data ,以确保最佳性能。

    有关如何使用 Helm 部署 Milvus Distributed 的详细信息,请参阅使用Helm 在 Kubernetes 中运行 Milvus

    使用 Milvus Operator 部署 Milvus Distributed 的提示

    Milvus Operator 会自动配置 QueryNode pod 将 NVMe 磁盘用作 EmptyDir 卷。建议将以下配置添加到MilvusCluster 自定义资源:

@@ -284,4 +279,4 @@ IO depths    : 1=0.1<
 - emptyDir:
   name: data
    -

    这将确保 QueryNode pod 将 NVMe 磁盘用作数据卷。有关如何使用 Milvus Operator 部署分布式Milvus 的详细信息,请参阅使用 Milvus Operator 在 Kubernetes 中运行 Milvus

    +

    这将确保 QueryNode pod 将 NVMe 磁盘用作数据卷。有关如何使用 Milvus Operator 部署Milvus Distributed 的详细信息,请参阅使用 Milvus Operator 在 Kubernetes 中运行 Milvus

    diff --git a/localization/v2.4.x/site/zh/adminGuide/limit_collection_counts.json b/localization/v2.4.x/site/zh/adminGuide/limit_collection_counts.json index b9f654302..55c43eedd 100644 --- a/localization/v2.4.x/site/zh/adminGuide/limit_collection_counts.json +++ b/localization/v2.4.x/site/zh/adminGuide/limit_collection_counts.json @@ -1 +1 @@ -{"codeList":["rootCoord:\n maxGeneralCapacity: 1024\n","60 (collections) x 2 (shards) x 4 (partitions) + 40 (collections) x 1 (shard) x 12 (partitions) = 960\n","failed checking constraint: sum_collections(parition*shard) exceeding the max general capacity:\n"],"headingContent":"","anchorList":[{"label":"限制 Collection 数","href":"Limit-Collection-Counts","type":1,"isActive":false},{"label":"配置选项","href":"Configuration-options","type":2,"isActive":false},{"label":"计算藏品数量","href":"Calculating-the-number-of-collections","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["rootCoord:\n maxGeneralCapacity: 65536\n","60 (collections) x 2 (shards) x 4 (partitions) + 40 (collections) x 1 (shard) x 12 (partitions) = 960\n","failed checking constraint: sum_collections(parition*shard) exceeding the max general capacity:\n"],"headingContent":"Limit Collection Counts","anchorList":[{"label":"收集数量限制","href":"Limit-Collection-Counts","type":1,"isActive":false},{"label":"配置选项","href":"Configuration-options","type":2,"isActive":false},{"label":"计算 Collections 的数量","href":"Calculating-the-number-of-collections","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/adminGuide/limit_collection_counts.md b/localization/v2.4.x/site/zh/adminGuide/limit_collection_counts.md index 37b7ce28e..71865b6ef 100644 --- a/localization/v2.4.x/site/zh/adminGuide/limit_collection_counts.md +++ b/localization/v2.4.x/site/zh/adminGuide/limit_collection_counts.md @@ -1,9 +1,8 @@ --- id: limit_collection_counts.md -title: 设置 Collection 数量限制 -summary: '' +title: 设定收集数量限制 --- -

    限制 Collection 数

    Milvus 实例最多允许 65,536 个集合。不过,过多的收藏集可能会导致性能问题。因此,建议限制在 Milvus 实例中创建的集合数量。

    -

    本指南说明了如何设置 Milvus 实例中的收藏集数量限制。

    +

    Milvus 实例最多允许 65,536 个 Collection。不过,过多的 Collections 可能会导致性能问题。因此,建议限制在 Milvus 实例中创建的 Collection 数量。

    +

    本指南说明了如何设置 Milvus 实例中的 Collection 数量限制。

    配置因安装 Milvus 实例的方式而异。

    • 对于使用 Helm Charts 安装的 Milvus 实例

      将配置添加到values.yaml 文件的config 部分。有关详细信息,请参阅使用 Helm Charts 配置 Milvus

    • 对于使用 Docker Compose 安装的 Milvus 实例

      -

      将配置添加到用于启动 Milvus 实例的milvus.yaml 文件。有关详细信息,请参阅使用 Docker Compose 配置 Milvus

    • +

      将配置添加到用于启动 Milvus 实例的milvus.yaml 文件中。有关详细信息,请参阅使用 Docker Compose 配置 Milvus

    • 对于使用 Operator 安装的 Milvus 实例

      将配置添加到Milvus 自定义资源的spec.components 部分。有关详情,请参阅使用 Operator 配置 Milvus

@@ -45,10 +44,10 @@ summary: ''
    rootCoord:
    -    maxGeneralCapacity: 1024
    +    maxGeneralCapacity: 65536
     
    -

    maxGeneralCapacity 参数设置当前 Milvus 实例可容纳的最大集合数。默认值为1024

    -

    计算集合数

    在一个集合中,可以设置多个分片和分区。分片是用于在多个数据节点之间分配数据写入操作的逻辑单元。分区是逻辑单元,用于通过只加载集合数据的子集来提高数据检索效率。计算当前 Milvus 实例中的集合数量时,还需要计算分片和分区。

    -

例如,假设您已经创建了100 个集合,其中60 个集合有2 个分片和4 个分区,其余40 个集合有1 个分片和12 个分区。当前的集合数可以计算为

    +

    在一个 Collections 中,可以设置多个分片和分区。分片是用于在多个数据节点之间分配数据写入操作的逻辑单元。分区是逻辑单元,用于通过只加载 Collections 数据的子集来提高数据检索效率。在计算当前 Milvus 实例中的 Collections 数量时,还需要计算分片和分区的数量。

    +

例如,假设您已经创建了100 个Collection,其中60 个Collection 有2 个分片和4 个分区,其余40 个Collection 有1 个分片和12 个分区。当前的 Collections 数量可以计算如下:

    60 (collections) x 2 (shards) x 4 (partitions) + 40 (collections) x 1 (shard) x 12 (partitions) = 960
     
    -

在上例中,您已经使用了默认限制中的960 个。现在如果想创建一个有4 个分片和20 个分区的新集合,就会收到以下错误提示,因为集合总数超过了最大容量:

    +

在上例中,您已经使用了默认限制中的960 个。现在如果想创建一个有4 个分片和20 个分区的新 Collections,就会收到以下错误提示,因为 Collections 的总数超过了最大容量:

    failed checking constraint: sum_collections(parition*shard) exceeding the max general capacity:
     
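A quick sanity check of the arithmetic above — a sketch in Python, using only the numbers assumed in this example and the default maxGeneralCapacity of 1024:

    # Each collection consumes shards x partitions units of capacity.
    groups = [
        (60, 2, 4),   # 60 collections, 2 shards, 4 partitions each
        (40, 1, 12),  # 40 collections, 1 shard, 12 partitions each
    ]
    used = sum(n * shards * partitions for n, shards, partitions in groups)
    print(used)  # 960

    max_general_capacity = 1024  # default value
    new_collection = 4 * 20      # 4 shards x 20 partitions
    print(used + new_collection)                          # 1040
    print(used + new_collection > max_general_capacity)   # True -> creation fails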
    -

    要避免此错误,可以减少现有或新集合中的分片或分区数量,删除某些集合,或增加maxGeneralCapacity 值。

    +

    要避免此错误,可以减少现有或新集合中的分片或分区数量,删除某些集合,或者增加maxGeneralCapacity 值。

    diff --git a/localization/v2.4.x/site/zh/adminGuide/rbac.json b/localization/v2.4.x/site/zh/adminGuide/rbac.json index 96a2c406f..f0dbf8ef9 100644 --- a/localization/v2.4.x/site/zh/adminGuide/rbac.json +++ b/localization/v2.4.x/site/zh/adminGuide/rbac.json @@ -1 +1 @@ -{"codeList":["from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri='http://localhost:19530', # replace with your own Milvus server address\n token='root:Milvus' # replace with your own Milvus server token\n)\n","client.create_user(\n user_name='user_1',\n password='P@ssw0rd'\n)\n","client.update_password(\n user_name='user_1',\n old_password='P@ssw0rd',\n new_password='P@ssw0rd123'\n)\n","client.list_users()\n\n# output:\n# ['root', 'user_1']\n","client.describe_user(user_name='user_1')\n\n# output:\n# {'user_name': 'user_1', 'roles': ()}\n","client.create_role(\n role_name=\"roleA\",\n)\n","client.list_roles()\n\n# output:\n# ['admin', 'public', 'roleA']\n","# grant privilege to a role\n\nclient.grant_privilege(\n role_name='roleA',\n object_type='User', # value here can be Global, Collection or User, object type also depends on the API defined in privilegeName\n object_name='user_1', # value here can be * or a specific user name if object type is 'User'\n privilege='SelectUser'\n)\n","client.describe_role(\n role_name='roleA'\n)\n\n# output:\n# {'role': 'roleA',\n# 'privileges': [{'object_type': 'User',\n# 'object_name': 'user_1',\n# 'db_name': 'default',\n# 'role_name': 'roleA',\n# 'privilege': 'SelectUser',\n# 'grantor_name': 'root'}]}\n","# grant a role to a user\n\nclient.grant_role(\n user_name='user_1',\n role_name='roleA'\n)\n","client.describe_user(\n user_name='user_1'\n)\n\n# output:\n# {'user_name': 'user_1', 'roles': ('roleA')}\n","client.revoke_privilege(\n role_name='roleA',\n object_type='User', # value here can be Global, Collection or User, object type also depends on the API defined in privilegeName\n object_name='user_1', # value here can be * or a specific user name if object type is 'User'\n privilege='SelectUser'\n)\n","client.revoke_role(\n user_name='user_1',\n role_name='roleA'\n)\n","client.drop_role(role_name='roleA')\n","client.drop_user(user_name='user_1')\n"],"headingContent":"","anchorList":[{"label":"启用 RBAC","href":"Enable-RBAC","type":1,"isActive":false},{"label":"1.启动 Milvus 客户端建立连接","href":"1-Initiate-a-Milvus-client-to-establish-a-connection","type":2,"isActive":false},{"label":"2.创建用户","href":"2-Create-a-user","type":2,"isActive":false},{"label":"3.创建角色","href":"3-Create-a-role","type":2,"isActive":false},{"label":"4.授予角色特权","href":"4-Grant-a-privilege-to-a-role","type":2,"isActive":false},{"label":"5.向用户授予角色","href":"5-Grant-a-role-to-a-user","type":2,"isActive":false},{"label":"6.撤销特权","href":"6-Revoke-privileges","type":2,"isActive":false},{"label":"下一步行动","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["from pymilvus import MilvusClient\n\nclient = MilvusClient(\n uri='http://localhost:19530', # replace with your own Milvus server address\n token='root:Milvus' # replace with your own Milvus server token\n)\n","client.create_user(\n user_name='user_1',\n password='P@ssw0rd'\n)\n","client.update_password(\n user_name='user_1',\n old_password='P@ssw0rd',\n new_password='P@ssw0rd123'\n)\n","client.list_users()\n\n# output:\n# ['root', 'user_1']\n","client.describe_user(user_name='user_1')\n\n# output:\n# {'user_name': 'user_1', 'roles': ()}\n","client.create_role(\n role_name=\"roleA\",\n)\n","client.list_roles()\n\n# 
output:\n# ['admin', 'public', 'roleA']\n","# grant privilege to a role\n\nclient.grant_privilege(\n role_name='roleA',\n object_type='User', # value here can be Global, Collection or User, object type also depends on the API defined in privilegeName\n object_name='user_1', # value here can be * or a specific user name if object type is 'User'\n privilege='SelectUser'\n)\n","client.describe_role(\n role_name='roleA'\n)\n\n# output:\n# {'role': 'roleA',\n# 'privileges': [{'object_type': 'User',\n# 'object_name': 'user_1',\n# 'db_name': 'default',\n# 'role_name': 'roleA',\n# 'privilege': 'SelectUser',\n# 'grantor_name': 'root'}]}\n","# grant a role to a user\n\nclient.grant_role(\n user_name='user_1',\n role_name='roleA'\n)\n","client.describe_user(\n user_name='user_1'\n)\n\n# output:\n# {'user_name': 'user_1', 'roles': ('roleA')}\n","client.revoke_privilege(\n role_name='roleA',\n object_type='User', # value here can be Global, Collection or User, object type also depends on the API defined in privilegeName\n object_name='user_1', # value here can be * or a specific user name if object type is 'User'\n privilege='SelectUser'\n)\n","client.revoke_role(\n user_name='user_1',\n role_name='roleA'\n)\n","client.drop_role(role_name='roleA')\n","client.drop_user(user_name='user_1')\n"],"headingContent":"Enable RBAC","anchorList":[{"label":"启用 RBAC","href":"Enable-RBAC","type":1,"isActive":false},{"label":"1.启动 Milvus 客户端建立连接","href":"1-Initiate-a-Milvus-client-to-establish-a-connection","type":2,"isActive":false},{"label":"2.创建用户","href":"2-Create-a-user","type":2,"isActive":false},{"label":"3.创建角色","href":"3-Create-a-role","type":2,"isActive":false},{"label":"4.向角色授予权限","href":"4-Grant-a-privilege-to-a-role","type":2,"isActive":false},{"label":"5.向用户授予角色","href":"5-Grant-a-role-to-a-user","type":2,"isActive":false},{"label":"6.撤销权限","href":"6-Revoke-privileges","type":2,"isActive":false},{"label":"下一步","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/adminGuide/rbac.md b/localization/v2.4.x/site/zh/adminGuide/rbac.md index 238da0f09..16aea543e 100644 --- a/localization/v2.4.x/site/zh/adminGuide/rbac.md +++ b/localization/v2.4.x/site/zh/adminGuide/rbac.md @@ -19,7 +19,7 @@ title: 启用 RBAC d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

By enabling RBAC, you can control access to specific Milvus resources (for example, a collection or a partition) or permissions based on user roles and privileges. Currently, this feature is only available in Python and Java.

+

By enabling RBAC, you can control access to specific Milvus resources (for example, a collection or a partition) or permissions based on user roles and privileges. Currently, this feature is only available in Python and Java.

This topic describes how to enable RBAC and manage users and roles.

The code snippets on this page use the new MilvusClient (Python) to interact with Milvus. New MilvusClient SDKs for other languages will be released in future updates.

@@ -39,7 +39,7 @@ title: 启用 RBAC
-

After enabling user authentication, connect to your Milvus instance using a token that consists of a username and a password. By default, Milvus uses the root user with the password Milvus.

+

After enabling user authentication, connect to your Milvus instance using a token that consists of a username and a password. By default, Milvus uses the root user, and the password is Milvus.

    from pymilvus import MilvusClient
     
     client = MilvusClient(
    @@ -138,9 +138,9 @@ client = MilvusClient(
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

The following example demonstrates how to grant the privilege of searching all collections to a role named roleA.

-

The object_type specifies the object type, which can also be understood as the resource type. Currently, valid values include Collection/User/Global, etc., where Global means that there is no specific resource type. The object_name is the resource name. If the object type is Collection, the object name can refer to a specific collection name, or you can use * to specify all collections. If the object type is Global, the object name can only be specified as *. See Users and Roles for other types of privileges that you can grant.

-

Before managing role privileges, make sure you have enabled user authentication. Otherwise, an error may occur. For information on how to enable user authentication, see Authenticate User Access.

+

The following example demonstrates how to grant the privilege of searching all collections to a role named roleA.

+

The object_type specifies the object type, which can also be understood as the resource type. Currently, valid values include Collection/User/Global, etc., where Global means that there is no specific resource type. The object_name is the resource name. If the object_type is Collection, the object_name can refer to a specific collection name, or you can use * to specify all collections. If the object type is Global, the object name can only be specified as *. See Users and Roles for other types of privileges that you can grant.

+

Before managing role privileges, make sure you have enabled user authentication. Otherwise, an error may occur. For information on how to enable user authentication, see Authenticate User Access.

    # grant privilege to a role
     
     client.grant_privilege(
    @@ -182,7 +182,7 @@ client.grant_privilege(
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

Grant a role to a user so that this user can inherit all the privileges of the role.

+

Grant a role to a user so that this user can inherit all the privileges of the role.

    # grant a role to a user
     
     client.grant_role(
    @@ -261,5 +261,5 @@ client.grant_role(
           
         
    diff --git a/localization/v2.4.x/site/zh/adminGuide/resource_group.json b/localization/v2.4.x/site/zh/adminGuide/resource_group.json
    index d08b73ee0..85a911b2d 100644
    --- a/localization/v2.4.x/site/zh/adminGuide/resource_group.json
    +++ b/localization/v2.4.x/site/zh/adminGuide/resource_group.json
    @@ -1 +1 @@
    -{"codeList":["{\n    \"requests\": { \"nodeNum\": 1 },\n    \"limits\": { \"nodeNum\": 1 },\n    \"transfer_from\": [{ \"resource_group\": \"rg1\" }],\n    \"transfer_to\": [{ \"resource_group\": \"rg2\" }]\n}\n","import pymilvus\n\n# A resource group name should be a string of 1 to 255 characters, starting with a letter or an underscore (_) and containing only numbers, letters, and underscores (_).\nname = \"rg\"\nnode_num = 0\n\n# create a resource group that exactly hold no query node.\ntry:\n    utility.create_resource_group(name, config=utility.ResourceGroupConfig(\n        requests={\"node_num\": node_num},\n        limits={\"node_num\": node_num},\n    ), using='default')\n    print(f\"Succeeded in creating resource group {name}.\")\nexcept Exception:\n    print(\"Failed to create the resource group.\")\n","rgs = utility.list_resource_groups(using='default')\nprint(f\"Resource group list: {rgs}\")\n\n# Resource group list: ['__default_resource_group', 'rg']\n","info = utility.describe_resource_group(name, using=\"default\")\nprint(f\"Resource group description: {info}\")\n\n# Resource group description: \n#        ,           // string, rg name\n#        ,            // int, num_node which has been transfer to this rg\n#        ,  // int, available node_num, some node may shutdown\n#        , // map[string]int, from collection_name to loaded replica of each collecion in this rg\n#        ,  // map[string]int, from collection_name to outgoging accessed node num by replica loaded in this rg \n#        .  // map[string]int, from collection_name to incoming accessed node num by replica loaded in other rg\n","source = '__default_resource_group'\ntarget = 'rg'\nexpected_num_nodes_in_default = 0\nexpected_num_nodes_in_rg = 1\n\ntry:\n    utility.update_resource_groups({\n        source: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_default},\n            limits={\"node_num\": expected_num_nodes_in_default},\n        ),\n        target: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_rg},\n            limits={\"node_num\": expected_num_nodes_in_rg},\n        )\n    }, using=\"default\")\n    print(f\"Succeeded in move 1 node(s) from {source} to {target}.\")\nexcept Exception:\n    print(\"Something went wrong while moving nodes.\")\n\n# After a while, succeeded in moving 1 node(s) from __default_resource_group to rg.\n","from pymilvus import Collection\n\ncollection = Collection('demo')\n\n# Milvus loads the collection to the default resource group.\ncollection.load(replica_number=2)\n\n# Or, you can ask Milvus load the collection to the desired resource group.\n# make sure that query nodes num should be greater or equal to replica_number\nresource_groups = ['rg']\ncollection.load(replica_number=2, _resource_groups=resource_groups) \n","collection = Collection(\"Books\")\n\n# Use the load method of a collection to load one of its partition\ncollection.load([\"Novels\"], replica_number=2, _resource_groups=resource_groups)\n\n# Or, you can use the load method of a partition directly\npartition = Partition(collection, \"Novels\")\npartition.load(replica_number=2, _resource_groups=resource_groups)\n","source = '__default_resource_group'\ntarget = 'rg'\ncollection_name = 'c'\nnum_replicas = 1\n\ntry:\n    utility.transfer_replica(source, target, collection_name, num_replicas, using=\"default\")\n    print(f\"Succeeded in moving {num_node} replica(s) of {collection_name} from {source} to {target}.\")\nexcept Exception:\n    
print(\"Something went wrong while moving replicas.\")\n\n# Succeeded in moving 1 replica(s) of c from __default_resource_group to rg.\n","try:\n    utility.update_resource_groups({\n        \"rg\": utility.ResourceGroupConfig(\n            requests={\"node_num\": 0},\n            limits={\"node_num\": 0},\n        ),\n    }, using=\"default\")\n    utility.drop_resource_group(\"rg\", using=\"default\")\n    print(f\"Succeeded in dropping {source}.\")\nexcept Exception:\n    print(f\"Something went wrong while dropping {source}.\")\n","from pymilvus import utility\nfrom pymilvus.client.types import ResourceGroupConfig\n\n_PENDING_NODES_RESOURCE_GROUP=\"__pending_nodes\"\n\ndef init_cluster(node_num: int):\n    print(f\"Init cluster with {node_num} nodes, all nodes will be put in default resource group\")\n    # create a pending resource group, which can used to hold the pending nodes that do not hold any data.\n    utility.create_resource_group(name=_PENDING_NODES_RESOURCE_GROUP, config=ResourceGroupConfig(\n        requests={\"node_num\": 0}, # this resource group can hold 0 nodes, no data will be load on it.\n        limits={\"node_num\": 10000}, # this resource group can hold at most 10000 nodes \n    ))\n\n    # update default resource group, which can used to hold the nodes that all initial node in it.\n    utility.update_resource_groups({\n        \"__default_resource_group\": ResourceGroupConfig(\n            requests={\"node_num\": node_num},\n            limits={\"node_num\": node_num},\n            transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover missing node from pending resource group at high priority.\n            transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover redundant node to pending resource group at low priority.\n        )})\n    utility.create_resource_group(name=\"rg1\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n    utility.create_resource_group(name=\"rg2\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n\ninit_cluster(1)\n","\ndef scale_to(node_num: int):\n    # scale the querynode number in Milvus into node_num.\n    pass\n","# scale rg1 into 3 nodes, rg2 into 1 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 3},\n        limits={\"node_num\": 3},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n    \"rg2\": ResourceGroupConfig(\n        requests={\"node_num\": 1},\n        limits={\"node_num\": 1},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\nscale_to(5)\n# rg1 has 3 nodes, rg2 has 1 node, __default_resource_group has 1 node.\n","# scale rg1 from 3 nodes into 2 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 2},\n        limits={\"node_num\": 2},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    
    transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\n\n# rg1 has 2 nodes, rg2 has 1 node, __default_resource_group has 1 node, __pending_nodes has 1 node.\nscale_to(4)\n# scale the node in __pending_nodes\n"],"headingContent":"","anchorList":[{"label":"管理资源组","href":"Manage-Resource-Groups","type":1,"isActive":false},{"label":"什么是资源组","href":"What-is-a-resource-group","type":2,"isActive":false},{"label":"资源组的概念","href":"Concepts-of-resource-group","type":2,"isActive":false},{"label":"使用声明式 api 管理资源组","href":"Use-declarative-api-to-manage-resource-group","type":2,"isActive":false},{"label":"管理集群扩展的良好做法","href":"A-good-practice-to-manage-cluster-scaling","type":2,"isActive":false},{"label":"资源组如何与多个副本互动","href":"How-resource-groups-interacts-with-multiple-replicas","type":2,"isActive":false},{"label":"下一步行动","href":"Whats-next","type":1,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["{\n    \"requests\": { \"nodeNum\": 1 },\n    \"limits\": { \"nodeNum\": 1 },\n    \"transfer_from\": [{ \"resource_group\": \"rg1\" }],\n    \"transfer_to\": [{ \"resource_group\": \"rg2\" }]\n}\n","import pymilvus\n\n# A resource group name should be a string of 1 to 255 characters, starting with a letter or an underscore (_) and containing only numbers, letters, and underscores (_).\nname = \"rg\"\nnode_num = 0\n\n# create a resource group that exactly hold no query node.\ntry:\n    utility.create_resource_group(name, config=utility.ResourceGroupConfig(\n        requests={\"node_num\": node_num},\n        limits={\"node_num\": node_num},\n    ), using='default')\n    print(f\"Succeeded in creating resource group {name}.\")\nexcept Exception:\n    print(\"Failed to create the resource group.\")\n","rgs = utility.list_resource_groups(using='default')\nprint(f\"Resource group list: {rgs}\")\n\n# Resource group list: ['__default_resource_group', 'rg']\n","info = utility.describe_resource_group(name, using=\"default\")\nprint(f\"Resource group description: {info}\")\n\n# Resource group description: \n#        ,           // string, rg name\n#        ,            // int, num_node which has been transfer to this rg\n#        ,  // int, available node_num, some node may shutdown\n#        , // map[string]int, from collection_name to loaded replica of each collecion in this rg\n#        ,  // map[string]int, from collection_name to outgoging accessed node num by replica loaded in this rg \n#        .  // map[string]int, from collection_name to incoming accessed node num by replica loaded in other rg\n","source = '__default_resource_group'\ntarget = 'rg'\nexpected_num_nodes_in_default = 0\nexpected_num_nodes_in_rg = 1\n\ntry:\n    utility.update_resource_groups({\n        source: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_default},\n            limits={\"node_num\": expected_num_nodes_in_default},\n        ),\n        target: ResourceGroupConfig(\n            requests={\"node_num\": expected_num_nodes_in_rg},\n            limits={\"node_num\": expected_num_nodes_in_rg},\n        )\n    }, using=\"default\")\n    print(f\"Succeeded in move 1 node(s) from {source} to {target}.\")\nexcept Exception:\n    print(\"Something went wrong while moving nodes.\")\n\n# After a while, succeeded in moving 1 node(s) from __default_resource_group to rg.\n","from pymilvus import Collection\n\ncollection = Collection('demo')\n\n# Milvus loads the collection to the default resource group.\ncollection.load(replica_number=2)\n\n# Or, you can ask Milvus load the collection to the desired resource group.\n# make sure that query nodes num should be greater or equal to replica_number\nresource_groups = ['rg']\ncollection.load(replica_number=2, _resource_groups=resource_groups) \n","collection = Collection(\"Books\")\n\n# Use the load method of a collection to load one of its partition\ncollection.load([\"Novels\"], replica_number=2, _resource_groups=resource_groups)\n\n# Or, you can use the load method of a partition directly\npartition = Partition(collection, \"Novels\")\npartition.load(replica_number=2, _resource_groups=resource_groups)\n","source = '__default_resource_group'\ntarget = 'rg'\ncollection_name = 'c'\nnum_replicas = 1\n\ntry:\n    utility.transfer_replica(source, target, collection_name, num_replicas, using=\"default\")\n    print(f\"Succeeded in moving {num_node} replica(s) of {collection_name} from {source} to {target}.\")\nexcept Exception:\n    
print(\"Something went wrong while moving replicas.\")\n\n# Succeeded in moving 1 replica(s) of c from __default_resource_group to rg.\n","try:\n    utility.update_resource_groups({\n        \"rg\": utility.ResourceGroupConfig(\n            requests={\"node_num\": 0},\n            limits={\"node_num\": 0},\n        ),\n    }, using=\"default\")\n    utility.drop_resource_group(\"rg\", using=\"default\")\n    print(f\"Succeeded in dropping {source}.\")\nexcept Exception:\n    print(f\"Something went wrong while dropping {source}.\")\n","from pymilvus import utility\nfrom pymilvus.client.types import ResourceGroupConfig\n\n_PENDING_NODES_RESOURCE_GROUP=\"__pending_nodes\"\n\ndef init_cluster(node_num: int):\n    print(f\"Init cluster with {node_num} nodes, all nodes will be put in default resource group\")\n    # create a pending resource group, which can used to hold the pending nodes that do not hold any data.\n    utility.create_resource_group(name=_PENDING_NODES_RESOURCE_GROUP, config=ResourceGroupConfig(\n        requests={\"node_num\": 0}, # this resource group can hold 0 nodes, no data will be load on it.\n        limits={\"node_num\": 10000}, # this resource group can hold at most 10000 nodes \n    ))\n\n    # update default resource group, which can used to hold the nodes that all initial node in it.\n    utility.update_resource_groups({\n        \"__default_resource_group\": ResourceGroupConfig(\n            requests={\"node_num\": node_num},\n            limits={\"node_num\": node_num},\n            transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover missing node from pending resource group at high priority.\n            transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], # recover redundant node to pending resource group at low priority.\n        )})\n    utility.create_resource_group(name=\"rg1\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n    utility.create_resource_group(name=\"rg2\", config=ResourceGroupConfig(\n        requests={\"node_num\": 0},\n        limits={\"node_num\": 0},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}], \n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ))\n\ninit_cluster(1)\n","\ndef scale_to(node_num: int):\n    # scale the querynode number in Milvus into node_num.\n    pass\n","# scale rg1 into 3 nodes, rg2 into 1 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 3},\n        limits={\"node_num\": 3},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n    \"rg2\": ResourceGroupConfig(\n        requests={\"node_num\": 1},\n        limits={\"node_num\": 1},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n        transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\nscale_to(5)\n# rg1 has 3 nodes, rg2 has 1 node, __default_resource_group has 1 node.\n","# scale rg1 from 3 nodes into 2 nodes\nutility.update_resource_groups({\n    \"rg1\": ResourceGroupConfig(\n        requests={\"node_num\": 2},\n        limits={\"node_num\": 2},\n        transfer_from=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    
    transfer_to=[{\"resource_group\": _PENDING_NODES_RESOURCE_GROUP}],\n    ),\n})\n\n# rg1 has 2 nodes, rg2 has 1 node, __default_resource_group has 1 node, __pending_nodes has 1 node.\nscale_to(4)\n# scale the node in __pending_nodes\n"],"headingContent":"Manage Resource Groups","anchorList":[{"label":"管理资源组","href":"Manage-Resource-Groups","type":1,"isActive":false},{"label":"什么是资源组","href":"What-is-a-resource-group","type":2,"isActive":false},{"label":"资源组的概念","href":"Concepts-of-resource-group","type":2,"isActive":false},{"label":"使用声明式 api 管理资源组","href":"Use-declarative-api-to-manage-resource-group","type":2,"isActive":false},{"label":"管理集群扩展的良好做法","href":"A-good-practice-to-manage-cluster-scaling","type":2,"isActive":false},{"label":"资源组如何与多个副本交互","href":"How-resource-groups-interacts-with-multiple-replicas","type":2,"isActive":false},{"label":"下一步","href":"Whats-next","type":1,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/zh/adminGuide/resource_group.md b/localization/v2.4.x/site/zh/adminGuide/resource_group.md
    index 712ba8a3a..c999d8506 100644
    --- a/localization/v2.4.x/site/zh/adminGuide/resource_group.md
    +++ b/localization/v2.4.x/site/zh/adminGuide/resource_group.md
    @@ -36,7 +36,7 @@ title: 管理资源组
             >
           
         

A resource group can hold several or all of the query nodes in a Milvus cluster. How you allocate query nodes among resource groups is up to you, based on what makes the most sense. For example, in a multi-collection scenario, you can allocate an appropriate number of query nodes to each resource group and load collections into different resource groups, so that the operations within each collection are physically independent of those in other collections.

-

Note that a Milvus instance maintains a default resource group to hold all the query nodes at startup and names it __default_resource_group.

+

Note that a Milvus instance, at startup, maintains a default resource group to hold all the query nodes and names it __default_resource_group.

Starting from version 2.4.1, Milvus provides a declarative resource group API, and the old resource group API has been deprecated. The new declarative API enables users to achieve idempotency, making secondary development in cloud-native environments easier.
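As a rough illustration of the declarative style, the sketch below states a desired node count and applies it twice; because the API is declarative, re-applying the same configuration leaves the cluster unchanged. It assumes a Milvus instance reachable at localhost:19530 and a PyMilvus 2.4.x client:

```python
from pymilvus import connections, utility
from pymilvus.client.types import ResourceGroupConfig

# Assumed local deployment; adjust host/port to your environment.
connections.connect("default", host="localhost", port="19530")

desired = {
    "__default_resource_group": ResourceGroupConfig(
        requests={"node_num": 1},  # declare the node count you want
        limits={"node_num": 1},
    ),
}

# Declarative: applying the same desired state twice is a no-op.
utility.update_resource_groups(desired, using="default")
utility.update_resource_groups(desired, using="default")
```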

Concepts of resource group

-

All code samples on this page are in PyMilvus 2.4.5. Upgrade your PyMilvus installation before running them.

+

All code samples on this page are in PyMilvus 2.4.8. Upgrade your PyMilvus installation before running them.

1. Create a resource group.

      @@ -133,7 +133,7 @@ node_num = 0 # <num_incoming_node:{}>. // map[string]int, from collection_name to incoming accessed node num by replica loaded in other rg
• Transfer nodes between resource groups.

  -

  You may notice that the described resource group does not have any query node yet. Move some nodes from the default resource group to the one you created as follows. Suppose there is currently one query node in the __default_resource_group of the cluster, and we want to transfer one node into the created rg. update_resource_groups ensures atomicity for multiple configuration changes, so no intermediate state will be visible to Milvus.

  +

  You may notice that the described resource group does not have any query node yet. Move some nodes from the default resource group to the one you created as follows. Suppose there is currently one query node in the __default_resource_group of the cluster, and we want to transfer one node into the created rg. update_resource_groups ensures atomicity for multiple configuration changes, so no intermediate state will be visible to Milvus.

    source = '__default_resource_group'
     target = 'rg'
     expected_num_nodes_in_default = 0
    @@ -156,8 +156,8 @@ expected_num_nodes_in_rg = 1
     
     # After a while, succeeded in moving 1 node(s) from __default_resource_group to rg.
     
  • -
• Load collections and partitions to a resource group.

  -

  Once there are query nodes in a resource group, you can load collections to this resource group. The following snippet assumes that a collection named demo already exists.

  +

• Load collections and partitions to a resource group.

  +

  Once there are query nodes in a resource group, you can load collections to this resource group. The following snippet assumes that a collection named demo already exists.

    from pymilvus import Collection
     
     collection = Collection('demo')
    @@ -170,7 +170,7 @@ collection.load(replica_number=2)
     resource_groups = ['rg']
     collection.load(replica_number=2, _resource_groups=resource_groups) 
     
-

Besides, you can also load a partition into a resource group and have its replicas distributed among several resource groups. The following assumes that a collection named Books already exists and that it has a partition named Novels.

+

Besides, you can also load a partition into a resource group and have its replicas distributed among several resource groups. The following assumes that a collection named Books already exists and that it has a partition named Novels.

    collection = Collection("Books")
     
     # Use the load method of a collection to load one of its partition
collection.load(["Novels"], replica_number=2, _resource_groups=resource_groups)

# Or, you can use the load method of a partition directly
partition = Partition(collection, "Novels")
partition.load(replica_number=2, _resource_groups=resource_groups)

@@ -181,9 +181,9 @@
     

Note that _resource_groups is an optional parameter. If it is left unspecified, Milvus loads the replicas onto the query nodes in the default resource group.

-

To have Milvus load each replica of a collection in a separate resource group, make sure that the number of resource groups equals the number of replicas.

+

To have Milvus load each replica of a collection in a separate resource group, make sure that the number of resource groups equals the number of replicas.

• Transfer replicas between resource groups.

  -

  Milvus uses replicas to achieve load balancing among segments distributed across multiple query nodes. You can move certain replicas in a resource group from one collection to another as follows:

  +

  Milvus uses replicas to achieve load balancing among segments distributed across multiple query nodes. You can move certain replicas of a collection from one resource group to another as follows:

    source = '__default_resource_group'
     target = 'rg'
     collection_name = 'c'
    @@ -228,9 +228,9 @@ num_replicas = 1
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

Currently, Milvus cannot scale in and out independently in cloud-native environments. However, by using the declarative resource group API in conjunction with container orchestration, Milvus can easily achieve resource isolation and management for QueryNodes. Here is a good practice for managing QueryNodes in a cloud environment:

+

Currently, Milvus cannot scale in and out independently in cloud-native environments. However, by using the declarative resource group API in conjunction with container orchestration, Milvus can easily achieve resource isolation and management for QueryNodes. Here is a good practice for managing QueryNodes in a cloud environment:

  -
1. By default, Milvus creates a __default_resource_group. This resource group cannot be deleted; it also serves as the default loading resource group for all collections, and redundant QueryNodes are always assigned to it. Therefore, we can create a pending resource group to hold unused QueryNode resources and prevent QueryNode resources from being occupied by the __default_resource_group.

  +
1. By default, Milvus creates a __default_resource_group. This resource group cannot be deleted; it also serves as the default loading resource group for all collections, and redundant QueryNodes are always assigned to it. Therefore, we can create a pending resource group to hold unused QueryNode resources and prevent QueryNode resources from being occupied by the __default_resource_group.

   In addition, if we strictly enforce the constraint sum(.requests.nodeNum) <= queryNodeNum, we can precisely control the assignment of QueryNodes in the cluster. Here is an example setup:

      from pymilvus import utility
       from pymilvus.client.types import ResourceGroupConfig
@@ -268,7 +268,7 @@ _PENDING_NODES_RESOURCE_GROUP="__pending_nodes"
       
       init_cluster(1)
       
-

With the sample code above, we created a resource group named __pending_nodes to hold additional QueryNodes. We also created two user-specific resource groups named rg1 and rg2. In addition, we made sure that the other resource group has priority in recovering missing or redundant QueryNodes from __pending_nodes.

+

With the sample code above, we created a resource group named __pending_nodes to hold additional QueryNodes. We also created two user-specific resource groups named rg1 and rg2. In addition, we made sure that other resource groups are given priority in recovering missing or redundant QueryNodes from __pending_nodes.

2. Cluster scaling

   Suppose we have the following scaling function:

      
      @@ -276,7 +276,7 @@ init_cluster(1)
           # scale the querynode number in Milvus into node_num.
           pass
       
-

We can use this API to scale a specific resource group to a designated number of QueryNodes without affecting any other resource groups.

+

We can use this API to scale the QueryNodes of a specific resource group to a designated number without affecting any other resource groups.

      # scale rg1 into 3 nodes, rg2 into 1 nodes
       utility.update_resource_groups({
           "rg1": ResourceGroupConfig(
      @@ -328,8 +328,8 @@ scale_to(4)
-

• Replicas of a single collection and resource groups have an N-to-N relationship.

-

• When multiple replicas of a single collection are loaded into one resource group, the QueryNodes of that resource group are evenly distributed among the replicas, ensuring that the difference between the numbers of QueryNodes each replica has does not exceed 1.

+

• Replicas of a single collection and resource groups have an N-to-N relationship.

+

• When multiple replicas of a single collection are loaded into one resource group, the QueryNodes of that resource group are evenly distributed among the replicas, ensuring that the difference between the numbers of QueryNodes each replica has does not exceed 1.
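For instance, the following sketch loads two replicas of one collection into two resource groups, one replica per group. It assumes that resource groups rg1 and rg2 each already hold at least one query node and that a collection named demo exists:

```python
from pymilvus import connections, Collection

connections.connect("default", host="localhost", port="19530")

collection = Collection("demo")
# Two replicas, two resource groups: each replica lands in its own group,
# so within each group the per-replica QueryNode counts differ by at most 1.
collection.load(replica_number=2, _resource_groups=["rg1", "rg2"])
```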

What's next

TLS (Transport Layer Security) is an encryption protocol that ensures communication security. Milvus proxy uses TLS one-way and two-way authentication.

-

This topic describes how to enable TLS proxy in Milvus.

+

This topic describes how to enable TLS in Milvus proxy for both gRPC and RESTful traffic.

-

TLS and user authentication are two distinct security approaches. If you have enabled both user authentication and TLS in your Milvus system, you will need to provide a username, a password, and certificate file paths. For information on how to enable user authentication, see Authenticate User Access.

+

TLS and user authentication are two distinct security approaches. If you have enabled both user authentication and TLS in your Milvus system, you will need to provide a username, a password, and certificate file paths. For information on how to enable user authentication, see Authenticate User Access.

Create your own certificate

-

For more information, see example_tls1.py and example_tls2.py.

+

See example_tls1.py and example_tls2.py for more information.

+

Connect to the Milvus RESTful server with TLS

For RESTful APIs, you can check TLS using the curl command.

+

One-way TLS connection

      curl --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list
      +
      +

Two-way TLS connection

      curl --cert path_to/client.pem --key path_to/client.key --cacert path_to/ca.pem https://localhost:19530/v2/vectordb/collections/list
      +
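For gRPC clients, a one-way TLS connection from PyMilvus follows the same pattern as example_tls1.py; the sketch below is a minimal illustration, and the certificate path is a placeholder rather than a path from this document:

```python
from pymilvus import connections

# One-way TLS: the client verifies the server with the server/CA certificate.
# "path_to/server.pem" is illustrative; use your actual certificate path.
connections.connect(
    "default",
    host="localhost",
    port="19530",
    secure=True,
    server_pem_path="path_to/server.pem",
    server_name="localhost",
)
```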
      diff --git a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-docker.json b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-docker.json index 4cda5b9a8..1ecf0d863 100644 --- a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-docker.json +++ b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-docker.json @@ -1,37 +1 @@ -{ - "codeList": [ - "...\nrootcoord:\n container_name: milvus-rootcoord\n image: milvusdb/milvus:v2.4.9\n...\nproxy:\n container_name: milvus-proxy\n image: milvusdb/milvus:v2.4.9\n...\nquerycoord:\n container_name: milvus-querycoord\n image: milvusdb/milvus:v2.4.9 \n...\nquerynode:\n container_name: milvus-querynode\n image: milvusdb/milvus:v2.4.9\n...\nindexcoord:\n container_name: milvus-indexcoord\n image: milvusdb/milvus:v2.4.9\n...\nindexnode:\n container_name: milvus-indexnode\n image: milvusdb/milvus:v2.4.9 \n...\ndatacoord:\n container_name: milvus-datacoord\n image: milvusdb/milvus:v2.4.9 \n...\ndatanode:\n container_name: milvus-datanode\n image: milvusdb/milvus:v2.4.9\n", - "docker compose down\ndocker compose up -d\n", - "docker stop \n", - "# migration.yaml\ncmd:\n # Option: run/backup/rollback\n type: run\n runWithBackup: true\nconfig:\n sourceVersion: 2.1.4 # Specify your milvus version\n targetVersion: 2.4.9\n backupFilePath: /tmp/migration.bak\nmetastore:\n type: etcd\netcd:\n endpoints:\n - milvus-etcd:2379 # Use the etcd container name\n rootPath: by-dev # The root path where data is stored in etcd\n metaSubPath: meta\n kvSubPath: kv\n", - "# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvus/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n", - "Update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n" - ], - "headingContent": "", - "anchorList": [ - { - "label": "使用 Docker Compose 升级 Milvus 集群", - "href": "Upgrade-Milvus-Cluster-with-Docker-Compose", - "type": 1, - "isActive": false - }, - { - "label": "改变Milvus的形象,使其升级", - "href": "Upgrade-Milvus-by-changing-its-image", - "type": 2, - "isActive": false - }, - { - "label": "迁移元数据", - "href": "Migrate-the-metadata", - "type": 2, - "isActive": false - }, - { - "label": "下一步行动", - "href": "Whats-next", - "type": 2, - "isActive": false - } - ] -} +{"codeList":["...\nrootcoord:\n container_name: milvus-rootcoord\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nproxy:\n container_name: milvus-proxy\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nquerycoord:\n container_name: milvus-querycoord\n image: milvusdb/milvus:v2.4.13-hotfix \n...\nquerynode:\n container_name: milvus-querynode\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nindexcoord:\n container_name: milvus-indexcoord\n image: milvusdb/milvus:v2.4.13-hotfix\n...\nindexnode:\n container_name: milvus-indexnode\n image: milvusdb/milvus:v2.4.13-hotfix \n...\ndatacoord:\n container_name: milvus-datacoord\n image: milvusdb/milvus:v2.4.13-hotfix \n...\ndatanode:\n container_name: milvus-datanode\n image: milvusdb/milvus:v2.4.13-hotfix\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n # Option: run/backup/rollback\n type: run\n runWithBackup: true\nconfig:\n sourceVersion: 2.1.4 # Specify your milvus version\n targetVersion: 2.4.13-hotfix\n backupFilePath: /tmp/migration.bak\nmetastore:\n type: 
etcd\netcd:\n endpoints:\n - milvus-etcd:2379 # Use the etcd container name\n rootPath: by-dev # The root path where data is stored in etcd\n metaSubPath: meta\n kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvus/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","Update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"Upgrade Milvus Cluster with Docker Compose","anchorList":[{"label":"使用 Docker Compose 升级 Milvus 群集","href":"Upgrade-Milvus-Cluster-with-Docker-Compose","type":1,"isActive":false},{"label":"通过更改映像升级 Milvus","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"迁移元数据","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"下一步","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-docker.md b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-docker.md index 589a66313..7070aff15 100644 --- a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-docker.md +++ b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-docker.md @@ -1,11 +1,10 @@ --- id: upgrade_milvus_cluster-docker.md summary: 了解如何使用 Docker Compose 升级 Milvus 集群。 -title: 使用 Docker Compose 升级 Milvus 集群 +title: 使用 Docker Compose 升级 Milvus 群集 --- - - -

Upgrade Milvus Cluster with Docker Compose

This topic describes how to upgrade your Milvus using Docker Compose.

-

In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before upgrading from v2.1.x to v2.4.9.

+

In normal cases, you can upgrade Milvus by changing its image. However, you need to migrate the metadata before upgrading from v2.1.x to v2.4.13-hotfix.

Upgrade Milvus by changing its image

In normal cases, you can upgrade Milvus as follows:

  -
1. Change the Milvus image tags in docker-compose.yaml.

  +
1. Change the Milvus image tags in docker-compose.yaml.

  Note that you need to change the image tags of the proxy, all coordinators, and all worker nodes.

        ...
         rootcoord:
           container_name: milvus-rootcoord
        -  image: milvusdb/milvus:v2.4.9
        +  image: milvusdb/milvus:v2.4.13-hotfix
         ...
         proxy:
           container_name: milvus-proxy
        -  image: milvusdb/milvus:v2.4.9
        +  image: milvusdb/milvus:v2.4.13-hotfix
         ...
         querycoord:
           container_name: milvus-querycoord
        -  image: milvusdb/milvus:v2.4.9  
        +  image: milvusdb/milvus:v2.4.13-hotfix  
         ...
         querynode:
           container_name: milvus-querynode
        -  image: milvusdb/milvus:v2.4.9
        +  image: milvusdb/milvus:v2.4.13-hotfix
         ...
         indexcoord:
           container_name: milvus-indexcoord
        -  image: milvusdb/milvus:v2.4.9
        +  image: milvusdb/milvus:v2.4.13-hotfix
         ...
         indexnode:
           container_name: milvus-indexnode
        -  image: milvusdb/milvus:v2.4.9 
        +  image: milvusdb/milvus:v2.4.13-hotfix 
         ...
         datacoord:
           container_name: milvus-datacoord
        -  image: milvusdb/milvus:v2.4.9   
        +  image: milvusdb/milvus:v2.4.13-hotfix   
         ...
         datanode:
           container_name: milvus-datanode
        -  image: milvusdb/milvus:v2.4.9
+  image: milvusdb/milvus:v2.4.13-hotfix
 
2. Run the following commands to perform the upgrade.

   docker compose down
@@ -98,7 +97,7 @@ docker compose up -d
 
3. Stop all Milvus components.

   docker stop <milvus-component-docker-container-name>
 
   -
4. Prepare the configuration file migrate.yaml for metadata migration.

   +
4. Prepare the configuration file migrate.yaml for meta migration.

        # migration.yaml
         cmd:
           # Option: run/backup/rollback
        @@ -106,7 +105,7 @@ cmd:
           runWithBackup: true
         config:
           sourceVersion: 2.1.4   # Specify your milvus version
        -  targetVersion: 2.4.9
        +  targetVersion: 2.4.13-hotfix
           backupFilePath: /tmp/migration.bak
         metastore:
           type: etcd
        diff --git a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-helm.json b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-helm.json
        index 3684c2d9c..2d3bbce0c 100644
        --- a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-helm.json
        +++ b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-helm.json
        @@ -1,51 +1 @@
        -{
        -	"codeList": [
        -		"$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n",
        -		"helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n",
        -		"NAME                    CHART VERSION   APP VERSION             DESCRIPTION                                       \nzilliztech/milvus       4.1.34          2.4.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.33          2.4.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.32          2.4.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.31          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.30          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.29          2.4.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.24          2.3.11                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.23          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.22          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.21          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.20          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.18          2.3.10                  Milvus is an open-source vector database built ... \nzilliztech/milvus       4.1.18          2.3.9                   Milvus is an open-source vector database built ...                                       \nzilliztech/milvus       4.1.17          2.3.8                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.16          2.3.7                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.15          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.14          2.3.6                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.13          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.12          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.11          2.3.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.10          2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.9           2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.8           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.7           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.6           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.5           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.4           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.3           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.2           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus      
 4.1.1           2.3.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...\n",
        -		"sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'\n",
        -		"helm repo update\nhelm upgrade my-release zilliztech/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n",
        -		"NAME                NAMESPACE   REVISION    UPDATED                                 STATUS      CHART           APP VERSION    \nnew-release         default     1           2022-11-21 15:41:25.51539 +0800 CST     deployed    milvus-3.2.18   2.1.4 \n",
        -		"NAME                                             READY   STATUS      RESTARTS   AGE\nmy-release-etcd-0                               1/1     Running     0          21m\nmy-release-etcd-1                               1/1     Running     0          21m\nmy-release-etcd-2                               1/1     Running     0          21m\nmy-release-milvus-datacoord-664c58798d-fl75s    1/1     Running     0          21m\nmy-release-milvus-datanode-5f75686c55-xfg2r     1/1     Running     0          21m\nmy-release-milvus-indexcoord-5f98b97589-2l48r   1/1     Running     0          21m\nmy-release-milvus-indexnode-857b4ddf98-vmd75    1/1     Running     0          21m\nmy-release-milvus-proxy-6c548f787f-scspp        1/1     Running     0          21m\nmy-release-milvus-querycoord-c454f44cd-dwmwq    1/1     Running     0          21m\nmy-release-milvus-querynode-76bb4946d-lbrz6     1/1     Running     0          21m\nmy-release-milvus-rootcoord-7764c5b686-62msm    1/1     Running     0          21m\nmy-release-minio-0                              1/1     Running     0          21m\nmy-release-minio-1                              1/1     Running     0          21m\nmy-release-minio-2                              1/1     Running     0          21m\nmy-release-minio-3                              1/1     Running     0          21m\nmy-release-pulsar-bookie-0                      1/1     Running     0          21m\nmy-release-pulsar-bookie-1                      1/1     Running     0          21m\nmy-release-pulsar-bookie-2                      1/1     Running     0          21m\nmy-release-pulsar-bookie-init-tjxpj             0/1     Completed   0          21m\nmy-release-pulsar-broker-0                      1/1     Running     0          21m\nmy-release-pulsar-proxy-0                       1/1     Running     0          21m\nmy-release-pulsar-pulsar-init-c8vvc             0/1     Completed   0          21m\nmy-release-pulsar-recovery-0                    1/1     Running     0          21m\nmy-release-pulsar-zookeeper-0                   1/1     Running     0          21m\nmy-release-pulsar-zookeeper-1                   1/1     Running     0          20m\nmy-release-pulsar-zookeeper-2                   1/1     Running     0          20m\n",
        -		"$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n",
        -		"./migrate.sh -i my-release -s 2.1.4 -t 2.4.9\n",
        -		"./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9\n",
        -		"./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev\n",
        -		"./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9\n",
        -		"./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true\n",
        -		"./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9\n"
        -	],
        -	"headingContent": "",
        -	"anchorList": [
        -		{
        -			"label": "升级 Milvus 星群与舵手图",
        -			"href": "Upgrade-Milvus-Cluster-with-Helm-Chart",
        -			"type": 1,
        -			"isActive": false
        -		},
        -		{
        -			"label": "查看Milvus舵手图",
        -			"href": "Check-Milvus-Helm-Chart",
        -			"type": 2,
        -			"isActive": false
        -		},
        -		{
        -			"label": "进行滚动升级",
        -			"href": "Conduct-a-rolling-upgrade",
        -			"type": 2,
        -			"isActive": false
        -		},
        -		{
        -			"label": "使用 Helm 升级 Milvus",
        -			"href": "Upgrade-Milvus-using-Helm",
        -			"type": 2,
        -			"isActive": false
        -		},
        -		{
        -			"label": "迁移元数据",
        -			"href": "Migrate-the-metadata",
        -			"type": 2,
        -			"isActive": false
        -		}
        -	]
        -}
        +{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME                    CHART VERSION   APP VERSION             DESCRIPTION                                       \nzilliztech/milvus       4.1.34          2.4.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.33          2.4.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.32          2.4.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.31          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.30          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.29          2.4.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.24          2.3.11                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.23          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.22          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.21          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.20          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.18          2.3.10                  Milvus is an open-source vector database built ... \nzilliztech/milvus       4.1.18          2.3.9                   Milvus is an open-source vector database built ...                                       
\nzilliztech/milvus       4.1.17          2.3.8                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.16          2.3.7                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.15          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.14          2.3.6                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.13          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.12          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.11          2.3.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.10          2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.9           2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.8           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.7           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.6           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.5           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.4           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.3           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.2           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.1           2.3.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'\n","helm repo update\nhelm upgrade my-release zilliztech/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME                NAMESPACE   REVISION    UPDATED                                 STATUS      CHART           APP VERSION    \nnew-release         default     1           2022-11-21 15:41:25.51539 +0800 CST     deployed    milvus-3.2.18   2.1.4 \n","NAME                                             READY   STATUS      RESTARTS   AGE\nmy-release-etcd-0                               1/1     Running     0          21m\nmy-release-etcd-1                               1/1     Running     0          21m\nmy-release-etcd-2                               1/1     Running     0          21m\nmy-release-milvus-datacoord-664c58798d-fl75s    1/1     Running     0          21m\nmy-release-milvus-datanode-5f75686c55-xfg2r     1/1     Running     0          21m\nmy-release-milvus-indexcoord-5f98b97589-2l48r   1/1     Running     0          21m\nmy-release-milvus-indexnode-857b4ddf98-vmd75    1/1     Running     0          21m\nmy-release-milvus-proxy-6c548f787f-scspp        1/1     Running     0          21m\nmy-release-milvus-querycoord-c454f44cd-dwmwq    1/1     Running     0          21m\nmy-release-milvus-querynode-76bb4946d-lbrz6     1/1     Running     0          
21m\nmy-release-milvus-rootcoord-7764c5b686-62msm    1/1     Running     0          21m\nmy-release-minio-0                              1/1     Running     0          21m\nmy-release-minio-1                              1/1     Running     0          21m\nmy-release-minio-2                              1/1     Running     0          21m\nmy-release-minio-3                              1/1     Running     0          21m\nmy-release-pulsar-bookie-0                      1/1     Running     0          21m\nmy-release-pulsar-bookie-1                      1/1     Running     0          21m\nmy-release-pulsar-bookie-2                      1/1     Running     0          21m\nmy-release-pulsar-bookie-init-tjxpj             0/1     Completed   0          21m\nmy-release-pulsar-broker-0                      1/1     Running     0          21m\nmy-release-pulsar-proxy-0                       1/1     Running     0          21m\nmy-release-pulsar-pulsar-init-c8vvc             0/1     Completed   0          21m\nmy-release-pulsar-recovery-0                    1/1     Running     0          21m\nmy-release-pulsar-zookeeper-0                   1/1     Running     0          21m\nmy-release-pulsar-zookeeper-1                   1/1     Running     0          20m\nmy-release-pulsar-zookeeper-2                   1/1     Running     0          20m\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix\n"],"headingContent":"Upgrade Milvus Cluster with Helm Chart","anchorList":[{"label":"使用 Helm 图表升级 Milvus 群集","href":"Upgrade-Milvus-Cluster-with-Helm-Chart","type":1,"isActive":false},{"label":"检查 Milvus Helm 图表","href":"Check-Milvus-Helm-Chart","type":2,"isActive":false},{"label":"进行滚动升级","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"使用 Helm 升级 Milvus","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"迁移元数据","href":"Migrate-the-metadata","type":2,"isActive":false}]}
        \ No newline at end of file
        diff --git a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-helm.md b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-helm.md
        index 575effcac..3a699c845 100644
        --- a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-helm.md
        +++ b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-helm.md
        @@ -5,9 +5,9 @@ order: 1
         group: upgrade_milvus_cluster-operator.md
         related_key: upgrade Milvus Cluster
         summary: 了解如何使用 Helm 图表升级 Milvus 集群。
        -title: 升级 Milvus 星群与舵手图
        +title: 使用 Helm 图表升级 Milvus 群集
         ---
        -
        +
         

        使用 Helm 图表升级 Milvus 群集

-https://milvus-io.github.io/milvus-helm/ 上的 Milvus Helm 图表 repo 已归档,您可以从https://zilliztech.github.io/milvus-helm/ 获取进一步更新,具体如下:
+https://milvus-io.github.io/milvus-helm/ 上的 Milvus Helm 图表 repo 已归档,你可以从https://zilliztech.github.io/milvus-helm/ 获取进一步更新,具体如下:

        helm repo add zilliztech https://zilliztech.github.io/milvus-helm
         helm repo update
         # upgrade existing helm release
        @@ -85,11 +85,11 @@ zilliztech/milvus       4.1.2           2.3.1                   Milvus is an ope
         zilliztech/milvus       4.1.1           2.3.0                   Milvus is an open-source vector database built ...
         zilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...
         
-您可以按以下方式为您的 Milvus 选择升级路径:
-- [进行滚动升级](#conduct-a-rolling-upgrade) 从 Milvus v2.2.3 及以后的版本升级到 v2.4.9。
+您可以按以下方式选择 Milvus 的升级路径:
+- [进行滚动升级](#conduct-a-rolling-upgrade) 从 Milvus v2.2.3 及以后的版本升级到 v2.4.13-hotfix。

        进行滚动升级

-自 Milvus 2.2.3 起,您可以将 Milvus 协调器配置为主动待机模式,并为它们启用滚动升级功能,以便 Milvus 在协调器升级期间响应传入的请求。在以前的版本中,升级时需要移除协调器,然后再创建协调器,这可能会导致服务出现一定的停机时间。
+自 Milvus 2.2.3 起,您可以将 Milvus 协调器配置为主动待机模式,并为它们启用滚动升级功能,这样 Milvus 就能在协调器升级期间响应传入的请求。在以前的版本中,升级时需要移除协调器,然后再创建协调器,这可能会导致服务出现一定的停机时间。
 滚动升级要求协调程序以活动-待机模式工作。您可以使用我们提供的脚本将协调程序配置为活动-待机模式,然后开始滚动升级。
-基于 Kubernetes 提供的滚动更新功能,上述脚本会根据部署的依赖关系对部署进行有序更新。此外,Milvus 还实施了一种机制,确保其组件在升级过程中与依赖它们的组件保持兼容,从而大大减少了潜在的服务停机时间。
+基于 Kubernetes 提供的滚动更新功能,上述脚本会根据部署的依赖关系对部署进行有序更新。此外,Milvus 还实现了一种机制,确保其组件在升级过程中与依赖它们的组件保持兼容,从而大大减少了潜在的服务停机时间。
 该脚本仅适用于升级与 Helm 一起安装的 Milvus。下表列出了脚本中可用的命令标志。

@@ -118,17 +118,17 @@ zilliztech/milvus 4.1.0 2.3.0 Milvus is an ope
 | 参数 | 说明 | 默认值 | 需要 |
 | i | Milvus 实例名称 | None | 为真 |
 | n | Milvus 安装的命名空间 | default | 假 |
-| t | 目标 Milvus 版本 | None | 真 |
+| t | Milvus 目标版本 | None | 真 |
 | w | 新的 Milvus 图像标签 | milvusdb/milvus:v2.2.3 | 假 |
-| o | 操作 | update | 假 |
+| o | 操作符 | update | 假 |
-确保 Milvus 实例中的所有部署都处于正常状态后。可以运行以下命令将 Milvus 实例升级到 2.4.9。
-sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'
+确保 Milvus 实例中的所有部署都处于正常状态后。可以运行以下命令将 Milvus 实例升级到 2.4.13-hotfix。
+sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'
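As a sketch of the "all deployments healthy" pre-check mentioned above (the namespace and the app.kubernetes.io/instance label selector are assumptions about a default Helm install; adjust them to your release):

 # Confirm every deployment in the release is fully rolled out before upgrading.
 kubectl -n default get deployments -l app.kubernetes.io/instance=my-release
 # Block until each deployment reports a successful rollout.
 for d in $(kubectl -n default get deployments -l app.kubernetes.io/instance=my-release -o name); do
   kubectl -n default rollout status "$d" --timeout=120s
 done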
         
-1. 脚本硬编码了部署的升级顺序,不可更改。
+1. 脚本硬编码了部署的升级顺序,无法更改。
 2. 脚本使用kubectl patch 更新部署,使用kubectl rollout status 观察部署状态。
 3. 脚本使用kubectl patch 将部署的app.kubernetes.io/version 标签更新为命令中-t 标志后指定的标签。
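For illustration only, a single iteration of that mechanism might look like the following; the deployment name is hypothetical and this is not the script itself:

 # Patch one component image, then wait for its rollout to finish.
 kubectl -n default patch deployment my-release-milvus-proxy --type='json' \
   -p='[{"op":"replace","path":"/spec/template/spec/containers/0/image","value":"milvusdb/milvus:v2.4.13-hotfix"}]'
 kubectl -n default rollout status deployment/my-release-milvus-proxy
 # Update the version label, mirroring the -t flag behavior described above.
 kubectl -n default label deployment my-release-milvus-proxy app.kubernetes.io/version=2.4.13-hotfix --overwrite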
@@ -149,7 +149,7 @@ zilliztech/milvus 4.1.0 2.3.0 Milvus is an ope
-要将 Milvus 从 2.2.3 版之前的次版本升级到最新版本,请运行以下命令:
+要将 Milvus 从 v2.2.3 之前的次版本升级到最新版本,请运行以下命令:

        helm repo update
         helm upgrade my-release zilliztech/milvus --reuse-values --version=4.1.24 # use the helm chart version here
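Before running the upgrade above, it can help to map chart versions to Milvus app versions and to verify the release afterwards; a minimal sketch using standard Helm commands (namespace is an assumption):

 # Pick the chart version matching your target Milvus version, then confirm the release.
 helm search repo zilliztech/milvus --versions | head -n 20
 helm list -n default --filter '^my-release$'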
         
        @@ -207,9 +207,9 @@ my-release-pulsar-zookeeper-2
        $ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'
         # milvusdb/milvus:v2.1.4
         
-4.迁移元数据
-Milvus 2.2 的一个主要变化是段索引的元数据结构。因此,当 Milvus 从 v2.1.x 升级到 v2.2.0 时,你需要使用 Helm 来迁移元数据。 这里有一个脚本供你安全迁移元数据。
-该脚本仅适用于安装在 K8s 集群上的 Milvus。如果在迁移过程中出现错误,请先使用回滚操作回滚到之前的版本。
-下表列出了元数据迁移的操作。
+4.迁移元数据
+Milvus 2.2 的一个主要变化是段索引的元数据结构。因此,当 Milvus 从 v2.1.x 升级到 v2.2.0 时,你需要使用 Helm 来迁移元数据。 下面是一个脚本,供你安全迁移元数据。
+该脚本仅适用于安装在 K8s 集群上的 Milvus。如果过程中出现错误,请先使用回滚操作符回滚到之前的版本。
+下表列出了元数据迁移的操作符。

@@ -222,10 +222,10 @@ my-release-pulsar-zookeeper-2
 | 参数 | 说明 | 默认值 | 需要 |
 | r | Milvus 元的根路径。 | by-dev | 假 |
 | w | 新的 Milvus 图像标签。 | milvusdb/milvus:v2.2.0 | 假 |
 | m | 元迁移图像标签。 | milvusdb/meta-migration:v2.2.0 | 假 |
-| o | 元迁移操作。 | migrate | 假 |
+| o | 元迁移操作符。 | migrate | 假 |
 | d | 迁移完成后是否删除迁移 pod。 | false | 假 |
 | c | 元迁移 pvc 的存储类别。 | default storage class | 假 |
-| e | milvus 使用的 etcd endpoint。 | etcd svc installed with milvus | 错误 |
+| e | Milvus 使用的 etcd endpoint。 | etcd svc installed with milvus | 错误 |

        1.迁移元数据

          @@ -235,25 +235,25 @@ my-release-pulsar-zookeeper-2
        1. 迁移 Milvus 元数据。
        2. 使用新镜像启动 Milvus 组件。
-2.将 Milvus 从 2.1.x 升级到 2.4.9
-以下命令假定你将 Milvus 从 v2.1.4 升级到 2.4.9。请将它们更改为适合你需要的版本。
+2.将 Milvus 从 2.1.x 升级到 2.4.13-hotfix
+以下命令假定你将 Milvus 从 v2.1.4 升级到 2.4.13-hotfix。请将它们改为适合你需要的版本。

 1. 指定 Milvus 实例名称、源 Milvus 版本和目标 Milvus 版本。

-   ./migrate.sh -i my-release -s 2.1.4 -t 2.4.9
+   ./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix

-2. 如果你的 Milvus 没有安装在默认的 K8s 命名空间中,用-n 指定命名空间。
+2. 如果你的 Milvus 没有安装在默认的 K8s 命名空间,请用-n 指定命名空间。

-   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9
+   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix

 3. 如果 Milvus 安装的是自定义rootpath ,请用-r 指定根路径。

-   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev
+   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev

-4. 如果 Milvus 安装的是自定义image ,请使用-w 指定图片标签。
+4. 如果你的 Milvus 安装的是自定义image ,请用-w 指定图片标签。

-   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9
+   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix

-5. 如果希望在迁移完成后自动移除迁移 pod,请设置-d true 。
+5. 如果想在迁移完成后自动移除迁移 pod,请设置-d true 。

-   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true
+   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true

 6. 如果迁移失败,请回滚并重新迁移。

-   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
-   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9
+   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
+   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix
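Once migrate.sh finishes, a quick verification pass might look like this (a sketch; the milvus namespace and release name follow the examples above):

 # All pods should be Running/Completed, and containers should use the new tag.
 kubectl -n milvus get pods
 kubectl -n milvus get pods -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[0].image}{"\n"}{end}'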
           
        diff --git a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-operator.json b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-operator.json index 9ed36dee9..0dd29d61d 100644 --- a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-operator.json +++ b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-operator.json @@ -1,47 +1 @@ -{ - "codeList": [ - "helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n", - "apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.9\n", - "apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.9\n", - "apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n", - "kubectl apply -f milvusupgrade.yml\n", - "apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.9\n", - "kubectl apply -f milvusupgrade.yaml\n", - "apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.9\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.9\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n", - "$ kubectl apply -f https://github.com/zilliztech/milvus-operator/blob/main/config/samples/beta/milvusupgrade.yaml\n", - "kubectl describe milvus release-name\n" - ], - "headingContent": "", - "anchorList": [ - { - "label": "使用 Milvus Operator 升级 Milvus 集群", - "href": "Upgrade-Milvus-Cluster-with-Milvus-Operator", - "type": 1, - "isActive": false - }, - { - "label": "升级您的 Milvus Operator", - "href": "Upgrade-your-Milvus-operator", - "type": 2, - "isActive": false - }, - { - "label": "进行滚动升级", - "href": "Conduct-a-rolling-upgrade", - "type": 2, - "isActive": false - }, - { - "label": "改变Milvus的形象,使其升级", - "href": "Upgrade-Milvus-by-changing-its-image", - "type": 2, - "isActive": false - }, - { - "label": "迁移元数据", - "href": "Migrate-the-metadata", - "type": 2, - "isActive": false - } - ] -} +{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: 
milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.13-hotfix\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.13-hotfix\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.13-hotfix\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://github.com/zilliztech/milvus-operator/blob/main/config/samples/beta/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"Upgrade Milvus Cluster with Milvus Operator","anchorList":[{"label":"使用 Milvus Operator 升级 Milvus 群集","href":"Upgrade-Milvus-Cluster-with-Milvus-Operator","type":1,"isActive":false},{"label":"升级您的 Milvus 操作符","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"进行滚动升级","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"通过更改映像升级 Milvus","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"迁移元数据","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-operator.md b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-operator.md index e530e357d..3e08497a3 100644 --- a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-operator.md +++ b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_cluster-operator.md @@ -5,9 +5,8 @@ order: 0 group: upgrade_milvus_cluster-operator.md related_key: upgrade Milvus Cluster summary: 了解如何使用 Milvus Operator 升级 Milvus 集群。 -title: 使用 Milvus Operator 升级 Milvus 集群 +title: 使用 Milvus Operator 升级 Milvus 群集 --- -

        使用 Milvus Operator 升级 Milvus 群集

        本指南介绍如何使用 Milvus Operator 升级 Milvus 群集。

 升级 Milvus Operator
-运行以下命令将 Milvus Operator 版本升级到 v1.0.1。
+运行以下命令将您的 Milvus 操作符版本升级到 v1.0.1。

        helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/
         helm repo update zilliztech-milvus-operator
         helm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator
         
-将 Milvus Operator 升级到最新版本后,您有以下选择:
+将您的 Milvus 操作符升级到最新版本后,您有以下选择:

        进行滚动升级

-从 Milvus 2.2.3 版开始,你可以将 Milvus 协调器配置为主动待机模式,并为它们启用滚动升级功能,这样 Milvus 就能在协调器升级期间响应传入的请求。在以前的版本中,升级时需要移除协调器,然后再创建协调器,这可能会导致服务出现一定的停机时间。
-基于 Kubernetes 提供的滚动更新功能,Milvus Operator 会根据部署的依赖关系对部署进行有序更新。此外,Milvus 还实施了一种机制,确保其组件在升级期间与依赖于它们的组件保持兼容,从而大大减少了潜在的服务停机时间。
+自 Milvus 2.2.3 起,你可以将 Milvus 协调器配置为主动待机模式,并为它们启用滚动升级功能,这样 Milvus 就能在协调器升级期间响应传入的请求。在以前的版本中,升级时需要先删除协调器,然后再创建协调器,这可能会导致服务出现一定程度的停机。
+基于 Kubernetes 提供的滚动更新功能,Milvus 操作符会根据部署的依赖关系强制执行有序更新。此外,Milvus 还实施了一种机制,确保其组件在升级期间与依赖于它们的组件保持兼容,从而大大减少了潜在的服务停机时间。

        滚动升级功能默认为禁用。你需要通过配置文件明确启用它。

        apiVersion: milvus.io/v1beta1
         kind: Milvus
        @@ -77,10 +76,10 @@ spec:
           components:
             enableRollingUpdate: true
             imageUpdateMode: rollingUpgrade # Default value, can be omitted
        -    image: milvusdb/milvus:v2.4.9
        +    image: milvusdb/milvus:v2.4.13-hotfix
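Assuming the manifest above is saved as milvus_upgrade.yaml (a hypothetical filename), applying it and watching the coordinators roll one by one could look like the following sketch:

 # Apply the CR and watch pods get replaced one at a time.
 kubectl apply -f milvus_upgrade.yaml
 kubectl get pods -l app.kubernetes.io/instance=my-release -w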
         

        在上述配置文件中,将spec.components.enableRollingUpdate 设置为true ,将spec.components.image 设置为所需的 Milvus 版本。

-默认情况下,Milvus 会以有序方式对协调器进行滚动升级,即逐个替换协调器 pod 映像。为缩短升级时间,可考虑将spec.components.imageUpdateMode 设置为all ,以便 Milvus 同时替换所有 pod 映像。
+默认情况下,Milvus 会以有序方式对协调器执行滚动升级,即逐个替换协调器 pod 映像。要缩短升级时间,可以考虑将spec.components.imageUpdateMode 设置为all ,这样 Milvus 就会同时替换所有 pod 映像。

        apiVersion: milvus.io/v1beta1
         kind: Milvus
         metadata:
        @@ -89,9 +88,9 @@ spec:
           components:
             enableRollingUpdate: true
             imageUpdateMode: all
        -    image: milvusdb/milvus:v2.4.9
        +    image: milvusdb/milvus:v2.4.13-hotfix
         
-您可以将spec.components.imageUpdateMode 设置为rollingDowngrade ,让 Milvus 用较低的版本替换协调器 pod 映像。
+可以将spec.components.imageUpdateMode 设置为rollingDowngrade ,让 Milvus 用较低的版本替换协调器 pod 映像。

        apiVersion: milvus.io/v1beta1
         kind: Milvus
         metadata:
        @@ -120,7 +119,7 @@ spec:
                   d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
                 >
               
        -    

        在正常情况下,你可以通过更改映像将 Milvus 升级到最新版本。但要注意的是,用这种方法升级 Milvus 时会有一定的停机时间。

        +

        在正常情况下,你可以通过更改映像将你的 Milvus 升级到最新版本。但要注意的是,用这种方法升级 Milvus 时会有一定的停机时间。

        编写如下配置文件,并将其保存为milvusupgrade.yaml

        apiVersion: milvus.io/v1beta1
         kind: Milvus
        @@ -129,7 +128,7 @@ metadata:
         spec:
           # Omit other fields ...
           components:
        -   image: milvusdb/milvus:v2.4.9
        +   image: milvusdb/milvus:v2.4.13-hotfix
         

        然后运行以下命令执行升级:

        kubectl apply -f milvusupgrade.yaml
        @@ -149,8 +148,8 @@ spec:
                   d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
                 >
               
        -    

        自 Milvus 2.2.0 起,元数据与以前版本的元数据不兼容。以下示例片段假定从 Milvus 2.1.4 升级到 Milvus 2.4.9。

        -

        1.为元数据迁移创建.yaml 文件

        创建元数据迁移文件。下面是一个示例。需要在配置文件中指定namesourceVersiontargetVersion 。下面的示例将name 设置为my-release-upgrade ,将sourceVersion 设置为v2.1.4 ,将targetVersion 设置为v2.4.9 。这意味着您的 Milvus 群集将从 v2.1.4 升级到 v2.4.9。

        +

        自 Milvus 2.2.0 起,元数据与以前版本的元数据不兼容。以下示例片段假定从 Milvus 2.1.4 升级到 Milvus 2.4.13-hotfix。

        +

        1.创建用于元数据迁移的.yaml 文件

        创建元数据迁移文件。下面是一个示例。需要在配置文件中指定namesourceVersiontargetVersion 。下面的示例将name 设置为my-release-upgrade ,将sourceVersion 设置为v2.1.4 ,将targetVersion 设置为v2.4.13-hotfix 。这意味着您的 Milvus 群集将从 v2.1.4 升级到 v2.4.13-hotfix。

        apiVersion: milvus.io/v1beta1
         kind: MilvusUpgrade
         metadata:
        @@ -160,21 +159,21 @@ spec:
             namespace: default
             name: my-release
           sourceVersion: "v2.1.4"
        -  targetVersion: "v2.4.9"
        +  targetVersion: "v2.4.13-hotfix"
           # below are some omit default values:
        -  # targetImage: "milvusdb/milvus:v2.4.9"
        +  # targetImage: "milvusdb/milvus:v2.4.13-hotfix"
           # toolImage: "milvusdb/meta-migration:v2.2.0"
           # operation: upgrade
           # rollbackIfFailed: true
           # backupPVC: ""
           # maxRetry: 3
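After applying the MilvusUpgrade resource above, its status can be polled until the migration reports success; a sketch, assuming the CRD is registered under the plural name milvusupgrades.milvus.io:

 # Inspect the upgrade object and its recorded state/conditions.
 kubectl -n default get milvusupgrades.milvus.io my-release-upgrade -o yaml
 kubectl -n default describe milvusupgrades.milvus.io my-release-upgrade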
         
-2.应用新配置
-运行以下命令应用新配置。
+2. 应用新配置
+运行以下命令应用新配置。

        $ kubectl apply -f https://github.com/zilliztech/milvus-operator/blob/main/config/samples/beta/milvusupgrade.yaml
         

3. 检查元数据迁移状态

        运行以下命令检查元数据迁移的状态。

        kubectl describe milvus release-name
         
-输出中的ready 表示元数据迁移成功。
+输出中的状态为ready 意味着元数据迁移成功。

        或者,也可以运行kubectl get pod 检查所有 pod。如果所有 pod 都是ready ,则元数据迁移成功。

        4.删除my-release-upgrade

        升级成功后,删除 YAML 文件中的my-release-upgrade

        diff --git a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-docker.json b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-docker.json index 6a5690580..2f294692d 100644 --- a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-docker.json +++ b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-docker.json @@ -1,37 +1 @@ -{ - "codeList": [ - "...\nstandalone:\n container_name: milvus-standalone\n image: milvusdb/milvus:v2.4.9\n", - "docker compose down\ndocker compose up -d\n", - "docker stop \n", - "# migration.yaml\ncmd:\n # Option: run/backup/rollback\n type: run\n runWithBackup: true\nconfig:\n sourceVersion: 2.1.4 # Specify your milvus version\n targetVersion: 2.4.9\n backupFilePath: /tmp/migration.bak\nmetastore:\n type: etcd\netcd:\n endpoints:\n - milvus-etcd:2379 # Use the etcd container name\n rootPath: by-dev # The root path where data is stored in etcd\n metaSubPath: meta\n kvSubPath: kv\n", - "# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvusdb/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n", - "// Run the following only after update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n" - ], - "headingContent": "", - "anchorList": [ - { - "label": "使用 Docker Compose 升级 Milvus 单机版", - "href": "Upgrade-Milvus-Standalone-with-Docker-Compose", - "type": 1, - "isActive": false - }, - { - "label": "改变Milvus的形象,使其升级", - "href": "Upgrade-Milvus-by-changing-its-image", - "type": 2, - "isActive": false - }, - { - "label": "迁移元数据", - "href": "Migrate-the-metadata", - "type": 2, - "isActive": false - }, - { - "label": "下一步行动", - "href": "Whats-next", - "type": 2, - "isActive": false - } - ] -} +{"codeList":["...\nstandalone:\n container_name: milvus-standalone\n image: milvusdb/milvus:v2.4.13-hotfix\n","docker compose down\ndocker compose up -d\n","docker stop \n","# migration.yaml\ncmd:\n # Option: run/backup/rollback\n type: run\n runWithBackup: true\nconfig:\n sourceVersion: 2.1.4 # Specify your milvus version\n targetVersion: 2.4.13-hotfix\n backupFilePath: /tmp/migration.bak\nmetastore:\n type: etcd\netcd:\n endpoints:\n - milvus-etcd:2379 # Use the etcd container name\n rootPath: by-dev # The root path where data is stored in etcd\n metaSubPath: meta\n kvSubPath: kv\n","# Suppose your docker-compose run with the default milvus network,\n# and you put migration.yaml in the same directory with docker-compose.yaml.\ndocker run --rm -it --network milvus -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml milvusdb/meta-migration:v2.2.0 /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml\n","// Run the following only after update the milvus image tag in the docker-compose.yaml\ndocker compose down\ndocker compose up -d\n"],"headingContent":"Upgrade Milvus Standalone with Docker Compose","anchorList":[{"label":"使用 Docker Compose 升级 Milvus 单机版","href":"Upgrade-Milvus-Standalone-with-Docker-Compose","type":1,"isActive":false},{"label":"通过更改映像升级 Milvus","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"迁移元数据","href":"Migrate-the-metadata","type":2,"isActive":false},{"label":"下一步","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git 
a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-docker.md b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-docker.md index fb5370ec4..ba189ad47 100644 --- a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-docker.md +++ b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-docker.md @@ -4,7 +4,7 @@ label: Docker Compose order: 1 group: upgrade_milvus_standalone-operator.md related_key: upgrade Milvus Standalone -summary: 了解如何使用 Docker Compose 独立升级 Milvus。 +summary: 了解如何使用 Docker Compose 升级 Milvus Standalone。 title: 使用 Docker Compose 升级 Milvus 单机版 --- @@ -24,9 +24,9 @@ title: 使用 Docker Compose 升级 Milvus 单机版 >

        本主题介绍如何使用 Docker Compose 升级 Milvus。

-在正常情况下,你可以通过更改映像来升级 Milvus。不过,在从 v2.1.x 升级到 v2.4.9 之前,你需要迁移元数据 。
+在正常情况下,你可以通过更改映像来升级 Milvus。不过,在从 v2.1.x 升级到 v2.4.13-hotfix 之前,你需要迁移元数据 。
-出于安全考虑,Milvus 在发布 v2.2.5 时将 MinIO 升级到 RELEASE.2023-03-20T20-16-18Z。在使用 Docker Compose 从以前的 Milvus Standalone 版本升级之前,应创建单节点单硬盘 MinIO 部署,并将现有 MinIO 设置和内容迁移到新部署。有关详细信息,请参阅本指南 。
+出于安全考虑,Milvus 在发布 v2.2.5 时将其 MinIO 升级到 RELEASE.2023-03-20T20-16-18Z。在使用 Docker Compose 从以前安装的 Milvus Standalone 版本升级之前,应创建一个 Single-Node Single-Drive MinIO 部署,并将现有 MinIO 设置和内容迁移到新部署。有关详细信息,请参阅本指南 。

        通过更改映像升级 Milvus

-在正常情况下,可以按以下步骤升级 Milvus:
+在正常情况下,可以按以下方法升级 Milvus:

        1. docker-compose.yaml 中更改 Milvus 映像标记。

          ...
           standalone:
             container_name: milvus-standalone
          -  image: milvusdb/milvus:v2.4.9
          +  image: milvusdb/milvus:v2.4.13-hotfix
           
        2. 运行以下命令执行升级。

          docker compose down
          @@ -75,7 +75,7 @@ docker compose up -d
           
        3. 停止所有 Milvus 组件。

          docker stop <milvus-component-docker-container-name>
           
-4. 为元数据迁移准备配置文件migration.yaml 。
+4. 为元迁移准备配置文件migration.yaml 。

          # migration.yaml
           cmd:
             # Option: run/backup/rollback
          @@ -83,7 +83,7 @@ cmd:
             runWithBackup: true
           config:
             sourceVersion: 2.1.4   # Specify your milvus version
          -  targetVersion: 2.4.9
          +  targetVersion: 2.4.13-hotfix
             backupFilePath: /tmp/migration.bak
           metastore:
             type: etcd
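If the migration run fails, the same meta-migration image can restore from the backup file configured above; a sketch, assuming you first set cmd.type to rollback in migration.yaml:

 # Re-run the tool in rollback mode against the same config and network.
 docker run --rm -it --network milvus \
   -v $(pwd)/migration.yaml:/milvus/configs/migration.yaml \
   milvusdb/meta-migration:v2.2.0 \
   /milvus/bin/meta-migration -config=/milvus/configs/migration.yaml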
          diff --git a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-helm.json b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-helm.json
          index eec4334a5..5f30287dd 100644
          --- a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-helm.json
          +++ b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-helm.json
          @@ -1 +1 @@
          -{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME                    CHART VERSION   APP VERSION             DESCRIPTION                                       \nzilliztech/milvus       4.1.34          2.4.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.33          2.4.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.32          2.4.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.31          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.30          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.29          2.4.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.24          2.3.11                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.23          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.22          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.21          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.20          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.18          2.3.10                  Milvus is an open-source vector database built ... \nzilliztech/milvus       4.1.18          2.3.9                   Milvus is an open-source vector database built ...                                       
\nzilliztech/milvus       4.1.17          2.3.8                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.16          2.3.7                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.15          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.14          2.3.6                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.13          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.12          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.11          2.3.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.10          2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.9           2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.8           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.7           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.6           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.5           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.4           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.3           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.2           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.1           2.3.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'\n","helm repo update\nhelm upgrade my-release milvus/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME                NAMESPACE   REVISION    UPDATED                                 STATUS      CHART           APP VERSION     \nmy-release          default     1           2022-11-21 15:41:25.51539 +0800 CST     deployed    milvus-3.2.18   2.1.4\n","NAME                                            READY   STATUS    RESTARTS   AGE\nmy-release-etcd-0                               1/1     Running   0          84s\nmy-release-milvus-standalone-75c599fffc-6rwlj   1/1     Running   0          84s\nmy-release-minio-744dd9586f-qngzv               1/1     Running   0          84s\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 
-r by-dev -o migrate -w milvusdb/milvus:v2.4.9\n"],"headingContent":"","anchorList":[{"label":"用舵手图升级 Milvus 单机版","href":"Upgrade-Milvus-Standalone-with-Helm-Chart","type":1,"isActive":false},{"label":"查看 Milvus 版本","href":"Check-the-Milvus-version","type":2,"isActive":false},{"label":"进行滚动升级","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"使用 Helm 升级 Milvus","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"迁移元数据","href":"Migrate-the-metadata","type":2,"isActive":false}]}
          \ No newline at end of file
          +{"codeList":["$ helm repo update\n$ helm search repo zilliztech/milvus --versions\n","helm repo add zilliztech https://zilliztech.github.io/milvus-helm\nhelm repo update\n# upgrade existing helm release\nhelm upgrade my-release zilliztech/milvus\n","NAME                    CHART VERSION   APP VERSION             DESCRIPTION                                       \nzilliztech/milvus       4.1.34          2.4.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.33          2.4.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.32          2.4.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.31          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.30          2.4.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.29          2.4.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.24          2.3.11                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.23          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.22          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.21          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.20          2.3.10                  Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.18          2.3.10                  Milvus is an open-source vector database built ... \nzilliztech/milvus       4.1.18          2.3.9                   Milvus is an open-source vector database built ...                                       
\nzilliztech/milvus       4.1.17          2.3.8                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.16          2.3.7                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.15          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.14          2.3.6                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.13          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.12          2.3.5                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.11          2.3.4                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.10          2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.9           2.3.3                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.8           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.7           2.3.2                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.6           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.5           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.4           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.3           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.2           2.3.1                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.1           2.3.0                   Milvus is an open-source vector database built ...\nzilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...\n","sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'\n","helm repo update\nhelm upgrade my-release milvus/milvus --reuse-values --version=4.1.24 # use the helm chart version here\n","NAME                NAMESPACE   REVISION    UPDATED                                 STATUS      CHART           APP VERSION     \nmy-release          default     1           2022-11-21 15:41:25.51539 +0800 CST     deployed    milvus-3.2.18   2.1.4\n","NAME                                            READY   STATUS    RESTARTS   AGE\nmy-release-etcd-0                               1/1     Running   0          84s\nmy-release-milvus-standalone-75c599fffc-6rwlj   1/1     Running   0          84s\nmy-release-minio-744dd9586f-qngzv               1/1     Running   0          84s\n","$ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'\n# milvusdb/milvus:v2.1.4\n","./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true\n","./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback 
-w milvusdb/milvus:v2.1.1\n./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix\n"],"headingContent":"Upgrade Milvus Standalone with Helm Chart","anchorList":[{"label":"使用 Helm 图表升级 Milvus 单机版","href":"Upgrade-Milvus-Standalone-with-Helm-Chart","type":1,"isActive":false},{"label":"检查 Milvus 版本","href":"Check-the-Milvus-version","type":2,"isActive":false},{"label":"进行滚动升级","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"使用 Helm 升级 Milvus","href":"Upgrade-Milvus-using-Helm","type":2,"isActive":false},{"label":"迁移元数据","href":"Migrate-the-metadata","type":2,"isActive":false}]}
          \ No newline at end of file
          diff --git a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-helm.md b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-helm.md
          index 5163e7c4f..eebd690e8 100644
          --- a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-helm.md
          +++ b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-helm.md
          @@ -4,10 +4,10 @@ label: Helm
           order: 1
           group: upgrade_milvus_standalone-operator.md
           related_key: upgrade Milvus Standalone
          -summary: 了解如何使用 Helm 图表升级 Milvus 单机版。
          -title: 用舵手图升级 Milvus 单机版
          +summary: 了解如何使用 Helm Chart 升级 Milvus Standalone 单机版。
          +title: 使用 Helm 图表升级 Milvus 单机版
           ---
          -
          +
           

          使用 Helm 图表升级 Milvus 单机版

-https://milvus-io.github.io/milvus-helm/ 上的 Milvus Helm 图表软件源已归档,你可以从https://zilliztech.github.io/milvus-helm/ 获取进一步更新,如下所示:
+https://milvus-io.github.io/milvus-helm/ 上的 Milvus Helm 图表 repo 已归档,你可以从https://zilliztech.github.io/milvus-helm/ 获取进一步的更新,如下所示:

          helm repo add zilliztech https://zilliztech.github.io/milvus-helm
           helm repo update
           # upgrade existing helm release
          @@ -85,11 +85,11 @@ zilliztech/milvus       4.1.2           2.3.1                   Milvus is an ope
           zilliztech/milvus       4.1.1           2.3.0                   Milvus is an open-source vector database built ...
           zilliztech/milvus       4.1.0           2.3.0                   Milvus is an open-source vector database built ...
           
-您可以按以下方式为您的 Milvus 选择升级路径:
-- [进行滚动升级](#conduct-a-rolling-upgrade) 从 Milvus v2.2.3 及以后的版本升级到 v2.4.9。
+您可以按以下方式选择 Milvus 的升级路径:
+- [进行滚动升级](#conduct-a-rolling-upgrade) 从 Milvus v2.2.3 及以后的版本升级到 v2.4.13-hotfix。

          进行滚动升级

-自 Milvus 2.2.3 起,您可以将 Milvus 协调器配置为主动待机模式,并为它们启用滚动升级功能,以便 Milvus 在协调器升级期间响应传入的请求。在以前的版本中,升级时需要移除协调器,然后再创建协调器,这可能会导致服务出现一定的停机时间。
+自 Milvus 2.2.3 起,您可以将 Milvus 协调器配置为主动待机模式,并为它们启用滚动升级功能,这样 Milvus 就能在协调器升级期间响应传入的请求。在以前的版本中,升级时需要移除协调器,然后再创建协调器,这可能会导致服务出现一定的停机时间。
 滚动升级要求协调程序以活动-待机模式工作。您可以使用我们提供的脚本将协调程序配置为活动-待机模式,然后开始滚动升级。
-基于 Kubernetes 提供的滚动更新功能,上述脚本会根据部署的依赖关系对部署进行有序更新。此外,Milvus 还实施了一种机制,确保其组件在升级过程中与依赖它们的组件保持兼容,从而大大减少了潜在的服务停机时间。
+基于 Kubernetes 提供的滚动更新功能,上述脚本会根据部署的依赖关系对部署进行有序更新。此外,Milvus 还实现了一种机制,确保其组件在升级过程中与依赖它们的组件保持兼容,从而大大减少了潜在的服务停机时间。

          该脚本仅适用于升级与 Helm 一起安装的 Milvus。下表列出了脚本中可用的命令标志。

@@ -118,13 +118,13 @@ zilliztech/milvus 4.1.0 2.3.0 Milvus is an ope
 | 参数 | 说明 | 默认值 | 需要 |
 | i | Milvus 实例名称 | None | 为真 |
 | n | Milvus 安装的命名空间 | default | 假 |
-| t | 目标 Milvus 版本 | None | 真 |
+| t | Milvus 目标版本 | None | 真 |
 | w | 新的 Milvus 图像标签 | milvusdb/milvus:v2.2.3 | 假 |
-| o | 操作 | update | 假 |
+| o | 操作符 | update | 假 |
-确保 Milvus 实例中的所有部署都处于正常状态后。你可以运行以下命令将 Milvus 实例升级到 2.4.9。
-sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.9 -w 'milvusdb/milvus:v2.4.9'
+确保 Milvus 实例中的所有部署都处于正常状态后。你可以运行以下命令将 Milvus 实例升级到 2.4.13-hotfix。
+sh rollingUpdate.sh -n default -i my-release -o update -t 2.4.13-hotfix -w 'milvusdb/milvus:v2.4.13-hotfix'
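A small sketch for auditing the rolling update: snapshot each deployment's image before running rollingUpdate.sh and diff afterwards (the label selector is an assumption about the chart's labels):

 # Record deployment images so you can compare them after the update.
 kubectl -n default get deployments -l app.kubernetes.io/instance=my-release \
   -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.template.spec.containers[0].image}{"\n"}{end}'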
           
@@ -150,7 +150,7 @@ zilliztech/milvus 4.1.0 2.3.0 Milvus is an ope
-要将 Milvus 从 2.2.3 版之前的次版本升级到最新版本,请运行以下命令:
+要将 Milvus 从 v2.2.3 之前的次版本升级到最新版本,请运行以下命令:

            helm repo update
             helm upgrade my-release milvus/milvus --reuse-values --version=4.1.24 # use the helm chart version here
             
            @@ -185,9 +185,9 @@ my-release-minio-744dd9586f-qngzv 1/1 Running 0 84s
            $ kubectl get pods my-release-milvus-proxy-6c548f787f-scspp -o=jsonpath='{$.spec.containers[0].image}'
             # milvusdb/milvus:v2.1.4
             
-4.迁移元数据
-Milvus 2.2 的一个主要变化是段索引的元数据结构。因此,当 Milvus 从 v2.1.x 升级到 v2.2.0 时,你需要使用 Helm 来迁移元数据。 这里有一个脚本供你安全迁移元数据。
-该脚本仅适用于安装在 K8s 集群上的 Milvus。如果在迁移过程中出现错误,请先使用回滚操作回滚到之前的版本。
-下表列出了元数据迁移的操作。
+4.迁移元数据
+Milvus 2.2 的一个主要变化是段索引的元数据结构。因此,当 Milvus 从 v2.1.x 升级到 v2.2.0 时,你需要使用 Helm 来迁移元数据。 下面是一个脚本,供你安全迁移元数据。
+该脚本仅适用于安装在 K8s 集群上的 Milvus。如果过程中出现错误,请先使用回滚操作符回滚到之前的版本。
+下表列出了元数据迁移的操作符。

@@ -200,10 +200,10 @@ my-release-minio-744dd9586f-qngzv 1/1 Running 0 84s
 | 参数 | 说明 | 默认值 | 需要 |
 | r | Milvus 元的根路径。 | by-dev | 假 |
 | w | 新的 Milvus 图像标签。 | milvusdb/milvus:v2.2.0 | 假 |
 | m | 元迁移图像标签。 | milvusdb/meta-migration:v2.2.0 | 假 |
-| o | 元迁移操作。 | migrate | 假 |
+| o | 元迁移操作符。 | migrate | 假 |
 | d | 迁移完成后是否删除迁移 pod。 | false | 假 |
 | c | 元迁移 pvc 的存储类别。 | default storage class | 假 |
-| e | milvus 使用的 etcd endpoint。 | etcd svc installed with milvus | 错误 |
+| e | Milvus 使用的 etcd endpoint。 | etcd svc installed with milvus | 错误 |

            1.迁移元数据

              @@ -213,25 +213,25 @@ my-release-minio-744dd9586f-qngzv 1/1 Running 0 84s
            1. 迁移 Milvus 元数据。
            2. 使用新镜像启动 Milvus 组件。
-2.将 Milvus 从 2.1.x 升级到 2.4.9
-以下命令假定你将 Milvus 从 v2.1.4 升级到 2.4.9。请将它们更改为适合你需要的版本。
+2.将 Milvus 从 2.1.x 升级到 2.4.13-hotfix
+以下命令假定你将 Milvus 从 v2.1.4 升级到 2.4.13-hotfix。请将它们改为适合你需要的版本。

 1. 指定 Milvus 实例名称、源 Milvus 版本和目标 Milvus 版本。

-   ./migrate.sh -i my-release -s 2.1.4 -t 2.4.9
+   ./migrate.sh -i my-release -s 2.1.4 -t 2.4.13-hotfix

-2. 如果你的 Milvus 没有安装在默认的 K8s 命名空间中,用-n 指定命名空间。
+2. 如果你的 Milvus 没有安装在默认的 K8s 命名空间,请用-n 指定命名空间。

-   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9
+   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix

 3. 如果 Milvus 安装的是自定义rootpath ,请用-r 指定根路径。

-   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev
+   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev

-4. 如果 Milvus 安装的是自定义image ,请使用-w 指定图片标签。
+4. 如果你的 Milvus 安装的是自定义image ,请用-w 指定图片标签。

-   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -w milvusdb/milvus:v2.4.9
+   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -w milvusdb/milvus:v2.4.13-hotfix

-5. 如果希望在迁移完成后自动移除迁移 pod,请设置-d true 。
+5. 如果想在迁移完成后自动移除迁移 pod,请设置-d true 。

-   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -w milvusdb/milvus:v2.4.9 -d true
+   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -w milvusdb/milvus:v2.4.13-hotfix -d true

 6. 如果迁移失败,请回滚并重新迁移。

-   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
-   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.9 -r by-dev -o migrate -w milvusdb/milvus:v2.4.9
+   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o rollback -w milvusdb/milvus:v2.1.1
+   ./migrate.sh -i my-release -n milvus -s 2.1.4 -t 2.4.13-hotfix -r by-dev -o migrate -w milvusdb/milvus:v2.4.13-hotfix
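After these steps, the release should report the target version; a minimal check (the release name and milvus namespace follow the examples above):

 # Confirm the chart/app version and the image actually running in the pods.
 helm list -n milvus --filter '^my-release$'
 kubectl -n milvus get pods -l app.kubernetes.io/instance=my-release \
   -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[0].image}{"\n"}{end}'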
               
            diff --git a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-operator.json b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-operator.json index 205802f90..9fd949069 100644 --- a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-operator.json +++ b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-operator.json @@ -1,47 +1 @@ -{ - "codeList": [ - "helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n", - "apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.9\n", - "apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.9\n", - "apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n", - "kubectl apply -f milvusupgrade.yml\n", - "apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nlabels:\n app: milvus\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.9\n", - "kubectl apply -f milvusupgrade.yaml\n", - "apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.9\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.9\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n", - "$ kubectl apply -f https://raw.githubusercontent.com/zilliztech/milvus-operator/main/config/samples/milvusupgrade.yaml\n", - "kubectl describe milvus release-name\n" - ], - "headingContent": "", - "anchorList": [ - { - "label": "用 Milvus Operator 升级 Milvus 单机版", - "href": "Upgrade-Milvus-Standalone-with-Milvus-Operator", - "type": 1, - "isActive": false - }, - { - "label": "升级您的 Milvus Operator", - "href": "Upgrade-your-Milvus-operator", - "type": 2, - "isActive": false - }, - { - "label": "进行滚动升级", - "href": "Conduct-a-rolling-upgrade", - "type": 2, - "isActive": false - }, - { - "label": "改变Milvus的形象,使其升级", - "href": "Upgrade-Milvus-by-changing-its-image", - "type": 2, - "isActive": false - }, - { - "label": "迁移元数据", - "href": "Migrate-the-metadata", - "type": 2, - "isActive": false - } - ] -} +{"codeList":["helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/\nhelm repo update zilliztech-milvus-operator\nhelm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: rollingUpgrade # Default value, can be omitted\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: true\n imageUpdateMode: all\n image: milvusdb/milvus:v2.4.13-hotfix\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nspec:\n components:\n enableRollingUpdate: 
true\n imageUpdateMode: rollingDowngrade\n image: milvusdb/milvus:\n","kubectl apply -f milvusupgrade.yml\n","apiVersion: milvus.io/v1beta1\nkind: Milvus\nmetadata:\n name: my-release\nlabels:\n app: milvus\nspec:\n # Omit other fields ...\n components:\n image: milvusdb/milvus:v2.4.13-hotfix\n","kubectl apply -f milvusupgrade.yaml\n","apiVersion: milvus.io/v1beta1\nkind: MilvusUpgrade\nmetadata:\n name: my-release-upgrade\nspec:\n milvus:\n namespace: default\n name: my-release\n sourceVersion: \"v2.1.4\"\n targetVersion: \"v2.4.13-hotfix\"\n # below are some omit default values:\n # targetImage: \"milvusdb/milvus:v2.4.13-hotfix\"\n # toolImage: \"milvusdb/meta-migration:v2.2.0\"\n # operation: upgrade\n # rollbackIfFailed: true\n # backupPVC: \"\"\n # maxRetry: 3\n","$ kubectl apply -f https://raw.githubusercontent.com/zilliztech/milvus-operator/main/config/samples/milvusupgrade.yaml\n","kubectl describe milvus release-name\n"],"headingContent":"Upgrade Milvus Standalone with Milvus Operator","anchorList":[{"label":"使用 Milvus Operator 升级 Milvus Standalone","href":"Upgrade-Milvus-Standalone-with-Milvus-Operator","type":1,"isActive":false},{"label":"升级 Milvus 操作符","href":"Upgrade-your-Milvus-operator","type":2,"isActive":false},{"label":"进行滚动升级","href":"Conduct-a-rolling-upgrade","type":2,"isActive":false},{"label":"通过更改映像升级 Milvus","href":"Upgrade-Milvus-by-changing-its-image","type":2,"isActive":false},{"label":"迁移元数据","href":"Migrate-the-metadata","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-operator.md b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-operator.md index 0d7f875a0..724221563 100644 --- a/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-operator.md +++ b/localization/v2.4.x/site/zh/adminGuide/upgrade_milvus_standalone-operator.md @@ -4,12 +4,11 @@ label: Milvus Operator order: 0 group: upgrade_milvus_standalone-operator.md related_key: upgrade Milvus Standalone -summary: 了解如何使用 Milvus Operator 升级 Milvus 单机版。 -title: 用 Milvus Operator 升级 Milvus 单机版 +summary: 了解如何使用 Milvus Operator 升级 Milvus Standalone。 +title: 使用 Milvus Operator 升级 Milvus Standalone --- - -

            使用 Milvus Operator 升级 Milvus 单机版

-本指南介绍如何用 Milvus Operator 升级 Milvus 单机版。
+本指南介绍如何使用 Milvus Operator 升级 Milvus Standalone。
-升级 Milvus Operator
+升级 Milvus 操作符
-运行以下命令将你的 Milvus 操作系统版本升级到 v1.0.1。
+运行以下命令将您的 Milvus 操作符版本升级到 v1.0.1。

            helm repo add zilliztech-milvus-operator https://zilliztech.github.io/milvus-operator/
             helm repo update zilliztech-milvus-operator
             helm -n milvus-operator upgrade milvus-operator zilliztech-milvus-operator/milvus-operator
             
-将 Milvus Operator 升级到最新版本后,您有以下选择:
+将 Milvus 操作符升级到最新版本后,您有以下选择:

            进行滚动升级

-从 Milvus 2.2.3 版开始,你可以将 Milvus 协调器配置为主动待机模式,并为它们启用滚动升级功能,这样 Milvus 就能在协调器升级期间响应传入的请求。在以前的版本中,升级时需要移除协调器,然后再创建协调器,这可能会导致服务出现一定程度的停机。
-基于 Kubernetes 提供的滚动更新功能,Milvus Operator 会根据部署的依赖关系对部署进行有序更新。此外,Milvus 还实施了一种机制,确保其组件在升级期间与依赖于它们的组件保持兼容,从而大大减少了潜在的服务停机时间。
+自 Milvus 2.2.3 起,你可以将 Milvus 协调器配置为主动待机模式,并为它们启用滚动升级功能,这样 Milvus 就能在协调器升级期间响应传入的请求。在以前的版本中,升级时需要移除协调器,然后再创建协调器,这可能会导致服务出现一定的停机时间。
+基于 Kubernetes 提供的滚动更新功能,Milvus 操作符会根据部署的依赖关系强制执行有序更新。此外,Milvus 还实施了一种机制,确保其组件在升级期间与依赖于它们的组件保持兼容,从而大大减少了潜在的服务停机时间。

            滚动升级功能默认为禁用。你需要通过配置文件明确启用它。

            apiVersion: milvus.io/v1beta1
             kind: Milvus
            @@ -77,10 +76,10 @@ spec:
               components:
                 enableRollingUpdate: true
                 imageUpdateMode: rollingUpgrade # Default value, can be omitted
            -    image: milvusdb/milvus:v2.4.9
            +    image: milvusdb/milvus:v2.4.13-hotfix
             

            在上述配置文件中,将spec.components.enableRollingUpdate 设置为true ,将spec.components.image 设置为所需的 Milvus 版本。

-默认情况下,Milvus 会以有序方式对协调器进行滚动升级,即逐个替换协调器 pod 映像。要缩短升级时间,可以考虑将spec.components.imageUpdateMode 设置为all ,这样 Milvus 就会同时替换所有 pod 映像。
+默认情况下,Milvus 会以有序的方式对协调器进行滚动升级,即逐个替换协调器 pod 映像。要缩短升级时间,可以考虑将spec.components.imageUpdateMode 设置为all ,这样 Milvus 就会同时替换所有 pod 映像。

            apiVersion: milvus.io/v1beta1
             kind: Milvus
             metadata:
            @@ -89,9 +88,9 @@ spec:
               components:
                 enableRollingUpdate: true
                 imageUpdateMode: all
            -    image: milvusdb/milvus:v2.4.9
            +    image: milvusdb/milvus:v2.4.13-hotfix
             
-您可以将spec.components.imageUpdateMode 设置为rollingDowngrade ,让 Milvus 用较低的版本替换协调器 pod 映像。
+可以将spec.components.imageUpdateMode 设置为rollingDowngrade ,让 Milvus 用较低的版本替换协调器 pod 映像。

            apiVersion: milvus.io/v1beta1
             kind: Milvus
             metadata:
            @@ -120,7 +119,7 @@ spec:
                       d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
                     >
                   
            -    

            在正常情况下,你可以通过更改映像将 Milvus 升级到最新版本。但要注意的是,用这种方法升级 Milvus 时会有一定的停机时间。

            +

            在正常情况下,你可以通过更改映像将你的 Milvus 升级到最新版本。但要注意的是,用这种方法升级 Milvus 时会有一定的停机时间。

            编写如下配置文件,并将其保存为milvusupgrade.yaml

            apiVersion: milvus.io/v1beta1
             kind: Milvus
            @@ -131,7 +130,7 @@ labels:
             spec:
               # Omit other fields ...
               components:
            -   image: milvusdb/milvus:v2.4.9
            +   image: milvusdb/milvus:v2.4.13-hotfix
             

            然后运行以下命令执行升级:

            kubectl apply -f milvusupgrade.yaml
            @@ -151,8 +150,8 @@ spec:
                       d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
                     >
                   
            -    

            自 Milvus 2.2.0 起,元数据与以前版本的元数据不兼容。以下示例片段假定从 Milvus 2.1.4 升级到 Milvus v2.4.9。

            -

            1.为元数据迁移创建.yaml 文件

            创建元数据迁移文件。下面是一个示例。需要在配置文件中指定namesourceVersiontargetVersion 。下面的示例将name 设置为my-release-upgrade ,将sourceVersion 设置为v2.1.4 ,将targetVersion 设置为v2.4.9 。这意味着您的 Milvus 实例将从 v2.1.4 升级到 v2.4.9。

            +

            自 Milvus 2.2.0 起,元数据与以前版本的元数据不兼容。以下示例片段假定从 Milvus 2.1.4 升级到 Milvus v2.4.13-hotfix。

            +

            1.创建用于元数据迁移的.yaml 文件

            创建元数据迁移文件。下面是一个示例。需要在配置文件中指定namesourceVersiontargetVersion 。下面的示例将name 设置为my-release-upgrade ,将sourceVersion 设置为v2.1.4 ,将targetVersion 设置为v2.4.13-hotfix 。这意味着你的 Milvus 实例将从 v2.1.4 升级到 v2.4.13-hotfix。

            apiVersion: milvus.io/v1beta1
             kind: MilvusUpgrade
             metadata:
            @@ -162,9 +161,9 @@ spec:
                 namespace: default
                 name: my-release
               sourceVersion: "v2.1.4"
            -  targetVersion: "v2.4.9"
            +  targetVersion: "v2.4.13-hotfix"
               # below are some omit default values:
            -  # targetImage: "milvusdb/milvus:v2.4.9"
            +  # targetImage: "milvusdb/milvus:v2.4.13-hotfix"
               # toolImage: "milvusdb/meta-migration:v2.2.0"
               # operation: upgrade
               # rollbackIfFailed: true
            @@ -177,6 +176,6 @@ spec:
             

3. Check the metadata migration status

Run the following command to check the status of the metadata migration:

            kubectl describe milvus release-name
             
A status of ready in the output means the metadata migration succeeded.

Alternatively, you can also run kubectl get pod to check all the pods, as in the sketch below. If all pods are ready, the metadata migration succeeded.
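For instance, a quick check could look like the following (pod names and namespace depend on your deployment; the expected-state comments are an illustration, not output from the source):

kubectl get pod
# Each pod should report a READY count such as 1/1 and a STATUS of Running
# once the migration has completed.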

4. Delete my-release-upgrade

When the upgrade succeeds, delete my-release-upgrade in the YAML file.

            diff --git a/localization/v2.4.x/site/zh/embeddings/embed-with-cohere.json b/localization/v2.4.x/site/zh/embeddings/embed-with-cohere.json index 40401b602..47f6d535c 100644 --- a/localization/v2.4.x/site/zh/embeddings/embed-with-cohere.json +++ b/localization/v2.4.x/site/zh/embeddings/embed-with-cohere.json @@ -1 +1 @@ -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","cohere_ef = CohereEmbeddingFunction(\n model_name=\"embed-english-light-v3.0\",\n api_key=\"YOUR_COHERE_API_KEY\",\n input_type=\"search_document\",\n embedding_types=[\"float\"]\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = cohere_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", cohere_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 3.43322754e-02, 1.16252899e-03, -5.25207520e-02, 1.32846832e-03,\n -6.80541992e-02, 6.10961914e-02, -7.06176758e-02, 1.48925781e-01,\n 1.54174805e-01, 1.98516846e-02, 2.43835449e-02, 3.55224609e-02,\n 1.82952881e-02, 7.57446289e-02, -2.40783691e-02, 4.40063477e-02,\n...\n 0.06359863, -0.01971436, -0.02253723, 0.00354195, 0.00222015,\n 0.00184727, 0.03408813, -0.00777817, 0.04919434, 0.01519775,\n -0.02862549, 0.04760742, -0.07891846, 0.0124054 ], dtype=float32)]\nDim: 384 (384,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = cohere_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", cohere_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-1.33361816e-02, 9.79423523e-04, -7.28759766e-02, -1.93786621e-02,\n -9.71679688e-02, 4.34875488e-02, -9.81445312e-02, 1.16882324e-01,\n 5.89904785e-02, -4.19921875e-02, 4.95910645e-02, 5.83496094e-02,\n 3.47595215e-02, -5.87463379e-03, -7.30514526e-03, 2.92816162e-02,\n...\n 0.00749969, -0.01192474, 0.02719116, 0.03347778, 0.07696533,\n 0.01409149, 0.00964355, -0.01681519, -0.0073204 , 0.00043154,\n -0.04577637, 0.03591919, -0.02807617, -0.04812622], dtype=float32)]\nDim 384 (384,)\n"],"headingContent":"","anchorList":[{"label":"Cohere","href":"Cohere","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import CohereEmbeddingFunction\n\ncohere_ef = CohereEmbeddingFunction(\n model_name=\"embed-english-light-v3.0\",\n api_key=\"YOUR_COHERE_API_KEY\",\n input_type=\"search_document\",\n embedding_types=[\"float\"]\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = cohere_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", cohere_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 3.43322754e-02, 1.16252899e-03, -5.25207520e-02, 1.32846832e-03,\n -6.80541992e-02, 6.10961914e-02, -7.06176758e-02, 1.48925781e-01,\n 1.54174805e-01, 1.98516846e-02, 2.43835449e-02, 3.55224609e-02,\n 1.82952881e-02, 7.57446289e-02, -2.40783691e-02, 
4.40063477e-02,\n...\n 0.06359863, -0.01971436, -0.02253723, 0.00354195, 0.00222015,\n 0.00184727, 0.03408813, -0.00777817, 0.04919434, 0.01519775,\n -0.02862549, 0.04760742, -0.07891846, 0.0124054 ], dtype=float32)]\nDim: 384 (384,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = cohere_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", cohere_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-1.33361816e-02, 9.79423523e-04, -7.28759766e-02, -1.93786621e-02,\n -9.71679688e-02, 4.34875488e-02, -9.81445312e-02, 1.16882324e-01,\n 5.89904785e-02, -4.19921875e-02, 4.95910645e-02, 5.83496094e-02,\n 3.47595215e-02, -5.87463379e-03, -7.30514526e-03, 2.92816162e-02,\n...\n 0.00749969, -0.01192474, 0.02719116, 0.03347778, 0.07696533,\n 0.01409149, 0.00964355, -0.01681519, -0.0073204 , 0.00043154,\n -0.04577637, 0.03591919, -0.02807617, -0.04812622], dtype=float32)]\nDim 384 (384,)\n"],"headingContent":"Cohere","anchorList":[{"label":"嵌入模型","href":"Cohere","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/embeddings/embed-with-cohere.md b/localization/v2.4.x/site/zh/embeddings/embed-with-cohere.md index f5d5babda..24b3361cd 100644 --- a/localization/v2.4.x/site/zh/embeddings/embed-with-cohere.md +++ b/localization/v2.4.x/site/zh/embeddings/embed-with-cohere.md @@ -4,7 +4,6 @@ order: 9 summary: 本文介绍如何使用 CohereEmbeddingFunction 使用 Cohere 嵌入模型对文档和查询进行编码。 title: 嵌入 Cohere --- -

Cohere

Cohere's embedding models are used to generate text embeddings, which are lists of floating-point numbers that capture semantic information about the text. These embeddings can be used for tasks like text classification and semantic search.

Milvus integrates with Cohere's embedding models using the CohereEmbeddingFunction class. This class handles the computation of embeddings and returns them in a format compatible with Milvus for indexing and searching.

To use this feature, install the necessary dependencies:

            pip install --upgrade pymilvus
             pip install "pymilvus[model]"
             

Then, instantiate the CohereEmbeddingFunction:

from pymilvus.model.dense import CohereEmbeddingFunction

cohere_ef = CohereEmbeddingFunction(
    model_name="embed-english-light-v3.0",
    api_key="YOUR_COHERE_API_KEY",
    input_type="search_document",
    embedding_types=["float"]
)
             

Parameters

• model_name (string)

  The name of the Cohere embedding model to use for encoding. You can specify any available Cohere embedding model name, for example embed-english-v3.0, embed-multilingual-v3.0, etc. If you leave this parameter unspecified, embed-english-light-v3.0 will be used. For a list of available models, refer to Embed.

• api_key (string)

  The API key for accessing the Cohere API.

• input_type (string)

  The type of input passed to the model. Required for embedding models v3 and higher.

  • "search_document": used for embeddings stored in a vector database for search use cases.
  • "search_query": used for embeddings of search queries run against a vector database to find relevant documents.
  • "classification": used for embeddings passed through a text classifier.
  • "clustering": used for embeddings run through a clustering algorithm.
• embedding_types (List[str])

  The type of embeddings you want to get back. Not required; defaults to None, which returns the Embed Floats response type. Currently, you can only specify a single value for this parameter. Possible values:

  • "float": use this when you want to get back the default float embeddings. Valid for all models.
  • "binary": use this when you want to get back signed binary embeddings. Valid only for v3 models.
  • "ubinary": use this when you want to get back unsigned binary embeddings. Valid only for v3 models.
To create embeddings for documents, use the encode_documents() method:

docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Alan Turing was the first person to conduct substantial research in AI.",
    "Born in Maida Vale, London, Turing was raised in southern England.",
]

docs_embeddings = cohere_ef.encode_documents(docs)

# Print embeddings
print("Embeddings:", docs_embeddings)
# Print dimension and shape of embeddings
print("Dim:", cohere_ef.dim, docs_embeddings[0].shape)

The expected output is similar to the following:

Embeddings: [array([ 3.43322754e-02,  1.16252899e-03, -5.25207520e-02,  1.32846832e-03,
       -6.80541992e-02,  6.10961914e-02, -7.06176758e-02,  1.48925781e-01,
...
       -0.02862549,  0.04760742, -0.07891846,  0.0124054 ], dtype=float32)]
Dim: 384 (384,)

To create embeddings for queries, use the encode_queries() method:

queries = ["When was artificial intelligence founded", 
           "Where was Alan Turing born?"]

query_embeddings = cohere_ef.encode_queries(queries)

print("Embeddings:", query_embeddings)
print("Dim", cohere_ef.dim, query_embeddings[0].shape)

The expected output is similar to the following:

Embeddings: [array([-1.33361816e-02,  9.79423523e-04, -7.28759766e-02, -1.93786621e-02,
       -9.71679688e-02,  4.34875488e-02, -9.81445312e-02,  1.16882324e-01,
...
       -0.04577637,  0.03591919, -0.02807617, -0.04812622], dtype=float32)]
Dim 384 (384,)
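As a small follow-up sketch (not from the source), the returned NumPy vectors can be scored directly; the dot product below approximates cosine similarity when the embeddings are normalized:

import numpy as np

# Score every document against the first query and pick the best match.
scores = np.stack(docs_embeddings) @ query_embeddings[0]
print("Best match:", docs[int(np.argmax(scores))])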
            diff --git a/localization/v2.4.x/site/zh/embeddings/embed-with-jina.json b/localization/v2.4.x/site/zh/embeddings/embed-with-jina.json
            index 3ba04b590..8d017c798 100644
            --- a/localization/v2.4.x/site/zh/embeddings/embed-with-jina.json
            +++ b/localization/v2.4.x/site/zh/embeddings/embed-with-jina.json
            @@ -1 +1 @@
            -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n    model_name=\"jina-embeddings-v2-base-en\", # Defaults to `jina-embeddings-v2-base-en`\n    api_key=JINAAI_API_KEY # Provide your Jina AI API key\n)\n","docs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = jina_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", jina_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([-4.88487840e-01, -4.28095880e-01,  4.90086500e-01, -1.63274320e-01,\n        3.43437800e-01,  3.21476880e-01,  2.83173790e-02, -3.10403670e-01,\n        4.76985040e-01, -1.77410420e-01, -3.84803180e-01, -2.19224200e-01,\n       -2.52898000e-01,  6.62411900e-02, -8.58173100e-01,  1.05221800e+00,\n...\n       -2.04462400e-01,  7.14229800e-01, -1.66823000e-01,  8.72551440e-01,\n        5.53560140e-01,  8.92506300e-01, -2.39408610e-01, -4.22413560e-01,\n       -3.19551350e-01,  5.59153850e-01,  2.44338100e-01, -8.60452100e-01])]\nDim: 768 (768,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = jina_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", jina_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([-5.99164660e-01, -3.49827350e-01,  8.22405160e-01, -1.18632730e-01,\n        5.78107540e-01,  1.09789170e-01,  2.91604200e-01, -3.29306450e-01,\n        2.93779640e-01, -2.17880800e-01, -6.84535440e-01, -3.79752000e-01,\n       -3.47541800e-01,  9.20846100e-02, -6.13804400e-01,  6.31312800e-01,\n...\n       -1.84993740e-02,  9.38629150e-01,  2.74858470e-02,  1.09396360e+00,\n        3.96270750e-01,  7.44445800e-01, -1.95404050e-01, -6.08383200e-01,\n       -3.75076300e-01,  3.87512200e-01,  8.11889650e-01, -3.76407620e-01])]\nDim 768 (768,)\n"],"headingContent":"","anchorList":[{"label":"Jina AI","href":"Jina-AI","type":1,"isActive":false}]}
            \ No newline at end of file
            +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n    model_name=\"jina-embeddings-v3\", # Defaults to `jina-embeddings-v3`\n    api_key=JINAAI_API_KEY, # Provide your Jina AI API key\n    task=\"retrieval.passage\", # Specify the task\n    dimensions=1024, # Defaults to 1024\n)\n","\n```python\ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = jina_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", jina_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([9.80641991e-02, -8.51697400e-02,  7.36531913e-02,  1.42558888e-02,\n       -2.23589484e-02,  1.68494112e-03, -3.50753777e-02, -3.11530549e-02,\n       -3.26012149e-02,  5.04568312e-03,  3.69836427e-02,  3.48948985e-02,\n        8.19722563e-03,  5.88679723e-02, -6.71099266e-03, -1.82369724e-02,\n...\n        2.48654783e-02,  3.43279652e-02, -1.66154150e-02, -9.90478322e-03,\n       -2.96043139e-03, -8.57473817e-03, -7.39028037e-04,  6.25024503e-03,\n       -1.08831357e-02, -4.00776342e-02,  3.25369164e-02, -1.42691191e-03])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n           \"Where was Alan Turing born?\"]\n\nquery_embeddings = jina_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", jina_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([8.79201014e-03,  1.47551354e-02,  4.02722731e-02, -2.52991207e-02,\n        1.12719582e-02,  3.75947170e-02,  3.97946090e-02, -7.36681819e-02,\n       -2.17952449e-02, -1.16298944e-02, -6.83426252e-03, -5.12507409e-02,\n        5.26071340e-02,  6.75181448e-02,  3.92445624e-02, -1.40817231e-02,\n...\n        8.81703943e-03,  4.24629413e-02, -2.32944116e-02, -2.05193572e-02,\n       -3.22035812e-02,  2.81896023e-03,  3.85326855e-02,  3.64372656e-02,\n       -1.65050142e-02, -4.26847413e-02,  2.02664156e-02, -1.72684863e-02])]\nDim 1024 (1024,)\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_ef = JinaEmbeddingFunction(\n    model_name=\"jina-embeddings-v3\", # Defaults to `jina-embeddings-v3`\n    api_key=JINA_API_KEY, # Provide your Jina AI API key\n    task=\"text-matching\",\n    dimensions=1024, # Defaults to 1024\n)\n\ntexts = [\n    \"Follow the white rabbit.\",  # English\n    \"Sigue al conejo blanco.\",  # Spanish\n    \"Suis le lapin blanc.\",  # French\n    \"跟着白兔走。\",  # Chinese\n    \"اتبع الأرنب الأبيض.\",  # Arabic\n    \"Folge dem weißen Kaninchen.\",  # German\n]\n\nembeddings = jina_ef(texts)\n\n# Compute similarities\nprint(embeddings[0] @ embeddings[1].T)\n"],"headingContent":"Jina AI","anchorList":[{"label":"吉纳人工智能","href":"Jina-AI","type":1,"isActive":false}]}
            \ No newline at end of file
            diff --git a/localization/v2.4.x/site/zh/embeddings/embed-with-jina.md b/localization/v2.4.x/site/zh/embeddings/embed-with-jina.md
            index f65f2e41d..605774a30 100644
            --- a/localization/v2.4.x/site/zh/embeddings/embed-with-jina.md
            +++ b/localization/v2.4.x/site/zh/embeddings/embed-with-jina.md
            @@ -1,7 +1,7 @@
             ---
             id: embed-with-jina.md
             order: 8
summary: This article describes how to use the JinaEmbeddingFunction to encode documents and queries with Jina AI embedding models.
title: Jina AI - Embed
             ---
             

Jina AI

Jina AI's embedding models are high-performance text embedding models that can translate textual inputs into numerical representations, capturing the semantics of the text. These models excel in applications like dense retrieval, semantic textual similarity, and multilingual understanding.

Milvus integrates with Jina AI's embedding models via the JinaEmbeddingFunction class. This class provides methods for encoding documents and queries with the Jina AI embedding models and returning the embeddings as dense vectors compatible with Milvus indexing. To use this functionality, obtain an API key from Jina AI.

To use this feature, install the necessary dependencies:

            pip install --upgrade pymilvus
             pip install "pymilvus[model]"
Then, instantiate the JinaEmbeddingFunction:

from pymilvus.model.dense import JinaEmbeddingFunction

jina_ef = JinaEmbeddingFunction(
    model_name="jina-embeddings-v3", # Defaults to `jina-embeddings-v3`
    api_key=JINAAI_API_KEY, # Provide your Jina AI API key
    task="retrieval.passage", # Specify the task
    dimensions=1024, # Defaults to 1024
)
             

Parameters

• model_name (string)

  The name of the Jina AI embedding model to use for encoding. You can specify any available Jina AI embedding model name, for example jina-embeddings-v3, jina-embeddings-v2-base-en, etc. If you leave this parameter unspecified, jina-embeddings-v3 will be used. For a list of available models, refer to Jina Embeddings.

• api_key (string)

  The API key for accessing the Jina AI API.

• task (string)

  The type of input passed to the model. Required for embedding models v3 and higher.

  • "retrieval.passage": used to encode large documents in retrieval tasks at indexing time.
  • "retrieval.query": used to encode user queries or questions in retrieval tasks.
  • "classification": used to encode text for text classification tasks.
  • "text-matching": used to encode text for similarity matching, such as measuring the similarity between two sentences.
  • "clustering": used for clustering or reranking tasks.
• dimensions (int)

  The number of dimensions the resulting output embeddings should have. Defaults to 1024. Only supported for embedding models v3 and higher.

• late_chunking (bool)

  This parameter controls whether to use the new chunking method Jina AI recently introduced for encoding a batch of sentences. Defaults to False. When set to True, the Jina AI API concatenates all sentences in the input field and feeds them to the model as a single string. Internally, the model embeds this long concatenated string and then performs late chunking, returning a list of embeddings that matches the size of the input list.
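As a hedged illustration of the parameter described above (the flag usage follows this page's description rather than a verified signature), late chunking would be enabled when the inputs are consecutive chunks of one long document:

jina_chunk_ef = JinaEmbeddingFunction(
    model_name="jina-embeddings-v3",
    api_key=JINAAI_API_KEY,
    task="retrieval.passage",
    late_chunking=True,  # concatenate the batch, embed once, then split late
)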

To create embeddings for documents, use the encode_documents() method. This method is designed for document embeddings in asymmetric retrieval tasks, such as indexing documents for search or recommendation tasks. It uses retrieval.passage as the task.

docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Alan Turing was the first person to conduct substantial research in AI.",
    "Born in Maida Vale, London, Turing was raised in southern England.",
]

docs_embeddings = jina_ef.encode_documents(docs)

# Print embeddings
print("Embeddings:", docs_embeddings)
# Print dimension and shape of embeddings
print("Dim:", jina_ef.dim, docs_embeddings[0].shape)

The expected output is similar to the following:

Embeddings: [array([9.80641991e-02, -8.51697400e-02,  7.36531913e-02,  1.42558888e-02,
       -2.23589484e-02,  1.68494112e-03, -3.50753777e-02, -3.11530549e-02,
       -3.26012149e-02,  5.04568312e-03,  3.69836427e-02,  3.48948985e-02,
        8.19722563e-03,  5.88679723e-02, -6.71099266e-03, -1.82369724e-02,
...
        2.48654783e-02,  3.43279652e-02, -1.66154150e-02, -9.90478322e-03,
       -2.96043139e-03, -8.57473817e-03, -7.39028037e-04,  6.25024503e-03,
       -1.08831357e-02, -4.00776342e-02,  3.25369164e-02, -1.42691191e-03])]
Dim: 1024 (1024,)
             
To create embeddings for queries, use the encode_queries() method. This method is designed for query embeddings in asymmetric retrieval tasks, such as search queries or questions. It uses retrieval.query as the task.

queries = ["When was artificial intelligence founded", 
           "Where was Alan Turing born?"]

query_embeddings = jina_ef.encode_queries(queries)

print("Embeddings:", query_embeddings)
print("Dim", jina_ef.dim, query_embeddings[0].shape)

The expected output is similar to the following:

Embeddings: [array([8.79201014e-03,  1.47551354e-02,  4.02722731e-02, -2.52991207e-02,
        1.12719582e-02,  3.75947170e-02,  3.97946090e-02, -7.36681819e-02,
       -2.17952449e-02, -1.16298944e-02, -6.83426252e-03, -5.12507409e-02,
        5.26071340e-02,  6.75181448e-02,  3.92445624e-02, -1.40817231e-02,
...
        8.81703943e-03,  4.24629413e-02, -2.32944116e-02, -2.05193572e-02,
       -3.22035812e-02,  2.81896023e-03,  3.85326855e-02,  3.64372656e-02,
       -1.65050142e-02, -4.26847413e-02,  2.02664156e-02, -1.72684863e-02])]
Dim 1024 (1024,)

To create embeddings of inputs for similarity matching (such as STS or symmetric retrieval tasks), text classification, clustering, or reranking tasks, use the appropriate task parameter value when instantiating the JinaEmbeddingFunction class.

from pymilvus.model.dense import JinaEmbeddingFunction

jina_ef = JinaEmbeddingFunction(
    model_name="jina-embeddings-v3", # Defaults to `jina-embeddings-v3`
    api_key=JINA_API_KEY, # Provide your Jina AI API key
    task="text-matching",
    dimensions=1024, # Defaults to 1024
)

texts = [
    "Follow the white rabbit.",  # English
    "Sigue al conejo blanco.",  # Spanish
    "Suis le lapin blanc.",  # French
    "跟着白兔走。",  # Chinese
    "اتبع الأرنب الأبيض.",  # Arabic
    "Folge dem weißen Kaninchen.",  # German
]

embeddings = jina_ef(texts)

# Compute similarities
print(embeddings[0] @ embeddings[1].T)
             
            diff --git a/localization/v2.4.x/site/zh/embeddings/embed-with-voyage.json b/localization/v2.4.x/site/zh/embeddings/embed-with-voyage.json index 41324ffb6..ba7547304 100644 --- a/localization/v2.4.x/site/zh/embeddings/embed-with-voyage.json +++ b/localization/v2.4.x/site/zh/embeddings/embed-with-voyage.json @@ -1 +1 @@ -{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import VoyageEmbeddingFunction\n\nvoyage_ef = VoyageEmbeddingFunction(\n model_name=\"voyage-lite-02-instruct\", # Defaults to `voyage-2`\n api_key=VOYAGE_API_KEY # Provide your Voyage API key\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = voyage_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", voyage_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 0.02582654, -0.00907086, -0.04604037, ..., -0.01227521,\n 0.04420955, -0.00038829]), array([ 0.03844212, -0.01597065, -0.03728884, ..., -0.02118733,\n 0.03349845, 0.0065346 ]), array([ 0.05143557, -0.01096631, -0.02690451, ..., -0.02416254,\n 0.07658645, 0.03064499])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = voyage_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", voyage_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([ 0.01733501, -0.0230672 , -0.05208827, ..., -0.00957995,\n 0.04493361, 0.01485138]), array([ 0.05937521, -0.00729363, -0.02184347, ..., -0.02107683,\n 0.05706626, 0.0263358 ])]\nDim 1024 (1024,)\n"],"headingContent":"","anchorList":[{"label":"航行","href":"Voyage","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":["pip install --upgrade pymilvus\npip install \"pymilvus[model]\"\n","from pymilvus.model.dense import VoyageEmbeddingFunction\n\nvoyage_ef = VoyageEmbeddingFunction(\n model_name=\"voyage-3\", # Defaults to `voyage-3`\n api_key=VOYAGE_API_KEY # Provide your Voyage API key\n)\n","docs = [\n \"Artificial intelligence was founded as an academic discipline in 1956.\",\n \"Alan Turing was the first person to conduct substantial research in AI.\",\n \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\ndocs_embeddings = voyage_ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", docs_embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", voyage_ef.dim, docs_embeddings[0].shape)\n","Embeddings: [array([ 0.02582654, -0.00907086, -0.04604037, ..., -0.01227521,\n 0.04420955, -0.00038829]), array([ 0.03844212, -0.01597065, -0.03728884, ..., -0.02118733,\n 0.03349845, 0.0065346 ]), array([ 0.05143557, -0.01096631, -0.02690451, ..., -0.02416254,\n 0.07658645, 0.03064499])]\nDim: 1024 (1024,)\n","queries = [\"When was artificial intelligence founded\", \n \"Where was Alan Turing born?\"]\n\nquery_embeddings = voyage_ef.encode_queries(queries)\n\nprint(\"Embeddings:\", query_embeddings)\nprint(\"Dim\", voyage_ef.dim, query_embeddings[0].shape)\n","Embeddings: [array([ 0.01733501, -0.0230672 , -0.05208827, ..., -0.00957995,\n 0.04493361, 0.01485138]), array([ 0.05937521, -0.00729363, -0.02184347, ..., -0.02107683,\n 0.05706626, 0.0263358 ])]\nDim 1024 
(1024,)\n"],"headingContent":"Voyage","anchorList":[{"label":"Voyage","href":"Voyage","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/embeddings/embed-with-voyage.md b/localization/v2.4.x/site/zh/embeddings/embed-with-voyage.md index 90faece79..9ccf863d1 100644 --- a/localization/v2.4.x/site/zh/embeddings/embed-with-voyage.md +++ b/localization/v2.4.x/site/zh/embeddings/embed-with-voyage.md @@ -2,7 +2,7 @@ id: embed-with-voyage.md order: 7 summary: 本文介绍如何使用 VoyageEmbeddingFunction 使用 Voyage 模型对文档和查询进行编码。 -title: 嵌入航程 +title: 嵌入 Voyage ---

            Voyage

Milvus integrates with Voyage's models via the VoyageEmbeddingFunction class. This class provides methods for encoding documents and queries using the Voyage models and returning the embeddings as dense vectors compatible with Milvus indexing. To use this functionality, obtain an API key from Voyage by creating an account on the Voyage platform.

To use this feature, install the necessary dependencies:

            pip install --upgrade pymilvus
             pip install "pymilvus[model]"
Then, instantiate the VoyageEmbeddingFunction:

from pymilvus.model.dense import VoyageEmbeddingFunction

voyage_ef = VoyageEmbeddingFunction(
    model_name="voyage-3", # Defaults to `voyage-3`
    api_key=VOYAGE_API_KEY # Provide your Voyage API key
)
             

Parameters

• model_name (string): The name of the Voyage model to use for encoding. You can specify any available Voyage model name, for example voyage-3-lite, voyage-finance-2, etc. If you leave this parameter unspecified, voyage-3 will be used. For a list of available models, refer to the Voyage official documentation.
• api_key (string): The API key for accessing the Voyage API. For information on how to create an API key, refer to API Key and Python Client.
To create embeddings for documents, use the encode_documents() method:

docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Alan Turing was the first person to conduct substantial research in AI.",
    "Born in Maida Vale, London, Turing was raised in southern England.",
]

docs_embeddings = voyage_ef.encode_documents(docs)

# Print embeddings
print("Embeddings:", docs_embeddings)
# Print dimension and shape of embeddings
print("Dim:", voyage_ef.dim, docs_embeddings[0].shape)

The expected output is similar to the following:

Embeddings: [array([ 0.02582654, -0.00907086, -0.04604037, ..., -0.01227521,
        0.04420955, -0.00038829]), array([ 0.03844212, -0.01597065, -0.03728884, ..., -0.02118733,
        0.03349845,  0.0065346 ]), array([ 0.05143557, -0.01096631, -0.02690451, ..., -0.02416254,
        0.07658645,  0.03064499])]
Dim: 1024 (1024,)
             
To create embeddings for queries, use the encode_queries() method:

queries = ["When was artificial intelligence founded", 
           "Where was Alan Turing born?"]

query_embeddings = voyage_ef.encode_queries(queries)

print("Embeddings:", query_embeddings)
print("Dim", voyage_ef.dim, query_embeddings[0].shape)

The expected output is similar to the following:

Embeddings: [array([ 0.01733501, -0.0230672 , -0.05208827, ..., -0.00957995,
        0.04493361,  0.01485138]), array([ 0.05937521, -0.00729363, -0.02184347, ..., -0.02107683,
        0.05706626,  0.0263358 ])]
Dim 1024 (1024,)
             
            diff --git a/localization/v2.4.x/site/zh/embeddings/embeddings.json b/localization/v2.4.x/site/zh/embeddings/embeddings.json
            index e44bcd704..d69427320 100644
            --- a/localization/v2.4.x/site/zh/embeddings/embeddings.json
            +++ b/localization/v2.4.x/site/zh/embeddings/embeddings.json
            @@ -1 +1 @@
            -{"codeList":["pip install \"pymilvus[model]\"\n","from pymilvus import model\n\n# This will download \"all-MiniLM-L6-v2\", a light weight model.\nef = model.DefaultEmbeddingFunction()\n\n# Data from which embeddings are to be generated \ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nembeddings = ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", ef.dim, embeddings[0].shape)\n","Embeddings: [array([-3.09392996e-02, -1.80662833e-02,  1.34775648e-02,  2.77156215e-02,\n       -4.86349640e-03, -3.12581174e-02, -3.55921760e-02,  5.76934684e-03,\n        2.80773244e-03,  1.35783911e-01,  3.59678417e-02,  6.17732145e-02,\n...\n       -4.61330153e-02, -4.85207550e-02,  3.13997865e-02,  7.82178566e-02,\n       -4.75336798e-02,  5.21207601e-02,  9.04406682e-02, -5.36676683e-02],\n      dtype=float32)]\nDim: 384 (384,)\n","from pymilvus.model.hybrid import BGEM3EmbeddingFunction\nfrom pymilvus import (\n    utility,\n    FieldSchema, CollectionSchema, DataType,\n    Collection, AnnSearchRequest, RRFRanker, connections,\n)\n","# 1. prepare a small corpus to search\ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Who started AI research?\"\n\n# BGE-M3 model can embed texts as dense and sparse vectors.\n# It is included in the optional `model` module in pymilvus, to install it,\n# simply run \"pip install pymilvus[model]\".\n\nbge_m3_ef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\n\ndocs_embeddings = bge_m3_ef(docs)\nquery_embeddings = bge_m3_ef([query])\n","from pymilvus.model.sparse import BM25EmbeddingFunction\n","# 1. prepare a small corpus to search\ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Where was Turing born?\"\nbm25_ef = BM25EmbeddingFunction()\n\n# 2. fit the corpus to get BM25 model parameters on your documents.\nbm25_ef.fit(docs)\n\n# 3. store the fitted parameters to disk to expedite future processing.\nbm25_ef.save(\"bm25_params.json\")\n\n# 4. load the saved params\nnew_bm25_ef = BM25EmbeddingFunction()\nnew_bm25_ef.load(\"bm25_params.json\")\n\ndocs_embeddings = new_bm25_ef.encode_documents(docs)\nquery_embeddings = new_bm25_ef.encode_queries([query])\nprint(\"Dim:\", new_bm25_ef.dim, list(docs_embeddings)[0].shape)\n","Dim: 21 (1, 21)\n"],"headingContent":"","anchorList":[{"label":"嵌入概述","href":"Embedding-Overview","type":1,"isActive":false},{"label":"例 1:使用默认嵌入函数生成密集向量","href":"Example-1-Use-default-embedding-function-to-generate-dense-vectors","type":2,"isActive":false},{"label":"例 2:使用 BGE M3 模型一次调用生成密集向量和稀疏向量","href":"Example-2-Generate-dense-and-sparse-vectors-in-one-call-with-BGE-M3-model","type":2,"isActive":false},{"label":"例 3:使用 BM25 模型生成稀疏向量","href":"Example-3-Generate--sparse-vectors-using-BM25-model","type":2,"isActive":false}]}
            \ No newline at end of file
            +{"codeList":["pip install \"pymilvus[model]\"\n","from pymilvus import model\n\n# This will download \"all-MiniLM-L6-v2\", a light weight model.\nef = model.DefaultEmbeddingFunction()\n\n# Data from which embeddings are to be generated \ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nembeddings = ef.encode_documents(docs)\n\n# Print embeddings\nprint(\"Embeddings:\", embeddings)\n# Print dimension and shape of embeddings\nprint(\"Dim:\", ef.dim, embeddings[0].shape)\n","Embeddings: [array([-3.09392996e-02, -1.80662833e-02,  1.34775648e-02,  2.77156215e-02,\n       -4.86349640e-03, -3.12581174e-02, -3.55921760e-02,  5.76934684e-03,\n        2.80773244e-03,  1.35783911e-01,  3.59678417e-02,  6.17732145e-02,\n...\n       -4.61330153e-02, -4.85207550e-02,  3.13997865e-02,  7.82178566e-02,\n       -4.75336798e-02,  5.21207601e-02,  9.04406682e-02, -5.36676683e-02],\n      dtype=float32)]\nDim: 384 (384,)\n","from pymilvus.model.hybrid import BGEM3EmbeddingFunction\nfrom pymilvus import (\n    utility,\n    FieldSchema, CollectionSchema, DataType,\n    Collection, AnnSearchRequest, RRFRanker, connections,\n)\n","# 1. prepare a small corpus to search\ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Who started AI research?\"\n\n# BGE-M3 model can embed texts as dense and sparse vectors.\n# It is included in the optional `model` module in pymilvus, to install it,\n# simply run \"pip install pymilvus[model]\".\n\nbge_m3_ef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\n\ndocs_embeddings = bge_m3_ef(docs)\nquery_embeddings = bge_m3_ef([query])\n","from pymilvus.model.sparse import BM25EmbeddingFunction\n","# 1. prepare a small corpus to search\ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\nquery = \"Where was Turing born?\"\nbm25_ef = BM25EmbeddingFunction()\n\n# 2. fit the corpus to get BM25 model parameters on your documents.\nbm25_ef.fit(docs)\n\n# 3. store the fitted parameters to disk to expedite future processing.\nbm25_ef.save(\"bm25_params.json\")\n\n# 4. load the saved params\nnew_bm25_ef = BM25EmbeddingFunction()\nnew_bm25_ef.load(\"bm25_params.json\")\n\ndocs_embeddings = new_bm25_ef.encode_documents(docs)\nquery_embeddings = new_bm25_ef.encode_queries([query])\nprint(\"Dim:\", new_bm25_ef.dim, list(docs_embeddings)[0].shape)\n","Dim: 21 (1, 21)\n"],"headingContent":"Embedding Overview","anchorList":[{"label":"嵌入概述","href":"Embedding-Overview","type":1,"isActive":false},{"label":"例 1:使用默认嵌入函数生成密集向量","href":"Example-1-Use-default-embedding-function-to-generate-dense-vectors","type":2,"isActive":false},{"label":"例 2:使用 BGE M3 模型一次调用生成密集向量和稀疏向量","href":"Example-2-Generate-dense-and-sparse-vectors-in-one-call-with-BGE-M3-model","type":2,"isActive":false},{"label":"示例 3:使用 BM25 模型生成稀疏向量","href":"Example-3-Generate--sparse-vectors-using-BM25-model","type":2,"isActive":false}]}
            \ No newline at end of file
            diff --git a/localization/v2.4.x/site/zh/embeddings/embeddings.md b/localization/v2.4.x/site/zh/embeddings/embeddings.md
            index 1cedeb675..68f682aa5 100644
            --- a/localization/v2.4.x/site/zh/embeddings/embeddings.md
            +++ b/localization/v2.4.x/site/zh/embeddings/embeddings.md
            @@ -1,10 +1,9 @@
             ---
             id: embeddings.md
             order: 1
summary: Learn how to generate embeddings for your data.
title: Embedding Overview
---
             

Embedding Overview

Embedding is a machine learning concept for mapping data into a high-dimensional space, where data of similar semantics are placed close together. Typically being a deep neural network from BERT or other Transformer families, the embedding model can effectively represent the semantics of text, images, and other data types with a series of numbers known as vectors. A key feature of these models is that the mathematical distance between vectors in the high-dimensional space can indicate the similarity of the semantics of the original text or images. This property unlocks many information retrieval applications, such as web search engines like Google and Bing, product search and recommendation on e-commerce sites, and the recently popular Retrieval Augmented Generation (RAG) paradigm in generative AI.

There are two main categories of embeddings, each producing a different type of vector:

• Dense embedding: Most embedding models represent information as a floating-point vector of hundreds to thousands of dimensions. The output is called a "dense" vector, as most dimensions have non-zero values. For instance, the popular open-source embedding model BAAI/bge-base-en-v1.5 outputs vectors of 768 floating-point numbers (a 768-dimension float vector).

• Sparse embedding: In contrast, the output vectors of sparse embeddings have most dimensions being zero, namely "sparse" vectors. These vectors often have much higher dimensions (tens of thousands or more), determined by the size of the token vocabulary. Sparse vectors can be generated by deep neural networks or by statistical analysis of text corpora. Due to their interpretability and better out-of-domain generalization capabilities, sparse embeddings are increasingly adopted by developers as a complement to dense embeddings.

Milvus is a vector database designed for vector data management, storage, and retrieval. By integrating mainstream embedding and reranking models, you can easily transform original text into searchable vectors or rerank the results using powerful models to achieve more accurate results for RAG. This integration simplifies text transformation and eliminates the need for additional embedding or reranking components, streamlining RAG development and validation.

To create embeddings in action, refer to Using PyMilvus's Model To Generate Text Embeddings.

| Embedding Function | Type | API or Open-sourced |
|---|---|---|
| openai | Dense | API |
| sentence-transformer | Dense | Open-sourced |
| bm25 | Sparse | Open-sourced |
| SPLADE | Sparse | Open-sourced |
| bge-m3 | Hybrid | Open-sourced |
| voyageai | Dense | API |
| jina | Dense | API |
| cohere | Dense | API |
| instructor | Dense | Open-sourced |
| Mistral AI | Dense | API |
| Nomic | Dense | API |
| mGTE | Hybrid | Open-sourced |

Example 1: Use default embedding function to generate dense vectors

To use embedding functions with Milvus, first install the PyMilvus client library with the model subpackage that wraps all the utilities for embedding generation.

            pip install "pymilvus[model]"
             
The model subpackage supports various embedding models, from OpenAI, Sentence Transformers, BGE M3, and BM25 to SPLADE pretrained models. For simplicity, this example uses the DefaultEmbeddingFunction, which is an all-MiniLM-L6-v2 sentence transformer model. The model is about 70MB and will be downloaded during first use:

from pymilvus import model

# This will download "all-MiniLM-L6-v2", a light weight model.
ef = model.DefaultEmbeddingFunction()

# Data from which embeddings are to be generated 
docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Alan Turing was the first person to conduct substantial research in AI.",
    "Born in Maida Vale, London, Turing was raised in southern England.",
]

embeddings = ef.encode_documents(docs)

# Print embeddings
print("Embeddings:", embeddings)
# Print dimension and shape of embeddings
print("Dim:", ef.dim, embeddings[0].shape)

The expected output is similar to the following:

Embeddings: [array([-3.09392996e-02, -1.80662833e-02,  1.34775648e-02,  2.77156215e-02,
       -4.86349640e-03, -3.12581174e-02, -3.55921760e-02,  5.76934684e-03,
        2.80773244e-03,  1.35783911e-01,  3.59678417e-02,  6.17732145e-02,
...
       -4.61330153e-02, -4.85207550e-02,  3.13997865e-02,  7.82178566e-02,
       -4.75336798e-02,  5.21207601e-02,  9.04406682e-02, -5.36676683e-02],
      dtype=float32)]
Dim: 384 (384,)

Example 2: Generate dense and sparse vectors in one call with BGE M3 model

In this example, we use the BGE M3 hybrid model to embed text into both dense and sparse vectors and use them to retrieve relevant documents. The overall steps are as follows:

1. Embed the text as dense and sparse vectors using the BGE-M3 model;

2. Set up a Milvus collection to store the dense and sparse vectors;

3. Insert the data into Milvus;

4. Search and inspect the results.

from pymilvus.model.hybrid import BGEM3EmbeddingFunction
from pymilvus import (
    utility,
    FieldSchema, CollectionSchema, DataType,
    Collection, AnnSearchRequest, RRFRanker, connections,
)

Use BGE M3 to encode the documents and queries for embedding retrieval.

# 1. prepare a small corpus to search
docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Alan Turing was the first person to conduct substantial research in AI.",
    "Born in Maida Vale, London, Turing was raised in southern England.",
]
query = "Who started AI research?"

# BGE-M3 model can embed texts as dense and sparse vectors.
# It is included in the optional `model` module in pymilvus, to install it,
# simply run "pip install pymilvus[model]".

bge_m3_ef = BGEM3EmbeddingFunction(use_fp16=False, device="cpu")

docs_embeddings = bge_m3_ef(docs)
query_embeddings = bge_m3_ef([query])
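Steps 2-4 are condensed in the following hedged sketch (the collection name, index choices, and connection parameters are illustrative, not from the source):

# Store both vector types and run a fused dense + sparse search with RRF.
connections.connect(host="localhost", port="19530")

fields = [
    FieldSchema("pk", DataType.INT64, is_primary=True, auto_id=True),
    FieldSchema("dense", DataType.FLOAT_VECTOR, dim=bge_m3_ef.dim["dense"]),
    FieldSchema("sparse", DataType.SPARSE_FLOAT_VECTOR),
]
col = Collection("bge_m3_demo", CollectionSchema(fields))
col.create_index("dense", {"index_type": "AUTOINDEX", "metric_type": "IP"})
col.create_index("sparse", {"index_type": "SPARSE_INVERTED_INDEX", "metric_type": "IP"})

col.insert([docs_embeddings["dense"], docs_embeddings["sparse"]])
col.load()

reqs = [
    AnnSearchRequest(query_embeddings["dense"], "dense", {"metric_type": "IP"}, limit=2),
    AnnSearchRequest(query_embeddings["sparse"], "sparse", {"metric_type": "IP"}, limit=2),
]
res = col.hybrid_search(reqs, RRFRanker(), limit=2)
print(res)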

Example 3: Generate sparse vectors using BM25 model
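The code listing for this example survives in this page's JSON snippet; restored here for readability, it fits the corpus, saves the BM25 parameters, and encodes documents and queries as sparse vectors:

from pymilvus.model.sparse import BM25EmbeddingFunction

# 1. prepare a small corpus to search
docs = [
    "Artificial intelligence was founded as an academic discipline in 1956.",
    "Alan Turing was the first person to conduct substantial research in AI.",
    "Born in Maida Vale, London, Turing was raised in southern England.",
]
query = "Where was Turing born?"
bm25_ef = BM25EmbeddingFunction()

# 2. fit the corpus to get BM25 model parameters on your documents.
bm25_ef.fit(docs)

# 3. store the fitted parameters to disk to expedite future processing.
bm25_ef.save("bm25_params.json")

# 4. load the saved params
new_bm25_ef = BM25EmbeddingFunction()
new_bm25_ef.load("bm25_params.json")

docs_embeddings = new_bm25_ef.encode_documents(docs)
query_embeddings = new_bm25_ef.encode_queries([query])
print("Dim:", new_bm25_ef.dim, list(docs_embeddings)[0].shape)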

The expected output is similar to the following:

            Dim: 21 (1, 21)
             
            diff --git a/localization/v2.4.x/site/zh/faq/operational_faq.json b/localization/v2.4.x/site/zh/faq/operational_faq.json index 7395103b8..b5e46f62f 100644 --- a/localization/v2.4.x/site/zh/faq/operational_faq.json +++ b/localization/v2.4.x/site/zh/faq/operational_faq.json @@ -1 +1 @@ -{"codeList":["{\n \"registry-mirrors\": [\"https://registry.docker-cn.com\"]\n}\n","$ lscpu | grep -e sse4_2 -e avx -e avx2 -e avx512\n","pip install pymilvus>=2.4.2\n"],"headingContent":"","anchorList":[{"label":"操作常见问题","href":"Operational-FAQ","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":["{\n \"registry-mirrors\": [\"https://registry.docker-cn.com\"]\n}\n","$ lscpu | grep -e sse4_2 -e avx -e avx2 -e avx512\n","pip install pymilvus>=2.4.2\n","# Python Example: result of len() str cannot be used as \"max-length\" in Milvus \n>>> s = \"你好,世界!\"\n>>> len(s) # Number of characters of s.\n6\n>>> len(bytes(s, \"utf-8\")) # Size in bytes of s, max-length in Milvus.\n18\n"],"headingContent":"Operational FAQ","anchorList":[{"label":"操作常见问题","href":"Operational-FAQ","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/faq/operational_faq.md b/localization/v2.4.x/site/zh/faq/operational_faq.md index 3a09987fa..6e39edc5f 100644 --- a/localization/v2.4.x/site/zh/faq/operational_faq.md +++ b/localization/v2.4.x/site/zh/faq/operational_faq.md @@ -1,9 +1,8 @@ --- id: operational_faq.md -summary: 查找有关Milvus运营的常见问题的答案。 +summary: 查找有关 Milvus 操作符常见问题的答案。 title: 操作常见问题 --- -

Operational FAQ

What if I failed to pull the Milvus Docker image from Docker Hub?

If you failed to pull the Milvus Docker image from Docker Hub, try adding other registry mirrors.

Users from Mainland China can add the URL "https://registry.docker-cn.com" to the registry-mirrors array in /etc/docker/daemon.json.

            {
               "registry-mirrors": ["https://registry.docker-cn.com"]
             }
             
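After editing daemon.json, the Docker daemon typically needs a restart for the mirror to take effect; on a systemd-based Linux host that is usually (a common step, not from the source text):

sudo systemctl restart docker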
Is Docker the only way to install and run Milvus?

Docker is an efficient way to deploy Milvus, but not the only one. You can also deploy Milvus from source code, which requires Ubuntu (18.04 or higher) or CentOS (7 or higher). See Building Milvus from Source Code for more information.

What are the key factors affecting recall?

Recall is affected mainly by index type and search parameters.

For FLAT indexes, Milvus takes an exhaustive scan within a collection, with a 100% return.

For IVF indexes, the nprobe parameter determines the scope of a search within the collection. Increasing nprobe increases the proportion of vectors searched and recall, but diminishes query performance.

For HNSW indexes, the ef parameter determines the breadth of the graph search. Increasing ef increases the number of points searched on the graph and recall, but diminishes query performance.

For more information, see Vector Indexing.

Why did my changes to the configuration file not take effect?

Milvus does not support modifying configuration files during runtime. You must restart Milvus Docker for configuration file changes to take effect.

How do I know if Milvus has started successfully?

If Milvus is started using Docker Compose, run docker ps to observe how many Docker containers are running and check whether the Milvus services started correctly.

For Milvus Standalone, you should be able to observe at least three running Docker containers, one being the Milvus service and the other two being the etcd management and storage services. For more information, see Installing Milvus Standalone.

Why is the time in the log files different from the system time?

The difference in time is usually due to the fact that the host machine does not use Coordinated Universal Time (UTC).

The log files inside the Docker image use UTC by default. If your host machine does not use UTC, this issue may occur.

How do I know if my CPU supports Milvus?

Milvus' computing operations depend on CPU support for SIMD (Single Instruction, Multiple Data) extension instruction sets. Whether your CPU supports SIMD extensions is crucial to index building and vector similarity search within Milvus. Ensure that your CPU supports at least one of the following SIMD instruction sets:

• SSE4.2
• AVX
• AVX2
• AVX512

Run the lscpu command to check if your CPU supports the SIMD instruction sets mentioned above:

              $ lscpu | grep -e sse4_2 -e avx -e avx2 -e avx512
               
Why does Milvus return illegal instruction during startup?

Milvus requires your CPU to support a SIMD instruction set: SSE4.2, AVX, AVX2, or AVX512. The CPU must support at least one of these to ensure that Milvus operates properly. An illegal instruction error returned during startup suggests that your CPU does not support any of the four instruction sets above.

See CPU support for SIMD instruction sets.

Can I install Milvus on Windows?

Yes. You can install Milvus on Windows either by compiling from source code or from a binary package.

See Run Milvus on Windows to learn how to install Milvus on Windows.

I got an error installing pymilvus on Windows. What should I do?

It is not recommended to install PyMilvus on Windows. But if you have to install PyMilvus on Windows and are getting an error, try installing it in a Conda environment. See Install Milvus SDK for more information about how to install PyMilvus in a Conda environment.

Can I deploy Milvus when disconnected from the Internet?

Yes. You can install Milvus in an offline environment. See Install Milvus Offline for more information.

Where can I find the logs generated by Milvus?

The Milvus logs are printed to stdout (standard output) and stderr (standard error) by default; however, we highly recommend redirecting your logs to a persistent volume in production. To do so, update log.file.rootPath in milvus.yaml. And if you deploy Milvus with the milvus-helm chart, you also need to enable log persistence first via --set log.persistence.enabled=true.

If you didn't change the config, using kubectl logs <pod-name> or docker logs CONTAINER can also help you find the logs.

Can I create an index for a segment before inserting data into it?

Yes, you can. But we recommend inserting data in batches, each of which should not exceed 256 MB, before indexing each segment.

Can I share an etcd instance among multiple Milvus instances?

Yes, you can share an etcd instance among multiple Milvus instances. To do so, change etcd.rootPath to a separate value for each Milvus instance in its configuration file before starting it.

Can I share a Pulsar instance among multiple Milvus instances?

Yes, you can share a Pulsar instance among multiple Milvus instances. To do so, you can:

• If multi-tenancy is enabled on your Pulsar instance, consider allocating a separate tenant or namespace for each Milvus instance. To do so, change pulsar.tenant or pulsar.namespace in the configuration file of each Milvus instance to a unique value before starting it.
• If you do not plan to enable multi-tenancy on your Pulsar instance, consider changing msgChannel.chanNamePrefix.cluster in the configuration file of each Milvus instance to a unique value before starting it.
Can I share a MinIO instance among multiple Milvus instances?

Yes, you can share a MinIO instance among multiple Milvus instances. To do so, change minio.rootPath to a unique value for each Milvus instance in its configuration file before starting it. A sketch of the relevant configuration keys follows.
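Below is a minimal, illustrative milvus.yaml fragment for one instance, assuming the key paths named in the answers above (the values are hypothetical):

etcd:
  rootPath: milvus-instance-a        # unique per instance when sharing etcd
pulsar:
  tenant: milvus-instance-a          # unique tenant (or namespace) when sharing Pulsar
  namespace: milvus-instance-a
minio:
  rootPath: files/milvus-instance-a  # unique per instance when sharing MinIO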

How do I handle the error message pymilvus.exceptions.ConnectionConfigException: <ConnectionConfigException: (code=1, message=Illegal uri: [example.db], expected form 'https://user:pwd@example.com:12345')>?

The error message Illegal uri [example.db] indicates that you are trying to connect to Milvus Lite using an earlier version of PyMilvus that does not support this connection type. To resolve this issue, upgrade your PyMilvus installation to at least version 2.4.2, which includes support for connecting to Milvus Lite.

You can upgrade your PyMilvus installation using the following command:

              pip install pymilvus>=2.4.2
               
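After upgrading, a local file URI is accepted directly, which is a quick way to confirm the fix (the file name is illustrative):

from pymilvus import MilvusClient

# Creates/opens a Milvus Lite database backed by a local file.
client = MilvusClient("example.db")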

Why am I getting fewer results than the limit I set in my search/query?

There are several reasons why you might receive fewer results than the limit you specified:

• Limited Data: The collection may not have enough entities to fulfill the limit you requested. If the total number of entities in the collection is less than the limit, you will naturally receive fewer results.

• Duplicate Primary Keys: Milvus prioritizes specific entities when it encounters duplicate primary keys during a search. This behavior varies by search type:

  • Query (exact match): Milvus selects the latest entity with the matching PK.
  • ANN search: Milvus selects the entity with the highest similarity score, even if entities share the same PK.

  If your collection has many duplicate primary keys, this prioritization can result in fewer unique results than the limit.

• Insufficient Matches: Your search filtering expressions might be too strict, resulting in fewer entities meeting the similarity threshold. If the conditions set for the search are too restrictive, not enough entities match, leading to fewer results than expected.

MilvusClient("milvus_demo.db") gives an error: ModuleNotFoundError: No module named 'milvus_lite'. What causes this, and how can it be solved?

This error occurs when you try to use Milvus Lite on a Windows platform. Milvus Lite is primarily designed for Linux environments and may not have native support for Windows.

The solution is to use a Linux environment:

• Use a Linux-based operating system or a virtual machine to run Milvus Lite.
• This approach ensures compatibility with the library's dependencies and functionality.

What is the "length exceeds max length" error in Milvus, and how can it be understood and addressed?

The "length exceeds max length" error in Milvus occurs when the size of a data element surpasses the maximum allowed size for a collection or field. Here are some examples and explanations:

• JSON field error: <MilvusException: (code=1100, message=the length (398324) of json field (metadata) exceeds max length (65536): expected=valid length json string, actual=length exceeds max length: invalid parameter)>

• String length error: <ParamError: (code=1, message=invalid input, length of string exceeds max length. length: 74238, max length: 60535)>

• VarChar field error: <MilvusException: (code=1100, message=the length (60540) of 0th VarChar paragraph exceeds max length (0)%!(EXTRA int64=60535): invalid parameter)>

To understand and address these errors:

• Understand that len(str) in Python represents the number of characters, not the size in bytes.
• For string-based data types like VARCHAR and JSON, use len(bytes(str, encoding='utf-8')) to determine the actual size in bytes, which is what Milvus means by "max-length".

Example in Python:

# Python Example: result of len() str cannot be used as "max-length" in Milvus 
>>> s = "你好,世界!"
>>> len(s) # Number of characters of s.
6
>>> len(bytes(s, "utf-8")) # Size in bytes of s, max-length in Milvus.
18
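If you need to fit oversized text into a byte budget before insertion, a hedged helper like the following works (illustrative, not from the source):

def truncate_utf8(s: str, max_bytes: int) -> str:
    # Trim s so its UTF-8 encoding fits max_bytes without splitting a character.
    encoded = s.encode("utf-8")[:max_bytes]
    return encoded.decode("utf-8", errors="ignore")

print(truncate_utf8("你好,世界!", 10))  # keeps only whole characters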

Still have questions?

You can:

• Check out Milvus on GitHub. Feel free to ask questions, share ideas, and help others.
• Join our Milvus Forum or Slack channel to find support and engage with our open-source community.
              diff --git a/localization/v2.4.x/site/zh/faq/performance_faq.json b/localization/v2.4.x/site/zh/faq/performance_faq.json index 5b9bb9205..140017ad0 100644 --- a/localization/v2.4.x/site/zh/faq/performance_faq.json +++ b/localization/v2.4.x/site/zh/faq/performance_faq.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"","anchorList":[{"label":"性能常见问题","href":"Performance-FAQ","type":1,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Performance FAQ","anchorList":[{"label":"性能常见问题","href":"Performance-FAQ","type":1,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/faq/performance_faq.md b/localization/v2.4.x/site/zh/faq/performance_faq.md index 3be0e3484..e2e4c48a9 100644 --- a/localization/v2.4.x/site/zh/faq/performance_faq.md +++ b/localization/v2.4.x/site/zh/faq/performance_faq.md @@ -1,6 +1,6 @@ --- id: performance_faq.md -summary: 查找有关搜索性能、性能增强及其他性能相关问题的常见问题答案。 +summary: 查找有关搜索性能、性能增强和其他性能相关问题的常见问题答案。 title: 性能常见问题 ---

Performance FAQ

How much does Milvus cost?

Milvus is a 100% free open-source project.

Please adhere to the Apache License 2.0 when using Milvus for production or distribution purposes.

Zilliz, the company behind Milvus, also offers a fully managed cloud version of the platform for those who don't want to build and maintain their own distributed instance. Zilliz Cloud automatically maintains data reliability and allows users to pay only for what they use.

Does Milvus support non-x86 architectures?

Milvus cannot be installed or run on non-x86 platforms.

Your CPU must support one of the following instruction sets to run Milvus: SSE4.2, AVX, AVX2, AVX512. These are all x86-dedicated SIMD instruction sets.

What is the maximum dataset size Milvus can handle?

Theoretically, the maximum dataset size Milvus can handle is determined by the hardware it runs on, specifically system memory and storage:

                -
              • 在运行查询之前,Milvus 会将所有指定的集合和分区加载到内存中。因此,内存大小决定了 Milvus 能查询的最大数据量。
              • -
              • 当新实体和与集合相关的模式(目前只支持 MinIO 用于数据持久化)被添加到 Milvus 时,系统存储将决定插入数据的最大允许大小。
              • +
              • 在运行查询之前,Milvus 会将所有指定的 Collections 和分区加载到内存中。因此,内存大小决定了 Milvus 能查询的最大数据量。
              • +
              • 当新实体和与 Collections 相关的 Schema(目前只支持 MinIO 用于数据持久化)被添加到 Milvus 时,系统存储会决定插入数据的最大允许大小。
              -

              Where does Milvus store data?

              Milvus deals with two types of data: inserted data and metadata.

              Inserted data, including vector data, scalar data, and collection-specific schemas, are stored in persistent storage as incremental logs. Milvus supports multiple object storage backends, including MinIO, AWS S3, Google Cloud Storage (GCS), Azure Blob Storage, Alibaba Cloud OSS, and Tencent Cloud Object Storage (COS).

              Metadata are generated within Milvus. Each Milvus module has its own metadata, which are stored in etcd.

              Why is there no vector data in etcd?

              etcd stores Milvus module metadata; MinIO stores entities.

              Does Milvus support inserting and searching data simultaneously?

              Yes. Insert operations and query operations are handled by two separate, mutually independent modules. From the client's perspective, an insert operation is complete once the inserted data enter the message queue. However, inserted data are unsearchable until they are loaded to the query node. If the segment size does not reach the index-building threshold (512 MB by default), Milvus resorts to brute-force search, and query performance may be diminished.

              Can vectors with duplicate primary keys be inserted into Milvus?

              Yes. Milvus does not check whether vector primary keys are duplicates.

              When vectors with duplicate primary keys are inserted, does Milvus treat it as an update operation?

              No. Milvus does not currently support update operations and does not check for duplicate entity primary keys. You are responsible for ensuring that entity primary keys are unique; if they are not, Milvus may contain multiple entities with duplicate primary keys.

              If this occurs, which data copy will be returned when queried remains undefined behavior. This limitation will be fixed in future releases.

              What is the maximum length of a custom entity primary key?

              Entity primary keys must be non-negative 64-bit integers.

              What is the maximum amount of data that can be added per insert operation?

              An insert operation must not exceed 1,024 MB in size. This is a limit imposed by gRPC.
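              To stay under this limit when ingesting large datasets, split the data into batches and insert them one at a time. Below is a minimal sketch with the MilvusClient API; the batch size, database file, and collection name are assumptions:

              from pymilvus import MilvusClient

              client = MilvusClient("milvus_demo.db")

              def insert_in_batches(client, collection_name, rows, batch_size=1000):
                  # Insert rows in fixed-size batches so no single gRPC request grows too large.
                  for start in range(0, len(rows), batch_size):
                      client.insert(collection_name=collection_name, data=rows[start : start + batch_size])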

              Does the collection size affect query performance when searching in a specific partition?

              No. If partitions are specified for a search, Milvus searches the specified partitions only.

              That said, data must be loaded to memory before searching, and Milvus behaves differently depending on what you load (a minimal sketch of both paths follows this list):

              • If you know which partition your data is located in, call load_partition() to load the target partition, then specify the partition in the search() method call.
              • If you do not know the exact partition, call load_collection() before calling search().
              • If you fail to load collections or partitions before searching, Milvus returns an error.
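              A minimal sketch of both paths with the MilvusClient API; the server URI, collection name, 768-dimensional query vector, and the partition name "2024_q1" are illustrative assumptions:

              from pymilvus import MilvusClient

              client = MilvusClient(uri="http://localhost:19530")

              # Path 1: the target partition is known, so load and search only that partition.
              client.load_partitions(collection_name="demo_collection", partition_names=["2024_q1"])
              res = client.search(
                  collection_name="demo_collection",
                  data=[[0.1] * 768],
                  partition_names=["2024_q1"],
                  limit=3,
              )

              # Path 2: the partition is unknown, so load the whole collection first.
              client.load_collection(collection_name="demo_collection")
              res = client.search(collection_name="demo_collection", data=[[0.1] * 768], limit=3)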

              Can an index be created after inserting vectors?

              Yes. If an index has been built for a collection by create_index() before, Milvus automatically builds an index for the vectors inserted afterwards. However, Milvus does not build the index until the newly inserted vectors fill an entire segment, and the newly created index file is separate from the previous one.

              How are the FLAT and IVF_FLAT indexes different?

              The IVF_FLAT index divides a vector space into list clusters. At the default list value of 16,384, Milvus compares the distances between the target vector and the centroids of all 16,384 clusters to return the nearest probe clusters. Milvus then compares the distances between the target vector and the vectors in the selected clusters to get the nearest vectors. Unlike IVF_FLAT, FLAT directly compares the distances between the target vector and every other vector.

              When the total number of vectors approximately equals nlist, there is little gap between IVF_FLAT and FLAT in terms of computational requirements and search performance. However, as the number of vectors grows to two or more times nlist, IVF_FLAT begins to show performance advantages.

              See Vector Index for more information.
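              As an illustration, this is roughly how an IVF_FLAT index with an explicit nlist can be declared through the MilvusClient API. The server URI, collection name, field name, and nlist value are assumptions, and the deployment must support IVF indexes (Milvus Lite only supports a limited set of index types):

              from pymilvus import MilvusClient

              client = MilvusClient(uri="http://localhost:19530")

              index_params = client.prepare_index_params()
              index_params.add_index(
                  field_name="vector",
                  index_type="IVF_FLAT",
                  metric_type="COSINE",
                  params={"nlist": 1024},  # number of clusters the vector space is divided into
              )
              client.create_index(collection_name="demo_collection", index_params=index_params)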

              How does Milvus flush data?

              Milvus returns success when inserted data are loaded to the message queue. However, the data are not yet flushed to disk. Milvus' data node then writes the data in the message queue to persistent storage as incremental logs. If flush() is called, the data node is forced to write all data in the message queue to persistent storage immediately.
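              For example, with the MilvusClient API you can force an immediate write after a batch of inserts; this is occasionally useful in tests and benchmarks but rarely needed in production. A sketch, where the database file, collection name, and row contents are assumptions:

              from pymilvus import MilvusClient

              client = MilvusClient("milvus_demo.db")
              rows = [{"id": 0, "vector": [0.1] * 768, "text": "hello", "subject": "demo"}]
              client.insert(collection_name="demo_collection", data=rows)
              client.flush(collection_name="demo_collection")  # force buffered data to be persisted now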

              What is normalization? Why is normalization needed?

              Normalization refers to the process of converting a vector so that its norm equals 1. If inner product is used to calculate vector similarity, vectors must be normalized. After normalization, the inner product equals cosine similarity.

              See Wikipedia for more information.
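              A quick numpy sketch of L2 normalization, and of why the inner product of normalized vectors matches cosine similarity:

              import numpy as np

              v = np.array([3.0, 4.0])
              v_norm = v / np.linalg.norm(v)   # L2 normalization: the norm becomes 1
              print(np.linalg.norm(v_norm))    # 1.0

              a, b = np.random.rand(768), np.random.rand(768)
              a_n, b_n = a / np.linalg.norm(a), b / np.linalg.norm(b)
              cosine = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
              print(np.allclose(np.dot(a_n, b_n), cosine))  # True: IP of normalized vectors equals cosine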

              Why do Euclidean distance (L2) and inner product (IP) return different results?

              For normalized vectors, Euclidean distance (L2) is mathematically equivalent to inner product (IP). If these similarity metrics return different results, check whether your vectors are normalized.

              Is there a limit to the total number of collections and partitions in Milvus?

              Yes. You can create up to 65,535 collections in a Milvus instance. When counting the number of existing collections, Milvus counts all collections with shards and partitions in them.

              For example, suppose you have already created 100 collections, with 2 shards and 4 partitions in 60 of them, and with 1 shard and 12 partitions in the remaining 40 collections. The current number of collections can be calculated as:

              60 * 2 * 4 + 40 * 1 * 12 = 960

              Why are fewer than k vectors returned when searching for topk vectors?

              Among the indexes that Milvus supports, IVF_FLAT and IVF_SQ8 implement the k-means clustering method. A data space is divided into nlist clusters, and the inserted vectors are distributed among these clusters. Milvus then selects the nprobe nearest clusters and compares the distances between the target vector and all vectors in the selected clusters to return the final results.

              If nlist and topk are large and nprobe is small, the number of vectors in the nprobe clusters may be less than k. Therefore, when you search for the topk nearest vectors, the number of returned vectors is less than k.

              To avoid this, try setting nprobe larger and nlist and k smaller.
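              nprobe is passed through the search parameters. A sketch with illustrative values, where the server URI, collection name, vector dimension, and parameter values are assumptions:

              from pymilvus import MilvusClient

              client = MilvusClient(uri="http://localhost:19530")

              res = client.search(
                  collection_name="demo_collection",
                  data=[[0.1] * 768],
                  limit=10,  # k
                  search_params={"metric_type": "COSINE", "params": {"nprobe": 64}},
              )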


              See Vector Index for more information.

              What is the maximum vector dimension supported by Milvus?

              By default, Milvus can manage vectors with up to 32,768 dimensions. You can increase the value of Proxy.maxDimension to allow vectors of larger dimension.

              Does Milvus support Apple M1 CPUs?

              The current Milvus release does not directly support Apple M1 CPUs. After Milvus 2.3, Milvus provides Docker images for the ARM64 architecture.

              What data types does Milvus support on the primary key field?

              In the current release, Milvus supports both INT64 and strings.

              Is Milvus scalable?

              Yes. You can deploy a Milvus cluster with multiple nodes via Helm Chart on Kubernetes. Refer to the Scale Guide for more instructions.

              Are queries performed in memory? What are incremental data and historical data?

              Yes. When a query request comes, Milvus loads both incremental data and historical data into memory for searching. Incremental data are the growing segments, which are buffered in memory before they reach the threshold to be persisted in the storage engine, while historical data are from the sealed segments that are stored in object storage. Incremental data and historical data together constitute the whole dataset to search.

              Yes. For queries on the same collection, Milvus concurrently searches the incremental data and historical data. However, queries on different collections are conducted in series. Whereas historical data can be an extremely huge dataset, searches on historical data are relatively more time-consuming and essentially performed in series.

              Why does data in MinIO remain after the corresponding collection is dropped?

              Data in MinIO are designed to remain for a certain period of time for the convenience of data rollback.

              Does Milvus support message engines other than Pulsar?

              Yes. Kafka is supported in Milvus 2.1.0.

              What's the difference between a search and a query?

              In Milvus, a vector similarity search retrieves vectors based on similarity calculation and vector index acceleration. Unlike a vector similarity search, a vector query retrieves vectors via scalar filtering based on a boolean expression. The boolean expression filters on scalar fields or the primary key field, and it retrieves all results that match the filters. In a query, neither similarity metrics nor vector indexes are involved.
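              Side by side with the MilvusClient API, the distinction looks roughly like this; the database file, collection, field, and filter values are assumptions:

              from pymilvus import MilvusClient

              client = MilvusClient("milvus_demo.db")

              # Search: ranked by vector similarity, accelerated by the vector index.
              res = client.search(
                  collection_name="demo_collection",
                  data=[[0.1] * 768],  # query vector
                  limit=5,
                  output_fields=["text", "subject"],
              )

              # Query: scalar filtering only; no similarity metric or vector index involved.
              res = client.query(
                  collection_name="demo_collection",
                  filter="subject == 'history'",
                  output_fields=["text", "subject"],
              )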

              Why does a float vector value have a precision of 7 decimal digits in Milvus?

              Milvus supports storing vectors as Float32 arrays. A Float32 value has a precision of 7 decimal digits. Even with a Float64 value, such as 1.3476964684980388, Milvus stores it as 1.347696. Therefore, when you retrieve such a vector from Milvus, the precision of the Float64 value is lost.
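              A short numpy illustration of this precision loss (numpy's float32 mirrors what Milvus stores):

              import numpy as np

              x = 1.3476964684980388            # a Python float is 64-bit
              print(np.float32(x))              # 1.3476964: roughly 7 significant decimal digits survive
              print(float(np.float32(x)) == x)  # False: the extra Float64 precision is lost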

              How does Milvus handle vector data types and precision?

              Milvus supports Binary, Float32, Float16, and BFloat16 vector types.

              • Binary vectors: store binary data as sequences of 0s and 1s, used in image processing and information retrieval.
              • Float32 vectors: default storage with a precision of about 7 decimal digits. Even Float64 values are stored with Float32 precision, which can lead to precision loss upon retrieval.
              • Float16 and BFloat16 vectors: offer reduced precision and memory usage. Float16 is suitable for applications with limited bandwidth and storage, while BFloat16 balances range and efficiency, and is commonly used in deep learning to reduce computational requirements without significantly impacting accuracy. A small sketch of the storage trade-off follows this list.
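              As a small illustration of the storage trade-off, here is a numpy sketch; numpy's float16 is shown, while BFloat16 requires an extension such as ml_dtypes and is omitted:

              import numpy as np

              v64 = np.random.rand(768)                  # Float64: 8 bytes per element
              v32 = v64.astype(np.float32)               # 4 bytes per element
              v16 = v64.astype(np.float16)               # 2 bytes per element
              print(v64.nbytes, v32.nbytes, v16.nbytes)  # 6144 3072 1536
              print(np.max(np.abs(v64 - v16)))           # worst-case rounding error from the cast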
              Does Milvus support specifying default values for scalar or vector fields?

              Currently, Milvus 2.4.x does not support specifying default values for scalar or vector fields. This feature is planned for future releases.

              Still have questions?

              You can:

              • Check out Milvus on GitHub. Feel free to ask questions, share ideas, and help other users.
              • Join our Slack community to find support and engage with our open-source community.
              diff --git a/localization/v2.4.x/site/zh/getstarted/install_SDKs/install-java.json b/localization/v2.4.x/site/zh/getstarted/install_SDKs/install-java.json index fd60cd9c2..b5cd50c1f 100644 --- a/localization/v2.4.x/site/zh/getstarted/install_SDKs/install-java.json +++ b/localization/v2.4.x/site/zh/getstarted/install_SDKs/install-java.json @@ -1 +1 @@ -{"codeList":["\n io.milvus\n milvus-sdk-java\n 2.4.3\n\n","implementation 'io.milvus:milvus-sdk-java:2.4.3'\n"],"headingContent":"","anchorList":[{"label":"Install Milvus Java SDK","href":"Install-Milvus-Java-SDK","type":1,"isActive":false},{"label":"Requirement","href":"Requirement","type":2,"isActive":false},{"label":"Install Milvus Java SDK","href":"Install-Milvus-Java-SDK","type":2,"isActive":false},{"label":"What's next","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["\n io.milvus\n milvus-sdk-java\n 2.4.5\n\n","implementation 'io.milvus:milvus-sdk-java:2.4.5'\n"],"headingContent":"Install Milvus Java SDK","anchorList":[{"label":"安装 Milvus Java SDK","href":"Install-Milvus-Java-SDK","type":1,"isActive":false},{"label":"要求","href":"Requirement","type":2,"isActive":false},{"label":"安装 Milvus Java SDK","href":"Install-Milvus-Java-SDK","type":2,"isActive":false},{"label":"下一步","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/getstarted/install_SDKs/install-java.md b/localization/v2.4.x/site/zh/getstarted/install_SDKs/install-java.md index 6e3f195fb..a3627ef5c 100644 --- a/localization/v2.4.x/site/zh/getstarted/install_SDKs/install-java.md +++ b/localization/v2.4.x/site/zh/getstarted/install_SDKs/install-java.md @@ -5,7 +5,6 @@ related_key: SDK summary: 了解如何安装 Milvus 的 Java SDK。 title: 安装 Milvus Java SDK --- -

              Install Milvus Java SDK

            • Gradle/Grails

            implementation 'io.milvus:milvus-sdk-java:2.4.5'

            What's next

            This topic describes how to install pymilvus, the Milvus Python SDK.

            The current release of Milvus supports Python, Node.js, GO, and Java SDKs.

            Requirement

            PyMilvus is available on the Python Package Index.

            It is recommended to install a PyMilvus version that matches the version of the Milvus server you installed. For more information, see the Release Notes.

          $ python3 -m pip install pymilvus==2.4.8
           

          Verify the installation

          Open In Colab GitHub Repository

          Vectors, the output data format of neural network models, can effectively encode information and serve a pivotal role in AI applications such as knowledge bases, semantic search, and Retrieval Augmented Generation (RAG).

          Milvus is an open-source vector database that suits AI applications of every size, from running a demo chatbot in a Jupyter notebook to building web-scale search that serves billions of users. In this guide, we will walk you through how to set up Milvus locally within minutes and use the Python client library to generate, store, and search vectors.

          Install Milvus

          In this guide we use Milvus Lite, a python library included in pymilvus that can be embedded into the client application. Milvus also supports deployment on Docker and Kubernetes for production use cases.

          Before starting, make sure you have Python 3.8+ available in the local environment. Install pymilvus, which contains both the python client library and Milvus Lite:

          $ pip install -U pymilvus
          @@ -61,13 +61,12 @@ title: 快速入门

          To create a local Milvus vector database, simply instantiate a MilvusClient by specifying a file name to store all data, such as "milvus_demo.db".

          from pymilvus import MilvusClient

          client = MilvusClient("milvus_demo.db")

          Create a Collection

          In Milvus, we need a collection to store vectors and their associated metadata. You can think of it as a table in traditional SQL databases. When creating a collection, you can define schema and index params to configure vector specs such as dimensionality, index types, and distance metrics. There are also complex concepts to optimize the index for vector search performance. For now, let's just focus on the basics and use defaults wherever possible. At minimum, you only need to set the collection name and the dimension of the vector field of the collection.

          if client.has_collection(collection_name="demo_collection"):
               client.drop_collection(collection_name="demo_collection")
           client.create_collection(
          @@ -94,7 +93,7 @@ client.create_collection(
           
          • The primary key and vector fields use their default names ("id" and "vector").
          • The metric type (vector distance definition) is set to its default value (COSINE).
          • The primary key field accepts integers and does not automatically increment (namely, not using the auto-id feature). Alternatively, you can formally define the schema of the collection by following this instruction.

          Prepare Data

          In this guide, we use vectors to perform semantic search on text. We need to generate vectors for the text by downloading an embedding model. This can be easily done by using the utility functions from the pymilvus[model] library.

          Represent text with vectors

          First, install the model library. This package includes essential ML tools such as PyTorch. The package download may take some time if your local environment has never installed PyTorch.

          $ pip install "pymilvus[model]"
           
          Generate vector embeddings with the default model. Milvus expects data to be inserted organized as a list of dictionaries, where each dictionary represents a data record, termed as an entity.

          from pymilvus import model

          # If connection to https://huggingface.co/ failed, uncomment the following path
          embedding_fn = model.DefaultEmbeddingFunction()

          # Text strings to search from.
          docs = [
              "Artificial intelligence was founded as an academic discipline in 1956.",
              "Alan Turing was the first person to conduct substantial research in AI.",
              "Born in Maida Vale, London, Turing was raised in southern England.",
          ]

          vectors = embedding_fn.encode_documents(docs)
          # The output vector has 768 dimensions, matching the collection that we just created.
          print("Dim:", embedding_fn.dim, vectors[0].shape)  # Dim: 768 (768,)

          # Each entity has id, vector representation, raw text, and a subject label that we use
          # to demo metadata filtering later.
          data = [
              {"id": i, "vector": vectors[i], "text": docs[i], "subject": "history"}
              for i in range(len(vectors))
          ]

          print("Data has", len(data), "entities, each with fields: ", data[0].keys())
          print("Vector dim:", len(data[0]["vector"]))

          Dim: 768 (768,)
          Data has 3 entities, each with fields:  dict_keys(['id', 'vector', 'text', 'subject'])
          Vector dim: 768
          @@ -180,26 +179,25 @@ data = [

          If you could not download the model due to network issues, as a workaround you can use random vectors to represent the text and still finish the example. Just note that the search result won't reflect semantic similarity, as the vectors are fake ones.

          import random

          # Text strings to search from.
          docs = [
              "Artificial intelligence was founded as an academic discipline in 1956.",
              "Alan Turing was the first person to conduct substantial research in AI.",
              "Born in Maida Vale, London, Turing was raised in southern England.",
          ]
          # Use fake representation with random vectors (768 dimension).
          vectors = [[random.uniform(-1, 1) for _ in range(768)] for _ in docs]
          data = [
              {"id": i, "vector": vectors[i], "text": docs[i], "subject": "history"}
              for i in range(len(vectors))
          ]

          print("Data has", len(data), "entities, each with fields: ", data[0].keys())
          print("Vector dim:", len(data[0]["vector"]))

          Data has 3 entities, each with fields:  dict_keys(['id', 'vector', 'text', 'subject'])
          Vector dim: 768
           
          @@ -218,12 +216,11 @@ data = [

          Let's insert the data into the collection:

          res = client.insert(collection_name="demo_collection", data=data)
           
           print(res)
           
          {'insert_count': 3, 'ids': [0, 1, 2], 'cost': 0}
           

          Now we can do semantic searches by representing the search query text as a vector, and conduct vector similarity search on Milvus.

          Milvus accepts one or multiple vector search requests at the same time. The value of the query_vectors variable is a list of vectors, where each vector is an array of float numbers.

          query_vectors = embedding_fn.encode_queries(["Who is Alan Turing?"])
          # If you don't have the embedding function you can use a fake vector to finish the demo:
          # query_vectors = [ [ random.uniform(-1, 1) for _ in range(768) ] ]

          res = client.search(
              collection_name="demo_collection",  # target collection
              data=query_vectors,  # query vectors
              limit=2,  # number of returned entities
              output_fields=["text", "subject"],  # specifies fields to be returned
          )

          print(res)

          data: ["[{'id': 2, 'distance': 0.5859944820404053, 'entity': {'text': 'Born in Maida Vale, London, Turing was raised in southern England.', 'subject': 'history'}}, {'id': 1, 'distance': 0.5118255615234375, 'entity': {'text': 'Alan Turing was the first person to conduct substantial research in AI.', 'subject': 'history'}}]"] , extra_info: {'cost': 0}

          The output is a list of results, each mapping to a vector search query. Each query contains a list of results, where each result contains the entity primary key, the distance to the query vector, and the entity details with the specified output_fields.

          @@ -275,7 +271,7 @@ output_fields=["text",

          You can also conduct vector search while considering the values of the metadata (called "scalar" fields in Milvus, as scalar refers to non-vector data). This is done with a filter expression specifying certain criteria. Let's see how to search and filter with the subject field in the following example.

          # Insert more docs in another subject.
           docs = [
               "Machine learning has been used for drug design.",
          @@ -292,21 +288,20 @@ client.insert(collection_name="demo_collection"
           
           # This will exclude any text in "history" subject despite close to the query vector.
           res = client.search(
               collection_name="demo_collection",
               data=embedding_fn.encode_queries(["tell me AI related information"]),
               filter="subject == 'biology'",
               limit=2,
               output_fields=["text", "subject"],
           )

           print(res)

          data: ["[{'id': 4, 'distance': 0.27030569314956665, 'entity': {'text': 'Computational synthesis with AI algorithms predicts molecular properties.', 'subject': 'biology'}}, {'id': 3, 'distance': 0.16425910592079163, 'entity': {'text': 'Machine learning has been used for drug design.', 'subject': 'biology'}}]"] , extra_info: {'cost': 0}
          By default, the scalar fields are not indexed. If you need to perform a metadata filtered search in a large dataset, you can consider using a fixed schema and also turn on the index to improve the search performance.

          In addition to vector search, you can also perform other types of searches:

          Query

          A query() is an operation that retrieves all entities matching a criterion, such as a filter expression or some ids to match.

          For example, retrieve all entities whose scalar field has a particular value:

          res = client.query(
               collection_name="demo_collection",
          @@ -314,7 +309,7 @@ output_fields=["text", "subject"],
           )
           
          Directly retrieve entities by primary key:

          res = client.query(
               collection_name="demo_collection",
               ids=[0, 2],
          @@ -344,13 +339,12 @@ res = client.delete(collection_name="demo_collect
           
           # Delete entities by a filter expression
           res = client.delete(
               collection_name="demo_collection",
               filter="subject == 'biology'",
           )

           print(res)

          [0, 2]
          [3, 4, 5]
           
          @@ -369,13 +363,12 @@ collection_name="demo_collection",

          As all data of Milvus Lite are stored in a local file, you can load all data into memory even after the program terminates, by creating a MilvusClient with the existing file. For example, this will recover the collections from the "milvus_demo.db" file and continue to write data into it.

          from pymilvus import MilvusClient
           
           client = MilvusClient("milvus_demo.db")
           
          Drop the collection

          If you would like to delete all the data in a collection, you can drop the collection with:

          # Drop collection
           client.drop_collection(collection_name="demo_collection")
           
          @@ -409,7 +402,7 @@ client.drop_collection(collection_name="demo_coll

          Milvus Lite is great for getting started with a local python program. If you have large-scale data or would like to use Milvus in production, you can learn about deploying Milvus on Docker and Kubernetes. All deployment modes of Milvus share the same API, so your client-side code doesn't need to change much when moving to another deployment mode. Simply specify the URI and token of a Milvus server deployed anywhere:

          client = MilvusClient(uri="http://localhost:19530", token="root:Milvus")

          Milvus provides REST and gRPC APIs, with client libraries in languages such as Python, Java, Go, C#, and Node.js.

          diff --git a/localization/v2.4.x/site/zh/getstarted/run-milvus-docker/install_standalone-docker-compose.json b/localization/v2.4.x/site/zh/getstarted/run-milvus-docker/install_standalone-docker-compose.json index df2b29ac5..2fbd8b6c8 100644 --- a/localization/v2.4.x/site/zh/getstarted/run-milvus-docker/install_standalone-docker-compose.json +++ b/localization/v2.4.x/site/zh/getstarted/run-milvus-docker/install_standalone-docker-compose.json @@ -1,34 +1 @@ -{ - "codeList": [ - "# Download the configuration file\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.9/milvus-standalone-docker-compose.yml -O docker-compose.yml\n\n# Start Milvus\n$ sudo docker compose up -d\n\nCreating milvus-etcd ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n", - "$ sudo docker compose ps\n\n Name Command State Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd etcd -advertise-client-url ... Up 2379/tcp, 2380/tcp\nmilvus-minio /usr/bin/docker-entrypoint ... Up (healthy) 9000/tcp\nmilvus-standalone /tini -- milvus run standalone Up 0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n", - "# Stop Milvus\n$ sudo docker compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n" - ], - "headingContent": "", - "anchorList": [ - { - "label": "使用 Docker Compose 运行 Milvus", - "href": "Run-Milvus-with-Docker-Compose", - "type": 1, - "isActive": false - }, - { - "label": "先决条件", - "href": "Prerequisites", - "type": 2, - "isActive": false - }, - { - "label": "安装Milvus", - "href": "Install-Milvus", - "type": 2, - "isActive": false - }, - { - "label": "下一步行动", - "href": "Whats-next", - "type": 2, - "isActive": false - } - ] -} +{"codeList":["# Download the configuration file\n$ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml\n\n# Start Milvus\n$ sudo docker-compose up -d\n\nCreating milvus-etcd ... done\nCreating milvus-minio ... done\nCreating milvus-standalone ... done\n","$ sudo docker-compose ps\n\n Name Command State Ports\n--------------------------------------------------------------------------------------------------------------------\nmilvus-etcd etcd -advertise-client-url ... Up 2379/tcp, 2380/tcp\nmilvus-minio /usr/bin/docker-entrypoint ... 
Up (healthy) 9000/tcp\nmilvus-standalone /tini -- milvus run standalone Up 0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp\n","# Stop Milvus\n$ sudo docker-compose down\n\n# Delete service data\n$ sudo rm -rf volumes\n"],"headingContent":"Run Milvus with Docker Compose","anchorList":[{"label":"使用 Docker Compose 运行 Milvus","href":"Run-Milvus-with-Docker-Compose","type":1,"isActive":false},{"label":"前提条件","href":"Prerequisites","type":2,"isActive":false},{"label":"安装 Milvus","href":"Install-Milvus","type":2,"isActive":false},{"label":"下一步","href":"Whats-next","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/getstarted/run-milvus-docker/install_standalone-docker-compose.md b/localization/v2.4.x/site/zh/getstarted/run-milvus-docker/install_standalone-docker-compose.md index c5117d7d8..bb2188dd3 100644 --- a/localization/v2.4.x/site/zh/getstarted/run-milvus-docker/install_standalone-docker-compose.md +++ b/localization/v2.4.x/site/zh/getstarted/run-milvus-docker/install_standalone-docker-compose.md @@ -5,7 +5,6 @@ related_key: Docker Compose summary: 了解如何使用 Docker Compose 独立安装 Milvus。 title: 使用 Docker Compose 运行 Milvus --- -

          Run Milvus with Docker Compose

          Milvus provides a Docker Compose configuration file in the Milvus repository. To install Milvus using Docker Compose, just run

          # Download the configuration file
          $ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose.yml -O docker-compose.yml

          # Start Milvus
          $ sudo docker-compose up -d

          Creating milvus-etcd  ... done
          Creating milvus-minio ... done
          Creating milvus-standalone ... done
          • If you failed to run the above command, check whether your system has Docker Compose V1 installed. If this is the case, you are advised to migrate to Docker Compose V2 according to the instructions on this page.

          • If you encounter any issues pulling the image, contact us at community@zilliz.com with details about the problem, and we'll provide you with the necessary support.

          After starting up Milvus:

          • Containers named milvus-standalone, milvus-minio, and milvus-etcd are up.
            • The milvus-etcd container does not expose any ports to the host and maps its data to volumes/etcd in the current folder.
            • The milvus-minio container serves ports 9090 and 9091 locally with the default authentication credentials and maps its data to volumes/minio in the current folder.
            • The milvus-standalone container serves port 19530 locally with the default settings and maps its data to volumes/milvus in the current folder.

          You can check if the containers are up and running using the following command:

          $ sudo docker-compose ps

                Name                     Command                  State                            Ports
          --------------------------------------------------------------------------------------------------------------------
          milvus-etcd         etcd -advertise-client-url ...   Up             2379/tcp, 2380/tcp
          milvus-minio        /usr/bin/docker-entrypoint ...   Up (healthy)   9000/tcp
          milvus-standalone   /tini -- milvus run standalone   Up             0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp
          You can stop and delete this container as follows:

          # Stop Milvus
          $ sudo docker-compose down

          # Delete service data
          $ sudo rm -rf volumes
          What's next

          To install Milvus with GPU support using Docker Compose, follow these steps.

          1. Download and configure the YAML file

          Download milvus-standalone-docker-compose-gpu.yml and save it as docker-compose.yml manually, or with the following command.

          $ wget https://github.com/milvus-io/milvus/releases/download/v2.4.13-hotfix/milvus-standalone-docker-compose-gpu.yml -O docker-compose.yml
           

          You need to make the following changes to the environment variables of the standalone service in the YAML file:

            @@ -93,37 +92,33 @@ title: 使用 Docker Compose 运行支持 GPU 的 Milvus device_ids: ['0', '1'] ...
          2. Start Milvus

          In the directory that holds docker-compose.yml, start Milvus by running:

          $ sudo docker compose up -d

          Creating milvus-etcd  ... done
          Creating milvus-minio ... done
          Creating milvus-standalone ... done
           
          If you failed to run the above command, check whether your system has Docker Compose V1 installed. If this is the case, you are advised to migrate to Docker Compose V2 according to the instructions on this page.

          After starting up Milvus:

          • Containers named milvus-standalone, milvus-minio, and milvus-etcd are up.
            • The milvus-etcd container does not expose any ports to the host and maps its data to volumes/etcd in the current folder.
            • The milvus-minio container serves ports 9090 and 9091 locally with the default authentication credentials and maps its data to volumes/minio in the current folder.
            • The milvus-standalone container serves port 19530 locally with the default settings and maps its data to volumes/milvus in the current folder.

          You can check if the containers are up and running using the following command:

          $ sudo docker compose ps

                Name                     Command                  State                            Ports
          --------------------------------------------------------------------------------------------------------------------
          milvus-etcd         etcd -advertise-client-url ...   Up             2379/tcp, 2380/tcp
          milvus-minio        /usr/bin/docker-entrypoint ...   Up (healthy)   9000/tcp
          milvus-standalone   /tini -- milvus run standalone   Up             0.0.0.0:19530->19530/tcp, 0.0.0.0:9091->9091/tcp
          If you have assigned multiple GPU devices to Milvus in docker-compose.yml, you can specify which GPU device is visible or available for use.

          Make GPU device 0 visible to Milvus:

          $ CUDA_VISIBLE_DEVICES=0 ./milvus run standalone
          @@ -138,7 +133,6 @@ $ sudo docker compose down
           # Delete service data
           $ sudo rm -rf volumes
           
          Configure memory pool

          After Milvus is up and running, you can customize the memory pool by modifying the initMemSize and maxMemSize settings in the milvus.yaml file.

          The milvus.yaml file is located in the /milvus/configs/ directory inside the Milvus container.

          @@ -206,14 +200,14 @@ docker start <milvus_container_id>
        7. Learn the basic operations of Milvus:

        8. Upgrade Milvus Using Helm Chart.

        9. Scale your Milvus cluster.

        10. Deploy your Milvus cluster on clouds:

        11. Explore Milvus Backup, an open-source tool for Milvus data backups.

        12. Explore Birdwatcher, an open-source tool for debugging Milvus and dynamic configuration updates.

        13. Explore Attu, an open-source GUI tool for intuitive Milvus management.

        14. Monitor Milvus with Prometheus.
        diff --git a/localization/v2.4.x/site/zh/integrations/evaluation_with_deepeval.md b/localization/v2.4.x/site/zh/integrations/evaluation_with_deepeval.md
        index 8b06fa163..95cf79a03 100644
        --- a/localization/v2.4.x/site/zh/integrations/evaluation_with_deepeval.md
        +++ b/localization/v2.4.x/site/zh/integrations/evaluation_with_deepeval.md
        @@ -18,7 +18,8 @@ title: 使用 DeepEval 进行评估

          Open In Colab GitHub Repository

          This guide demonstrates how to use DeepEval to evaluate a Retrieval-Augmented Generation (RAG) pipeline built upon Milvus.

          The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

          DeepEval is a framework that helps you evaluate your RAG pipelines. There are existing tools and frameworks that help you build these pipelines, but evaluating them and quantifying your pipeline performance can be hard. This is where DeepEval comes in.

          @@ -288,7 +289,7 @@ Answering questions: 100%|██████████| 3/3 [00:03<00:00, 0 硬件要求是什么? - [硬件要求(Hardware Requirements/n):以下是硬件要求规格。 + [硬件要求(Hardware Requirements/n/以下为具体要求)... 构建Milvus的硬件要求规范... 如果您想构建 Milvus 并从源代码中运行... @@ -364,9 +365,9 @@ result = evaluate(
          /Users/eureka/miniconda3/envs/zilliz/lib/python3.9/site-packages/deepeval/__init__.py:49: UserWarning: You are using deepeval version 1.1.6, however version 1.2.2 is available. You should consider upgrading via the "pip install --upgrade deepeval" command.
             warnings.warn(
           
          You are running DeepEval's latest Contextual Precision Metric (using gpt-4o, strict=False and async_mode=True)...
          ✨ You are running DeepEval's latest Contextual Recall Metric (using gpt-4o, strict=False and async_mode=True)...
          ✨ You are running DeepEval's latest Contextual Relevancy Metric (using gpt-4o, strict=False and async_mode=True)...
          Event loop is already running. Applying nest_asyncio patch to allow async execution...
           
           
          @@ -420,8 +421,8 @@ result = evaluate(
               print_results=False,  # Change to True to see detailed metric results
           )
           
          ✨ You are running DeepEval's latest Answer Relevancy Metric (using gpt-4o, strict=False and async_mode=True)...
          ✨ You are running DeepEval's latest Faithfulness Metric (using gpt-4o, strict=False and async_mode=True)...
          Event loop is already running. Applying nest_asyncio patch to allow async execution...
           
           
          diff --git a/localization/v2.4.x/site/zh/integrations/evaluation_with_phoenix.md b/localization/v2.4.x/site/zh/integrations/evaluation_with_phoenix.md
          index 66778f3ca..39735edbc 100644
          --- a/localization/v2.4.x/site/zh/integrations/evaluation_with_phoenix.md
          +++ b/localization/v2.4.x/site/zh/integrations/evaluation_with_phoenix.md
          @@ -18,7 +18,8 @@ title: 使用 Arize Pheonix 进行评估
                     d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
                   >
                 
          Open In Colab GitHub Repository

          This guide demonstrates how to use Arize Phoenix to evaluate a Retrieval-Augmented Generation (RAG) pipeline built upon Milvus.

          The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

          Arize Phoenix is a framework that helps you evaluate your RAG pipelines. There are existing tools and frameworks that help you build these pipelines, but evaluating them and quantifying your pipeline performance can be hard. This is where Arize Phoenix comes in.

          @@ -357,7 +358,7 @@ OpenAIInstrumentor().instrument()

          Alt Text

          import nest_asyncio
           
          diff --git a/localization/v2.4.x/site/zh/integrations/integrate_with_bentoml.json b/localization/v2.4.x/site/zh/integrations/integrate_with_bentoml.json
          index 0e536da93..cebb2f323 100644
          --- a/localization/v2.4.x/site/zh/integrations/integrate_with_bentoml.json
          +++ b/localization/v2.4.x/site/zh/integrations/integrate_with_bentoml.json
          @@ -1,71 +1 @@
          -{
          -	"codeList": [
          -		"$ pip install -U pymilvus bentoml\n",
          -		"import bentoml\n\nBENTO_EMBEDDING_MODEL_END_POINT = \"BENTO_EMBEDDING_MODEL_END_POINT\"\nBENTO_API_TOKEN = \"BENTO_API_TOKEN\"\n\nembedding_client = bentoml.SyncHTTPClient(\n    BENTO_EMBEDDING_MODEL_END_POINT, token=BENTO_API_TOKEN\n)\n",
          -		"# naively chunk on newlines\ndef chunk_text(filename: str) -> list:\n    with open(filename, \"r\") as f:\n        text = f.read()\n    sentences = text.split(\"\\n\")\n    return sentences\n",
          -		"import os\nimport requests\nimport urllib.request\n\n# set up the data source\nrepo = \"ytang07/bento_octo_milvus_RAG\"\ndirectory = \"data\"\nsave_dir = \"./city_data\"\napi_url = f\"https://api.github.com/repos/{repo}/contents/{directory}\"\n\n\nresponse = requests.get(api_url)\ndata = response.json()\n\nif not os.path.exists(save_dir):\n    os.makedirs(save_dir)\n\nfor item in data:\n    if item[\"type\"] == \"file\":\n        file_url = item[\"download_url\"]\n        file_path = os.path.join(save_dir, item[\"name\"])\n        urllib.request.urlretrieve(file_url, file_path)\n",
          -		"# please upload your data directory under this file's folder\ncities = os.listdir(\"city_data\")\n# store chunked text for each of the cities in a list of dicts\ncity_chunks = []\nfor city in cities:\n    chunked = chunk_text(f\"city_data/{city}\")\n    cleaned = []\n    for chunk in chunked:\n        if len(chunk) > 7:\n            cleaned.append(chunk)\n    mapped = {\"city_name\": city.split(\".\")[0], \"chunks\": cleaned}\n    city_chunks.append(mapped)\n",
          -		"def get_embeddings(texts: list) -> list:\n    if len(texts) > 25:\n        splits = [texts[x : x + 25] for x in range(0, len(texts), 25)]\n        embeddings = []\n        for split in splits:\n            embedding_split = embedding_client.encode(sentences=split)\n            embeddings += embedding_split\n        return embeddings\n    return embedding_client.encode(\n        sentences=texts,\n    )\n",
          -		"entries = []\nfor city_dict in city_chunks:\n    # No need for the embeddings list if get_embeddings already returns a list of lists\n    embedding_list = get_embeddings(city_dict[\"chunks\"])  # returns a list of lists\n    # Now match texts with embeddings and city name\n    for i, embedding in enumerate(embedding_list):\n        entry = {\n            \"embedding\": embedding,\n            \"sentence\": city_dict[\"chunks\"][\n                i\n            ],  # Assume \"chunks\" has the corresponding texts for the embeddings\n            \"city\": city_dict[\"city_name\"],\n        }\n        entries.append(entry)\n    print(entries)\n",
          -		"from pymilvus import MilvusClient\n\nCOLLECTION_NAME = \"Bento_Milvus_RAG\"  # random name for your collection\nDIMENSION = 384\n\n# Initialize a Milvus Lite client\nmilvus_client = MilvusClient(\"milvus_demo.db\")\n",
          -		"from pymilvus import connections\n\nconnections.connect(uri=\"milvus_demo.db\")\n",
          -		"from pymilvus import MilvusClient, DataType, Collection\n\n# Create schema\nschema = MilvusClient.create_schema(\n    auto_id=True,\n    enable_dynamic_field=True,\n)\n\n# 3.2. Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"embedding\", datatype=DataType.FLOAT_VECTOR, dim=DIMENSION)\n",
          -		"# prepare index parameters\nindex_params = milvus_client.prepare_index_params()\n\n# add index\nindex_params.add_index(\n    field_name=\"embedding\",\n    index_type=\"AUTOINDEX\",  # use autoindex instead of other complex indexing method\n    metric_type=\"COSINE\",  # L2, COSINE, or IP\n)\n\n# create collection\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n    collection_name=COLLECTION_NAME, schema=schema, index_params=index_params\n)\n\n# Outside the loop, now you upsert all the entries at once\nmilvus_client.insert(collection_name=COLLECTION_NAME, data=entries)\n",
          -		"BENTO_LLM_END_POINT = \"BENTO_LLM_END_POINT\"\n\nllm_client = bentoml.SyncHTTPClient(BENTO_LLM_END_POINT, token=BENTO_API_TOKEN)\n",
          -		"def dorag(question: str, context: str):\n\n    prompt = (\n        f\"You are a helpful assistant. The user has a question. Answer the user question based only on the context: {context}. \\n\"\n        f\"The user question is {question}\"\n    )\n\n    results = llm_client.generate(\n        max_tokens=1024,\n        prompt=prompt,\n    )\n\n    res = \"\"\n    for result in results:\n        res += result\n\n    return res\n",
          -		"question = \"What state is Cambridge in?\"\n\n\ndef ask_a_question(question):\n    embeddings = get_embeddings([question])\n    res = milvus_client.search(\n        collection_name=COLLECTION_NAME,\n        data=embeddings,  # search for the one (1) embedding returned as a list of lists\n        anns_field=\"embedding\",  # Search across embeddings\n        limit=5,  # get me the top 5 results\n        output_fields=[\"sentence\"],  # get the sentence/chunk and city\n    )\n\n    sentences = []\n    for hits in res:\n        for hit in hits:\n            print(hit)\n            sentences.append(hit[\"entity\"][\"sentence\"])\n    context = \". \".join(sentences)\n    return context\n\n\ncontext = ask_a_question(question=question)\nprint(context)\n",
          -		"print(dorag(question=question, context=context))\n"
          -	],
          -	"headingContent": "",
          -	"anchorList": [
          -		{
          -			"label": "使用 Milvus 和 BentoML 的检索增强生成(RAG)",
          -			"href": "Retrieval-Augmented-Generation-RAG-with-Milvus-and-BentoML",
          -			"type": 1,
          -			"isActive": false
          -		},
          -		{ "label": "导言", "href": "Introduction", "type": 2, "isActive": false },
          -		{
          -			"label": "开始之前",
          -			"href": "Before-you-begin",
          -			"type": 2,
          -			"isActive": false
          -		},
          -		{
          -			"label": "用 BentoML/BentoCloud 提供嵌入式服务",
          -			"href": "Serving-Embeddings-with-BentoMLBentoCloud",
          -			"type": 2,
          -			"isActive": false
          -		},
          -		{
          -			"label": "将数据插入向量数据库以便检索",
          -			"href": "Inserting-Data-into-a-Vector-Database-for-Retrieval",
          -			"type": 2,
          -			"isActive": false
          -		},
          -		{
          -			"label": "创建您的 Milvus Lite 系列",
          -			"href": "Creating-Your-Milvus-Lite-Collection",
          -			"type": 2,
          -			"isActive": false
          -		},
          -		{
          -			"label": "为 RAG 设置您的法律硕士课程",
          -			"href": "Set-up-Your-LLM-for-RAG",
          -			"type": 2,
          -			"isActive": false
          -		},
          -		{
          -			"label": "法律硕士说明",
          -			"href": "LLM-Instructions",
          -			"type": 2,
          -			"isActive": false
          -		},
          -		{
          -			"label": "RAG 示例",
          -			"href": "A-RAG-Example",
          -			"type": 2,
          -			"isActive": false
          -		}
          -	]
          -}
          +{"codeList":["$ pip install -U pymilvus bentoml\n","import bentoml\n\nBENTO_EMBEDDING_MODEL_END_POINT = \"BENTO_EMBEDDING_MODEL_END_POINT\"\nBENTO_API_TOKEN = \"BENTO_API_TOKEN\"\n\nembedding_client = bentoml.SyncHTTPClient(\n    BENTO_EMBEDDING_MODEL_END_POINT, token=BENTO_API_TOKEN\n)\n","# naively chunk on newlines\ndef chunk_text(filename: str) -> list:\n    with open(filename, \"r\") as f:\n        text = f.read()\n    sentences = text.split(\"\\n\")\n    return sentences\n","import os\nimport requests\nimport urllib.request\n\n# set up the data source\nrepo = \"ytang07/bento_octo_milvus_RAG\"\ndirectory = \"data\"\nsave_dir = \"./city_data\"\napi_url = f\"https://api.github.com/repos/{repo}/contents/{directory}\"\n\n\nresponse = requests.get(api_url)\ndata = response.json()\n\nif not os.path.exists(save_dir):\n    os.makedirs(save_dir)\n\nfor item in data:\n    if item[\"type\"] == \"file\":\n        file_url = item[\"download_url\"]\n        file_path = os.path.join(save_dir, item[\"name\"])\n        urllib.request.urlretrieve(file_url, file_path)\n","# please upload your data directory under this file's folder\ncities = os.listdir(\"city_data\")\n# store chunked text for each of the cities in a list of dicts\ncity_chunks = []\nfor city in cities:\n    chunked = chunk_text(f\"city_data/{city}\")\n    cleaned = []\n    for chunk in chunked:\n        if len(chunk) > 7:\n            cleaned.append(chunk)\n    mapped = {\"city_name\": city.split(\".\")[0], \"chunks\": cleaned}\n    city_chunks.append(mapped)\n","def get_embeddings(texts: list) -> list:\n    if len(texts) > 25:\n        splits = [texts[x : x + 25] for x in range(0, len(texts), 25)]\n        embeddings = []\n        for split in splits:\n            embedding_split = embedding_client.encode(sentences=split)\n            embeddings += embedding_split\n        return embeddings\n    return embedding_client.encode(\n        sentences=texts,\n    )\n","entries = []\nfor city_dict in city_chunks:\n    # No need for the embeddings list if get_embeddings already returns a list of lists\n    embedding_list = get_embeddings(city_dict[\"chunks\"])  # returns a list of lists\n    # Now match texts with embeddings and city name\n    for i, embedding in enumerate(embedding_list):\n        entry = {\n            \"embedding\": embedding,\n            \"sentence\": city_dict[\"chunks\"][\n                i\n            ],  # Assume \"chunks\" has the corresponding texts for the embeddings\n            \"city\": city_dict[\"city_name\"],\n        }\n        entries.append(entry)\n    print(entries)\n","from pymilvus import MilvusClient\n\nCOLLECTION_NAME = \"Bento_Milvus_RAG\"  # random name for your collection\nDIMENSION = 384\n\n# Initialize a Milvus Lite client\nmilvus_client = MilvusClient(\"milvus_demo.db\")\n","from pymilvus import connections\n\nconnections.connect(uri=\"milvus_demo.db\")\n","from pymilvus import MilvusClient, DataType, Collection\n\n# Create schema\nschema = MilvusClient.create_schema(\n    auto_id=True,\n    enable_dynamic_field=True,\n)\n\n# 3.2. 
Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"embedding\", datatype=DataType.FLOAT_VECTOR, dim=DIMENSION)\n","# prepare index parameters\nindex_params = milvus_client.prepare_index_params()\n\n# add index\nindex_params.add_index(\n    field_name=\"embedding\",\n    index_type=\"AUTOINDEX\",  # use autoindex instead of other complex indexing method\n    metric_type=\"COSINE\",  # L2, COSINE, or IP\n)\n\n# create collection\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n    collection_name=COLLECTION_NAME, schema=schema, index_params=index_params\n)\n\n# Outside the loop, now you upsert all the entries at once\nmilvus_client.insert(collection_name=COLLECTION_NAME, data=entries)\n","BENTO_LLM_END_POINT = \"BENTO_LLM_END_POINT\"\n\nllm_client = bentoml.SyncHTTPClient(BENTO_LLM_END_POINT, token=BENTO_API_TOKEN)\n","def dorag(question: str, context: str):\n\n    prompt = (\n        f\"You are a helpful assistant. The user has a question. Answer the user question based only on the context: {context}. \\n\"\n        f\"The user question is {question}\"\n    )\n\n    results = llm_client.generate(\n        max_tokens=1024,\n        prompt=prompt,\n    )\n\n    res = \"\"\n    for result in results:\n        res += result\n\n    return res\n","question = \"What state is Cambridge in?\"\n\n\ndef ask_a_question(question):\n    embeddings = get_embeddings([question])\n    res = milvus_client.search(\n        collection_name=COLLECTION_NAME,\n        data=embeddings,  # search for the one (1) embedding returned as a list of lists\n        anns_field=\"embedding\",  # Search across embeddings\n        limit=5,  # get me the top 5 results\n        output_fields=[\"sentence\"],  # get the sentence/chunk and city\n    )\n\n    sentences = []\n    for hits in res:\n        for hit in hits:\n            print(hit)\n            sentences.append(hit[\"entity\"][\"sentence\"])\n    context = \". \".join(sentences)\n    return context\n\n\ncontext = ask_a_question(question=question)\nprint(context)\n","print(dorag(question=question, context=context))\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and BentoML","anchorList":[{"label":"使用 Milvus 和 BentoML 的检索增强生成(RAG)","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-BentoML","type":1,"isActive":false},{"label":"简介","href":"Introduction","type":2,"isActive":false},{"label":"开始之前","href":"Before-you-begin","type":2,"isActive":false},{"label":"使用 BentoML/BentoCloud 服务 Embeddings","href":"Serving-Embeddings-with-BentoMLBentoCloud","type":2,"isActive":false},{"label":"将数据插入向量数据库以便检索","href":"Inserting-Data-into-a-Vector-Database-for-Retrieval","type":2,"isActive":false},{"label":"创建 Milvus Lite Collections","href":"Creating-Your-Milvus-Lite-Collection","type":2,"isActive":false},{"label":"为 RAG 设置 LLM","href":"Set-up-Your-LLM-for-RAG","type":2,"isActive":false},{"label":"LLM 说明","href":"LLM-Instructions","type":2,"isActive":false},{"label":"RAG 示例","href":"A-RAG-Example","type":2,"isActive":false}]}
          \ No newline at end of file
          diff --git a/localization/v2.4.x/site/zh/integrations/integrate_with_bentoml.md b/localization/v2.4.x/site/zh/integrations/integrate_with_bentoml.md
          index ee048db97..463502270 100644
          --- a/localization/v2.4.x/site/zh/integrations/integrate_with_bentoml.md
          +++ b/localization/v2.4.x/site/zh/integrations/integrate_with_bentoml.md
          @@ -1,9 +1,8 @@
           ---
           id: integrate_with_bentoml.md
          -summary: 本指南演示了如何使用 BentoCloud 上的开源嵌入模型和大型语言模型以及 Milvus 向量数据库来构建检索增强生成 (RAG) 应用程序。
          +summary: 本指南演示了如何使用 BentoCloud 上的开源嵌入模型和大语言模型与 Milvus 向量数据库来构建检索增强生成(RAG)应用程序。
           title: 使用 Milvus 和 BentoML 的检索增强生成(RAG)
           ---
          -
           

          使用 Milvus 和 BentoML 的检索增强生成(RAG)

          Open In Colab GitHub Repository

          简介

This guide demonstrates how to build a Retrieval-Augmented Generation (RAG) application using open-source embedding models and large language models on BentoCloud with the Milvus vector database. BentoCloud is an AI inference platform for fast-moving AI teams, offering fully managed infrastructure tailored for model inference. It works in conjunction with BentoML, an open-source model serving framework, to facilitate the easy creation and deployment of high-performance model services. In this demo, we use Milvus Lite as the vector database, the lightweight version of Milvus that can be embedded into your Python application.

Before you begin

After logging in to BentoCloud, we can interact with deployed BentoCloud Services in Deployments, and the corresponding END_POINT and API are located in Playground -> Python. You can download the city data here.

Serving Embeddings with BentoML/BentoCloud
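The Service setup itself is elided by this hunk; once the embedding Service is deployed on BentoCloud, a synchronous HTTP client is all that is needed. A minimal sketch, where the endpoint URL and BENTO_API_TOKEN values are placeholders copied from your own deployment's Playground:

import bentoml

# Placeholders: copy the real values from your BentoCloud deployment
BENTO_EMBEDDING_MODEL_END_POINT = "BENTO_EMBEDDING_MODEL_END_POINT"
BENTO_API_TOKEN = "BENTO_API_TOKEN"

# Synchronous HTTP client against the deployed embedding Service
embedding_client = bentoml.SyncHTTPClient(
    BENTO_EMBEDDING_MODEL_END_POINT, token=BENTO_API_TOKEN
)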

Once you have connected to embedding_client, we need to process our data. We provide several functions to perform data splitting and embedding.

Read the files and preprocess the text into a list of strings.

          # naively chunk on newlines
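# A hedged sketch of the chunking helper this comment introduces (its body is
# elided by the hunk below): read one file and split it on newlines.
def chunk_text(filename: str) -> list:
    with open(filename, "r") as f:
        text = f.read()
    sentences = text.split("\n")
    return sentences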
          @@ -104,19 +103,19 @@ directory = "data"
           save_dir = "./city_data"
           api_url = f"https://api.github.com/repos/{repo}/contents/{directory}"
           
          +
           response = requests.get(api_url)
           data = response.json()
           
           if not os.path.exists(save_dir):
          -os.makedirs(save_dir)
          +    os.makedirs(save_dir)
           
           for item in data:
          -if item["type"] == "file":
          -file_url = item["download_url"]
          -file_path = os.path.join(save_dir, item["name"])
          -urllib.request.urlretrieve(file_url, file_path)
          +    if item["type"] == "file":
          +        file_url = item["download_url"]
          +        file_path = os.path.join(save_dir, item["name"])
          +        urllib.request.urlretrieve(file_url, file_path)
           
Next, we'll process each of the files.

          # please upload your data directory under this file's folder
           cities = os.listdir("city_data")
          @@ -131,7 +130,7 @@ city_chunks = []
               mapped = {"city_name": city.split(".")[0], "chunks": cleaned}
               city_chunks.append(mapped)
           
Split up the list of strings into a list of embeddings, each batch grouping 25 text strings.

          def get_embeddings(texts: list) -> list:
               if len(texts) > 25:
                   splits = [texts[x : x + 25] for x in range(0, len(texts), 25)]
          @@ -144,7 +143,7 @@ city_chunks = []
                   sentences=texts,
               )
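The two hunks above only show fragments of this helper. A sketch of the complete function, assuming the embedding Service client from earlier exposes an encode(sentences=...) call that returns one embedding per input string:

def get_embeddings(texts: list) -> list:
    if len(texts) > 25:
        # Batch requests of at most 25 strings each, then flatten the results
        splits = [texts[x : x + 25] for x in range(0, len(texts), 25)]
        embeddings = []
        for split in splits:
            embeddings += embedding_client.encode(sentences=split)
        return embeddings
    return embedding_client.encode(
        sentences=texts,
    )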
           
Now, we need to match up the embeddings and the text chunks. Since the list of embeddings and the list of sentences should match by index, we can enumerate through either list to match them up.

          entries = []
           for city_dict in city_chunks:
               # No need for the embeddings list if get_embeddings already returns a list of lists
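    # A hedged reconstruction of the rest of this loop (elided by the diff):
    # embed each city's chunks, then pair every embedding with its sentence.
    embedding_list = get_embeddings(city_dict["chunks"])  # returns a list of lists
    for i, embedding in enumerate(embedding_list):
        entry = {
            "embedding": embedding,
            "sentence": city_dict["chunks"][i],
            "city": city_dict["city_name"],
        }
        entries.append(entry)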
          @@ -176,20 +175,19 @@ city_chunks = []
                     d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
                   >
                 
          -    

          准备好嵌入和数据后,我们就可以将向量和元数据一起插入 Milvus Lite,以便稍后进行向量搜索。本节的第一步是通过连接 Milvus Lite 来启动客户端。我们只需导入MilvusClient 模块,然后初始化一个连接到 Milvus Lite 向量数据库的 Milvus Lite 客户端。维度大小来自嵌入模型的大小,例如,句子转换器模型all-MiniLM-L6-v2 产生的向量维度为 384。

          +

          准备好嵌入和数据后,我们就可以将向量连同元数据一起插入 Milvus Lite,以便稍后进行向量搜索。本节的第一步是通过连接 Milvus Lite 来启动客户端。我们只需导入MilvusClient 模块并初始化一个 Milvus Lite 客户端,它将连接到你的 Milvus Lite 向量数据库。维度大小来自嵌入模型的大小,例如,Sentence Transformers 模型all-MiniLM-L6-v2 产生的向量维度为 384。

          from pymilvus import MilvusClient
           
          -COLLECTION_NAME = "Bento_Milvus_RAG" # random name for your collection
          +COLLECTION_NAME = "Bento_Milvus_RAG"  # random name for your collection
           DIMENSION = 384
           
           # Initialize a Milvus Lite client
           milvus_client = MilvusClient("milvus_demo.db")
           
As for the argument of MilvusClient:

• Setting the uri as a local file, e.g. ./milvus.db, is the most convenient method, as it automatically utilizes Milvus Lite to store all data in this file.
• If you have a large scale of data, you can set up a more performant Milvus server on docker or kubernetes. In this setup, please use the server uri, e.g. http://localhost:19530, as your uri.
• If you want to use Zilliz Cloud, the fully managed cloud service for Milvus, adjust the uri and token, which correspond to the Public Endpoint and Api key in Zilliz Cloud.
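For instance, the three options above correspond to three one-line client configurations; the Zilliz Cloud endpoint and key below are placeholders:

from pymilvus import MilvusClient

local_client = MilvusClient("./milvus.db")  # Milvus Lite: stores everything in this file
server_client = MilvusClient(uri="http://localhost:19530")  # standalone Milvus server
cloud_client = MilvusClient(
    uri="YOUR_ZILLIZ_CLOUD_PUBLIC_ENDPOINT",  # placeholder
    token="YOUR_ZILLIZ_CLOUD_API_KEY",  # placeholder
)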
@@ -199,8 +197,7 @@ milvus_client = MilvusClient("milvus_demo.db")
-connections.connect(uri="milvus_demo.db")

Creating Your Milvus Lite Collection

Creating a collection using Milvus Lite involves two steps: first, defining the schema, and second, defining the index. For this section, we need one module: DataType tells us what type of data will be in a field. We also need to use two functions to create the schema and add fields: create_schema(), which creates a collection schema, and add_field(), which adds a field to the schema of a collection.

          from pymilvus import MilvusClient, DataType, Collection
           
           # Create schema
           schema = MilvusClient.create_schema(
          -auto_id=True,
          -enable_dynamic_field=True,
          +    auto_id=True,
          +    enable_dynamic_field=True,
           )
           
           # 3.2. Add fields to schema
           schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
           schema.add_field(field_name="embedding", datatype=DataType.FLOAT_VECTOR, dim=DIMENSION)
           
Now that we have created our schema and successfully defined the data fields, we need to define the index. In terms of search, an "index" defines how we are going to map our data out for retrieval. We use the default choice AUTOINDEX to index our data for this project.

Next, we create the collection with its previously given name, schema and index. Finally, we insert the previously processed data.

          # prepare index parameters
           index_params = milvus_client.prepare_index_params()
           
           # add index
           index_params.add_index(
          -field_name="embedding",
          -index_type="AUTOINDEX", # use autoindex instead of other complex indexing method
          -metric_type="COSINE", # L2, COSINE, or IP
          +    field_name="embedding",
          +    index_type="AUTOINDEX",  # use autoindex instead of other complex indexing method
          +    metric_type="COSINE",  # L2, COSINE, or IP
           )
           
           # create collection
           if milvus_client.has_collection(collection_name=COLLECTION_NAME):
          -milvus_client.drop_collection(collection_name=COLLECTION_NAME)
          +    milvus_client.drop_collection(collection_name=COLLECTION_NAME)
           milvus_client.create_collection(
          -collection_name=COLLECTION_NAME, schema=schema, index_params=index_params
          +    collection_name=COLLECTION_NAME, schema=schema, index_params=index_params
           )
           
           # Outside the loop, now you upsert all the entries at once
           milvus_client.insert(collection_name=COLLECTION_NAME, data=entries)
           
Set up Your LLM for RAG

To build a RAG app, we need to deploy an LLM on BentoCloud. Let's use the latest Llama3 LLM. Once it is up and running, simply copy the endpoint and token of this model service and set up a client for it.

          BENTO_LLM_END_POINT = "BENTO_LLM_END_POINT"
           
          -llm*client = bentoml.SyncHTTPClient(BENTO*LLM_END_POINT, token=BENTO_API_TOKEN)
          +llm_client = bentoml.SyncHTTPClient(BENTO_LLM_END_POINT, token=BENTO_API_TOKEN)
           
LLM Instructions
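The body of this section boils down to a small dorag() helper, reproduced here from this page's code listing: it packs the retrieved context and the user question into a single prompt, streams the completion from llm_client, and concatenates the chunks:

def dorag(question: str, context: str):

    prompt = (
        f"You are a helpful assistant. The user has a question. Answer the user question based only on the context: {context}. \n"
        f"The user question is {question}"
    )

    results = llm_client.generate(
        max_tokens=1024,
        prompt=prompt,
    )

    res = ""
    for result in results:
        res += result

    return res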


A RAG Example

We're now ready to ask a question. This function simply takes a question and then does RAG to generate the relevant context from the background information. Then, we pass the context and the question to dorag() and get the result.

        question = "What state is Cambridge in?"
         
        +
         def ask_a_question(question):
        -embeddings = get_embeddings([question])
        -res = milvus_client.search(
        -collection_name=COLLECTION_NAME,
        -data=embeddings, # search for the one (1) embedding returned as a list of lists
        -anns_field="embedding", # Search across embeddings
        -limit=5, # get me the top 5 results
        -output_fields=["sentence"], # get the sentence/chunk and city
        -)
        +    embeddings = get_embeddings([question])
        +    res = milvus_client.search(
        +        collection_name=COLLECTION_NAME,
        +        data=embeddings,  # search for the one (1) embedding returned as a list of lists
        +        anns_field="embedding",  # Search across embeddings
        +        limit=5,  # get me the top 5 results
        +        output_fields=["sentence"],  # get the sentence/chunk and city
        +    )
         
             sentences = []
             for hits in res:
        @@ -345,10 +338,10 @@ output_fields=["sentence"], ". ".join(sentences)
             return context
         
        +
         context = ask_a_question(question=question)
         print(context)
         
Now we implement the RAG:

        print(dorag(question=question, context=context))
         
        diff --git a/localization/v2.4.x/site/zh/integrations/integrate_with_camel.json b/localization/v2.4.x/site/zh/integrations/integrate_with_camel.json index e6f13af3f..9aa786ea9 100644 --- a/localization/v2.4.x/site/zh/integrations/integrate_with_camel.json +++ b/localization/v2.4.x/site/zh/integrations/integrate_with_camel.json @@ -1 +1 @@ -{"codeList":["$ pip install -U \"camel-ai[all]\" pymilvus\n","import os\nimport requests\n\nos.makedirs(\"local_data\", exist_ok=True)\n\nurl = \"https://arxiv.org/pdf/2303.17760.pdf\"\nresponse = requests.get(url)\nwith open(\"local_data/camel paper.pdf\", \"wb\") as file:\n file.write(response.content)\n","os.environ[\"OPENAI_API_KEY\"] = \"Your Key\"\n","from camel.embeddings import OpenAIEmbedding\n\nembedding_instance = OpenAIEmbedding()\n","from camel.storages import MilvusStorage\n\nstorage_instance = MilvusStorage(\n vector_dim=embedding_instance.get_output_dim(),\n url_and_api_key=(\n \"./milvus_demo.db\", # Your Milvus connection URI\n \"\", # Your Milvus token\n ),\n collection_name=\"camel_paper\",\n)\n","from camel.retrievers import VectorRetriever\n\nvector_retriever = VectorRetriever(\n embedding_model=embedding_instance, storage=storage_instance\n)\n","vector_retriever.process(content_input_path=\"local_data/camel paper.pdf\")\n","retrieved_info = vector_retriever.query(query=\"What is CAMEL?\", top_k=1)\nprint(retrieved_info)\n","retrieved_info_irrelevant = vector_retriever.query(\n query=\"Compared with dumpling and rice, which should I take for dinner?\", top_k=1\n)\n\nprint(retrieved_info_irrelevant)\n","from camel.retrievers import AutoRetriever\nfrom camel.types import StorageType\n\nauto_retriever = AutoRetriever(\n url_and_api_key=(\n \"./milvus_demo.db\", # Your Milvus connection URI\n \"\", # Your Milvus token\n ),\n storage_type=StorageType.MILVUS,\n embedding_model=embedding_instance,\n)\n\nretrieved_info = auto_retriever.run_vector_retriever(\n query=\"What is CAMEL-AI\",\n content_input_paths=[\n \"local_data/camel paper.pdf\", # example local path\n \"https://www.camel-ai.org/\", # example remote url\n ],\n top_k=1,\n return_detailed_info=True,\n)\n\nprint(retrieved_info)\n","from camel.agents import ChatAgent\nfrom camel.messages import BaseMessage\nfrom camel.types import RoleType\nfrom camel.retrievers import AutoRetriever\nfrom camel.types import StorageType\n\n\ndef single_agent(query: str) -> str:\n # Set agent role\n assistant_sys_msg = BaseMessage(\n role_name=\"Assistant\",\n role_type=RoleType.ASSISTANT,\n meta_dict=None,\n content=\"\"\"You are a helpful assistant to answer question,\n I will give you the Original Query and Retrieved Context,\n answer the Original Query based on the Retrieved Context,\n if you can't answer the question just say I don't know.\"\"\",\n )\n\n # Add auto retriever\n auto_retriever = AutoRetriever(\n url_and_api_key=(\n \"./milvus_demo.db\", # Your Milvus connection URI\n \"\", # Your Milvus token\n ),\n storage_type=StorageType.MILVUS,\n embedding_model=embedding_instance,\n )\n\n retrieved_info = auto_retriever.run_vector_retriever(\n query=query,\n content_input_paths=[\n \"local_data/camel paper.pdf\", # example local path\n \"https://www.camel-ai.org/\", # example remote url\n ],\n # vector_storage_local_path=\"storage_default_run\",\n top_k=1,\n return_detailed_info=True,\n )\n\n # Pass the retrieved infomation to agent\n user_msg = BaseMessage.make_user_message(role_name=\"User\", content=retrieved_info)\n agent = ChatAgent(assistant_sys_msg)\n\n # Get response\n 
assistant_response = agent.step(user_msg)\n return assistant_response.msg.content\n\n\nprint(single_agent(\"What is CAMEL-AI\"))\n","from typing import List\nfrom colorama import Fore\n\nfrom camel.agents.chat_agent import FunctionCallingRecord\nfrom camel.configs import ChatGPTConfig\nfrom camel.functions import (\n MATH_FUNCS,\n RETRIEVAL_FUNCS,\n)\nfrom camel.societies import RolePlaying\nfrom camel.types import ModelType\nfrom camel.utils import print_text_animated\n\n\ndef role_playing_with_rag(\n task_prompt, model_type=ModelType.GPT_4O, chat_turn_limit=10\n) -> None:\n task_prompt = task_prompt\n\n user_model_config = ChatGPTConfig(temperature=0.0)\n\n function_list = [\n *MATH_FUNCS,\n *RETRIEVAL_FUNCS,\n ]\n assistant_model_config = ChatGPTConfig(\n tools=function_list,\n temperature=0.0,\n )\n\n role_play_session = RolePlaying(\n assistant_role_name=\"Searcher\",\n user_role_name=\"Professor\",\n assistant_agent_kwargs=dict(\n model_type=model_type,\n model_config=assistant_model_config,\n tools=function_list,\n ),\n user_agent_kwargs=dict(\n model_type=model_type,\n model_config=user_model_config,\n ),\n task_prompt=task_prompt,\n with_task_specify=False,\n )\n\n print(\n Fore.GREEN\n + f\"AI Assistant sys message:\\n{role_play_session.assistant_sys_msg}\\n\"\n )\n print(Fore.BLUE + f\"AI User sys message:\\n{role_play_session.user_sys_msg}\\n\")\n\n print(Fore.YELLOW + f\"Original task prompt:\\n{task_prompt}\\n\")\n print(\n Fore.CYAN\n + f\"Specified task prompt:\\n{role_play_session.specified_task_prompt}\\n\"\n )\n print(Fore.RED + f\"Final task prompt:\\n{role_play_session.task_prompt}\\n\")\n\n n = 0\n input_msg = role_play_session.init_chat()\n while n < chat_turn_limit:\n n += 1\n assistant_response, user_response = role_play_session.step(input_msg)\n\n if assistant_response.terminated:\n print(\n Fore.GREEN\n + (\n \"AI Assistant terminated. Reason: \"\n f\"{assistant_response.info['termination_reasons']}.\"\n )\n )\n break\n if user_response.terminated:\n print(\n Fore.GREEN\n + (\n \"AI User terminated. \"\n f\"Reason: {user_response.info['termination_reasons']}.\"\n )\n )\n break\n\n # Print output from the user\n print_text_animated(Fore.BLUE + f\"AI User:\\n\\n{user_response.msg.content}\\n\")\n\n # Print output from the assistant, including any function\n # execution information\n print_text_animated(Fore.GREEN + \"AI Assistant:\")\n tool_calls: List[FunctionCallingRecord] = assistant_response.info[\"tool_calls\"]\n for func_record in tool_calls:\n print_text_animated(f\"{func_record}\")\n print_text_animated(f\"{assistant_response.msg.content}\\n\")\n\n if \"CAMEL_TASK_DONE\" in user_response.msg.content:\n break\n\n input_msg = assistant_response.msg\n","role_playing_with_rag(\n task_prompt=\"\"\"What is the main termination reasons for AI Society\n dataset, how many number of messages did camel decided to\n limit, what's the value plus 100? 
You should refer to the\n content in path camel/local_data/camel paper.pdf\"\"\"\n)\n"],"headingContent":"","anchorList":[{"label":"使用 Milvus 和 Camel 的检索增强生成(RAG)","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Camel","type":1,"isActive":false},{"label":"载荷数据","href":"Load-Data","type":2,"isActive":false},{"label":"1.定制的 RAG","href":"1-Customized-RAG","type":2,"isActive":false},{"label":"2.自动 RAG","href":"2-Auto-RAG","type":2,"isActive":false},{"label":"3.具有自动 RAG 功能的单一代理","href":"3-Single-Agent-with-Auto-RAG","type":2,"isActive":false},{"label":"4.使用 Auto RAG 进行角色扮演","href":"4-Role-playing-with-Auto-RAG","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install -U \"camel-ai[all]\" pymilvus\n","import os\nimport requests\n\nos.makedirs(\"local_data\", exist_ok=True)\n\nurl = \"https://arxiv.org/pdf/2303.17760.pdf\"\nresponse = requests.get(url)\nwith open(\"local_data/camel paper.pdf\", \"wb\") as file:\n file.write(response.content)\n","os.environ[\"OPENAI_API_KEY\"] = \"Your Key\"\n","from camel.embeddings import OpenAIEmbedding\n\nembedding_instance = OpenAIEmbedding()\n","from camel.storages import MilvusStorage\n\nstorage_instance = MilvusStorage(\n vector_dim=embedding_instance.get_output_dim(),\n url_and_api_key=(\n \"./milvus_demo.db\", # Your Milvus connection URI\n \"\", # Your Milvus token\n ),\n collection_name=\"camel_paper\",\n)\n","from camel.retrievers import VectorRetriever\n\nvector_retriever = VectorRetriever(\n embedding_model=embedding_instance, storage=storage_instance\n)\n","vector_retriever.process(content_input_path=\"local_data/camel paper.pdf\")\n","retrieved_info = vector_retriever.query(query=\"What is CAMEL?\", top_k=1)\nprint(retrieved_info)\n","retrieved_info_irrelevant = vector_retriever.query(\n query=\"Compared with dumpling and rice, which should I take for dinner?\", top_k=1\n)\n\nprint(retrieved_info_irrelevant)\n","from camel.retrievers import AutoRetriever\nfrom camel.types import StorageType\n\nauto_retriever = AutoRetriever(\n url_and_api_key=(\n \"./milvus_demo.db\", # Your Milvus connection URI\n \"\", # Your Milvus token\n ),\n storage_type=StorageType.MILVUS,\n embedding_model=embedding_instance,\n)\n\nretrieved_info = auto_retriever.run_vector_retriever(\n query=\"What is CAMEL-AI\",\n content_input_paths=[\n \"local_data/camel paper.pdf\", # example local path\n \"https://www.camel-ai.org/\", # example remote url\n ],\n top_k=1,\n return_detailed_info=True,\n)\n\nprint(retrieved_info)\n","from camel.agents import ChatAgent\nfrom camel.messages import BaseMessage\nfrom camel.types import RoleType\nfrom camel.retrievers import AutoRetriever\nfrom camel.types import StorageType\n\n\ndef single_agent(query: str) -> str:\n # Set agent role\n assistant_sys_msg = BaseMessage(\n role_name=\"Assistant\",\n role_type=RoleType.ASSISTANT,\n meta_dict=None,\n content=\"\"\"You are a helpful assistant to answer question,\n I will give you the Original Query and Retrieved Context,\n answer the Original Query based on the Retrieved Context,\n if you can't answer the question just say I don't know.\"\"\",\n )\n\n # Add auto retriever\n auto_retriever = AutoRetriever(\n url_and_api_key=(\n \"./milvus_demo.db\", # Your Milvus connection URI\n \"\", # Your Milvus token\n ),\n storage_type=StorageType.MILVUS,\n embedding_model=embedding_instance,\n )\n\n retrieved_info = auto_retriever.run_vector_retriever(\n query=query,\n content_input_paths=[\n \"local_data/camel paper.pdf\", # example local path\n 
\"https://www.camel-ai.org/\", # example remote url\n ],\n # vector_storage_local_path=\"storage_default_run\",\n top_k=1,\n return_detailed_info=True,\n )\n\n # Pass the retrieved infomation to agent\n user_msg = BaseMessage.make_user_message(role_name=\"User\", content=retrieved_info)\n agent = ChatAgent(assistant_sys_msg)\n\n # Get response\n assistant_response = agent.step(user_msg)\n return assistant_response.msg.content\n\n\nprint(single_agent(\"What is CAMEL-AI\"))\n","from typing import List\nfrom colorama import Fore\n\nfrom camel.agents.chat_agent import FunctionCallingRecord\nfrom camel.configs import ChatGPTConfig\nfrom camel.functions import (\n MATH_FUNCS,\n RETRIEVAL_FUNCS,\n)\nfrom camel.societies import RolePlaying\nfrom camel.types import ModelType\nfrom camel.utils import print_text_animated\n\n\ndef role_playing_with_rag(\n task_prompt, model_type=ModelType.GPT_4O, chat_turn_limit=10\n) -> None:\n task_prompt = task_prompt\n\n user_model_config = ChatGPTConfig(temperature=0.0)\n\n function_list = [\n *MATH_FUNCS,\n *RETRIEVAL_FUNCS,\n ]\n assistant_model_config = ChatGPTConfig(\n tools=function_list,\n temperature=0.0,\n )\n\n role_play_session = RolePlaying(\n assistant_role_name=\"Searcher\",\n user_role_name=\"Professor\",\n assistant_agent_kwargs=dict(\n model_type=model_type,\n model_config=assistant_model_config,\n tools=function_list,\n ),\n user_agent_kwargs=dict(\n model_type=model_type,\n model_config=user_model_config,\n ),\n task_prompt=task_prompt,\n with_task_specify=False,\n )\n\n print(\n Fore.GREEN\n + f\"AI Assistant sys message:\\n{role_play_session.assistant_sys_msg}\\n\"\n )\n print(Fore.BLUE + f\"AI User sys message:\\n{role_play_session.user_sys_msg}\\n\")\n\n print(Fore.YELLOW + f\"Original task prompt:\\n{task_prompt}\\n\")\n print(\n Fore.CYAN\n + f\"Specified task prompt:\\n{role_play_session.specified_task_prompt}\\n\"\n )\n print(Fore.RED + f\"Final task prompt:\\n{role_play_session.task_prompt}\\n\")\n\n n = 0\n input_msg = role_play_session.init_chat()\n while n < chat_turn_limit:\n n += 1\n assistant_response, user_response = role_play_session.step(input_msg)\n\n if assistant_response.terminated:\n print(\n Fore.GREEN\n + (\n \"AI Assistant terminated. Reason: \"\n f\"{assistant_response.info['termination_reasons']}.\"\n )\n )\n break\n if user_response.terminated:\n print(\n Fore.GREEN\n + (\n \"AI User terminated. \"\n f\"Reason: {user_response.info['termination_reasons']}.\"\n )\n )\n break\n\n # Print output from the user\n print_text_animated(Fore.BLUE + f\"AI User:\\n\\n{user_response.msg.content}\\n\")\n\n # Print output from the assistant, including any function\n # execution information\n print_text_animated(Fore.GREEN + \"AI Assistant:\")\n tool_calls: List[FunctionCallingRecord] = assistant_response.info[\"tool_calls\"]\n for func_record in tool_calls:\n print_text_animated(f\"{func_record}\")\n print_text_animated(f\"{assistant_response.msg.content}\\n\")\n\n if \"CAMEL_TASK_DONE\" in user_response.msg.content:\n break\n\n input_msg = assistant_response.msg\n","role_playing_with_rag(\n task_prompt=\"\"\"What is the main termination reasons for AI Society\n dataset, how many number of messages did camel decided to\n limit, what's the value plus 100? 
You should refer to the\n content in path camel/local_data/camel paper.pdf\"\"\"\n)\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and Camel","anchorList":[{"label":"使用 Milvus 和 Camel 的检索增强生成 (RAG) 系统","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Camel","type":1,"isActive":false},{"label":"加载数据","href":"Load-Data","type":2,"isActive":false},{"label":"1.自定义 RAG","href":"1-Customized-RAG","type":2,"isActive":false},{"label":"2.自动 RAG","href":"2-Auto-RAG","type":2,"isActive":false},{"label":"3.带有自动 RAG 的单一 Agents","href":"3-Single-Agent-with-Auto-RAG","type":2,"isActive":false},{"label":"4.使用自动 RAG 进行角色扮演","href":"4-Role-playing-with-Auto-RAG","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/integrations/integrate_with_camel.md b/localization/v2.4.x/site/zh/integrations/integrate_with_camel.md index f6d2e0f9a..4948f083f 100644 --- a/localization/v2.4.x/site/zh/integrations/integrate_with_camel.md +++ b/localization/v2.4.x/site/zh/integrations/integrate_with_camel.md @@ -1,10 +1,9 @@ --- id: integrate_with_camel.md -summary: 本指南演示了如何使用 BentoCloud 上的开源嵌入模型和大型语言模型以及 Milvus 向量数据库来构建检索增强生成 (RAG) 应用程序。 +summary: 本指南演示了如何使用 BentoCloud 上的开源嵌入模型和大语言模型与 Milvus 向量数据库来构建检索增强生成(RAG)应用程序。 title: 使用 Milvus 和 BentoML 的检索增强生成(RAG) --- - -

Retrieval-Augmented Generation (RAG) with Milvus and Camel

This guide demonstrates how to build a Retrieval-Augmented Generation (RAG) system using CAMEL and Milvus.

The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

CAMEL is a multi-agent framework. Milvus is the world's most advanced open-source vector database, built to power embedding similarity search and AI applications.

In this notebook, we show the usage of the CAMEL retrieve module in both a customized way and an auto way. We will also show how to combine AutoRetriever with ChatAgent, and further combine AutoRetriever with RolePlaying by using Function Calling.

It includes 4 main parts:

• Customized RAG
• Auto RAG
• Single Agent with Auto RAG
• Role-playing with Auto RAG

Load Data

Let's first load the CAMEL paper from https://arxiv.org/pdf/2303.17760.pdf. This will be our local example data.

        $ pip install -U "camel-ai[all]" pymilvus
         
@@ -60,9 +60,8 @@ os.makedirs("local_data", exist_ok=True)
 url = "https://arxiv.org/pdf/2303.17760.pdf"
 response = requests.get(url)
 with open("local_data/camel paper.pdf", "wb") as file:
-file.write(response.content)
+    file.write(response.content)

1. Customized RAG

In this section we will set up our customized RAG pipeline. We will take VectorRetriever as an example, setting OpenAIEmbedding as the embedding model and MilvusStorage as its storage.

To set the OpenAI embedding, we need to set the OPENAI_API_KEY below.

        os.environ["OPENAI_API_KEY"] = "Your Key"
         

Import and set the embedding instance:

        from camel.embeddings import OpenAIEmbedding
         
        -embedding*instance = OpenAIEmbedding()
        +embedding_instance = OpenAIEmbedding()
         
Import and set the vector storage instance:

        from camel.storages import MilvusStorage
         
         storage_instance = MilvusStorage(
        -vector_dim=embedding_instance.get_output_dim(),
        -url_and_api_key=(
        -"./milvus_demo.db", # Your Milvus connection URI
        -"", # Your Milvus token
        -),
        -collection_name="camel_paper",
        +    vector_dim=embedding_instance.get_output_dim(),
        +    url_and_api_key=(
        +        "./milvus_demo.db",  # Your Milvus connection URI
        +        "",  # Your Milvus token
        +    ),
        +    collection_name="camel_paper",
         )
         
For url_and_api_key:

• Using a local file, e.g. ./milvus.db, as the Milvus connection URI is the most convenient method, as it automatically utilizes Milvus Lite to store all data in this file.
• If you have a large scale of data, you can set up a more performant Milvus server on docker or kubernetes. In this setup, please use the server uri, e.g. http://localhost:19530, as your url.
• If you want to use Zilliz Cloud, the fully managed cloud service for Milvus, adjust the connection uri and token, which correspond to the Public Endpoint and API key in Zilliz Cloud.
        @@ -113,12 +110,11 @@ collection_name="camel_paper",

By default, the similarity_threshold is set to 0.75. You can change it.

        from camel.retrievers import VectorRetriever
         
        -vector*retriever = VectorRetriever(
        -embedding_model=embedding_instance, storage=storage_instance
        +vector_retriever = VectorRetriever(
        +    embedding_model=embedding_instance, storage=storage_instance
         )
         
We use the integrated Unstructured Module to split the content into small chunks; the content is split automatically with its chunk_by_title function, with each chunk capped at 500 characters, which is a suitable length for OpenAIEmbedding. All the text in the chunks will be embedded and stored in the vector storage instance. This will take some time; please wait.

        vector_retriever.process(content_input_path="local_data/camel paper.pdf")
         
        [nltk_data] Downloading package punkt to /root/nltk_data...
        @@ -147,7 +143,6 @@ embedding_model=embedding_instance, storage=storage_instance
         
         print(retrieved_info_irrelevant)
         
        -
        [{'text': 'No suitable information retrieved from local_data/camel paper.pdf                 with similarity_threshold = 0.75.'}]
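That message reflects the 0.75 default. A minimal sketch of overriding it for a single query, assuming VectorRetriever.query accepts a similarity_threshold keyword argument:

# Hypothetically loosen the threshold so weakly related chunks still come back
retrieved_info_loose = vector_retriever.query(
    query="Compared with dumpling and rice, which should I take for dinner?",
    top_k=1,
    similarity_threshold=0.5,
)
print(retrieved_info_loose)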
         

2. Auto RAG
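In this section we run the AutoRetriever, which wraps embedding, storage, and retrieval in a single call. The run below, reproduced from this page's code listing, produces the output that follows:

from camel.retrievers import AutoRetriever
from camel.types import StorageType

auto_retriever = AutoRetriever(
    url_and_api_key=(
        "./milvus_demo.db",  # Your Milvus connection URI
        "",  # Your Milvus token
    ),
    storage_type=StorageType.MILVUS,
    embedding_model=embedding_instance,
)

retrieved_info = auto_retriever.run_vector_retriever(
    query="What is CAMEL-AI",
    content_input_paths=[
        "local_data/camel paper.pdf",  # example local path
        "https://www.camel-ai.org/",  # example remote url
    ],
    top_k=1,
    return_detailed_info=True,
)

print(retrieved_info)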

        Original Query:
         {What is CAMEL-AI}
         Retrieved Context:
         {'similarity score': '0.8252888321876526', 'content path': 'local_data/camel paper.pdf', 'metadata': {'last_modified': '2024-04-19T14:40:00', 'filetype': 'application/pdf', 'page_number': 7}, 'text': ' Section 3.2, to simulate assistant-user cooperation. For our analysis, we set our attention on AI Society setting. We also gathered conversational data, named CAMEL AI Society and CAMEL Code datasets and problem-solution pairs data named CAMEL Math and CAMEL Science and analyzed and evaluated their quality. Moreover, we will discuss potential extensions of our framework and highlight both the risks and opportunities that future AI society might present.'}
         {'similarity score': '0.8378663659095764', 'content path': 'https://www.camel-ai.org/', 'metadata': {'filetype': 'text/html', 'languages': ['eng'], 'page_number': 1, 'url': 'https://www.camel-ai.org/', 'link_urls': ['#h.3f4tphhd9pn8', 'https://join.slack.com/t/camel-ai/shared_invite/zt-2g7xc41gy-_7rcrNNAArIP6sLQqldkqQ', 'https://discord.gg/CNcNpquyDc'], 'link_texts': [None, None, None], 'emphasized_text_contents': ['Mission', 'CAMEL-AI.org', 'is an open-source community dedicated to the study of autonomous and communicative agents. We believe that studying these agents on a large scale offers valuable insights into their behaviors, capabilities, and potential risks. To facilitate research in this field, we provide, implement, and support various types of agents, tasks, prompts, models, datasets, and simulated environments.', 'Join us via', 'Slack', 'Discord', 'or'], 'emphasized_text_tags': ['span', 'span', 'span', 'span', 'span', 'span', 'span']}, 'text': 'Mission\n\nCAMEL-AI.org is an open-source community dedicated to the study of autonomous and communicative agents. We believe that studying these agents on a large scale offers valuable insights into their behaviors, capabilities, and potential risks. To facilitate research in this field, we provide, implement, and support various types of agents, tasks, prompts, models, datasets, and simulated environments.\n\nJoin us via\n\nSlack\n\nDiscord\n\nor'}
         
3. Single Agent with Auto RAG

In this section we will show how to combine the AutoRetriever with one ChatAgent.

Let's set an agent function; in this function we can get the response by providing a query to this agent.

        from camel.agents import ChatAgent
         from camel.messages import BaseMessage
         from camel.types import RoleType
         from camel.retrievers import AutoRetriever
         from camel.types import StorageType
         
        +
         def single_agent(query: str) -> str:
        -# Set agent role
        -assistant_sys_msg = BaseMessage(
        -role_name="Assistant",
        -role_type=RoleType.ASSISTANT,
        -meta_dict=None,
        -content="""You are a helpful assistant to answer question,
        -I will give you the Original Query and Retrieved Context,
        -answer the Original Query based on the Retrieved Context,
        -if you can't answer the question just say I don't know.""",
        -)
        +    # Set agent role
        +    assistant_sys_msg = BaseMessage(
        +        role_name="Assistant",
        +        role_type=RoleType.ASSISTANT,
        +        meta_dict=None,
        +        content="""You are a helpful assistant to answer question,
        +         I will give you the Original Query and Retrieved Context,
        +        answer the Original Query based on the Retrieved Context,
        +        if you can't answer the question just say I don't know.""",
        +    )
         
             # Add auto retriever
             auto_retriever = AutoRetriever(
        @@ -268,9 +263,9 @@ if you can't answer the question just say I don't know.""&qu
             assistant_response = agent.step(user_msg)
             return assistant_response.msg.content
         
        +
         print(single_agent("What is CAMEL-AI"))
         
        -
        CAMEL-AI is an open-source community dedicated to the study of autonomous and communicative agents. It provides, implements, and supports various types of agents, tasks, prompts, models, datasets, and simulated environments to facilitate research in this field.
         

4. Role-playing with Auto RAG
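The run below relies on a role_playing_with_rag helper, reproduced here (lightly compacted) from this page's code listing. It registers MATH_FUNCS and RETRIEVAL_FUNCS as tools for the assistant agent and steps a Searcher/Professor RolePlaying session until the task finishes:

from typing import List
from colorama import Fore

from camel.agents.chat_agent import FunctionCallingRecord
from camel.configs import ChatGPTConfig
from camel.functions import MATH_FUNCS, RETRIEVAL_FUNCS
from camel.societies import RolePlaying
from camel.types import ModelType
from camel.utils import print_text_animated


def role_playing_with_rag(
    task_prompt, model_type=ModelType.GPT_4O, chat_turn_limit=10
) -> None:
    user_model_config = ChatGPTConfig(temperature=0.0)

    # Give the assistant agent both math and retrieval tools
    function_list = [*MATH_FUNCS, *RETRIEVAL_FUNCS]
    assistant_model_config = ChatGPTConfig(tools=function_list, temperature=0.0)

    role_play_session = RolePlaying(
        assistant_role_name="Searcher",
        user_role_name="Professor",
        assistant_agent_kwargs=dict(
            model_type=model_type,
            model_config=assistant_model_config,
            tools=function_list,
        ),
        user_agent_kwargs=dict(
            model_type=model_type,
            model_config=user_model_config,
        ),
        task_prompt=task_prompt,
        with_task_specify=False,
    )

    print(Fore.GREEN + f"AI Assistant sys message:\n{role_play_session.assistant_sys_msg}\n")
    print(Fore.BLUE + f"AI User sys message:\n{role_play_session.user_sys_msg}\n")
    print(Fore.YELLOW + f"Original task prompt:\n{task_prompt}\n")
    print(Fore.CYAN + f"Specified task prompt:\n{role_play_session.specified_task_prompt}\n")
    print(Fore.RED + f"Final task prompt:\n{role_play_session.task_prompt}\n")

    n = 0
    input_msg = role_play_session.init_chat()
    while n < chat_turn_limit:
        n += 1
        assistant_response, user_response = role_play_session.step(input_msg)

        if assistant_response.terminated:
            print(Fore.GREEN + "AI Assistant terminated. Reason: "
                  f"{assistant_response.info['termination_reasons']}.")
            break
        if user_response.terminated:
            print(Fore.GREEN + "AI User terminated. "
                  f"Reason: {user_response.info['termination_reasons']}.")
            break

        # Print output from the user
        print_text_animated(Fore.BLUE + f"AI User:\n\n{user_response.msg.content}\n")

        # Print output from the assistant, including any function execution information
        print_text_animated(Fore.GREEN + "AI Assistant:")
        tool_calls: List[FunctionCallingRecord] = assistant_response.info["tool_calls"]
        for func_record in tool_calls:
            print_text_animated(f"{func_record}")
        print_text_animated(f"{assistant_response.msg.content}\n")

        if "CAMEL_TASK_DONE" in user_response.msg.content:
            break

        input_msg = assistant_response.msg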

Run the role-playing with the defined retriever function:

        role_playing_with_rag(
             task_prompt="""What is the main termination reasons for AI Society
        @@ -413,16 +407,19 @@ None
         Final task prompt:
         What is the main termination reasons for AI Society dataset, how many number of messages did camel decided to limit, what's the value plus 100?
         
        +
        +
         AI User:
         
         Instruction: Provide a summary of the main termination reasons in the AI Society dataset.
         Input: None
         
        +
         AI Assistant:
         
         Function Execution: local_retriever
        -Args: {'query': 'main termination reasons for AI Society dataset'}
        -Result: Original Query:
        +    Args: {'query': 'main termination reasons for AI Society dataset'}
        +    Result: Original Query:
         {main termination reasons for AI Society dataset}
         Retrieved Context:
         Next we examine the conversation termination reasons for both AI Society and Code datasets. As can be seen in Figure 8, the main termination reasons for AI Society dataset is Assistant Instruct whereas for Code it is Token Limit. The latter is expected as the since responses that contain code tend to be long. It is also interesting to note that in both datasets, the termination due to Maximum Number of Messages is low indicating that the limit of 40 maximum messages is reasonable. Our decision t
        @@ -431,36 +428,42 @@ Solution: The main termination reason for the AI Society dataset is "Assist
         
         Next request.
         
        +
         AI User:
         
         Instruction: Identify the number of messages that the camel decided to limit in the context provided.
         Input: None
         
        +
         AI Assistant:
         
         Solution: The context provided from the local retriever indicates that the camel decided to limit the number of messages to a maximum of 40 messages. This is mentioned in the retrieved context where it states that the termination due to Maximum Number of Messages is low, indicating that the limit of 40 maximum messages is reasonable.
         
         Next request.
         
        +
         AI User:
         
         Instruction: Calculate the value of the message limit plus 100.
         Input: None
         
        +
         AI Assistant:
         
         Function Execution: add
        -Args: {'a': 40, 'b': 100}
        -Result: 140
        +    Args: {'a': 40, 'b': 100}
        +    Result: 140
         
         Solution: The value of the message limit plus 100 is 140.
         
         Next request.
         
        +
         AI User:
         
         CAMEL_TASK_DONE
         
        +
         AI Assistant:
         
         Solution: Understood, the task is completed.
        diff --git a/localization/v2.4.x/site/zh/integrations/integrate_with_dspy.json b/localization/v2.4.x/site/zh/integrations/integrate_with_dspy.json
        index f01338ee6..0fcf1a063 100644
        --- a/localization/v2.4.x/site/zh/integrations/integrate_with_dspy.json
        +++ b/localization/v2.4.x/site/zh/integrations/integrate_with_dspy.json
        @@ -1 +1 @@
        -{"codeList":["$ pip install \"dspy-ai[milvus]\"\n$ pip install -U pymilvus\n","from dspy.datasets import HotPotQA\n\n# Load the dataset.\ndataset = HotPotQA(\n    train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0\n)\n\n# Tell DSPy that the 'question' field is the input. Any other fields are labels and/or metadata.\ntrainset = [x.with_inputs(\"question\") for x in dataset.train]\ndevset = [x.with_inputs(\"question\") for x in dataset.dev]\n","import requests\nimport os\n\nos.environ[\"OPENAI_API_KEY\"] = \"\"\nMILVUS_URI = \"example.db\"\nMILVUS_TOKEN = \"\"\n\nfrom pymilvus import MilvusClient, DataType, Collection\nfrom dspy.retrieve.milvus_rm import openai_embedding_function\n\nclient = MilvusClient(uri=MILVUS_URI, token=MILVUS_TOKEN)\n\nif \"dspy_example\" not in client.list_collections():\n    client.create_collection(\n        collection_name=\"dspy_example\",\n        overwrite=True,\n        dimension=1536,\n        primary_field_name=\"id\",\n        vector_field_name=\"embedding\",\n        id_type=\"int\",\n        metric_type=\"IP\",\n        max_length=65535,\n        enable_dynamic=True,\n    )\ntext = requests.get(\n    \"https://raw.githubusercontent.com/wxywb/dspy_dataset_sample/master/sample_data.txt\"\n).text\n\nfor idx, passage in enumerate(text.split(\"\\n\")):\n    if len(passage) == 0:\n        continue\n    client.insert(\n        collection_name=\"dspy_example\",\n        data=[\n            {\n                \"id\": idx,\n                \"embedding\": openai_embedding_function(passage)[0],\n                \"text\": passage,\n            }\n        ],\n    )\n","from dspy.retrieve.milvus_rm import MilvusRM\nimport dspy\n\nretriever_model = MilvusRM(\n    collection_name=\"dspy_example\",\n    uri=MILVUS_URI,\n    token=MILVUS_TOKEN,  # ignore this if no token is required for Milvus connection\n    embedding_function=openai_embedding_function,\n)\nturbo = dspy.OpenAI(model=\"gpt-3.5-turbo\")\ndspy.settings.configure(lm=turbo)\n","class GenerateAnswer(dspy.Signature):\n    \"\"\"Answer questions with short factoid answers.\"\"\"\n\n    context = dspy.InputField(desc=\"may contain relevant facts\")\n    question = dspy.InputField()\n    answer = dspy.OutputField(desc=\"often between 1 and 5 words\")\n","class RAG(dspy.Module):\n    def __init__(self, rm):\n        super().__init__()\n        self.retrieve = rm\n\n        # This signature indicates the task imposed on the COT module.\n        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)\n\n    def forward(self, question):\n        # Use milvus_rm to retrieve context for the question.\n        context = self.retrieve(question).passages\n        # COT module takes \"context, query\" and output \"answer\".\n        prediction = self.generate_answer(context=context, question=question)\n        return dspy.Prediction(\n            context=[item.long_text for item in context], answer=prediction.answer\n        )\n","rag = RAG(retriever_model)\nprint(rag(\"who write At My Window\").answer)\n","from dspy.evaluate.evaluate import Evaluate\nfrom dspy.datasets import HotPotQA\n\nevaluate_on_hotpotqa = Evaluate(\n    devset=devset, num_threads=1, display_progress=False, display_table=5\n)\n\nmetric = dspy.evaluate.answer_exact_match\nscore = evaluate_on_hotpotqa(rag, metric=metric)\nprint(\"rag:\", score)\n","from dspy.teleprompt import BootstrapFewShot\n\n# Validation logic: check that the predicted answer is correct.# Also check that the retrieved context does contain that 
answer.\n\n\ndef validate_context_and_answer(example, pred, trace=None):\n    answer_EM = dspy.evaluate.answer_exact_match(example, pred)\n    answer_PM = dspy.evaluate.answer_passage_match(example, pred)\n    return answer_EM and answer_PM\n\n\n# Set up a basic teleprompter, which will compile our RAG program.\nteleprompter = BootstrapFewShot(metric=validate_context_and_answer)\n\n# Compile!\ncompiled_rag = teleprompter.compile(rag, trainset=trainset)\n\n# Now compiled_rag is optimized and ready to answer your new question!\n# Now, let’s evaluate the compiled RAG program.\nscore = evaluate_on_hotpotqa(compiled_rag, metric=metric)\nprint(score)\nprint(\"compile_rag:\", score)\n"],"headingContent":"","anchorList":[{"label":"将 Milvus 与 DSPy 集成","href":"Integrate-Milvus-with-DSPy","type":1,"isActive":false},{"label":"什么是 DSPy","href":"What-is-DSPy","type":2,"isActive":false},{"label":"使用 DSPy 的好处","href":"Benefits-of-using-DSPy","type":2,"isActive":false},{"label":"模块","href":"Modules","type":2,"isActive":false},{"label":"为什么要在 DSPy 中使用 Milvus","href":"Why-Milvus-in-DSPy","type":2,"isActive":false},{"label":"实例","href":"Examples","type":2,"isActive":false},{"label":"摘要","href":"Summary","type":2,"isActive":false}]}
        \ No newline at end of file
        +{"codeList":["$ pip install \"dspy-ai[milvus]\"\n$ pip install -U pymilvus\n","from dspy.datasets import HotPotQA\n\n# Load the dataset.\ndataset = HotPotQA(\n    train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0\n)\n\n# Tell DSPy that the 'question' field is the input. Any other fields are labels and/or metadata.\ntrainset = [x.with_inputs(\"question\") for x in dataset.train]\ndevset = [x.with_inputs(\"question\") for x in dataset.dev]\n","import requests\nimport os\n\nos.environ[\"OPENAI_API_KEY\"] = \"\"\nMILVUS_URI = \"example.db\"\nMILVUS_TOKEN = \"\"\n\nfrom pymilvus import MilvusClient, DataType, Collection\nfrom dspy.retrieve.milvus_rm import openai_embedding_function\n\nclient = MilvusClient(uri=MILVUS_URI, token=MILVUS_TOKEN)\n\nif \"dspy_example\" not in client.list_collections():\n    client.create_collection(\n        collection_name=\"dspy_example\",\n        overwrite=True,\n        dimension=1536,\n        primary_field_name=\"id\",\n        vector_field_name=\"embedding\",\n        id_type=\"int\",\n        metric_type=\"IP\",\n        max_length=65535,\n        enable_dynamic=True,\n    )\ntext = requests.get(\n    \"https://raw.githubusercontent.com/wxywb/dspy_dataset_sample/master/sample_data.txt\"\n).text\n\nfor idx, passage in enumerate(text.split(\"\\n\")):\n    if len(passage) == 0:\n        continue\n    client.insert(\n        collection_name=\"dspy_example\",\n        data=[\n            {\n                \"id\": idx,\n                \"embedding\": openai_embedding_function(passage)[0],\n                \"text\": passage,\n            }\n        ],\n    )\n","from dspy.retrieve.milvus_rm import MilvusRM\nimport dspy\n\nretriever_model = MilvusRM(\n    collection_name=\"dspy_example\",\n    uri=MILVUS_URI,\n    token=MILVUS_TOKEN,  # ignore this if no token is required for Milvus connection\n    embedding_function=openai_embedding_function,\n)\nturbo = dspy.OpenAI(model=\"gpt-3.5-turbo\")\ndspy.settings.configure(lm=turbo)\n","class GenerateAnswer(dspy.Signature):\n    \"\"\"Answer questions with short factoid answers.\"\"\"\n\n    context = dspy.InputField(desc=\"may contain relevant facts\")\n    question = dspy.InputField()\n    answer = dspy.OutputField(desc=\"often between 1 and 5 words\")\n","class RAG(dspy.Module):\n    def __init__(self, rm):\n        super().__init__()\n        self.retrieve = rm\n\n        # This signature indicates the task imposed on the COT module.\n        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)\n\n    def forward(self, question):\n        # Use milvus_rm to retrieve context for the question.\n        context = self.retrieve(question).passages\n        # COT module takes \"context, query\" and output \"answer\".\n        prediction = self.generate_answer(context=context, question=question)\n        return dspy.Prediction(\n            context=[item.long_text for item in context], answer=prediction.answer\n        )\n","rag = RAG(retriever_model)\nprint(rag(\"who write At My Window\").answer)\n","from dspy.evaluate.evaluate import Evaluate\nfrom dspy.datasets import HotPotQA\n\nevaluate_on_hotpotqa = Evaluate(\n    devset=devset, num_threads=1, display_progress=False, display_table=5\n)\n\nmetric = dspy.evaluate.answer_exact_match\nscore = evaluate_on_hotpotqa(rag, metric=metric)\nprint(\"rag:\", score)\n","from dspy.teleprompt import BootstrapFewShot\n\n# Validation logic: check that the predicted answer is correct.# Also check that the retrieved context does contain that 
answer.\n\n\ndef validate_context_and_answer(example, pred, trace=None):\n    answer_EM = dspy.evaluate.answer_exact_match(example, pred)\n    answer_PM = dspy.evaluate.answer_passage_match(example, pred)\n    return answer_EM and answer_PM\n\n\n# Set up a basic teleprompter, which will compile our RAG program.\nteleprompter = BootstrapFewShot(metric=validate_context_and_answer)\n\n# Compile!\ncompiled_rag = teleprompter.compile(rag, trainset=trainset)\n\n# Now compiled_rag is optimized and ready to answer your new question!\n# Now, let’s evaluate the compiled RAG program.\nscore = evaluate_on_hotpotqa(compiled_rag, metric=metric)\nprint(score)\nprint(\"compile_rag:\", score)\n"],"headingContent":"Integrate Milvus with DSPy","anchorList":[{"label":"Milvus 与 DSPy 集成","href":"Integrate-Milvus-with-DSPy","type":1,"isActive":false},{"label":"什么是 DSPy","href":"What-is-DSPy","type":2,"isActive":false},{"label":"使用 DSPy 的好处","href":"Benefits-of-using-DSPy","type":2,"isActive":false},{"label":"模块","href":"Modules","type":2,"isActive":false},{"label":"为什么在 DSPy 中使用 Milvus","href":"Why-Milvus-in-DSPy","type":2,"isActive":false},{"label":"示例","href":"Examples","type":2,"isActive":false},{"label":"总结","href":"Summary","type":2,"isActive":false}]}
        \ No newline at end of file
        diff --git a/localization/v2.4.x/site/zh/integrations/integrate_with_dspy.md b/localization/v2.4.x/site/zh/integrations/integrate_with_dspy.md
        index 1215130e8..f3f633fb8 100644
        --- a/localization/v2.4.x/site/zh/integrations/integrate_with_dspy.md
        +++ b/localization/v2.4.x/site/zh/integrations/integrate_with_dspy.md
        @@ -1,10 +1,9 @@
         ---
         id: integrate_with_dspy.md
         summary: 本指南演示了如何使用 DSPy 的 Retriever 模块之一 MilvusRM 来优化 RAG 程序。
        -title: 将 Milvus 与 DSPy 集成
        +title: Milvus 与 DSPy 集成
         ---
        -
        -

Integrate Milvus with DSPy


What is DSPy

DSPy, introduced by the Stanford NLP Group, stands as a groundbreaking programmatic framework designed to optimize prompts and weights within language models, particularly valuable in scenarios where large language models (LLMs) are integrated across multiple stages of a pipeline. Unlike conventional prompt engineering techniques reliant on manual crafting and tweaking, DSPy adopts a learning-based approach. By assimilating query-answer examples, DSPy generates optimized prompts dynamically, tailored to specific tasks. This innovative methodology enables the seamless reassembly of entire pipelines, eliminating the need for continuous manual prompt adjustments. DSPy's Pythonic syntax offers various composable and declarative modules, simplifying the instruction of LLMs.

Benefits of using DSPy

• Programming Approach: DSPy provides a systematic programming approach for LM pipeline development by abstracting pipelines as text transformation graphs instead of just prompting the LLMs. Its declarative modules enable structured design and optimization, replacing the trial-and-error method of traditional prompt templates.
• Performance Improvement: DSPy demonstrates significant performance gains over existing methods. Through case studies, it outperforms standard prompting and expert-created demonstrations, showcasing its versatility and effectiveness even when compiled to smaller LM models.
• Modularized Abstraction: DSPy effectively abstracts intricate aspects of LM pipeline development, such as decomposition, fine-tuning, and model selection. With DSPy, a concise program can seamlessly translate into instructions for various models, such as GPT-4, Llama2-13b, or T5-base, streamlining development and enhancing performance.
@@ -71,7 +71,7 @@ title: 将 Milvus 与 DSPy 集成

There are many components necessary to build an LLM pipeline. Here, we'll describe some key components to provide a high-level understanding of how DSPy operates.

DSPy Modules

@@ -115,20 +115,19 @@ $ pip install -U pymilvus

If you are using Google Colab, to enable the dependencies just installed, you may need to restart the runtime (click on the "Runtime" menu at the top of the screen, and select "Restart session" from the dropdown menu).

Loading the dataset

In this example, we use the HotPotQA, a collection of complex question-answer pairs, as our training dataset. We can load them through the HotPotQA class.

        from dspy.datasets import HotPotQA
         
         # Load the dataset.
         dataset = HotPotQA(
        -train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0
        +    train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0
         )
         
         # Tell DSPy that the 'question' field is the input. Any other fields are labels and/or metadata.
         trainset = [x.with_inputs("question") for x in dataset.train]
         devset = [x.with_inputs("question") for x in dataset.dev]
         
Ingest the data into the Milvus vector database

Ingest the context information into the Milvus collection for vector retrieval. This collection should have an embedding field and a text field. We use OpenAI's text-embedding-3-small model as the default query embedding function in this case.

        import requests
         import os
         
        @@ -142,50 +141,48 @@ MILVUS_TOKEN = ""
         client = MilvusClient(uri=MILVUS_URI, token=MILVUS_TOKEN)
         
         if "dspy_example" not in client.list_collections():
        -client.create_collection(
        -collection_name="dspy_example",
        -overwrite=True,
        -dimension=1536,
        -primary_field_name="id",
        -vector_field_name="embedding",
        -id_type="int",
        -metric_type="IP",
        -max_length=65535,
        -enable_dynamic=True,
        -)
        +    client.create_collection(
        +        collection_name="dspy_example",
        +        overwrite=True,
        +        dimension=1536,
        +        primary_field_name="id",
        +        vector_field_name="embedding",
        +        id_type="int",
        +        metric_type="IP",
        +        max_length=65535,
        +        enable_dynamic=True,
        +    )
         text = requests.get(
        -"https://raw.githubusercontent.com/wxywb/dspy_dataset_sample/master/sample_data.txt"
        +    "https://raw.githubusercontent.com/wxywb/dspy_dataset_sample/master/sample_data.txt"
         ).text
         
         for idx, passage in enumerate(text.split("\n")):
        -if len(passage) == 0:
        -continue
        -client.insert(
        -collection_name="dspy_example",
        -data=[
        -{
        -"id": idx,
        -"embedding": openai_embedding_function(passage)[0],
        -"text": passage,
        -}
        -],
        -)
        +    if len(passage) == 0:
        +        continue
        +    client.insert(
        +        collection_name="dspy_example",
        +        data=[
        +            {
        +                "id": idx,
        +                "embedding": openai_embedding_function(passage)[0],
        +                "text": passage,
        +            }
        +        ],
        +    )
         
Define the MilvusRM.

Now, you need to define the MilvusRM.

        from dspy.retrieve.milvus_rm import MilvusRM
         import dspy
         
         retriever_model = MilvusRM(
        -collection_name="dspy_example",
        -uri=MILVUS_URI,
        -token=MILVUS_TOKEN, # ignore this if no token is required for Milvus connection
        -embedding_function=openai_embedding_function,
        +    collection_name="dspy_example",
        +    uri=MILVUS_URI,
        +    token=MILVUS_TOKEN,  # ignore this if no token is required for Milvus connection
        +    embedding_function=openai_embedding_function,
         )
         turbo = dspy.OpenAI(model="gpt-3.5-turbo")
         dspy.settings.configure(lm=turbo)
         
Building signatures

Now that we have loaded the data, let's start defining the signatures for the sub-tasks of our pipeline. We can identify our simple input question and output answer. But since we are building a RAG pipeline, we'd like to retrieve contextual information from Milvus. So let's define our signature as context, question --> answer.

        class GenerateAnswer(dspy.Signature):
             """Answer questions with short factoid answers."""
        @@ -193,9 +190,7 @@ dspy.settings.configure(lm=turbo)
             context = dspy.InputField(desc="may contain relevant facts")
             question = dspy.InputField()
             answer = dspy.OutputField(desc="often between 1 and 5 words")
        -
         
        -

We include short descriptions for the context and answer fields to define clearer guidelines on what the model will receive and should generate.

Building the pipeline

Now, let's define the RAG pipeline.

        class RAG(dspy.Module):
        @@ -214,9 +209,7 @@ dspy.settings.configure(lm=turbo)
                 return dspy.Prediction(
                     context=[item.long_text for item in context], answer=prediction.answer
                 )
        -
         
        -

Executing the pipeline and getting the results

Now that we've built this RAG pipeline, let's try it out and get results.

        rag = RAG(retriever_model)
         print(rag("who write At My Window").answer)
        @@ -228,14 +221,13 @@ dspy.settings.configure(lm=turbo)
         from dspy.datasets import HotPotQA
         
         evaluate_on_hotpotqa = Evaluate(
        -devset=devset, num_threads=1, display_progress=False, display_table=5
        +    devset=devset, num_threads=1, display_progress=False, display_table=5
         )
         
         metric = dspy.evaluate.answer_exact_match
         score = evaluate_on_hotpotqa(rag, metric=metric)
         print("rag:", score)
         
Optimizing the pipeline

After defining this program, the next step is compilation. This process updates the parameters within each module to enhance performance. The compilation process depends on three critical factors:

• Training Set: We'll utilize the 20 question-answer examples from our training dataset for this demonstration.
• Validation Metric: We will use the simple validate_context_and_answer metric, which verifies the accuracy of the predicted answer and ensures that the retrieved context includes the answer.
• Specific Teleprompter: DSPy's BootstrapFewShot teleprompter compiles the program by bootstrapping effective few-shot demonstrations from the training examples.

@@ -246,10 +238,12 @@ score = evaluate_on_hotpotqa(rag, metric=metric)
 # Validation logic: check that the predicted answer is correct.
 # Also check that the retrieved context does contain that answer.
+
 def validate_context_and_answer(example, pred, trace=None):
-answer_EM = dspy.evaluate.answer_exact_match(example, pred)
-answer_PM = dspy.evaluate.answer_passage_match(example, pred)
-return answer_EM and answer_PM
+    answer_EM = dspy.evaluate.answer_exact_match(example, pred)
+    answer_PM = dspy.evaluate.answer_passage_match(example, pred)
+    return answer_EM and answer_PM
+
 # Set up a basic teleprompter, which will compile our RAG program.
 teleprompter = BootstrapFewShot(metric=validate_context_and_answer)

@@ -263,7 +257,6 @@ score = evaluate_on_hotpotqa(compiled_rag, metric=metric)
 print(score)
 print("compile_rag:", score)
The Ragas score has increased from the previous 50.0 to 52.0, indicating an improvement in answer quality.

Summary

DSPy marks a leap in language model interactions through its programmable interface, which facilitates algorithmic and automated optimization of model prompts and weights. By leveraging DSPy for RAG implementation, adaptability to varying language models or datasets becomes a breeze, drastically reducing the need for tedious manual interventions.

    diff --git a/localization/v2.4.x/site/zh/integrations/integrate_with_haystack.json b/localization/v2.4.x/site/zh/integrations/integrate_with_haystack.json index 862ac9af8..a365411e0 100644 --- a/localization/v2.4.x/site/zh/integrations/integrate_with_haystack.json +++ b/localization/v2.4.x/site/zh/integrations/integrate_with_haystack.json @@ -1 +1 @@ -{"codeList":["! pip install --upgrade --quiet pymilvus milvus-haystack markdown-it-py mdit_plain\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","import os\nimport urllib.request\n\nurl = \"https://www.gutenberg.org/cache/epub/7785/pg7785.txt\"\nfile_path = \"./davinci.txt\"\n\nif not os.path.exists(file_path):\n urllib.request.urlretrieve(url, file_path)\n","from haystack import Pipeline\nfrom haystack.components.converters import MarkdownToDocument\nfrom haystack.components.embedders import OpenAIDocumentEmbedder, OpenAITextEmbedder\nfrom haystack.components.preprocessors import DocumentSplitter\nfrom haystack.components.writers import DocumentWriter\nfrom haystack.utils import Secret\n\nfrom milvus_haystack import MilvusDocumentStore\nfrom milvus_haystack.milvus_embedding_retriever import MilvusEmbeddingRetriever\n\n\ndocument_store = MilvusDocumentStore(\n connection_args={\"uri\": \"./milvus.db\"},\n # connection_args={\"uri\": \"http://localhost:19530\"},\n # connection_args={\"uri\": YOUR_ZILLIZ_CLOUD_URI, \"token\": Secret.from_env_var(\"ZILLIZ_CLOUD_API_KEY\")},\n drop_old=True,\n)\n","indexing_pipeline = Pipeline()\nindexing_pipeline.add_component(\"converter\", MarkdownToDocument())\nindexing_pipeline.add_component(\n \"splitter\", DocumentSplitter(split_by=\"sentence\", split_length=2)\n)\nindexing_pipeline.add_component(\"embedder\", OpenAIDocumentEmbedder())\nindexing_pipeline.add_component(\"writer\", DocumentWriter(document_store))\nindexing_pipeline.connect(\"converter\", \"splitter\")\nindexing_pipeline.connect(\"splitter\", \"embedder\")\nindexing_pipeline.connect(\"embedder\", \"writer\")\nindexing_pipeline.run({\"converter\": {\"sources\": [file_path]}})\n\nprint(\"Number of documents:\", document_store.count_documents())\n","question = 'Where is the painting \"Warrior\" currently stored?'\n\nretrieval_pipeline = Pipeline()\nretrieval_pipeline.add_component(\"embedder\", OpenAITextEmbedder())\nretrieval_pipeline.add_component(\n \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nretrieval_pipeline.connect(\"embedder\", \"retriever\")\n\nretrieval_results = retrieval_pipeline.run({\"embedder\": {\"text\": question}})\n\nfor doc in retrieval_results[\"retriever\"][\"documents\"]:\n print(doc.content)\n print(\"-\" * 10)\n","from haystack.utils import Secret\nfrom haystack.components.builders import PromptBuilder\nfrom haystack.components.generators import OpenAIGenerator\n\nprompt_template = \"\"\"Answer the following query based on the provided context. 
If the context does\n not include an answer, reply with 'I don't know'.\\n\n Query: {{query}}\n Documents:\n {% for doc in documents %}\n {{ doc.content }}\n {% endfor %}\n Answer:\n \"\"\"\n\nrag_pipeline = Pipeline()\nrag_pipeline.add_component(\"text_embedder\", OpenAITextEmbedder())\nrag_pipeline.add_component(\n \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nrag_pipeline.add_component(\"prompt_builder\", PromptBuilder(template=prompt_template))\nrag_pipeline.add_component(\n \"generator\",\n OpenAIGenerator(\n api_key=Secret.from_token(os.getenv(\"OPENAI_API_KEY\")),\n generation_kwargs={\"temperature\": 0},\n ),\n)\nrag_pipeline.connect(\"text_embedder.embedding\", \"retriever.query_embedding\")\nrag_pipeline.connect(\"retriever.documents\", \"prompt_builder.documents\")\nrag_pipeline.connect(\"prompt_builder\", \"generator\")\n\nresults = rag_pipeline.run(\n {\n \"text_embedder\": {\"text\": question},\n \"prompt_builder\": {\"query\": question},\n }\n)\nprint(\"RAG answer:\", results[\"generator\"][\"replies\"][0])\n"],"headingContent":"","anchorList":[{"label":"使用 Milvus 和 Haystack 的检索增强生成(RAG)","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Haystack","type":1,"isActive":false},{"label":"先决条件","href":"Prerequisites","type":2,"isActive":false},{"label":"准备数据","href":"Prepare-the-data","type":2,"isActive":false},{"label":"创建索引管道","href":"Create-the-indexing-Pipeline","type":2,"isActive":false},{"label":"创建检索管道","href":"Create-the-retrieval-pipeline","type":2,"isActive":false},{"label":"创建 RAG 管道","href":"Create-the-RAG-pipeline","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["! pip install --upgrade --quiet pymilvus milvus-haystack markdown-it-py mdit_plain\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","import os\nimport urllib.request\n\nurl = \"https://www.gutenberg.org/cache/epub/7785/pg7785.txt\"\nfile_path = \"./davinci.txt\"\n\nif not os.path.exists(file_path):\n urllib.request.urlretrieve(url, file_path)\n","from haystack import Pipeline\nfrom haystack.components.converters import MarkdownToDocument\nfrom haystack.components.embedders import OpenAIDocumentEmbedder, OpenAITextEmbedder\nfrom haystack.components.preprocessors import DocumentSplitter\nfrom haystack.components.writers import DocumentWriter\nfrom haystack.utils import Secret\n\nfrom milvus_haystack import MilvusDocumentStore\nfrom milvus_haystack.milvus_embedding_retriever import MilvusEmbeddingRetriever\n\n\ndocument_store = MilvusDocumentStore(\n connection_args={\"uri\": \"./milvus.db\"},\n # connection_args={\"uri\": \"http://localhost:19530\"},\n # connection_args={\"uri\": YOUR_ZILLIZ_CLOUD_URI, \"token\": Secret.from_env_var(\"ZILLIZ_CLOUD_API_KEY\")},\n drop_old=True,\n)\n","indexing_pipeline = Pipeline()\nindexing_pipeline.add_component(\"converter\", MarkdownToDocument())\nindexing_pipeline.add_component(\n \"splitter\", DocumentSplitter(split_by=\"sentence\", split_length=2)\n)\nindexing_pipeline.add_component(\"embedder\", OpenAIDocumentEmbedder())\nindexing_pipeline.add_component(\"writer\", DocumentWriter(document_store))\nindexing_pipeline.connect(\"converter\", \"splitter\")\nindexing_pipeline.connect(\"splitter\", \"embedder\")\nindexing_pipeline.connect(\"embedder\", \"writer\")\nindexing_pipeline.run({\"converter\": {\"sources\": [file_path]}})\n\nprint(\"Number of documents:\", document_store.count_documents())\n","question = 'Where is the painting \"Warrior\" currently 
stored?'\n\nretrieval_pipeline = Pipeline()\nretrieval_pipeline.add_component(\"embedder\", OpenAITextEmbedder())\nretrieval_pipeline.add_component(\n \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nretrieval_pipeline.connect(\"embedder\", \"retriever\")\n\nretrieval_results = retrieval_pipeline.run({\"embedder\": {\"text\": question}})\n\nfor doc in retrieval_results[\"retriever\"][\"documents\"]:\n print(doc.content)\n print(\"-\" * 10)\n","from haystack.utils import Secret\nfrom haystack.components.builders import PromptBuilder\nfrom haystack.components.generators import OpenAIGenerator\n\nprompt_template = \"\"\"Answer the following query based on the provided context. If the context does\n not include an answer, reply with 'I don't know'.\\n\n Query: {{query}}\n Documents:\n {% for doc in documents %}\n {{ doc.content }}\n {% endfor %}\n Answer:\n \"\"\"\n\nrag_pipeline = Pipeline()\nrag_pipeline.add_component(\"text_embedder\", OpenAITextEmbedder())\nrag_pipeline.add_component(\n \"retriever\", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)\n)\nrag_pipeline.add_component(\"prompt_builder\", PromptBuilder(template=prompt_template))\nrag_pipeline.add_component(\n \"generator\",\n OpenAIGenerator(\n api_key=Secret.from_token(os.getenv(\"OPENAI_API_KEY\")),\n generation_kwargs={\"temperature\": 0},\n ),\n)\nrag_pipeline.connect(\"text_embedder.embedding\", \"retriever.query_embedding\")\nrag_pipeline.connect(\"retriever.documents\", \"prompt_builder.documents\")\nrag_pipeline.connect(\"prompt_builder\", \"generator\")\n\nresults = rag_pipeline.run(\n {\n \"text_embedder\": {\"text\": question},\n \"prompt_builder\": {\"query\": question},\n }\n)\nprint(\"RAG answer:\", results[\"generator\"][\"replies\"][0])\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and Haystack","anchorList":[{"label":"使用 Milvus 和 HayStack 的检索增强生成(RAG)","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-Haystack","type":1,"isActive":false},{"label":"前提条件","href":"Prerequisites","type":2,"isActive":false},{"label":"准备数据","href":"Prepare-the-data","type":2,"isActive":false},{"label":"创建索引管道","href":"Create-the-indexing-Pipeline","type":2,"isActive":false},{"label":"创建检索管道","href":"Create-the-retrieval-pipeline","type":2,"isActive":false},{"label":"创建 RAG 管道","href":"Create-the-RAG-pipeline","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/integrations/integrate_with_haystack.md b/localization/v2.4.x/site/zh/integrations/integrate_with_haystack.md index 36f2534d9..5247ae26e 100644 --- a/localization/v2.4.x/site/zh/integrations/integrate_with_haystack.md +++ b/localization/v2.4.x/site/zh/integrations/integrate_with_haystack.md @@ -1,10 +1,9 @@ --- id: integrate_with_haystack.md -summary: 本指南演示了如何使用 Haystack 和 Milvus 建立检索增强生成(RAG)系统。 -title: 使用 Milvus 和 Haystack 的检索增强生成(RAG) +summary: 本指南演示了如何使用 HayStack 和 Milvus 建立检索增强生成(RAG)系统。 +title: 使用 Milvus 和 HayStack 的检索增强生成(RAG) --- - -

    使用 Milvus 和 Haystack 的检索增强生成(RAG)

    Open In Colab

    -

    本指南演示了如何使用 Haystack 和 Milvus 构建检索-增强生成(RAG)系统。

    -

    RAG 系统结合了检索系统和生成模型,可根据给定提示生成新文本。该系统首先使用 Milvus 从语料库中检索相关文档,然后使用生成模型根据检索到的文档生成新文本。

    -

    Haystack是 deepset 开发的开源 Python 框架,用于使用大型语言模型(LLM)构建自定义应用程序。Milvus是世界上最先进的开源向量数据库,用于支持嵌入式相似性搜索和人工智能应用。

    +

Open In Colab
GitHub Repository

    +

本指南演示了如何使用 Haystack 和 Milvus 建立一个检索增强生成(RAG)系统。

    +

    RAG 系统将检索系统与生成模型相结合,根据给定提示生成新文本。该系统首先使用 Milvus 从语料库中检索相关文档,然后使用生成模型根据检索到的文档生成新文本。

    +

Haystack是 deepset 公司推出的开源 Python 框架,用于使用大型语言模型(LLMs)构建定制应用程序。Milvus是世界上最先进的开源向量数据库,用于支持嵌入式相似性搜索和人工智能应用。

    前提条件

  • -

    准备数据

    -

    创建索引管道

    创建一个索引管道,将文本转换成文档,分割成句子并嵌入其中。然后将文档写入 Milvus 文档存储区。

    +

创建一个索引管道,将文本转换为文档、按句子分割并生成嵌入,然后将文档写入 Milvus 文档存储。

    from haystack import Pipeline
     from haystack.components.converters import MarkdownToDocument
     from haystack.components.embedders import OpenAIDocumentEmbedder, OpenAITextEmbedder
    @@ -103,18 +101,18 @@ urllib.request.urlretrieve(url, file_path)
     from milvus_haystack import MilvusDocumentStore
     from milvus_haystack.milvus_embedding_retriever import MilvusEmbeddingRetriever
     
    +
     document_store = MilvusDocumentStore(
    -connection_args={"uri": "./milvus.db"},
    -# connection_args={"uri": "http://localhost:19530"},
    -# connection_args={"uri": YOUR_ZILLIZ_CLOUD_URI, "token": Secret.from_env_var("ZILLIZ_CLOUD_API_KEY")},
    -drop_old=True,
    +    connection_args={"uri": "./milvus.db"},
    +    # connection_args={"uri": "http://localhost:19530"},
    +    # connection_args={"uri": YOUR_ZILLIZ_CLOUD_URI, "token": Secret.from_env_var("ZILLIZ_CLOUD_API_KEY")},
    +    drop_old=True,
     )
     
    -

    连接参数

      -
    • uri 设置为本地文件,如./milvus.db ,是最方便的方法,因为它会自动利用Milvus Lite将所有数据存储到该文件中。
    • +
    • uri 设置为本地文件,例如./milvus.db ,是最方便的方法,因为它会自动利用Milvus Lite将所有数据存储到这个文件中。
    • 如果数据规模较大,可以在docker 或 kubernetes 上设置性能更强的 Milvus 服务器。在此设置中,请使用服务器 uri,例如http://localhost:19530 ,作为您的uri
• 如果你想使用Zilliz Cloud(Milvus 的全托管云服务),请调整uritoken,它们与 Zilliz Cloud 中的公共端点和 API 密钥相对应。
@@ -133,7 +131,6 @@ indexing_pipeline.run({"converter": {
print("Number of documents:", document_store.count_documents())
    -
    Converting markdown files to Documents: 100%|█| 1/
     Calculating embeddings: 100%|█| 9/9 [00:05<00:00, 
     E20240516 10:40:32.945937 5309095 milvus_local.cpp:189] [SERVER][GetCollection][] Collecton HaystackCollection not existed
    @@ -159,23 +156,22 @@ Number of documents: 277
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
    -    

    创建检索管道,使用向量相似性搜索引擎从 Milvus 文档存储中检索文档。

    +

    创建一个检索管道,使用向量相似性搜索引擎从 Milvus 文档存储中检索文档。

    question = 'Where is the painting "Warrior" currently stored?'
     
     retrieval_pipeline = Pipeline()
     retrieval_pipeline.add_component("embedder", OpenAITextEmbedder())
     retrieval_pipeline.add_component(
    -"retriever", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)
    +    "retriever", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)
     )
     retrieval_pipeline.connect("embedder", "retriever")
     
     retrieval_results = retrieval_pipeline.run({"embedder": {"text": question}})
     
     for doc in retrieval_results["retriever"]["documents"]:
    -print(doc.content)
    -print("-" \* 10)
    +    print(doc.content)
    +    print("-" * 10)
     
    -
    ). The
     composition of this oil-painting seems to have been built up on the
     second cartoon, which he had made some eight years earlier, and which
    @@ -219,41 +215,40 @@ south-east staircase in the Victoria and Albert Museum.
     from haystack.components.generators import OpenAIGenerator
     
     prompt_template = """Answer the following query based on the provided context. If the context does
    -not include an answer, reply with 'I don't know'.\n
    -Query: {{query}}
    -Documents:
    -{% for doc in documents %}
    -{{ doc.content }}
    -{% endfor %}
    -Answer:
    -"""
    +                     not include an answer, reply with 'I don't know'.\n
    +                     Query: {{query}}
    +                     Documents:
    +                     {% for doc in documents %}
    +                        {{ doc.content }}
    +                     {% endfor %}
    +                     Answer:
    +                  """
     
     rag_pipeline = Pipeline()
     rag_pipeline.add_component("text_embedder", OpenAITextEmbedder())
     rag_pipeline.add_component(
    -"retriever", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)
    +    "retriever", MilvusEmbeddingRetriever(document_store=document_store, top_k=3)
     )
     rag_pipeline.add_component("prompt_builder", PromptBuilder(template=prompt_template))
     rag_pipeline.add_component(
    -"generator",
    -OpenAIGenerator(
    -api_key=Secret.from_token(os.getenv("OPENAI_API_KEY")),
    -generation_kwargs={"temperature": 0},
    -),
    +    "generator",
    +    OpenAIGenerator(
    +        api_key=Secret.from_token(os.getenv("OPENAI_API_KEY")),
    +        generation_kwargs={"temperature": 0},
    +    ),
     )
     rag_pipeline.connect("text_embedder.embedding", "retriever.query_embedding")
     rag_pipeline.connect("retriever.documents", "prompt_builder.documents")
     rag_pipeline.connect("prompt_builder", "generator")
     
     results = rag_pipeline.run(
    -{
    -"text_embedder": {"text": question},
    -"prompt_builder": {"query": question},
    -}
    +    {
    +        "text_embedder": {"text": question},
    +        "prompt_builder": {"query": question},
    +    }
     )
     print("RAG answer:", results["generator"]["replies"][0])
     
    -
    RAG answer: The painting "Warrior" is currently stored in the Malcolm Collection in the British Museum.
     
    -

    有关如何使用 milvus-haystack 的更多信息,请参阅milvus-haystack Readme

    +

有关如何使用 milvus-haystack 的更多信息,请参阅milvus-haystack Readme

    diff --git a/localization/v2.4.x/site/zh/integrations/integrate_with_hugging-face.json b/localization/v2.4.x/site/zh/integrations/integrate_with_hugging-face.json index 446bd01bb..8d06b6910 100644 --- a/localization/v2.4.x/site/zh/integrations/integrate_with_hugging-face.json +++ b/localization/v2.4.x/site/zh/integrations/integrate_with_hugging-face.json @@ -1 +1 @@ -{"codeList":["$ pip install --upgrade pymilvus transformers datasets torch\n","from datasets import load_dataset\n\n\nDATASET = \"squad\" # Name of dataset from HuggingFace Datasets\nINSERT_RATIO = 0.001 # Ratio of example dataset to be inserted\n\ndata = load_dataset(DATASET, split=\"validation\")\n# Generates a fixed subset. To generate a random subset, remove the seed.\ndata = data.train_test_split(test_size=INSERT_RATIO, seed=42)[\"test\"]\n# Clean up the data structure in the dataset.\ndata = data.map(\n lambda val: {\"answer\": val[\"answers\"][\"text\"][0]},\n remove_columns=[\"id\", \"answers\", \"context\"],\n)\n\n# View summary of example data\nprint(data)\n","from transformers import AutoTokenizer, AutoModel\nimport torch\n\nMODEL = (\n \"sentence-transformers/all-MiniLM-L6-v2\" # Name of model from HuggingFace Models\n)\nINFERENCE_BATCH_SIZE = 64 # Batch size of model inference\n\n# Load tokenizer & model from HuggingFace Hub\ntokenizer = AutoTokenizer.from_pretrained(MODEL)\nmodel = AutoModel.from_pretrained(MODEL)\n\n\ndef encode_text(batch):\n # Tokenize sentences\n encoded_input = tokenizer(\n batch[\"question\"], padding=True, truncation=True, return_tensors=\"pt\"\n )\n\n # Compute token embeddings\n with torch.no_grad():\n model_output = model(**encoded_input)\n\n # Perform pooling\n token_embeddings = model_output[0]\n attention_mask = encoded_input[\"attention_mask\"]\n input_mask_expanded = (\n attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n )\n sentence_embeddings = torch.sum(\n token_embeddings * input_mask_expanded, 1\n ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n\n # Normalize embeddings\n batch[\"question_embedding\"] = torch.nn.functional.normalize(\n sentence_embeddings, p=2, dim=1\n )\n return batch\n\n\ndata = data.map(encode_text, batched=True, batch_size=INFERENCE_BATCH_SIZE)\ndata_list = data.to_list()\n","from pymilvus import MilvusClient\n\n\nMILVUS_URI = \"./huggingface_milvus_test.db\" # Connection URI\nCOLLECTION_NAME = \"huggingface_test\" # Collection name\nDIMENSION = 384 # Embedding dimension depending on model\n\nmilvus_client = MilvusClient(MILVUS_URI)\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n collection_name=COLLECTION_NAME,\n dimension=DIMENSION,\n auto_id=True, # Enable auto id\n enable_dynamic_field=True, # Enable dynamic fields\n vector_field_name=\"question_embedding\", # Map vector field name and embedding column in dataset\n consistency_level=\"Strong\", # To enable search with latest data\n)\n","milvus_client.insert(collection_name=COLLECTION_NAME, data=data_list)\n","questions = {\n \"question\": [\n \"What is LGM?\",\n \"When did Massachusetts first mandate that children be educated in schools?\",\n ]\n}\n\n# Generate question embeddings\nquestion_embeddings = [v.tolist() for v in encode_text(questions)[\"question_embedding\"]]\n\n# Search across Milvus\nsearch_results = milvus_client.search(\n collection_name=COLLECTION_NAME,\n data=question_embeddings,\n limit=3, # How many search results to output\n 
output_fields=[\"answer\", \"question\"], # Include these fields in search results\n)\n\n# Print out results\nfor q, res in zip(questions[\"question\"], search_results):\n print(\"Question:\", q)\n for r in res:\n print(\n {\n \"answer\": r[\"entity\"][\"answer\"],\n \"score\": r[\"distance\"],\n \"original question\": r[\"entity\"][\"question\"],\n }\n )\n print(\"\\n\")\n"],"headingContent":"","anchorList":[{"label":"使用 Milvus 和 Hugging Face 进行问题解答","href":"Question-Answering-Using-Milvus-and-Hugging-Face","type":1,"isActive":false},{"label":"开始之前","href":"Before-you-begin","type":2,"isActive":false},{"label":"准备数据","href":"Prepare-data","type":2,"isActive":false},{"label":"插入数据","href":"Insert-data","type":2,"isActive":false},{"label":"提出问题","href":"Ask-questions","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install --upgrade pymilvus transformers datasets torch\n","from datasets import load_dataset\n\n\nDATASET = \"squad\" # Name of dataset from HuggingFace Datasets\nINSERT_RATIO = 0.001 # Ratio of example dataset to be inserted\n\ndata = load_dataset(DATASET, split=\"validation\")\n# Generates a fixed subset. To generate a random subset, remove the seed.\ndata = data.train_test_split(test_size=INSERT_RATIO, seed=42)[\"test\"]\n# Clean up the data structure in the dataset.\ndata = data.map(\n lambda val: {\"answer\": val[\"answers\"][\"text\"][0]},\n remove_columns=[\"id\", \"answers\", \"context\"],\n)\n\n# View summary of example data\nprint(data)\n","from transformers import AutoTokenizer, AutoModel\nimport torch\n\nMODEL = (\n \"sentence-transformers/all-MiniLM-L6-v2\" # Name of model from HuggingFace Models\n)\nINFERENCE_BATCH_SIZE = 64 # Batch size of model inference\n\n# Load tokenizer & model from HuggingFace Hub\ntokenizer = AutoTokenizer.from_pretrained(MODEL)\nmodel = AutoModel.from_pretrained(MODEL)\n\n\ndef encode_text(batch):\n # Tokenize sentences\n encoded_input = tokenizer(\n batch[\"question\"], padding=True, truncation=True, return_tensors=\"pt\"\n )\n\n # Compute token embeddings\n with torch.no_grad():\n model_output = model(**encoded_input)\n\n # Perform pooling\n token_embeddings = model_output[0]\n attention_mask = encoded_input[\"attention_mask\"]\n input_mask_expanded = (\n attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n )\n sentence_embeddings = torch.sum(\n token_embeddings * input_mask_expanded, 1\n ) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n\n # Normalize embeddings\n batch[\"question_embedding\"] = torch.nn.functional.normalize(\n sentence_embeddings, p=2, dim=1\n )\n return batch\n\n\ndata = data.map(encode_text, batched=True, batch_size=INFERENCE_BATCH_SIZE)\ndata_list = data.to_list()\n","from pymilvus import MilvusClient\n\n\nMILVUS_URI = \"./huggingface_milvus_test.db\" # Connection URI\nCOLLECTION_NAME = \"huggingface_test\" # Collection name\nDIMENSION = 384 # Embedding dimension depending on model\n\nmilvus_client = MilvusClient(MILVUS_URI)\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(\n collection_name=COLLECTION_NAME,\n dimension=DIMENSION,\n auto_id=True, # Enable auto id\n enable_dynamic_field=True, # Enable dynamic fields\n vector_field_name=\"question_embedding\", # Map vector field name and embedding column in dataset\n consistency_level=\"Strong\", # To enable search with latest data\n)\n","milvus_client.insert(collection_name=COLLECTION_NAME, 
data=data_list)\n","questions = {\n \"question\": [\n \"What is LGM?\",\n \"When did Massachusetts first mandate that children be educated in schools?\",\n ]\n}\n\n# Generate question embeddings\nquestion_embeddings = [v.tolist() for v in encode_text(questions)[\"question_embedding\"]]\n\n# Search across Milvus\nsearch_results = milvus_client.search(\n collection_name=COLLECTION_NAME,\n data=question_embeddings,\n limit=3, # How many search results to output\n output_fields=[\"answer\", \"question\"], # Include these fields in search results\n)\n\n# Print out results\nfor q, res in zip(questions[\"question\"], search_results):\n print(\"Question:\", q)\n for r in res:\n print(\n {\n \"answer\": r[\"entity\"][\"answer\"],\n \"score\": r[\"distance\"],\n \"original question\": r[\"entity\"][\"question\"],\n }\n )\n print(\"\\n\")\n"],"headingContent":"Question Answering Using Milvus and Hugging Face","anchorList":[{"label":"使用 Milvus 和 Hugging Face 进行问题解答","href":"Question-Answering-Using-Milvus-and-Hugging-Face","type":1,"isActive":false},{"label":"开始之前","href":"Before-you-begin","type":2,"isActive":false},{"label":"准备数据","href":"Prepare-data","type":2,"isActive":false},{"label":"插入数据","href":"Insert-data","type":2,"isActive":false},{"label":"提出问题","href":"Ask-questions","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/integrations/integrate_with_hugging-face.md b/localization/v2.4.x/site/zh/integrations/integrate_with_hugging-face.md index 22c245716..74ef3c4e1 100644 --- a/localization/v2.4.x/site/zh/integrations/integrate_with_hugging-face.md +++ b/localization/v2.4.x/site/zh/integrations/integrate_with_hugging-face.md @@ -1,6 +1,8 @@ --- id: integrate_with_hugging-face.md -summary: 本教程展示了如何使用数据加载器和嵌入生成器 Hugging Face 进行数据处理,并使用向量数据库 Milvus 进行语义搜索,从而构建一个问题解答系统。 +summary: >- + 本教程展示了如何使用 Hugging Face 作为数据加载器和嵌入生成器进行数据处理,并使用 Milvus + 作为向量数据库进行语义搜索,从而构建一个问题解答系统。 title: 使用 Milvus 和 Hugging Face 进行问题解答 ---

    使用 Milvus 和 Hugging Face 进行问题解答

    Open In Colab

    +

Open In Colab
GitHub Repository

    基于语义搜索的问题解答系统的工作原理是从给定查询问题的问答数据集中找出最相似的问题。一旦确定了最相似的问题,数据集中的相应答案就会被视为查询问题的答案。这种方法依靠语义相似性度量来确定问题之间的相似性并检索相关答案。

    本教程展示了如何使用Hugging Face作为数据加载器和嵌入生成器进行数据处理,并使用Milvus作为向量数据库进行语义搜索,从而构建一个问题解答系统。
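这一"检索最相似问题并返回其对应答案"的思路可以用下面的最小示意来说明(并非本教程的正式实现;其中 `embed` 为假设的占位函数,实际教程使用 Hugging Face 的句子嵌入模型):

```python
import numpy as np

# Hypothetical QA pairs; in the tutorial these come from the SQuAD dataset.
questions = ["What is LGM?", "Who proposed the Turing Test?"]
answers = ["Last Glacial Maximum", "Alan Turing"]

def embed(texts):
    # Placeholder embedding function returning L2-normalized vectors;
    # replace with a real model such as all-MiniLM-L6-v2.
    rng = np.random.default_rng(0)
    vecs = rng.normal(size=(len(texts), 384))
    return vecs / np.linalg.norm(vecs, axis=1, keepdims=True)

corpus_vecs = embed(questions)
query_vec = embed(["When was the Turing Test proposed?"])[0]

# With normalized vectors, cosine similarity reduces to a dot product;
# the answer of the most similar stored question is returned.
best = int(np.argmax(corpus_vecs @ query_vec))
print(questions[best], "->", answers[best])
```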

    开始之前

    你需要确保安装了所有必需的依赖项:

      -
    • pymilvus: python 软件包可与 Milvus 或 Zilliz Cloud 提供的向量数据库服务配合使用。
    • +
• pymilvus:与 Milvus 或 Zilliz Cloud 提供的向量数据库服务配合使用的 Python 软件包。
• datasetstransformers:Hugging Face 软件包,用于管理数据并调用模型。
• torch:一个功能强大的库,提供高效的张量计算和深度学习工具。
@@ -86,7 +89,7 @@ data = data.map(
    num_rows: 11
})
    -

    要生成问题的嵌入模型,您可以从 Hugging Face 模型中选择一个文本嵌入模型。在本教程中,我们将以小型句子嵌入模型all-MiniLM-L6-v2为例。

    +

    要生成问题的嵌入,您可以从 Hugging Face 模型中选择一个文本嵌入模型。在本教程中,我们将以小型句子嵌入模型all-MiniLM-L6-v2为例。

    from transformers import AutoTokenizer, AutoModel
     import torch
     
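上面的代码片段在 diff 中被截断;其核心是对模型输出的 token 向量按注意力掩码做均值池化并归一化,与本页完整示例一致的示意如下:

```python
import torch
from transformers import AutoTokenizer, AutoModel

MODEL = "sentence-transformers/all-MiniLM-L6-v2"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModel.from_pretrained(MODEL)

def encode_text(batch):
    # Tokenize the questions and run the model without gradient tracking.
    encoded = tokenizer(
        batch["question"], padding=True, truncation=True, return_tensors="pt"
    )
    with torch.no_grad():
        output = model(**encoded)

    # Mean pooling over token embeddings, weighted by the attention mask,
    # followed by L2 normalization.
    token_embeddings = output[0]
    mask = encoded["attention_mask"].unsqueeze(-1).expand(token_embeddings.size()).float()
    summed = torch.sum(token_embeddings * mask, 1)
    counts = torch.clamp(mask.sum(1), min=1e-9)
    batch["question_embedding"] = torch.nn.functional.normalize(
        summed / counts, p=2, dim=1
    )
    return batch
```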
    @@ -146,7 +149,7 @@ data_list = data.to_list()

    现在,我们已经准备好带有问题嵌入的问答对。下一步是将它们插入向量数据库。

    -

    我们首先需要连接 Milvus 服务并创建一个 Milvus 集合。

    +

    我们首先需要连接 Milvus 服务并创建一个 Milvus Collections。

    from pymilvus import MilvusClient
     
     
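该片段同样被截断;与本页完整示例一致的建库步骤大致如下(集合名、维度等参数取自该示例):

```python
from pymilvus import MilvusClient

MILVUS_URI = "./huggingface_milvus_test.db"
COLLECTION_NAME = "huggingface_test"
DIMENSION = 384  # output dimension of all-MiniLM-L6-v2

milvus_client = MilvusClient(MILVUS_URI)
if milvus_client.has_collection(collection_name=COLLECTION_NAME):
    milvus_client.drop_collection(collection_name=COLLECTION_NAME)
milvus_client.create_collection(
    collection_name=COLLECTION_NAME,
    dimension=DIMENSION,
    auto_id=True,               # let Milvus generate primary keys
    enable_dynamic_field=True,  # allow fields not pre-defined in the schema
    vector_field_name="question_embedding",
    consistency_level="Strong", # make newly inserted data searchable at once
)
```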
    @@ -171,10 +174,10 @@ milvus_client.create_collection(
     
    • uri 设置为本地文件,如./milvus.db ,是最方便的方法,因为它会自动利用Milvus Lite将所有数据存储在此文件中。
    • 如果数据规模较大,可以在docker 或 kubernetes 上设置性能更强的 Milvus 服务器。在此设置中,请使用服务器 uri,例如http://localhost:19530 ,作为您的uri
    • -
    • 如果你想使用Zilliz Cloud(Milvus 的完全托管云服务),请调整uritoken ,它们与 Zilliz Cloud 中的公共端点和 Api 密钥相对应。
    • +
• 如果你想使用Zilliz Cloud(Milvus 的全托管云服务),请调整uritoken,它们与 Zilliz Cloud 中的公共端点和 API 密钥相对应。
    -

    将所有数据插入数据集:

    +

    将所有数据插入 Collections:

    milvus_client.insert(collection_name=COLLECTION_NAME, data=data_list)
     
    {'insert_count': 11,
    diff --git a/localization/v2.4.x/site/zh/integrations/integrate_with_jina.json b/localization/v2.4.x/site/zh/integrations/integrate_with_jina.json
    index 920c36c8b..dae88fcfc 100644
    --- a/localization/v2.4.x/site/zh/integrations/integrate_with_jina.json
    +++ b/localization/v2.4.x/site/zh/integrations/integrate_with_jina.json
    @@ -1 +1 @@
    -{"codeList":["$ pip install -U pymilvus\n$ pip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_api_key = \"\"\nef = JinaEmbeddingFunction(\"jina-embeddings-v2-base-en\", jina_api_key)\n\nquery = \"what is information retrieval?\"\ndoc = \"Information retrieval is the process of finding relevant information from a large collection of data or documents.\"\n\nqvecs = ef.encode_queries([query])\ndvecs = ef.encode_documents([doc])\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_api_key = \"\"\nef = JinaEmbeddingFunction(\"jina-embeddings-v2-base-de\", jina_api_key)\n\nquery = \"what is information retrieval?\"\ndoc = \"Information Retrieval ist der Prozess, relevante Informationen aus einer großen Sammlung von Daten oder Dokumenten zu finden.\"\n\nqvecs = ef.encode_queries([query])\ndvecs = ef.encode_documents([doc])\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_api_key = \"\"\nef = JinaEmbeddingFunction(\"jina-embeddings-v2-base-code\", jina_api_key)\n\n# Case1: Enhanced Code Navigation\n# query: text description of the functionality\n# document: relevant code snippet\n\nquery = \"function to calculate average in Python.\"\ndoc = \"\"\"\ndef calculate_average(numbers):\n    total = sum(numbers)\n    count = len(numbers)\n    return total / count\n\"\"\"\n\n# Case2: Streamlined Code Review\n# query: text description of the programming concept\n# document: relevante code snippet or PR\n\nquery = \"pull quest related to Collection\"\ndoc = \"fix:[restful v2] parameters of create collection ...\"\n\n# Case3: Automatic Documentation Assistance\n# query: code snippet you need explanation\n# document: relevante document or DocsString\n\nquery = \"What is Collection in Milvus\"\ndoc = \"\"\"\nIn Milvus, you store your vector embeddings in collections. All vector embeddings within a collection share the same dimensionality and distance metric for measuring similarity.\nMilvus collections support dynamic fields (i.e., fields not pre-defined in the schema) and automatic incrementation of primary keys.\n\"\"\"\n\nqvecs = ef.encode_queries([query])\ndvecs = ef.encode_documents([doc])\n","from pymilvus.model.dense import JinaEmbeddingFunction\nfrom pymilvus import MilvusClient\n\njina_api_key = \"\"\nef = JinaEmbeddingFunction(\"jina-embeddings-v2-base-en\", jina_api_key)\nDIMENSION = 768  # size of jina-embeddings-v2-base-en\n\ndoc = [\n    \"In 1950, Alan Turing published his seminal paper, 'Computing Machinery and Intelligence,' proposing the Turing Test as a criterion of intelligence, a foundational concept in the philosophy and development of artificial intelligence.\",\n    \"The Dartmouth Conference in 1956 is considered the birthplace of artificial intelligence as a field; here, John McCarthy and others coined the term 'artificial intelligence' and laid out its basic goals.\",\n    \"In 1951, British mathematician and computer scientist Alan Turing also developed the first program designed to play chess, demonstrating an early example of AI in game strategy.\",\n    \"The invention of the Logic Theorist by Allen Newell, Herbert A. 
Simon, and Cliff Shaw in 1955 marked the creation of the first true AI program, which was capable of solving logic problems, akin to proving mathematical theorems.\",\n]\n\ndvecs = ef.encode_documents(doc)\n\ndata = [\n    {\"id\": i, \"vector\": dvecs[i], \"text\": doc[i], \"subject\": \"history\"}\n    for i in range(len(dvecs))\n]\n\nmilvus_client = MilvusClient(\"./milvus_jina_demo.db\")\nCOLLECTION_NAME = \"demo_collection\"  # Milvus collection name\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\nres = milvus_client.insert(collection_name=COLLECTION_NAME, data=data)\n\nprint(res[\"insert_count\"])\n","queries = \"What event in 1956 marked the official birth of artificial intelligence as a discipline?\"\nqvecs = ef.encode_queries([queries])\n\nres = milvus_client.search(\n    collection_name=COLLECTION_NAME,  # target collection\n    data=[qvecs[0]],  # query vectors\n    limit=3,  # number of returned entities\n    output_fields=[\"text\", \"subject\"],  # specifies fields to be returned\n)[0]\n\nfor result in res:\n    print(result)\n","from pymilvus.model.reranker import JinaRerankFunction\n\njina_api_key = \"\"\n\nrf = JinaRerankFunction(\"jina-reranker-v1-base-en\", jina_api_key)\n\nquery = \"What event in 1956 marked the official birth of artificial intelligence as a discipline?\"\n\ndocuments = [\n    \"In 1950, Alan Turing published his seminal paper, 'Computing Machinery and Intelligence,' proposing the Turing Test as a criterion of intelligence, a foundational concept in the philosophy and development of artificial intelligence.\",\n    \"The Dartmouth Conference in 1956 is considered the birthplace of artificial intelligence as a field; here, John McCarthy and others coined the term 'artificial intelligence' and laid out its basic goals.\",\n    \"In 1951, British mathematician and computer scientist Alan Turing also developed the first program designed to play chess, demonstrating an early example of AI in game strategy.\",\n    \"The invention of the Logic Theorist by Allen Newell, Herbert A. Simon, and Cliff Shaw in 1955 marked the creation of the first true AI program, which was capable of solving logic problems, akin to proving mathematical theorems.\",\n]\n\nrf(query, documents)\n"],"headingContent":"","anchorList":[{"label":"将 Milvus 与 Jina AI 相结合","href":"Integrate-Milvus-with-Jina-AI","type":1,"isActive":false},{"label":"谁是 Jina AI","href":"Who-is-Jina-AI","type":2,"isActive":false},{"label":"Milvus 和 Jina AI 的嵌入式技术","href":"Milvus-and-Jina-AIs-Embedding","type":2,"isActive":false},{"label":"实例","href":"Examples","type":2,"isActive":false},{"label":"通用嵌入","href":"General-Purpose-Embedding","type":2,"isActive":false},{"label":"双语嵌入","href":"Bilingual-Embeddings","type":2,"isActive":false},{"label":"代码嵌入","href":"Code-Embeddings","type":2,"isActive":false},{"label":"使用 Jina 和 Milvus 进行语义搜索","href":"Semantic-Search-with-Jina--Milvus","type":2,"isActive":false},{"label":"Jina Reranker","href":"Jina-Reranker","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ pip install -U pymilvus\n$ pip install \"pymilvus[model]\"\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_api_key = \"\"\nef = JinaEmbeddingFunction(\n    \"jina-embeddings-v3\", \n    jina_api_key,\n    task=\"retrieval.passage\",\n    dimensions=1024\n)\n\nquery = \"what is information retrieval?\"\ndoc = \"Information retrieval is the process of finding relevant information from a large collection of data or documents.\"\n\nqvecs = ef.encode_queries([query])  # This method uses `retrieval.query` as the task\ndvecs = ef.encode_documents([doc])  # This method uses `retrieval.passage` as the task\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_api_key = \"\"\nef = JinaEmbeddingFunction(\"jina-embeddings-v2-base-de\", jina_api_key)\n\nquery = \"what is information retrieval?\"\ndoc = \"Information Retrieval ist der Prozess, relevante Informationen aus einer großen Sammlung von Daten oder Dokumenten zu finden.\"\n\nqvecs = ef.encode_queries([query])\ndvecs = ef.encode_documents([doc])\n","from pymilvus.model.dense import JinaEmbeddingFunction\n\njina_api_key = \"\"\nef = JinaEmbeddingFunction(\"jina-embeddings-v2-base-code\", jina_api_key)\n\n# Case1: Enhanced Code Navigation\n# query: text description of the functionality\n# document: relevant code snippet\n\nquery = \"function to calculate average in Python.\"\ndoc = \"\"\"\ndef calculate_average(numbers):\n    total = sum(numbers)\n    count = len(numbers)\n    return total / count\n\"\"\"\n\n# Case2: Streamlined Code Review\n# query: text description of the programming concept\n# document: relevante code snippet or PR\n\nquery = \"pull quest related to Collection\"\ndoc = \"fix:[restful v2] parameters of create collection ...\"\n\n# Case3: Automatic Documentation Assistance\n# query: code snippet you need explanation\n# document: relevante document or DocsString\n\nquery = \"What is Collection in Milvus\"\ndoc = \"\"\"\nIn Milvus, you store your vector embeddings in collections. All vector embeddings within a collection share the same dimensionality and distance metric for measuring similarity.\nMilvus collections support dynamic fields (i.e., fields not pre-defined in the schema) and automatic incrementation of primary keys.\n\"\"\"\n\nqvecs = ef.encode_queries([query])\ndvecs = ef.encode_documents([doc])\n","from pymilvus.model.dense import JinaEmbeddingFunction\nfrom pymilvus import MilvusClient\n\njina_api_key = \"\"\nDIMENSION = 1024  # `jina-embeddings-v3` supports flexible embedding sizes (32, 64, 128, 256, 512, 768, 1024), allowing for truncating embeddings to fit your application. \nef = JinaEmbeddingFunction(\n    \"jina-embeddings-v3\", \n    jina_api_key,\n    task=\"retrieval.passage\",\n    dimensions=DIMENSION,\n)\n\n\ndoc = [\n    \"In 1950, Alan Turing published his seminal paper, 'Computing Machinery and Intelligence,' proposing the Turing Test as a criterion of intelligence, a foundational concept in the philosophy and development of artificial intelligence.\",\n    \"The Dartmouth Conference in 1956 is considered the birthplace of artificial intelligence as a field; here, John McCarthy and others coined the term 'artificial intelligence' and laid out its basic goals.\",\n    \"In 1951, British mathematician and computer scientist Alan Turing also developed the first program designed to play chess, demonstrating an early example of AI in game strategy.\",\n    \"The invention of the Logic Theorist by Allen Newell, Herbert A. 
Simon, and Cliff Shaw in 1955 marked the creation of the first true AI program, which was capable of solving logic problems, akin to proving mathematical theorems.\",\n]\n\ndvecs = ef.encode_documents(doc) # This method uses `retrieval.passage` as the task\n\ndata = [\n    {\"id\": i, \"vector\": dvecs[i], \"text\": doc[i], \"subject\": \"history\"}\n    for i in range(len(dvecs))\n]\n\nmilvus_client = MilvusClient(\"./milvus_jina_demo.db\")\nCOLLECTION_NAME = \"demo_collection\"  # Milvus collection name\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\nres = milvus_client.insert(collection_name=COLLECTION_NAME, data=data)\n\nprint(res[\"insert_count\"])\n","queries = \"What event in 1956 marked the official birth of artificial intelligence as a discipline?\"\nqvecs = ef.encode_queries([queries]) # This method uses `retrieval.query` as the task\n\nres = milvus_client.search(\n    collection_name=COLLECTION_NAME,  # target collection\n    data=[qvecs[0]],  # query vectors\n    limit=3,  # number of returned entities\n    output_fields=[\"text\", \"subject\"],  # specifies fields to be returned\n)[0]\n\nfor result in res:\n    print(result)\n","from pymilvus.model.reranker import JinaRerankFunction\n\njina_api_key = \"\"\n\nrf = JinaRerankFunction(\"jina-reranker-v1-base-en\", jina_api_key)\n\nquery = \"What event in 1956 marked the official birth of artificial intelligence as a discipline?\"\n\ndocuments = [\n    \"In 1950, Alan Turing published his seminal paper, 'Computing Machinery and Intelligence,' proposing the Turing Test as a criterion of intelligence, a foundational concept in the philosophy and development of artificial intelligence.\",\n    \"The Dartmouth Conference in 1956 is considered the birthplace of artificial intelligence as a field; here, John McCarthy and others coined the term 'artificial intelligence' and laid out its basic goals.\",\n    \"In 1951, British mathematician and computer scientist Alan Turing also developed the first program designed to play chess, demonstrating an early example of AI in game strategy.\",\n    \"The invention of the Logic Theorist by Allen Newell, Herbert A. Simon, and Cliff Shaw in 1955 marked the creation of the first true AI program, which was capable of solving logic problems, akin to proving mathematical theorems.\",\n]\n\nrf(query, documents)\n"],"headingContent":"Integrate Milvus with Jina AI","anchorList":[{"label":"将 Milvus 与 Jina AI 相结合","href":"Integrate-Milvus-with-Jina-AI","type":1,"isActive":false},{"label":"谁是 Jina AI","href":"Who-is-Jina-AI","type":2,"isActive":false},{"label":"Milvus 和 Jina AI 的嵌入式技术","href":"Milvus-and-Jina-AIs-Embedding","type":2,"isActive":false},{"label":"实例","href":"Examples","type":2,"isActive":false},{"label":"通用 Embeddings","href":"General-Purpose-Embedding","type":2,"isActive":false},{"label":"双语嵌入模型","href":"Bilingual-Embeddings","type":2,"isActive":false},{"label":"代码嵌入","href":"Code-Embeddings","type":2,"isActive":false},{"label":"使用 Jina 和 Milvus 进行语义搜索","href":"Semantic-Search-with-Jina--Milvus","type":2,"isActive":false},{"label":"Jina Reranker","href":"Jina-Reranker","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/zh/integrations/integrate_with_jina.md b/localization/v2.4.x/site/zh/integrations/integrate_with_jina.md
    index 8f1a986c6..d499bf03e 100644
    --- a/localization/v2.4.x/site/zh/integrations/integrate_with_jina.md
    +++ b/localization/v2.4.x/site/zh/integrations/integrate_with_jina.md
    @@ -1,7 +1,7 @@
     ---
     id: integrate_with_jina.md
    -summary: 本指南演示了如何使用 Jina 嵌入和 Milvus 来执行相似性搜索和检索任务。
    -title: 将 Milvus 与 Jina 整合
    +summary: 本指南演示了如何使用 Jina 嵌入和 Milvus 进行相似性搜索和检索任务。
    +title: Milvus 与 Jina 集成
     ---
     

    将 Milvus 与 Jina AI 相结合

    Open In Colab

    -

    本指南演示了如何使用 Jina AI 嵌入和 Milvus 来执行相似性搜索和检索任务。

    +

Open In Colab
GitHub Repository

    +

    本指南演示了如何使用 Jina AI 嵌入和 Milvus 进行相似性搜索和检索任务。

    谁是 Jina AI

    Jina AI 于 2020 年在柏林成立,是一家领先的人工智能公司,致力于通过其搜索基础彻底改变人工智能的未来。Jina AI 专注于多模态人工智能,旨在通过其集成的组件套件(包括嵌入式、重行者、提示操作和核心基础架构),帮助企业和开发人员利用多模态数据的力量来创造价值和节约成本。 Jina AI 的尖端嵌入式拥有顶级性能,其特点是 8192 token 长度模型,是全面数据表示的理想选择。这些嵌入式系统提供多语种支持,并与 OpenAI 等领先平台无缝集成,为跨语言应用提供了便利。

    +

    Jina AI 于 2020 年在柏林成立,是一家领先的人工智能公司,致力于通过其搜索基础彻底改变人工智能的未来。Jina AI 专注于多模态人工智能,旨在通过其集成的组件套件(包括 embeddings、Rerankers、prompt ops 和核心基础架构),使企业和开发人员能够利用多模态数据的力量来创造价值和节约成本。 Jina AI 的尖端 embeddings 拥有顶级性能,采用 8192 token 长度模型,是全面数据表示的理想选择。这些 Embeddings 提供多语言支持,并与 OpenAI 等领先平台无缝集成,为跨语言应用提供了便利。

    Milvus 和 Jina AI 的嵌入式技术

    为了高效地存储和搜索这些嵌入式数据,提高速度和规模,需要为此设计特定的基础设施。Milvus 是一个广为人知的先进开源向量数据库,能够处理大规模向量数据。Milvus 可根据大量指标实现快速、准确的向量(嵌入)搜索。它的可扩展性允许无缝处理海量图像数据,即使数据集不断增长,也能确保高性能搜索操作。

    +

    为了高效、快速、大规模地存储和搜索这些 Embeddings,需要为此设计特定的基础设施。Milvus 是一个广为人知的先进开源向量数据库,能够处理大规模向量数据。Milvus 可根据大量指标实现快速、准确的向量(嵌入)搜索。它的可扩展性允许无缝处理海量图像数据,即使数据集不断增长,也能确保高性能的搜索操作。

    实例

    Jina 嵌入已经集成到 PyMilvus 模型库中。现在,我们将通过代码示例来演示如何实际使用 Jina 嵌入。

    +

    Jina 嵌入已经集成到 PyMilvus 模型库中。现在,我们将通过代码示例来演示如何实际使用 Jina embeddings。

    在开始之前,我们需要为 PyMilvus 安装模型库。

    $ pip install -U pymilvus
     $ pip install "pymilvus[model]"
    @@ -75,7 +76,7 @@ $ pip install "pymilvus[model]"
     

    如果您使用的是 Google Colab,为了启用刚刚安装的依赖项,您可能需要重启运行时。(点击屏幕上方的 "Runtime(运行时)"菜单,从下拉菜单中选择 "Restart session(重新启动会话)")。

    -

    通用嵌入

    Jina AI 的核心嵌入模型擅长理解详细的文本,因此非常适合语义搜索、内容分类,从而支持高级情感分析、文本摘要和个性化推荐系统。

    -
    from pymilvus.model.dense import JinaEmbeddingFunction
    +    

    Jina AI 的核心嵌入模型擅长理解详细文本,因此非常适合语义搜索、内容分类,从而支持高级情感分析、文本摘要和个性化推荐系统。

    +
    from pymilvus.model.dense import JinaEmbeddingFunction
     
     jina_api_key = "<YOUR_JINA_API_KEY>"
    -ef = JinaEmbeddingFunction("jina-embeddings-v2-base-en", jina_api_key)
    +ef = JinaEmbeddingFunction(
    +    "jina-embeddings-v3", 
    +    jina_api_key,
    +    task="retrieval.passage",
    +    dimensions=1024
    +)
     
     query = "what is information retrieval?"
     doc = "Information retrieval is the process of finding relevant information from a large collection of data or documents."
     
    -qvecs = ef.encode_queries([query])
    -dvecs = ef.encode_documents([doc])
    +qvecs = ef.encode_queries([query])  # This method uses `retrieval.query` as the task
    +dvecs = ef.encode_documents([doc])  # This method uses `retrieval.passage` as the task
     
    -

    双语嵌入

    在使用嵌入式搜索后,Jina Ai 还提供重排器来进一步提高检索质量。

    +

在使用嵌入进行搜索之后,Jina AI 还提供了 Rerankers 以进一步提高检索质量。

    from pymilvus.model.reranker import JinaRerankFunction
     
     jina_api_key = "<YOUR_JINA_API_KEY>"
    diff --git a/localization/v2.4.x/site/zh/integrations/integrate_with_llamaindex.json b/localization/v2.4.x/site/zh/integrations/integrate_with_llamaindex.json
    index 1085423d9..243befeec 100644
    --- a/localization/v2.4.x/site/zh/integrations/integrate_with_llamaindex.json
    +++ b/localization/v2.4.x/site/zh/integrations/integrate_with_llamaindex.json
    @@ -1 +1 @@
    -{"codeList":["$ pip install pymilvus>=2.4.2\n","$ pip install llama-index-vector-stores-milvus\n","$ pip install llama-index\n","import openai\n\nopenai.api_key = \"sk-***********\"\n","! mkdir -p 'data/'\n! wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham_essay.txt'\n! wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/10k/uber_2021.pdf' -O 'data/uber_2021.pdf'\n","from llama_index.core import SimpleDirectoryReader\n\n# load documents\ndocuments = SimpleDirectoryReader(\n    input_files=[\"./data/paul_graham_essay.txt\"]\n).load_data()\n\nprint(\"Document ID:\", documents[0].doc_id)\n","# Create an index over the documents\nfrom llama_index.core import VectorStoreIndex, StorageContext\nfrom llama_index.vector_stores.milvus import MilvusVectorStore\n\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", dim=1536, overwrite=True)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(documents, storage_context=storage_context)\n","query_engine = index.as_query_engine()\nres = query_engine.query(\"What did the author learn?\")\nprint(res)\n","res = query_engine.query(\"What challenges did the disease pose for the author?\")\nprint(res)\n","from llama_index.core import Document\n\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", dim=1536, overwrite=True)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(\n    [Document(text=\"The number that is being searched for is ten.\")],\n    storage_context,\n)\nquery_engine = index.as_query_engine()\nres = query_engine.query(\"Who is the author?\")\nprint(res)\n","del index, vector_store, storage_context, query_engine\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", overwrite=False)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(documents, storage_context=storage_context)\nquery_engine = index.as_query_engine()\nres = query_engine.query(\"What is the number?\")\nprint(res)\n","res = query_engine.query(\"Who is the author?\")\nprint(res)\n","from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters\n\n# Load all the two documents loaded before\ndocuments_all = SimpleDirectoryReader(\"./data/\").load_data()\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", dim=1536, overwrite=True)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(documents_all, storage_context)\n","filters = MetadataFilters(\n    filters=[ExactMatchFilter(key=\"file_name\", value=\"uber_2021.pdf\")]\n)\nquery_engine = index.as_query_engine(filters=filters)\nres = query_engine.query(\"What challenges did the disease pose for the author?\")\n\nprint(res)\n","filters = MetadataFilters(\n    filters=[ExactMatchFilter(key=\"file_name\", value=\"paul_graham_essay.txt\")]\n)\nquery_engine = index.as_query_engine(filters=filters)\nres = query_engine.query(\"What challenges did the disease pose for the author?\")\n\nprint(res)\n"],"headingContent":"","anchorList":[{"label":"使用 Milvus 和 LlamaIndex 
的检索增强生成(RAG)","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-LlamaIndex","type":1,"isActive":false},{"label":"开始之前","href":"Before-you-begin","type":2,"isActive":false},{"label":"开始","href":"Getting-Started","type":2,"isActive":false},{"label":"元数据过滤","href":"Metadata-filtering","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ pip install pymilvus>=2.4.2\n","$ pip install llama-index-vector-stores-milvus\n","$ pip install llama-index\n","import openai\n\nopenai.api_key = \"sk-***********\"\n","! mkdir -p 'data/'\n! wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham_essay.txt'\n! wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/docs/examples/data/10k/uber_2021.pdf' -O 'data/uber_2021.pdf'\n","from llama_index.core import SimpleDirectoryReader\n\n# load documents\ndocuments = SimpleDirectoryReader(\n    input_files=[\"./data/paul_graham_essay.txt\"]\n).load_data()\n\nprint(\"Document ID:\", documents[0].doc_id)\n","# Create an index over the documents\nfrom llama_index.core import VectorStoreIndex, StorageContext\nfrom llama_index.vector_stores.milvus import MilvusVectorStore\n\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", dim=1536, overwrite=True)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(documents, storage_context=storage_context)\n","query_engine = index.as_query_engine()\nres = query_engine.query(\"What did the author learn?\")\nprint(res)\n","res = query_engine.query(\"What challenges did the disease pose for the author?\")\nprint(res)\n","from llama_index.core import Document\n\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", dim=1536, overwrite=True)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(\n    [Document(text=\"The number that is being searched for is ten.\")],\n    storage_context,\n)\nquery_engine = index.as_query_engine()\nres = query_engine.query(\"Who is the author?\")\nprint(res)\n","del index, vector_store, storage_context, query_engine\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", overwrite=False)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(documents, storage_context=storage_context)\nquery_engine = index.as_query_engine()\nres = query_engine.query(\"What is the number?\")\nprint(res)\n","res = query_engine.query(\"Who is the author?\")\nprint(res)\n","from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters\n\n# Load all the two documents loaded before\ndocuments_all = SimpleDirectoryReader(\"./data/\").load_data()\n\nvector_store = MilvusVectorStore(uri=\"./milvus_demo.db\", dim=1536, overwrite=True)\nstorage_context = StorageContext.from_defaults(vector_store=vector_store)\nindex = VectorStoreIndex.from_documents(documents_all, storage_context)\n","filters = MetadataFilters(\n    filters=[ExactMatchFilter(key=\"file_name\", value=\"uber_2021.pdf\")]\n)\nquery_engine = index.as_query_engine(filters=filters)\nres = query_engine.query(\"What challenges did the disease pose for the author?\")\n\nprint(res)\n","filters = MetadataFilters(\n    filters=[ExactMatchFilter(key=\"file_name\", value=\"paul_graham_essay.txt\")]\n)\nquery_engine = index.as_query_engine(filters=filters)\nres = query_engine.query(\"What challenges did the disease pose for the author?\")\n\nprint(res)\n"],"headingContent":"Retrieval-Augmented Generation (RAG) with Milvus and LlamaIndex","anchorList":[{"label":"使用 Milvus 和 LlamaIndex 
的检索增强生成(RAG)","href":"Retrieval-Augmented-Generation-RAG-with-Milvus-and-LlamaIndex","type":1,"isActive":false},{"label":"开始之前","href":"Before-you-begin","type":2,"isActive":false},{"label":"开始","href":"Getting-Started","type":2,"isActive":false},{"label":"元数据过滤","href":"Metadata-filtering","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/zh/integrations/integrate_with_llamaindex.md b/localization/v2.4.x/site/zh/integrations/integrate_with_llamaindex.md
    index 7bb540e29..d88f04eb5 100644
    --- a/localization/v2.4.x/site/zh/integrations/integrate_with_llamaindex.md
    +++ b/localization/v2.4.x/site/zh/integrations/integrate_with_llamaindex.md
    @@ -1,6 +1,6 @@
     ---
     id: integrate_with_llamaindex.md
    -summary: 本指南演示了如何使用 LlamaIndex 和 Milvus 构建检索增强生成(RAG)系统。
    +summary: 本指南演示了如何使用 LlamaIndex 和 Milvus 建立检索增强生成(RAG)系统。
     title: 使用 Milvus 和 LlamaIndex 的检索增强生成(RAG)
     ---
     

    使用 Milvus 和 LlamaIndex 的检索增强生成(RAG)

    Open In Colab

    +

Open In Colab
GitHub Repository

本指南演示了如何使用 LlamaIndex 和 Milvus 构建检索增强生成(RAG)系统。

    RAG 系统结合了检索系统和生成模型,可根据给定提示生成新文本。该系统首先使用 Milvus 从语料库中检索相关文档,然后使用生成模型根据检索到的文档生成新文本。

    -

    LlamaIndex是一个简单、灵活的数据框架,用于将自定义数据源连接到大型语言模型(LLM)。Milvus是世界上最先进的开源向量数据库,专为支持嵌入式相似性搜索和人工智能应用而构建。

    +

    LlamaIndex是一个简单、灵活的数据框架,用于将自定义数据源连接到大型语言模型(LLMs)。Milvus是世界上最先进的开源向量数据库,专为支持嵌入式相似性搜索和人工智能应用而构建。

    在本笔记本中,我们将快速演示如何使用 MilvusVectorStore。
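其核心流程可概括为下面的示意(与本页完整代码一致;dim=1536 对应所用 OpenAI 嵌入模型的维度,数据文件来自本页的下载步骤):

```python
from llama_index.core import SimpleDirectoryReader, StorageContext, VectorStoreIndex
from llama_index.vector_stores.milvus import MilvusVectorStore

# Load the essay downloaded earlier in this guide.
documents = SimpleDirectoryReader(
    input_files=["./data/paul_graham_essay.txt"]
).load_data()

# Store vectors in a local Milvus Lite file.
vector_store = MilvusVectorStore(uri="./milvus_demo.db", dim=1536, overwrite=True)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)

query_engine = index.as_query_engine()
print(query_engine.query("What did the author learn?"))
```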

    开始之前

    Open In Colab

    -

    本指南展示了如何将OpenAI的嵌入式API与Milvus向量数据库结合使用,对文本进行语义搜索。

    +

Open In Colab
GitHub Repository

    +

    本指南展示了如何将OpenAI 的 Embedding API与 Milvus 向量数据库结合使用,对文本进行语义搜索。
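下面给出一个最小示意(模型名 text-embedding-3-small 与本地文件名均为此处的假设,并非该指南的确切实现),展示"用 OpenAI 生成嵌入并存入 Milvus 以供搜索"的基本流程:

```python
from openai import OpenAI
from pymilvus import MilvusClient

openai_client = OpenAI()  # reads OPENAI_API_KEY from the environment
milvus_client = MilvusClient("./milvus_openai_demo.db")  # hypothetical local file

docs = ["Artificial intelligence was founded as an academic discipline in 1956."]
resp = openai_client.embeddings.create(model="text-embedding-3-small", input=docs)
vectors = [d.embedding for d in resp.data]  # 1536-dimensional for this model

COLLECTION_NAME = "demo_collection"
if milvus_client.has_collection(collection_name=COLLECTION_NAME):
    milvus_client.drop_collection(collection_name=COLLECTION_NAME)
milvus_client.create_collection(
    collection_name=COLLECTION_NAME, dimension=len(vectors[0])
)
milvus_client.insert(
    collection_name=COLLECTION_NAME,
    data=[{"id": i, "vector": v, "text": docs[i]} for i, v in enumerate(vectors)],
)
```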

    开始

    Open In Colab

    +

Open In Colab
GitHub Repository

    本指南演示了如何使用 Ragas 评估基于Milvus 的检索增强生成(RAG)管道。

    RAG 系统结合了检索系统和生成模型,可根据给定提示生成新文本。该系统首先使用 Milvus 从语料库中检索相关文档,然后使用生成模型根据检索到的文档生成新文本。

    Ragas是一个帮助您评估 RAG 管道的框架。现有的工具和框架可以帮助您构建这些管道,但评估和量化管道性能可能很难。这就是 Ragas(RAG 评估)的用武之地。

    @@ -181,9 +182,9 @@ my_rag = RAG(openai_client=openai_clie

    至于MilvusClient 的参数:

      -
    • uri 设置为本地文件(如./milvus.db )是最方便的方法,因为它会自动利用Milvus Lite将所有数据存储到该文件中。
    • +
    • uri 设置为本地文件,如./milvus.db ,是最方便的方法,因为它会自动利用Milvus Lite将所有数据存储在此文件中。
    • 如果数据规模较大,可以在docker 或 kubernetes 上设置性能更强的 Milvus 服务器。在此设置中,请使用服务器 uri,例如http://localhost:19530 ,作为您的uri
    • -
    • 如果要使用Zilliz Cloud(Milvus 的完全托管云服务),请调整uritoken ,它们与 Zilliz Cloud 中的公共端点和 Api 密钥相对应。
    • +
• 如果你想使用Zilliz Cloud(Milvus 的全托管云服务),请调整uritoken,它们与 Zilliz Cloud 中的公共端点和 API 密钥相对应。

    运行 RAG 管道并获取结果

    我们使用 Ragas 来评估 RAG 管道结果的性能。

    -

    Ragas 提供了一套易于使用的指标。我们将Answer relevancyFaithfulnessContext recallContext precision 作为评估 RAG 管道的指标。有关指标的更多信息,请参阅Ragas 指标

    +

    Ragas 提供了一套易于使用的度量指标。我们将Answer relevancyFaithfulnessContext recallContext precision 作为评估 RAG 管道的指标。有关指标的更多信息,请参阅Ragas 指标

    from ragas import evaluate
     from ragas.metrics import (
         answer_relevancy,
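上面的导入在 diff 中被截断;一个可运行的完整示意如下(评估数据为此处虚构的占位示例,列名遵循 Ragas 的约定):

```python
from datasets import Dataset
from ragas import evaluate
from ragas.metrics import (
    answer_relevancy,
    faithfulness,
    context_recall,
    context_precision,
)

# Hypothetical evaluation data: each row pairs a question with the RAG answer,
# the retrieved contexts, and a reference ground truth.
rag_results = Dataset.from_dict(
    {
        "question": ["What is Milvus?"],
        "answer": ["Milvus is an open-source vector database."],
        "contexts": [["Milvus is an open-source vector database built for AI applications."]],
        "ground_truth": ["Milvus is an open-source vector database."],
    }
)

result = evaluate(
    rag_results,
    metrics=[answer_relevancy, faithfulness, context_recall, context_precision],
)
print(result)
```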
    diff --git a/localization/v2.4.x/site/zh/integrations/integrate_with_vanna.json b/localization/v2.4.x/site/zh/integrations/integrate_with_vanna.json
    index 339b850cb..564976b34 100644
    --- a/localization/v2.4.x/site/zh/integrations/integrate_with_vanna.json
    +++ b/localization/v2.4.x/site/zh/integrations/integrate_with_vanna.json
    @@ -1 +1 @@
    -{"codeList":["$ pip install \"vanna[milvus,openai]\"\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","from pymilvus import MilvusClient, model\nfrom vanna.milvus import Milvus_VectorStore\nfrom vanna.openai import OpenAI_Chat\n\n\nclass VannaMilvus(Milvus_VectorStore, OpenAI_Chat):\n    def __init__(self, config=None):\n        Milvus_VectorStore.__init__(self, config=config)\n        OpenAI_Chat.__init__(self, config=config)\n","milvus_uri = \"./milvus_vanna.db\"\n\nmilvus_client = MilvusClient(uri=milvus_uri)\n\nvn_milvus = VannaMilvus(\n    config={\n        \"api_key\": os.getenv(\"OPENAI_API_KEY\"),\n        \"model\": \"gpt-3.5-turbo\",\n        \"milvus_client\": milvus_client,\n        \"embedding_function\": model.DefaultEmbeddingFunction(),\n        \"n_results\": 2,  # The number of results to return from Milvus semantic search.\n    }\n)\n","import sqlite3\n\nsqlite_path = \"./my-database.sqlite\"\nsql_connect = sqlite3.connect(sqlite_path)\nc = sql_connect.cursor()\n\ninit_sqls = \"\"\"\nCREATE TABLE IF NOT EXISTS Customer (\n    ID INTEGER PRIMARY KEY AUTOINCREMENT,\n    Name TEXT NOT NULL,\n    Company TEXT NOT NULL,\n    City TEXT NOT NULL,\n    Phone TEXT NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS Company (\n    ID INTEGER PRIMARY KEY AUTOINCREMENT,\n    Name TEXT NOT NULL,\n    Industry TEXT NOT NULL,\n    Location TEXT NOT NULL,\n    EmployeeCount INTEGER NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS User (\n    ID INTEGER PRIMARY KEY AUTOINCREMENT,\n    Username TEXT NOT NULL UNIQUE,\n    Email TEXT NOT NULL UNIQUE\n);\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('John Doe', 'ABC Corp', 'New York', '123-456-7890');\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('Jane Smith', 'XYZ Inc', 'Los Angeles', '098-765-4321');\n\nINSERT INTO Company (Name, Industry, Location, EmployeeCount)\nVALUES ('ABC Corp', 'cutting-edge technology', 'New York', 100);\n\nINSERT INTO User (Username, Email)\nVALUES ('johndoe123', 'johndoe123@example.com');\n\"\"\"\n\nfor sql in init_sqls.split(\";\"):\n    c.execute(sql)\n\nsql_connect.commit()\n\n# Connect to the SQLite database\nvn_milvus.connect_to_sqlite(sqlite_path)\n","# If there exists training data, we should remove it before training.\nexisting_training_data = vn_milvus.get_training_data()\nif len(existing_training_data) > 0:\n    for _, training_data in existing_training_data.iterrows():\n        vn_milvus.remove_training_data(training_data[\"id\"])\n\n# Get the DDL of the SQLite database\ndf_ddl = vn_milvus.run_sql(\"SELECT type, sql FROM sqlite_master WHERE sql is not null\")\n\n# Train the model on the DDL data\nfor ddl in df_ddl[\"sql\"].to_list():\n    vn_milvus.train(ddl=ddl)\n","# Add documentation about your business terminology or definitions.\nvn_milvus.train(\n    documentation=\"ABC Corp specializes in cutting-edge technology solutions and innovation.\"\n)\nvn_milvus.train(\n    documentation=\"XYZ Inc is a global leader in manufacturing and supply chain management.\"\n)\n\n# You can also add SQL queries to your training data.\nvn_milvus.train(sql=\"SELECT * FROM Customer WHERE Name = 'John Doe'\")\n","training_data = vn_milvus.get_training_data()\ntraining_data\n","sql = vn_milvus.generate_sql(\"what is the phone number of John Doe?\")\nvn_milvus.run_sql(sql)\n","sql = vn_milvus.generate_sql(\"which customer works for a manufacturing corporation?\")\nvn_milvus.run_sql(sql)\n","sql_connect.close()\nmilvus_client.close()\n\nos.remove(sqlite_path)\nif 
os.path.exists(milvus_uri):\n    os.remove(milvus_uri)\n"],"headingContent":"","anchorList":[{"label":"使用 Vanna 和 Milvus 编写 SQL","href":"Write-SQL-with-Vanna-and-Milvus","type":1,"isActive":false},{"label":"前提条件","href":"Prerequisites","type":2,"isActive":false},{"label":"数据准备","href":"Data-preparation","type":2,"isActive":false},{"label":"使用数据进行训练","href":"Train-with-data","type":2,"isActive":false},{"label":"生成并执行 SQL","href":"Generate-SQLs-and-execute-them","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ pip install \"vanna[milvus,openai]\"\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","from pymilvus import MilvusClient, model\nfrom vanna.milvus import Milvus_VectorStore\nfrom vanna.openai import OpenAI_Chat\n\n\nclass VannaMilvus(Milvus_VectorStore, OpenAI_Chat):\n    def __init__(self, config=None):\n        Milvus_VectorStore.__init__(self, config=config)\n        OpenAI_Chat.__init__(self, config=config)\n","milvus_uri = \"./milvus_vanna.db\"\n\nmilvus_client = MilvusClient(uri=milvus_uri)\n\nvn_milvus = VannaMilvus(\n    config={\n        \"api_key\": os.getenv(\"OPENAI_API_KEY\"),\n        \"model\": \"gpt-3.5-turbo\",\n        \"milvus_client\": milvus_client,\n        \"embedding_function\": model.DefaultEmbeddingFunction(),\n        \"n_results\": 2,  # The number of results to return from Milvus semantic search.\n    }\n)\n","import sqlite3\n\nsqlite_path = \"./my-database.sqlite\"\nsql_connect = sqlite3.connect(sqlite_path)\nc = sql_connect.cursor()\n\ninit_sqls = \"\"\"\nCREATE TABLE IF NOT EXISTS Customer (\n    ID INTEGER PRIMARY KEY AUTOINCREMENT,\n    Name TEXT NOT NULL,\n    Company TEXT NOT NULL,\n    City TEXT NOT NULL,\n    Phone TEXT NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS Company (\n    ID INTEGER PRIMARY KEY AUTOINCREMENT,\n    Name TEXT NOT NULL,\n    Industry TEXT NOT NULL,\n    Location TEXT NOT NULL,\n    EmployeeCount INTEGER NOT NULL\n);\n\nCREATE TABLE IF NOT EXISTS User (\n    ID INTEGER PRIMARY KEY AUTOINCREMENT,\n    Username TEXT NOT NULL UNIQUE,\n    Email TEXT NOT NULL UNIQUE\n);\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('John Doe', 'ABC Corp', 'New York', '123-456-7890');\n\nINSERT INTO Customer (Name, Company, City, Phone) \nVALUES ('Jane Smith', 'XYZ Inc', 'Los Angeles', '098-765-4321');\n\nINSERT INTO Company (Name, Industry, Location, EmployeeCount)\nVALUES ('ABC Corp', 'cutting-edge technology', 'New York', 100);\n\nINSERT INTO User (Username, Email)\nVALUES ('johndoe123', 'johndoe123@example.com');\n\"\"\"\n\nfor sql in init_sqls.split(\";\"):\n    c.execute(sql)\n\nsql_connect.commit()\n\n# Connect to the SQLite database\nvn_milvus.connect_to_sqlite(sqlite_path)\n","# If there exists training data, we should remove it before training.\nexisting_training_data = vn_milvus.get_training_data()\nif len(existing_training_data) > 0:\n    for _, training_data in existing_training_data.iterrows():\n        vn_milvus.remove_training_data(training_data[\"id\"])\n\n# Get the DDL of the SQLite database\ndf_ddl = vn_milvus.run_sql(\"SELECT type, sql FROM sqlite_master WHERE sql is not null\")\n\n# Train the model on the DDL data\nfor ddl in df_ddl[\"sql\"].to_list():\n    vn_milvus.train(ddl=ddl)\n","# Add documentation about your business terminology or definitions.\nvn_milvus.train(\n    documentation=\"ABC Corp specializes in cutting-edge technology solutions and innovation.\"\n)\nvn_milvus.train(\n    documentation=\"XYZ Inc is a global leader in manufacturing and supply chain management.\"\n)\n\n# You can also add SQL queries to your training data.\nvn_milvus.train(sql=\"SELECT * FROM Customer WHERE Name = 'John Doe'\")\n","training_data = vn_milvus.get_training_data()\ntraining_data\n","sql = vn_milvus.generate_sql(\"what is the phone number of John Doe?\")\nvn_milvus.run_sql(sql)\n","sql = vn_milvus.generate_sql(\"which customer works for a manufacturing corporation?\")\nvn_milvus.run_sql(sql)\n","sql_connect.close()\nmilvus_client.close()\n\nos.remove(sqlite_path)\nif 
os.path.exists(milvus_uri):\n    os.remove(milvus_uri)\n"],"headingContent":"Write SQL with Vanna and Milvus","anchorList":[{"label":"用 Vanna 和 Milvus 编写 SQL","href":"Write-SQL-with-Vanna-and-Milvus","type":1,"isActive":false},{"label":"前提条件","href":"Prerequisites","type":2,"isActive":false},{"label":"数据准备","href":"Data-preparation","type":2,"isActive":false},{"label":"使用数据进行训练","href":"Train-with-data","type":2,"isActive":false},{"label":"生成并执行 SQL","href":"Generate-SQLs-and-execute-them","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/zh/integrations/integrate_with_vanna.md b/localization/v2.4.x/site/zh/integrations/integrate_with_vanna.md
    index d9292a65d..cb11f705a 100644
    --- a/localization/v2.4.x/site/zh/integrations/integrate_with_vanna.md
    +++ b/localization/v2.4.x/site/zh/integrations/integrate_with_vanna.md
    @@ -1,9 +1,9 @@
     ---
     id: integrate_with_vanna.md
     summary: 本指南演示如何使用 Vanna 根据存储在数据库中的数据生成并执行 SQL 查询。
    -title: 与 Vanna 和 Milvus 一起编写 SQL
    +title: 用 Vanna 和 Milvus 编写 SQL
     ---
Write SQL with Vanna and Milvus

Open In Colab | GitHub Repository

Vanna is an open-source Python RAG (Retrieval-Augmented Generation) framework for SQL generation and related functionality. Milvus is the world's most advanced open-source vector database, built to power embedding similarity search and AI applications.

Vanna works in two easy steps: train a RAG "model" on your data, and then ask questions that will return SQL queries that can be set up to run on your database. This guide demonstrates how to use Vanna to generate and execute SQL queries based on your data stored in a database. A minimal sketch of these two steps follows.
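Condensed from the full example later in this guide, a hedged sketch of the two-step workflow (it assumes the vn_milvus instance defined below is already connected to a database):

# Step 1: train the RAG "model" on your data (documentation, DDL, or SQL).
vn_milvus.train(documentation="ABC Corp specializes in cutting-edge technology solutions.")
vn_milvus.train(sql="SELECT * FROM Customer WHERE Name = 'John Doe'")

# Step 2: ask a question; Vanna returns SQL that can be run on your database.
sql = vn_milvus.generate_sql("what is the phone number of John Doe?")
vn_milvus.run_sql(sql)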

Prerequisites

First, we need to inherit from the Milvus_VectorStore and OpenAI_Chat classes of Vanna and define a new class, VannaMilvus, that combines the capabilities of both.

    from pymilvus import MilvusClient, model
     from vanna.milvus import Milvus_VectorStore
     from vanna.openai import OpenAI_Chat
    @@ -72,13 +74,13 @@ os.environ["OP
             Milvus_VectorStore.__init__(self, config=config)
             OpenAI_Chat.__init__(self, config=config)
     
We initialize the VannaMilvus class with the necessary configuration parameters. We use a milvus_client instance to store embeddings and the model.DefaultEmbeddingFunction() initialized from milvus_model to generate embeddings.

As for the argument of MilvusClient (a sketch of the three options follows this list):

• Setting the uri as a local file, e.g. ./milvus.db, is the most convenient method, as it automatically utilizes Milvus Lite to store all data in this file.
• If you have a large scale of data, you can set up a more performant Milvus server on Docker or Kubernetes. In this setup, please use the server uri, e.g. http://localhost:19530, as your uri.
• If you want to use Zilliz Cloud, the fully managed cloud service for Milvus, adjust the uri and token, which correspond to the Public Endpoint and API key in Zilliz Cloud.
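As a hedged illustration of these options (the local path matches this guide; the server address is an example, and the Zilliz Cloud values are hypothetical placeholders):

from pymilvus import MilvusClient

# Option 1: Milvus Lite stores everything in a local file (used in this guide).
milvus_client = MilvusClient(uri="./milvus_vanna.db")

# Option 2 (example address): a Milvus server running on Docker or Kubernetes.
# milvus_client = MilvusClient(uri="http://localhost:19530")

# Option 3 (hypothetical placeholders): Zilliz Cloud endpoint and API key.
# milvus_client = MilvusClient(uri="<public-endpoint>", token="<api-key>")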
    milvus_uri = "./milvus_vanna.db"
    @@ -355,7 +357,7 @@ WHERE Company = 'XYZ Inc'
       
     
     
Disconnect from SQLite and Milvus and remove them to free up resources.

    sql_connect.close()
     milvus_client.close()
     
    diff --git a/localization/v2.4.x/site/zh/integrations/integrate_with_voyageai.json b/localization/v2.4.x/site/zh/integrations/integrate_with_voyageai.json
    index 9ff9460c4..600a9ca30 100644
    --- a/localization/v2.4.x/site/zh/integrations/integrate_with_voyageai.json
    +++ b/localization/v2.4.x/site/zh/integrations/integrate_with_voyageai.json
    @@ -1 +1 @@
    -{"codeList":["$ pip install --upgrade voyageai pymilvus\n","import voyageai\nfrom pymilvus import MilvusClient\n\nMODEL_NAME = \"voyage-law-2\"  # Which model to use, please check https://docs.voyageai.com/docs/embeddings for available models\nDIMENSION = 1024  # Dimension of vector embedding\n\n# Connect to VoyageAI with API Key.\nvoyage_client = voyageai.Client(api_key=\"\")\n\ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = voyage_client.embed(texts=docs, model=MODEL_NAME, truncation=False).embeddings\n\n# Prepare data to be stored in Milvus vector database.\n# We can store the id, vector representation, raw text and labels such as \"subject\" in this case in Milvus.\ndata = [\n    {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n    for i in range(len(docs))\n]\n\n\n# Connect to Milvus, all data is stored in a local file named \"milvus_voyage_demo.db\"\n# in current directory. You can also connect to a remote Milvus server following this\n# instruction: https://milvus.io/docs/install_standalone-docker.md.\nmilvus_client = MilvusClient(uri=\"milvus_voyage_demo.db\")\nCOLLECTION_NAME = \"demo_collection\"  # Milvus collection name\n# Create a collection to store the vectors and text.\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\n# Insert all data into Milvus vector database.\nres = milvus_client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res[\"insert_count\"])\n","queries = [\"When was artificial intelligence founded?\"]\n\nquery_vectors = voyage_client.embed(\n    texts=queries, model=MODEL_NAME, truncation=False\n).embeddings\n\nres = milvus_client.search(\n    collection_name=COLLECTION_NAME,  # target collection\n    data=query_vectors,  # query vectors\n    limit=2,  # number of returned entities\n    output_fields=[\"text\", \"subject\"],  # specifies fields to be returned\n)\n\nfor q in queries:\n    print(\"Query:\", q)\n    for result in res:\n        print(result)\n    print(\"\\n\")\n"],"headingContent":"","anchorList":[{"label":"使用Milvus和VoyageAI进行语义搜索","href":"Semantic-Search-with-Milvus-and-VoyageAI","type":1,"isActive":false},{"label":"开始","href":"Getting-started","type":2,"isActive":false},{"label":"使用 VoyageAI 和 Milvus 搜索书名","href":"Searching-book-titles-with-VoyageAI--Milvus","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ pip install --upgrade voyageai pymilvus\n","import voyageai\nfrom pymilvus import MilvusClient\n\nMODEL_NAME = \"voyage-law-2\"  # Which model to use, please check https://docs.voyageai.com/docs/embeddings for available models\nDIMENSION = 1024  # Dimension of vector embedding\n\n# Connect to VoyageAI with API Key.\nvoyage_client = voyageai.Client(api_key=\"\")\n\ndocs = [\n    \"Artificial intelligence was founded as an academic discipline in 1956.\",\n    \"Alan Turing was the first person to conduct substantial research in AI.\",\n    \"Born in Maida Vale, London, Turing was raised in southern England.\",\n]\n\nvectors = voyage_client.embed(texts=docs, model=MODEL_NAME, truncation=False).embeddings\n\n# Prepare data to be stored in Milvus vector database.\n# We can store the id, vector representation, raw text and labels such as \"subject\" in this case in Milvus.\ndata = [\n    {\"id\": i, \"vector\": vectors[i], \"text\": docs[i], \"subject\": \"history\"}\n    for i in range(len(docs))\n]\n\n\n# Connect to Milvus, all data is stored in a local file named \"milvus_voyage_demo.db\"\n# in current directory. You can also connect to a remote Milvus server following this\n# instruction: https://milvus.io/docs/install_standalone-docker.md.\nmilvus_client = MilvusClient(uri=\"milvus_voyage_demo.db\")\nCOLLECTION_NAME = \"demo_collection\"  # Milvus collection name\n# Create a collection to store the vectors and text.\nif milvus_client.has_collection(collection_name=COLLECTION_NAME):\n    milvus_client.drop_collection(collection_name=COLLECTION_NAME)\nmilvus_client.create_collection(collection_name=COLLECTION_NAME, dimension=DIMENSION)\n\n# Insert all data into Milvus vector database.\nres = milvus_client.insert(collection_name=\"demo_collection\", data=data)\n\nprint(res[\"insert_count\"])\n","queries = [\"When was artificial intelligence founded?\"]\n\nquery_vectors = voyage_client.embed(\n    texts=queries, model=MODEL_NAME, truncation=False\n).embeddings\n\nres = milvus_client.search(\n    collection_name=COLLECTION_NAME,  # target collection\n    data=query_vectors,  # query vectors\n    limit=2,  # number of returned entities\n    output_fields=[\"text\", \"subject\"],  # specifies fields to be returned\n)\n\nfor q in queries:\n    print(\"Query:\", q)\n    for result in res:\n        print(result)\n    print(\"\\n\")\n"],"headingContent":"Semantic Search with Milvus and VoyageAI","anchorList":[{"label":"使用Milvus和VoyageAI进行语义搜索","href":"Semantic-Search-with-Milvus-and-VoyageAI","type":1,"isActive":false},{"label":"开始","href":"Getting-started","type":2,"isActive":false},{"label":"使用 VoyageAI 和 Milvus 搜索书名","href":"Searching-book-titles-with-VoyageAI--Milvus","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/zh/integrations/integrate_with_voyageai.md b/localization/v2.4.x/site/zh/integrations/integrate_with_voyageai.md
    index 5b1f48612..f10b46a5c 100644
    --- a/localization/v2.4.x/site/zh/integrations/integrate_with_voyageai.md
    +++ b/localization/v2.4.x/site/zh/integrations/integrate_with_voyageai.md
    @@ -1,7 +1,7 @@
     ---
     id: integrate_with_voyageai.md
    -title: 利用 Milvus 和 VoyageAI 进行语义搜索
    -summary: 本页讨论向量数据库与 VoyageAI 的嵌入式 API 的集成。
    +title: 使用Milvus和VoyageAI进行语义搜索
    +summary: 本页讨论向量数据库与 VoyageAI 的 Embeddings API 的集成。
     ---
     

Semantic Search with Milvus and VoyageAI

Open In Colab | GitHub Repository

This guide showcases how VoyageAI's Embedding API can be used with the Milvus vector database to conduct semantic search on text.

Getting started

Before you start, make sure you have the Voyage API key ready, or you can get one from the VoyageAI website.

The data used in this example are book titles. You can download the dataset here and put it in the same directory where you run the following code.

First, install the packages for Milvus and Voyage AI:

    $ pip install --upgrade voyageai pymilvus
     

If you are using Google Colab, to enable the dependencies just installed, you may need to restart the runtime. (Click on the "Runtime" menu at the top of the screen, and select "Restart session" from the dropdown menu.)

With this, we are ready to generate embeddings and use the vector database to conduct semantic search.
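Condensed from the full snippets in this guide, a minimal sketch of that flow (all names come from the code in this document; the API key must be filled in):

import voyageai
from pymilvus import MilvusClient

voyage_client = voyageai.Client(api_key="")  # set your Voyage API key
milvus_client = MilvusClient(uri="milvus_voyage_demo.db")

# Embed the query with the same model used for the stored documents...
query_vectors = voyage_client.embed(
    texts=["When was artificial intelligence founded?"],
    model="voyage-law-2",
    truncation=False,
).embeddings

# ...then let Milvus return the semantically closest stored texts.
res = milvus_client.search(
    collection_name="demo_collection",
    data=query_vectors,
    limit=2,
    output_fields=["text", "subject"],
)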

Searching book titles with VoyageAI & Milvus

Open In Colab | GitHub Repository

This guide demonstrates how to build a Retrieval-Augmented Generation (RAG) system using LangChain and Milvus.

The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

LangChain is a framework for developing applications powered by large language models (LLMs). Milvus is the world's most advanced open-source vector database, built to power embedding similarity search and AI applications.
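Before the LangChain chain is assembled below, a hedged sketch of the retrieve-then-generate idea using pymilvus and OpenAI directly (the collection name and question are assumptions for illustration; the embedding helper mirrors the RAG tutorial later in this document):

from openai import OpenAI
from pymilvus import MilvusClient

openai_client = OpenAI()
milvus_client = MilvusClient(uri="./milvus_demo.db")

def emb_text(text):
    # Embed with OpenAI, as done elsewhere in this document.
    return (
        openai_client.embeddings.create(input=text, model="text-embedding-3-small")
        .data[0]
        .embedding
    )

question = "What is self-reflection of an AI agent?"  # assumed example question

# Step 1: retrieve the most relevant chunks from Milvus.
hits = milvus_client.search(
    collection_name="my_rag_collection",  # assumed to exist and be populated
    data=[emb_text(question)],
    limit=3,
    output_fields=["text"],
)
context = "\n".join(hit["entity"]["text"] for hit in hits[0])

# Step 2: generate an answer grounded in the retrieved context.
answer = openai_client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": f"Context:\n{context}\n\nQuestion: {question}"}],
)
print(answer.choices[0].message.content)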

Prerequisites

    Document(page_content='Fig. 1. Overview of a LLM-powered autonomous agent system.\nComponent One: Planning#\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\nTask Decomposition#\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\nTask decomposition can be done (1) by LLM with simple prompting like "Steps for XYZ.\\n1.", "What are the subgoals for achieving XYZ?", (2) by using task-specific instructions; e.g. "Write a story outline." for writing a novel, or (3) with human inputs.\nAnother quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\nSelf-Reflection#', metadata={'source': 'https://lilianweng.github.io/posts/2023-06-23-agent/'})
     
As we can see, the document has already been split into chunks. And the content of the data is about AI agents.

Build RAG chain with Milvus Vector Store

    "Self-reflection of an AI agent involves the process of synthesizing memories into higher-level inferences over time to guide the agent's future behavior. It serves as a mechanism to create higher-level summaries of past events. One approach to self-reflection involves prompting the language model with the 100 most recent observations and asking it to generate the 3 most salient high-level questions based on those observations. This process helps the AI agent optimize believability in the current moment and over time."
     
Congratulations! You have built a basic RAG chain powered by Milvus and LangChain.

Metadata filtering

Built on top of popular vector search libraries including Faiss, HNSW, DiskANN, and SCANN, Milvus was designed for similarity search on dense vector datasets containing millions, billions, or even trillions of vectors. Before proceeding, familiarize yourself with the basic principles of embedding retrieval.

Milvus also supports data sharding, streaming data ingestion, dynamic schema, search combining vector and scalar data, multi-vector and hybrid search, sparse vectors, and many other advanced functions. The platform offers performance on demand and can be optimized to fit any embedding retrieval scenario. We recommend deploying Milvus using Kubernetes for optimal availability and elasticity.

Milvus adopts a shared-storage architecture featuring storage and computing disaggregation and horizontal scalability for its computing nodes. Following the principle of data plane and control plane disaggregation, Milvus comprises four layers: access layer, coordinator service, worker node, and storage. These layers are mutually independent when it comes to scaling or disaster recovery.

Architecture diagram

According to the figure, the interfaces can be classified into the following categories (a hedged mapping to client calls follows the list):

• DDL / DCL: createCollection / createPartition / dropCollection / dropPartition / hasCollection / hasPartition
• DML / Produce: insert / delete / upsert
• DQL: search / query
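A sketch of how those categories map onto pymilvus MilvusClient calls (the collection name and toy vectors are hypothetical; the pairing is an illustration, not an exhaustive API list):

from pymilvus import MilvusClient

client = MilvusClient(uri="./milvus.db")

# DDL: create a collection.
client.create_collection(collection_name="demo", dimension=8)

# DML / Produce: insert an entity.
client.insert(collection_name="demo", data=[{"id": 0, "vector": [0.0] * 8}])

# DQL: search for nearest neighbors.
client.search(collection_name="demo", data=[[0.1] * 8], limit=1)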

What's next

    diff --git a/localization/v2.4.x/site/zh/reference/disk_index.json b/localization/v2.4.x/site/zh/reference/disk_index.json index cead4c342..4f9248315 100644 --- a/localization/v2.4.x/site/zh/reference/disk_index.json +++ b/localization/v2.4.x/site/zh/reference/disk_index.json @@ -1 +1 @@ -{"codeList":["...\nDiskIndex:\n MaxDegree: 56\n SearchListSize: 100\n PQCodeBugetGBRatio: 0.125\n SearchCacheBudgetGBRatio: 0.125\n BeamWidthRatio: 4.0\n...\n"],"headingContent":"","anchorList":[{"label":"盘上索引","href":"On-disk-Index","type":1,"isActive":false},{"label":"前提条件","href":"Prerequisites","type":2,"isActive":false},{"label":"限制","href":"Limits","type":2,"isActive":false},{"label":"索引和搜索设置","href":"Index-and-search-settings","type":2,"isActive":false},{"label":"与 DiskANN 相关的 Milvus 配置","href":"DiskANN-related-Milvus-configurations","type":2,"isActive":false},{"label":"故障排除","href":"Troubleshooting","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["...\nDiskIndex:\n MaxDegree: 56\n SearchListSize: 100\n PQCodeBugetGBRatio: 0.125\n SearchCacheBudgetGBRatio: 0.125\n BeamWidthRatio: 4.0\n...\n"],"headingContent":"On-disk Index","anchorList":[{"label":"盘上索引","href":"On-disk-Index","type":1,"isActive":false},{"label":"前提条件","href":"Prerequisites","type":2,"isActive":false},{"label":"限制","href":"Limits","type":2,"isActive":false},{"label":"索引和搜索设置","href":"Index-and-search-settings","type":2,"isActive":false},{"label":"与 DiskANN 相关的 Milvus 配置","href":"DiskANN-related-Milvus-configurations","type":2,"isActive":false},{"label":"故障排除","href":"Troubleshooting","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/reference/disk_index.md b/localization/v2.4.x/site/zh/reference/disk_index.md index 9b62bb09c..f62e927da 100644 --- a/localization/v2.4.x/site/zh/reference/disk_index.md +++ b/localization/v2.4.x/site/zh/reference/disk_index.md @@ -2,7 +2,7 @@ id: disk_index.md related_key: disk_index summary: Milvus 的磁盘索引机制。 -title: 磁盘索引 +title: 盘上索引 ---

On-disk Index

To use DiskANN, note that:

• DiskANN is enabled by default. If you prefer in-memory index over on-disk index, you are advised to disable this feature for better performance; a minimal config sketch follows this list.
  • To disable it, change queryNode.enableDisk to false in your Milvus configuration file.
  • To enable it again, set queryNode.enableDisk to true.
• The Milvus instance runs on Ubuntu 18.04.6 or a later release.
• The Milvus data path should be mounted to an NVMe SSD for full performance:
  • For a Milvus Standalone instance, the data path should be /var/lib/milvus/data in the container where the instance runs.
  • For a Milvus cluster instance, the data path should be /var/lib/milvus/data in the containers where the query nodes and index nodes run.
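A minimal milvus.yaml sketch of that toggle (only the key named above is from this document; the surrounding file structure is assumed):

queryNode:
  enableDisk: false  # set back to true to re-enable DiskANN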
    @@ -68,7 +68,7 @@ title: 磁盘索引

To use DiskANN, make sure that you:

• Use only float vectors with at least 1 dimension in your data.
• Use only Euclidean distance (L2), inner product (IP), or COSINE to measure the distance between vectors.

Index and search settings

• How do I deal with the io_setup() failed; returned -11, errno=11:Resource temporarily unavailable error?

  The Linux kernel provides the Asynchronous non-blocking I/O (AIO) feature that allows a process to initiate multiple I/O operations simultaneously without having to wait for any of them to complete. This helps boost performance for applications that can overlap processing and I/O.

  Performance can be tuned using the /proc/sys/fs/aio-max-nr virtual file in the proc file system. The aio-max-nr parameter determines the maximum number of allowable concurrent requests.

  aio-max-nr defaults to 65535, and it can be set as high as 10485760.
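A hedged sketch of raising that limit at runtime (requires root privileges; the path and value are exactly the ones named above):

# Raise the AIO concurrent-request limit from the 65535 default.
with open("/proc/sys/fs/aio-max-nr", "w") as f:
    f.write("10485760")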

    diff --git a/localization/v2.4.x/site/zh/reference/replica.json b/localization/v2.4.x/site/zh/reference/replica.json index 0f82e85c4..6d179607a 100644 --- a/localization/v2.4.x/site/zh/reference/replica.json +++ b/localization/v2.4.x/site/zh/reference/replica.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"","anchorList":[{"label":"内存中复制","href":"In-Memory-Replica","type":1,"isActive":false},{"label":"概述","href":"Overview","type":2,"isActive":false},{"label":"关键概念","href":"Key-Concepts","type":2,"isActive":false},{"label":"设计细节","href":"Design-Details","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"In-Memory Replica","anchorList":[{"label":"内存内复制","href":"In-Memory-Replica","type":1,"isActive":false},{"label":"概述","href":"Overview","type":2,"isActive":false},{"label":"关键概念","href":"Key-Concepts","type":2,"isActive":false},{"label":"设计细节","href":"Design-Details","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/reference/replica.md b/localization/v2.4.x/site/zh/reference/replica.md index d21c78cdc..00a77796e 100644 --- a/localization/v2.4.x/site/zh/reference/replica.md +++ b/localization/v2.4.x/site/zh/reference/replica.md @@ -3,7 +3,7 @@ id: replica.md summary: 了解 Milvus 中的内存复制。 title: 内存内复制 --- -

In-Memory Replica

This topic introduces the in-memory replica (replication) mechanism in Milvus, which enables multiple segment replications in the working memory to improve performance and availability.

For information on how to configure in-memory replication, refer to Query node-related configurations; a hedged loading sketch follows below.
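As a sketch (assuming the pymilvus ORM and a running Milvus server; the collection name is hypothetical), loading a collection with multiple in-memory replicas looks like this:

from pymilvus import connections, Collection

connections.connect(uri="http://localhost:19530")  # assumed server address

# Load the collection into two replica groups.
collection = Collection("my_collection")
collection.load(replica_number=2)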

Overview

In-memory replicas are organized as replica groups. Each replica group contains shard replicas. Each shard replica has a streaming replica and a historical replica that correspond to the growing and sealed segments in the shard (i.e. DML channel).

An illustration of how in-memory replica works

Replica group

A replica group consists of multiple query nodes that are responsible for handling historical data and replicas.

Shard replica

A shard replica consists of a streaming replica and a historical replica, both belonging to the same shard. The number of shard replicas in a replica group is determined by the number of shards in a specified collection.

Streaming replica

A streaming replica contains all the growing segments from the same DML channel. Technically speaking, a streaming replica should be served by only one query node in one replica.

Historical replica

A historical replica contains all the sealed segments from the same DML channel. The sealed segments of one historical replica can be distributed on several query nodes within the same replica group.

Shard leader

A shard leader is the query node serving the streaming replica in a shard replica.

    @@ -84,8 +84,8 @@ title: 内存内复制

Balance

A new segment that needs to be loaded will be allocated to multiple different query nodes. A search request can be processed once at least one replica is loaded successfully.

Cache

The proxy maintains a cache that maps segments to query nodes and updates it periodically. When the proxy receives a request, Milvus gets all the sealed segments that need to be searched from the cache and tries to assign them to query nodes evenly.

For growing segments, the proxy also maintains a channel-to-query-node cache and sends requests to the corresponding query nodes.

Failover

The caches on the proxy are not always up to date. Some segments or channels may have been moved to other query nodes when a request comes in. In this case, the proxy will receive an error response, update the cache, and try to assign the work to another query node.

A segment will be ignored if the proxy still cannot find it after updating the cache. This can happen if the segment has been compacted.

If the cache is not accurate, the proxy may miss some segments. Query nodes with DML channels (growing segments) return search responses together with a list of reliable segments that the proxy can compare with and use to update the cache.

Enhancement

The proxy cannot allocate search requests to query nodes completely equally, and query nodes may have different resources to serve search requests. To avoid a long-tailed distribution of resources, the proxy will assign active segments on other query nodes to an idle query node that also holds these segments.

    diff --git a/localization/v2.4.x/site/zh/release_notes.json b/localization/v2.4.x/site/zh/release_notes.json index 0a720b2c4..bdfbf7c63 100644 --- a/localization/v2.4.x/site/zh/release_notes.json +++ b/localization/v2.4.x/site/zh/release_notes.json @@ -1 +1 @@ -{"codeList":[],"headingContent":"Release Notes","anchorList":[{"label":"发布说明","href":"Release-Notes","type":1,"isActive":false},{"label":"v2.4.11","href":"v2411","type":2,"isActive":false},{"label":"v2.4.10","href":"v2410","type":2,"isActive":false},{"label":"v2.4.9","href":"v249","type":2,"isActive":false},{"label":"v2.4.8","href":"v248","type":2,"isActive":false},{"label":"v2.4.6","href":"v246","type":2,"isActive":false},{"label":"v2.4.5","href":"v245","type":2,"isActive":false},{"label":"v2.4.4","href":"v244","type":2,"isActive":false},{"label":"v2.4.3","href":"v243","type":2,"isActive":false},{"label":"v2.4.1","href":"v241","type":2,"isActive":false},{"label":"v2.4.0","href":"v240","type":2,"isActive":false},{"label":"版本 2.4.0-rc.1","href":"v240-rc1","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":[],"headingContent":"Release Notes","anchorList":[{"label":"发布说明","href":"Release-Notes","type":1,"isActive":false},{"label":"v2.4.13 热修复","href":"v2413-hotfix","type":2,"isActive":false},{"label":"[已废弃] v2.4.13","href":"Deprecated-v2413","type":2,"isActive":false},{"label":"v2.4.12","href":"v2412","type":2,"isActive":false},{"label":"v2.4.11","href":"v2411","type":2,"isActive":false},{"label":"v2.4.10","href":"v2410","type":2,"isActive":false},{"label":"v2.4.9","href":"v249","type":2,"isActive":false},{"label":"v2.4.8","href":"v248","type":2,"isActive":false},{"label":"v2.4.6","href":"v246","type":2,"isActive":false},{"label":"v2.4.5","href":"v245","type":2,"isActive":false},{"label":"v2.4.4","href":"v244","type":2,"isActive":false},{"label":"v2.4.3","href":"v243","type":2,"isActive":false},{"label":"v2.4.1","href":"v241","type":2,"isActive":false},{"label":"v2.4.0","href":"v240","type":2,"isActive":false},{"label":"版本 2.4.0-rc.1","href":"v240-rc1","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/release_notes.md b/localization/v2.4.x/site/zh/release_notes.md index 6f7d5c2d8..a3bd98c8f 100644 --- a/localization/v2.4.x/site/zh/release_notes.md +++ b/localization/v2.4.x/site/zh/release_notes.md @@ -18,7 +18,164 @@ title: 发布说明 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

Check out what's new in Milvus! This page summarizes the new features, improvements, known issues, and bug fixes in each release. You can find the release notes for each version released after v2.4.0 in this section. We suggest that you visit this page regularly to learn about updates.

v2.4.13 Hotfix

Release date: October 17, 2024

| Milvus version | Python SDK version | Java SDK version | Node.js SDK version |
|----------------|--------------------|------------------|---------------------|
| 2.4.13-hotfix  | 2.4.8              | 2.4.5            | 2.4.9               |

Milvus v2.4.13-hotfix addresses a critical issue specific to v2.4.13, where Milvus may fail to retrieve collection information after a restart if all MetaKV snapshots were garbage-collected (#36933). Users currently running v2.4.13 are advised to upgrade to v2.4.13-hotfix at their earliest convenience to avoid potential disruptions.

Critical fix

• Load the original key if the timestamp is MaxTimestamp (#36935)

[Deprecated] v2.4.13

Release date: October 12, 2024

| Milvus version | Python SDK version | Java SDK version | Node.js SDK version |
|----------------|--------------------|------------------|---------------------|
| 2.4.13         | 2.4.8              | 2.4.5            | 2.4.9               |

Milvus 2.4.13 introduces dynamic replica load, allowing users to adjust the number of collection replicas without needing to release and reload the collection. This version also addresses several critical bugs related to bulk importing, expression parsing, load balancing, and failure recovery. Additionally, significant improvements have been made to MMAP resource usage and import performance, enhancing overall system efficiency. We highly recommend upgrading to this release for better performance and stability.

Features

• Dynamic replica adjustment for loaded collections (#36417)
• Sparse vector MMAP in growing segment types (#36565)

Bug fixes

• Fixed a flush performance issue (#36741)
• Fixed a bug with JSON expressions in "[]" (#36722)
• Removed neighbors if the compaction target is unindexed (#36694)
• Improved Rocksmq performance when the channel is full (#36618)
• Fixed an issue where errors were not deferred during unpinning (#36665)
• Resolved a memory leak for imported segments in the segment manager (#36631)
• Skipped unnecessary health checks for query nodes in the proxy (#36553)
• Fixed an overflow issue with term expressions (#36534)
• Recorded the node ID before assigning tasks to prevent task mis-assignment (#36493)
• Resolved a data race issue in clustering compaction (#36499)
• Added a check on the maximum length of string arrays after type matching (#36497)
• Resolved race conditions in mixed or standalone mode (#36459)
• Fixed segment imbalance after repeated load and release operations (#36543)
• Fixed a corner case where segments could not be moved from a stopping node (#36475)
• Updated segment info properly even if some segments were missing (#36729)
• Prevented etcd transactions from exceeding the maximum limit in snapshot KV (#36773)

Improvements

• Enhanced MMAP resource estimation:
  • Improved MMAP-related code in column.h (#36521)
  • Refined resource estimation when loading collections (#36728)
• Performance enhancements:
  • Improved expression parsing efficiency by converting Unicode to ASCII (#36676)
  • Enabled parallel production of messages for multiple topics (#36462)
  • Reduced CPU overhead when computing index file sizes (#36580)
  • Read message types from the message header to minimize decoding (#36454)
  • Optimized the workload-based replica selection policy (#36384)
• Split delete task messages to fit within the maximum message size limit (#36574)
• Added a new RESTful URL to describe import jobs (#36754)
• Optimized import scheduling and added a time-cost metric (#36684)
• Added a balance report log for the query coordinator balancer (#36749)
• Switched to the common GC configuration (#36670)
• Added a streaming forward policy switch for the delegator (#36712)
• Enabled manual compaction for collections without indexes (#36581)
• Enabled load balancing on query nodes with varying memory capacities (#36625)
• Unified the case of inbound labels using metrics.label (#36616)
• Made transfer channel/segment operations idempotent (#36552)
• Added metrics to monitor import throughput and imported row counts (#36588)
• Prevented the creation of multiple timer objects in the target (#36573)
• Updated the expression version and the HTTP response format for expressions (#36467)
• Enhanced garbage collection in snapshot KV (#36793)
• Added support for executing methods with context parameters (#36798)

v2.4.12

Release date: September 26, 2024

| Milvus version | Python SDK version | Java SDK version | Node.js SDK version |
|----------------|--------------------|------------------|---------------------|
| 2.4.12         | 2.4.7              | 2.4.4            | 2.4.9               |

Milvus 2.4.12 introduces significant enhancements and critical bug fixes. This version resolves data duplication issues and improves failure recovery speed, particularly when handling large numbers of deletions. However, a known issue persists where failure recovery can be slow when deleting large amounts of data. We are actively working on resolving this issue.

Improvements

• Implemented graceful stop for the flowgraph manager (#36358)
• Disabled index checks for unloaded vector fields (#36280)
• Filtered out missed delete records during delta loading (#36272)
• Improved error handling for std::stoi exceptions (#36296)
• Disallowed keywords as field names or dynamic field names (#36108)
• Added metrics for delete entries in L0 segments (#36227)
• Implemented an L0 forwarding policy to support remote loading (#36208)
• Added an ANN field load check in the proxy (#36194)
• Enabled empty sparse row support (#36061)
• Fixed a security vulnerability (#36156)
• Implemented a stats handler for request/response size metrics (#36118)
• Fixed size estimation for encoded array data (#36379)

Bug fixes

• Resolved a metric type error for collections with two vector fields (#36473)
• Fixed long buffering issues causing message queue receive failures (#36425)
• Implemented proper compacted-to-segments returns after split support (#36429)
• Resolved data race issues with the node ID checker (#36377)
• Removed element type checks (#36324)
• Fixed concurrent access issues for growing and sealed segments (#36288)
• Implemented a future stateful lock (#36333)
• Corrected offset usage in HybridSearch (#36287, #36253)
• Resolved dirty segment/channel leaks on query nodes (#36259)
• Fixed duplicate primary key handling (#36274)
• Enforced metric type settings in search requests (#36279)
• Fixed the stored index file size metric cleanup issue (#36161)
• Fixed read-write privilege group behavior for global API access (#36145)

v2.4.11

• Resolved Trie index range operation issues for GreaterThan and GreaterThanEqual comparisons (#36126)
• Corrected the usage of marisa_label_order in Trie index construction (#36060)
• Enhanced value checks for trie.predictive_search (#35999)
• Enabled binary arithmetic expression support in the inverted index (#36097)
• Fixed a segment fault caused by Skipindex (#35908)
• Resolved a memory leak in the proxy meta cache (#36076)
@@ -228,10 +385,10 @@ title: 发布说明
• Integrated Sonic into the Gin and RestfulV1 handlers to minimize CPU overhead during JSON marshaling and unmarshaling. (#35018)
• Introduced an in-memory cache to optimize verification result retrieval. (#35272)
• Modified the default metric type for autoindex. [#34277, #34479]
• Refactored the runtime memory format for variable columns, reducing memory usage. [#34367, #35012, #35041]
• Refactored the compaction process to enable persistent data storage. (#34268)
• Enabled memory-mapped file support for growing segments, improving memory management. (#34110)
• Improved access logs by adding RESTful API support, logging consistency levels, and distinguishing between system and user errors. [#34295, #34352, #34396]
• Used the new range_search_k parameter in Knowhere to speed up range searches. (#34709)
• Applied blocking Bloom filters to improve the speed of filter construction and queries. [#34377, #34922]
• Memory usage improvements:
@@ -298,7 +455,7 @@ title: 发布说明
• Fixed load idempotency by removing unnecessary index parameter validation. (#35179)
• Ensured compressBinlog executes after DataCoord restarts so that reloadFromKV can correctly fill the binlog's logID. (#34062)
• Fixed an issue where collection metadata was not removed after DataCoord garbage collection. (#34884)
• Resolved a memory leak in DataCoord's SegmentManager by removing flushed segments generated through imports. (#34651)
• Fixed a panic when compaction was disabled and a collection was dropped. (#34206)
• Fixed an out-of-memory issue in DataNode by enhancing the memory usage estimation algorithm. (#34203)
• Prevented bursty memory usage when multiple vector retrieval requests hit a cache miss by implementing singleflight for the chunk cache. (#34283)
@@ -353,7 +510,7 @@ title: 发布说明
• Fixed an issue where queries might return deleted records (#34502)
• Resolved an issue where IndexNode got stuck during stopping due to incorrect lifetime control (#34559)
• Fixed a memory leak of primary key oracle objects when a worker went offline (#34020)
• Fixed ChannelManagerImplV2 to notify the correct node, resolving parameter capture issues in loop closures (#34004)
• Fixed a read-write data race in ImportTask segmentsInfo by implementing a deep copy (#34126)
• Corrected the version information for the "legacyVersionWithoutRPCWatch" config option to prevent errors during rolling upgrades (#34185)
• Fixed the metric for the number of loaded partitions (#34195)
@@ -433,10 +590,10 @@ title: 发布说明
• Prevented possible data loss during deletion (#33821)
• Ensured the DroppedAt field (likely deletion timestamp) is set after a collection is dropped (#33767)
• Fixed an issue that might have caused Milvus to mishandle binary vector data sizes (#33751)
• Prevented sensitive Kafka credentials from being logged in plain text (#33694, #33747)
• Ensured Milvus can correctly import data with multiple vector fields (#33724)
• Enhanced import reliability by checking whether an import job exists before starting (#33673)
• Improved handling of the sparse HNSW index (internal feature) (#33714)
• Cleaned up vector memory to avoid memory leaks (#33708)
• Ensured smoother asynchronous warmup by fixing a state lock issue (#33687)
• Resolved a bug that might have caused query iterators to miss results (#33506)
@@ -459,10 +616,10 @@ title: 发布说明
• Prevented possible data loss during deletion (#33821)
• Ensured the DroppedAt field (likely deletion timestamp) is set after a collection is dropped (#33767)
• Fixed an issue that might have caused Milvus to mishandle binary vector data sizes (#33751)
• Prevented sensitive Kafka credentials from being logged in plain text (#33694, #33747)
• Ensured Milvus can correctly import data with multiple vector fields (#33724)
• Enhanced import reliability by checking whether an import job exists before starting (#33673)
• Improved handling of the sparse HNSW index (internal feature) (#33714)
• Cleaned up vector memory to avoid memory leaks (#33708)
• Ensured smoother asynchronous warmup by fixing a state lock issue (#33687)
• Resolved a bug that might have caused query iterators to miss results (#33506)
@@ -502,7 +659,7 @@ title: 发布说明

| Milvus version | Python SDK version | Java SDK version | Node.js SDK version |
|----------------|--------------------|------------------|---------------------|
| 2.4.4          | 2.4.3              | 2.4.1            | 2.4.2               |

Milvus v2.4.4 includes several critical bug fixes and improvements aimed at enhancing performance and stability. Notably, we resolved a critical issue where bulk-insert stats logs were incorrectly garbage-collected, potentially affecting data integrity. We strongly recommend that all v2.4 users upgrade to this version to benefit from these fixes.

If you are using the bulk-insert feature, please upgrade to v2.4.4 at your earliest opportunity to ensure data integrity.

Critical bug fixes

• Filled in the stats log ID and validated its correctness (#33478)
@@ -547,11 +704,11 @@ title: 发布说明
• Implemented Datacoord/node watch channels based on RPC (#32036)
• Optimized Bloom filters to accelerate delete filtering (#32642, #33329, #33284)
• Loaded raw data via mmap if a scalar index does not contain raw data (#33317)
• Synced the Milvus config to milvus.yaml (#33322, #32920, #32857, #32946)
• Updated the knowhere version (#33310, #32931, #33043)
• Enabled dynamic updating of the balance policy in QueryCoord (#33272)
• Used a pre-built logger in the write buffer to minimize logger allocation (#33304)
• Improved parameter checking (#32777, #33271, #33218)
• Added a parameter to ignore incorrect message IDs in checkpoints (#33249)
• Added a config to control plugin initialization failure handling (#32680)
• Added a score computation consistency config for knowhere (#32997)
@@ -567,7 +724,7 @@ title: 发布说明
• Enabled bulk upload (#32788)
• Changed the default partition number to 16 when using Partition Key (#32950)
• Improved reduce performance for very large top-k queries (#32871)
• Leveraged the TestLocations capability to speed up writes and compaction (#32948)
• Optimized the plan parser pool to avoid unnecessary recycling (#32869)
• Improved load speed (#32898)
• Used the collection default consistency level for restv2 (#32956)
@@ -585,7 +742,7 @@ title: 发布说明
• Updated the shard leader cache when the shard leader location changed (#32470)
• Removed deprecated APIs and fields (#32808, #32704)
• Added metautil.channel to convert string comparisons to int (#32749)
• Added type information to payload writer error messages and logs when querynode discovers new collections (#32522)
• Checked the partition number when creating a collection with a partition key (#32670)
• Removed legacy L0 segments if watch fails (#32725)
• Improved the printing of request types (#33319)
@@ -677,7 +834,7 @@ title: 发布说明
• Enhanced sparse float vectors to support brute-force iterator search and range search (#32635)

Improvements

• Added declarative resource group APIs (#31930, #32297, #32536, #32666)
• Rewrote the collection observer in QueryCoord to make it task-driven (#32441)
• Refactored the data structures used in DataNode's SyncManager to reduce memory usage and prevent errors (#32673)
• Revised the garbage collection implementation to minimize list operations associated with object storage (#31740)
@@ -694,7 +851,7 @@ title: 发布说明
• Added configuration options to control whether to forcibly activate the partitionKey feature (#32433)
• Added a configuration option to control the maximum amount of data that can be inserted in a single request (#32433)
• Parallelized the applyDelete operation at the segment level to speed up the processing of Delete messages by the Delegator (#32291)
• Used indexes (#32232, #32505, #32533, #32595) and added caching (#32580) to speed up frequent filtering operations in QueryCoord
• Rewrote data structures (#32273) and refactored code (#32389) to speed up common operations in DataCoord
• Removed openblas from Conan (#32002)
        @@ -739,14 +896,14 @@ title: 发布说明

Improvements

Bug fixes

        diff --git a/localization/v2.4.x/site/zh/tutorials/build-rag-with-milvus.json b/localization/v2.4.x/site/zh/tutorials/build-rag-with-milvus.json index bddf545c3..a2e06dc67 100644 --- a/localization/v2.4.x/site/zh/tutorials/build-rag-with-milvus.json +++ b/localization/v2.4.x/site/zh/tutorials/build-rag-with-milvus.json @@ -1 +1 @@ -{"codeList":["$ pip install --upgrade pymilvus openai requests tqdm\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","$ wget https://github.com/milvus-io/milvus-docs/releases/download/v2.4.6-preview/milvus_docs_2.4.x_en.zip\n$ unzip -q milvus_docs_2.4.x_en.zip -d milvus_docs\n","from glob import glob\n\ntext_lines = []\n\nfor file_path in glob(\"milvus_docs/en/faq/*.md\", recursive=True):\n with open(file_path, \"r\") as file:\n file_text = file.read()\n\n text_lines += file_text.split(\"# \")\n","from openai import OpenAI\n\nopenai_client = OpenAI()\n","def emb_text(text):\n return (\n openai_client.embeddings.create(input=text, model=\"text-embedding-3-small\")\n .data[0]\n .embedding\n )\n","test_embedding = emb_text(\"This is a test\")\nembedding_dim = len(test_embedding)\nprint(embedding_dim)\nprint(test_embedding[:10])\n","from pymilvus import MilvusClient\n\nmilvus_client = MilvusClient(uri=\"./milvus_demo.db\")\n\ncollection_name = \"my_rag_collection\"\n","if milvus_client.has_collection(collection_name):\n milvus_client.drop_collection(collection_name)\n","milvus_client.create_collection(\n collection_name=collection_name,\n dimension=embedding_dim,\n metric_type=\"IP\", # Inner product distance\n consistency_level=\"Strong\", # Strong consistency level\n)\n","from tqdm import tqdm\n\ndata = []\n\nfor i, line in enumerate(tqdm(text_lines, desc=\"Creating embeddings\")):\n data.append({\"id\": i, \"vector\": emb_text(line), \"text\": line})\n\nmilvus_client.insert(collection_name=collection_name, data=data)\n","question = \"How is data stored in milvus?\"\n","search_res = milvus_client.search(\n collection_name=collection_name,\n data=[\n emb_text(question)\n ], # Use the `emb_text` function to convert the question to an embedding vector\n limit=3, # Return top 3 results\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Inner product distance\n output_fields=[\"text\"], # Return the text field\n)\n","import json\n\nretrieved_lines_with_distances = [\n (res[\"entity\"][\"text\"], res[\"distance\"]) for res in search_res[0]\n]\nprint(json.dumps(retrieved_lines_with_distances, indent=4))\n","context = \"\\n\".join(\n [line_with_distance[0] for line_with_distance in retrieved_lines_with_distances]\n)\n","SYSTEM_PROMPT = \"\"\"\nHuman: You are an AI assistant. 
You are able to find answers to the questions from the contextual passage snippets provided.\n\"\"\"\nUSER_PROMPT = f\"\"\"\nUse the following pieces of information enclosed in tags to provide an answer to the question enclosed in tags.\n\n{context}\n\n\n{question}\n\n\"\"\"\n","response = openai_client.chat.completions.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n {\"role\": \"user\", \"content\": USER_PROMPT},\n ],\n)\nprint(response.choices[0].message.content)\n"],"headingContent":"","anchorList":[{"label":"使用 Milvus 构建 RAG","href":"Build-RAG-with-Milvus","type":1,"isActive":false},{"label":"准备工作","href":"Preparation","type":2,"isActive":false},{"label":"将数据载入 Milvus","href":"Load-data-into-Milvus","type":2,"isActive":false},{"label":"构建 RAG","href":"Build-RAG","type":2,"isActive":false},{"label":"快速部署","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["$ pip install --upgrade pymilvus openai requests tqdm\n","import os\n\nos.environ[\"OPENAI_API_KEY\"] = \"sk-***********\"\n","$ wget https://github.com/milvus-io/milvus-docs/releases/download/v2.4.6-preview/milvus_docs_2.4.x_en.zip\n$ unzip -q milvus_docs_2.4.x_en.zip -d milvus_docs\n","from glob import glob\n\ntext_lines = []\n\nfor file_path in glob(\"milvus_docs/en/faq/*.md\", recursive=True):\n with open(file_path, \"r\") as file:\n file_text = file.read()\n\n text_lines += file_text.split(\"# \")\n","from openai import OpenAI\n\nopenai_client = OpenAI()\n","def emb_text(text):\n return (\n openai_client.embeddings.create(input=text, model=\"text-embedding-3-small\")\n .data[0]\n .embedding\n )\n","test_embedding = emb_text(\"This is a test\")\nembedding_dim = len(test_embedding)\nprint(embedding_dim)\nprint(test_embedding[:10])\n","from pymilvus import MilvusClient\n\nmilvus_client = MilvusClient(uri=\"./milvus_demo.db\")\n\ncollection_name = \"my_rag_collection\"\n","if milvus_client.has_collection(collection_name):\n milvus_client.drop_collection(collection_name)\n","milvus_client.create_collection(\n collection_name=collection_name,\n dimension=embedding_dim,\n metric_type=\"IP\", # Inner product distance\n consistency_level=\"Strong\", # Strong consistency level\n)\n","from tqdm import tqdm\n\ndata = []\n\nfor i, line in enumerate(tqdm(text_lines, desc=\"Creating embeddings\")):\n data.append({\"id\": i, \"vector\": emb_text(line), \"text\": line})\n\nmilvus_client.insert(collection_name=collection_name, data=data)\n","question = \"How is data stored in milvus?\"\n","search_res = milvus_client.search(\n collection_name=collection_name,\n data=[\n emb_text(question)\n ], # Use the `emb_text` function to convert the question to an embedding vector\n limit=3, # Return top 3 results\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Inner product distance\n output_fields=[\"text\"], # Return the text field\n)\n","import json\n\nretrieved_lines_with_distances = [\n (res[\"entity\"][\"text\"], res[\"distance\"]) for res in search_res[0]\n]\nprint(json.dumps(retrieved_lines_with_distances, indent=4))\n","context = \"\\n\".join(\n [line_with_distance[0] for line_with_distance in retrieved_lines_with_distances]\n)\n","SYSTEM_PROMPT = \"\"\"\nHuman: You are an AI assistant. 
You are able to find answers to the questions from the contextual passage snippets provided.\n\"\"\"\nUSER_PROMPT = f\"\"\"\nUse the following pieces of information enclosed in tags to provide an answer to the question enclosed in tags.\n\n{context}\n\n\n{question}\n\n\"\"\"\n","response = openai_client.chat.completions.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n {\"role\": \"user\", \"content\": USER_PROMPT},\n ],\n)\nprint(response.choices[0].message.content)\n"],"headingContent":"Build RAG with Milvus","anchorList":[{"label":"使用 Milvus 创建 RAG","href":"Build-RAG-with-Milvus","type":1,"isActive":false},{"label":"准备工作","href":"Preparation","type":2,"isActive":false},{"label":"将数据载入 Milvus","href":"Load-data-into-Milvus","type":2,"isActive":false},{"label":"构建 RAG","href":"Build-RAG","type":2,"isActive":false},{"label":"快速部署","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/tutorials/build-rag-with-milvus.md b/localization/v2.4.x/site/zh/tutorials/build-rag-with-milvus.md index 67949e9d2..73c2f5319 100644 --- a/localization/v2.4.x/site/zh/tutorials/build-rag-with-milvus.md +++ b/localization/v2.4.x/site/zh/tutorials/build-rag-with-milvus.md @@ -1,10 +1,9 @@ --- id: build-rag-with-milvus.md -summary: 与Milvus一起建造抹布 -title: 用 Milvus 打造 RAG +summary: 用 Milvus 建抹布 +title: 使用 Milvus 创建 RAG --- - -

Build RAG with Milvus

Open In Colab | GitHub Repository

In this tutorial, we will show you how to build a RAG (Retrieval-Augmented Generation) pipeline with Milvus.

The RAG system combines a retrieval system with a generative model to generate new text based on a given prompt. The system first retrieves relevant documents from a corpus using Milvus, and then uses a generative model to generate new text based on the retrieved documents.

        @@ -46,10 +46,9 @@ title: 用 Milvus 打造 RAG

In this example, we will use OpenAI as the LLM. You should prepare the API key OPENAI_API_KEY as an environment variable.

        import os
         
        -os.environ["OPENAI_API_KEY"] = "sk-\***\*\*\*\*\*\***"
        +os.environ["OPENAI_API_KEY"] = "sk-***********"
         
        - -

Prepare the data

We use the FAQ pages from the Milvus Documentation 2.4.x as the private knowledge in our RAG, which is a good data source for a simple RAG pipeline.

Download the zip file and extract the documents to the folder milvus_docs.

        $ wget https://github.com/milvus-io/milvus-docs/releases/download/v2.4.6-preview/milvus_docs_2.4.x_en.zip
         $ unzip -q milvus_docs_2.4.x_en.zip -d milvus_docs
@@ -59,20 +58,17 @@ $ unzip -q milvus_docs_2.4.x_en.
-for file_path in glob("milvus_docs/en/faq/\*.md", recursive=True):
-with open(file_path, "r") as file:
-file_text = file.read()
+for file_path in glob("milvus_docs/en/faq/*.md", recursive=True):
+    with open(file_path, "r") as file:
+        file_text = file.read()
         
             text_lines += file_text.split("# ")
        -
         
Prepare the embedding model

We initialize the OpenAI client to prepare the embedding model.

        from openai import OpenAI
         
        -openai*client = OpenAI()
        +openai_client = OpenAI()
         
Define a function to generate text embeddings using the OpenAI client. We use the text-embedding-3-small model as an example.

        def emb_text(text):
             return (
        @@ -81,7 +77,7 @@ openai*client = OpenAI()
                 .embedding
             )
         
Generate a test embedding and print its dimension and first few elements.

        test_embedding = emb_text("This is a test")
         embedding_dim = len(test_embedding)
         print(embedding_dim)
        @@ -105,27 +101,26 @@ embedding_dim = len(test_embedding)
                   d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
                 >
               
Create the Collection

from pymilvus import MilvusClient
         
        -milvus*client = MilvusClient(uri="./milvus_demo.db")
        +milvus_client = MilvusClient(uri="./milvus_demo.db")
         
         collection_name = "my_rag_collection"
         
As for the argument of MilvusClient:

• Setting the uri as a local file, e.g. ./milvus.db, is the most convenient method, as it automatically utilizes Milvus Lite to store all data in this file.
• If you have a large scale of data, you can set up a more performant Milvus server on Docker or Kubernetes. In this setup, please use the server uri, e.g. http://localhost:19530, as your uri.
• If you want to use Zilliz Cloud, the fully managed cloud service for Milvus, adjust the uri and token, which correspond to the Public Endpoint and API key in Zilliz Cloud.
Check if the collection already exists and drop it if it does.

        if milvus_client.has_collection(collection_name):
             milvus_client.drop_collection(collection_name)
         
Create a new collection with the specified parameters.

If we don't specify any field information, Milvus will automatically create a default id field for the primary key, and a vector field to store the vector data. A reserved JSON field is used to store non-schema-defined fields and their values.

        milvus_client.create_collection(
             collection_name=collection_name,
             dimension=embedding_dim,
        @@ -133,18 +128,17 @@ collection_name = "my_rag_collection"
             consistency_level="Strong",  # Strong consistency level
         )
         
Insert data

Iterate through the text lines, create embeddings, and then insert the data into Milvus.

Here is a new field text, which is a non-defined field in the collection schema. It will be automatically added to the reserved JSON dynamic field, which can be treated as a normal field at a high level.

        from tqdm import tqdm
         
         data = []
         
         for i, line in enumerate(tqdm(text_lines, desc="Creating embeddings")):
        -data.append({"id": i, "vector": emb_text(line), "text": line})
        +    data.append({"id": i, "vector": emb_text(line), "text": line})
         
         milvus_client.insert(collection_name=collection_name, data=data)
         
        -
        Creating embeddings: 100%|██████████| 72/72 [00:27<00:00,  2.67it/s]
         
         
        @@ -173,7 +167,7 @@ milvus_client.insert(collection_name=collection_name, data=data)
             

Retrieve data for a query

Let's specify a frequent question about Milvus.

        question = "How is data stored in milvus?"
         
Search for the question in the collection and retrieve the semantic top-3 matches.

        search_res = milvus_client.search(
             collection_name=collection_name,
             data=[
        @@ -188,11 +182,10 @@ milvus_client.insert(collection_name=collection_name, data=data)
         
        import json
         
         retrieved_lines_with_distances = [
        -(res["entity"]["text"], res["distance"]) for res in search_res[0]
        +    (res["entity"]["text"], res["distance"]) for res in search_res[0]
         ]
         print(json.dumps(retrieved_lines_with_distances, indent=4))
         
        -
        [
             [
                 " Where does Milvus store data?\n\nMilvus deals with two types of data, inserted data and metadata. \n\nInserted data, including vector data, scalar data, and collection-specific schema, are stored in persistent storage as incremental log. Milvus supports multiple object storage backends, including [MinIO](https://min.io/), [AWS S3](https://aws.amazon.com/s3/?nc1=h_ls), [Google Cloud Storage](https://cloud.google.com/storage?hl=en#object-storage-for-companies-of-all-sizes) (GCS), [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs), [Alibaba Cloud OSS](https://www.alibabacloud.com/product/object-storage-service), and [Tencent Cloud Object Storage](https://www.tencentcloud.com/products/cos) (COS).\n\nMetadata are generated within Milvus. Each Milvus module has its own metadata that are stored in etcd.\n\n###",
        diff --git a/localization/v2.4.x/site/zh/tutorials/graph_rag_with_milvus.md b/localization/v2.4.x/site/zh/tutorials/graph_rag_with_milvus.md
        index f74c63b50..f58444b6d 100644
        --- a/localization/v2.4.x/site/zh/tutorials/graph_rag_with_milvus.md
        +++ b/localization/v2.4.x/site/zh/tutorials/graph_rag_with_milvus.md
        @@ -18,7 +18,8 @@ title: 用 Milvus 绘制 RAG 图
                   d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
                 >
               
        -    

        Open In Colab

        +

        Open In Colab +GitHub Repository

The widespread application of large language models highlights the importance of improving the accuracy and relevance of their responses. Retrieval-Augmented Generation (RAG) enhances models with external knowledge bases, providing more contextual information and mitigating problems such as hallucination and insufficient knowledge. However, relying solely on the simple RAG paradigm has its limitations, especially when dealing with complex entity relationships and multi-hop questions, where the model often struggles to provide accurate answers.

Introducing knowledge graphs (KGs) into the RAG system offers a new solution. KGs present entities and their relationships in a structured way, providing more precise retrieval information and helping RAG better handle complex question-answering tasks. KG-RAG is still in its early stages, and there is no consensus on how to effectively retrieve entities and relationships from KGs, or how to integrate vector similarity search with graph structures.

In this notebook, we introduce a simple yet powerful approach to greatly improve the performance of this scenario. It is a simple RAG paradigm with multi-way retrieval followed by reranking, but it implements Graph RAG logically and achieves state-of-the-art performance in handling multi-hop questions. Let's see how it is implemented.

        @@ -165,7 +166,7 @@ embedding_model = OpenAIEmbeddings(model=
• Entities are the subjects or objects in the triplets, so we directly extract them from the triplets.
• Here, we construct the concept of a relationship by directly concatenating the subject, predicate, and object with a space in between.

We also prepare a dict mapping entity ids to relation ids, and another dict mapping relation ids to passage ids, for later use.
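A hedged sketch of that construction (the triplet values here are hypothetical; the two dicts mirror the code right below):

from collections import defaultdict

# Build the relationship string by joining subject, predicate, and object with spaces.
subject, predicate, obj = "ABC Corp", "specializes in", "technology"  # hypothetical triplet
relation = " ".join([subject, predicate, obj])  # "ABC Corp specializes in technology"

# Keep lookups from entity ids to relation ids, and from relation ids to passage ids.
entityid_2_relationids = defaultdict(list)
relationid_2_passageids = defaultdict(list)
entityid_2_relationids[0].append(0)   # entity 0 appears in relation 0
relationid_2_passageids[0].append(0)  # relation 0 came from passage 0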

      entityid_2_relationids = defaultdict(list)
       relationid_2_passageids = defaultdict(list)
       
      diff --git a/localization/v2.4.x/site/zh/tutorials/hybrid_search_with_milvus.json b/localization/v2.4.x/site/zh/tutorials/hybrid_search_with_milvus.json
      index a90ddf4d7..66580167d 100644
      --- a/localization/v2.4.x/site/zh/tutorials/hybrid_search_with_milvus.json
      +++ b/localization/v2.4.x/site/zh/tutorials/hybrid_search_with_milvus.json
      @@ -1 +1 @@
      -{"codeList":["$ pip install --upgrade pymilvus \"pymilvus[model]\"\n","# Run this cell to download the dataset\n$ wget http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv\n","import pandas as pd\n\nfile_path = \"quora_duplicate_questions.tsv\"\ndf = pd.read_csv(file_path, sep=\"\\t\")\nquestions = set()\nfor _, row in df.iterrows():\n    obj = row.to_dict()\n    questions.add(obj[\"question1\"][:512])\n    questions.add(obj[\"question2\"][:512])\n    if len(questions) > 500:  # Skip this if you want to use the full dataset\n        break\n\ndocs = list(questions)\n\n# example question\nprint(docs[0])\n","from milvus_model.hybrid import BGEM3EmbeddingFunction\n\nef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\ndense_dim = ef.dim[\"dense\"]\n\n# Generate embeddings using BGE-M3 model\ndocs_embeddings = ef(docs)\n","from pymilvus import (\n    connections,\n    utility,\n    FieldSchema,\n    CollectionSchema,\n    DataType,\n    Collection,\n)\n\n# Connect to Milvus given URI\nconnections.connect(uri=\"./milvus.db\")\n\n# Specify the data schema for the new Collection\nfields = [\n    # Use auto generated id as primary key\n    FieldSchema(\n        name=\"pk\", dtype=DataType.VARCHAR, is_primary=True, auto_id=True, max_length=100\n    ),\n    # Store the original text to retrieve based on semantically distance\n    FieldSchema(name=\"text\", dtype=DataType.VARCHAR, max_length=512),\n    # Milvus now supports both sparse and dense vectors,\n    # we can store each in a separate field to conduct hybrid search on both vectors\n    FieldSchema(name=\"sparse_vector\", dtype=DataType.SPARSE_FLOAT_VECTOR),\n    FieldSchema(name=\"dense_vector\", dtype=DataType.FLOAT_VECTOR, dim=dense_dim),\n]\nschema = CollectionSchema(fields)\n\n# Create collection (drop the old one if exists)\ncol_name = \"hybrid_demo\"\nif utility.has_collection(col_name):\n    Collection(col_name).drop()\ncol = Collection(col_name, schema, consistency_level=\"Strong\")\n\n# To make vector search efficient, we need to create indices for the vector fields\nsparse_index = {\"index_type\": \"SPARSE_INVERTED_INDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"sparse_vector\", sparse_index)\ndense_index = {\"index_type\": \"AUTOINDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"dense_vector\", dense_index)\ncol.load()\n","# For efficiency, we insert 50 records in each small batch\nfor i in range(0, len(docs), 50):\n    batched_entities = [\n        docs[i : i + 50],\n        docs_embeddings[\"sparse\"][i : i + 50],\n        docs_embeddings[\"dense\"][i : i + 50],\n    ]\n    col.insert(batched_entities)\nprint(\"Number of entities inserted:\", col.num_entities)\n","# Enter your search query\nquery = input(\"Enter your search query: \")\nprint(query)\n\n# Generate embeddings for the query\nquery_embeddings = ef([query])\n# print(query_embeddings)\n","from pymilvus import (\n    AnnSearchRequest,\n    WeightedRanker,\n)\n\n\ndef dense_search(col, query_dense_embedding, limit=10):\n    search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    res = col.search(\n        [query_dense_embedding],\n        anns_field=\"dense_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef sparse_search(col, query_sparse_embedding, limit=10):\n    search_params = {\n        \"metric_type\": \"IP\",\n        \"params\": {},\n    }\n    res = col.search(\n        [query_sparse_embedding],\n        
anns_field=\"sparse_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef hybrid_search(\n    col,\n    query_dense_embedding,\n    query_sparse_embedding,\n    sparse_weight=1.0,\n    dense_weight=1.0,\n    limit=10,\n):\n    dense_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    dense_req = AnnSearchRequest(\n        [query_dense_embedding], \"dense_vector\", dense_search_params, limit=limit\n    )\n    sparse_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    sparse_req = AnnSearchRequest(\n        [query_sparse_embedding], \"sparse_vector\", sparse_search_params, limit=limit\n    )\n    rerank = WeightedRanker(sparse_weight, dense_weight)\n    res = col.hybrid_search(\n        [sparse_req, dense_req], rerank=rerank, limit=limit, output_fields=[\"text\"]\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n","dense_results = dense_search(col, query_embeddings[\"dense\"][0])\nsparse_results = sparse_search(col, query_embeddings[\"sparse\"][0])\nhybrid_results = hybrid_search(\n    col,\n    query_embeddings[\"dense\"][0],\n    query_embeddings[\"sparse\"][0],\n    sparse_weight=0.7,\n    dense_weight=1.0,\n)\n","def doc_text_formatting(ef, query, docs):\n    tokenizer = ef.model.tokenizer\n    query_tokens_ids = tokenizer.encode(query, return_offsets_mapping=True)\n    query_tokens = tokenizer.convert_ids_to_tokens(query_tokens_ids)\n    formatted_texts = []\n\n    for doc in docs:\n        ldx = 0\n        landmarks = []\n        encoding = tokenizer.encode_plus(doc, return_offsets_mapping=True)\n        tokens = tokenizer.convert_ids_to_tokens(encoding[\"input_ids\"])[1:-1]\n        offsets = encoding[\"offset_mapping\"][1:-1]\n        for token, (start, end) in zip(tokens, offsets):\n            if token in query_tokens:\n                if len(landmarks) != 0 and start == landmarks[-1]:\n                    landmarks[-1] = end\n                else:\n                    landmarks.append(start)\n                    landmarks.append(end)\n        close = False\n        formatted_text = \"\"\n        for i, c in enumerate(doc):\n            if ldx == len(landmarks):\n                pass\n            elif i == landmarks[ldx]:\n                if close:\n                    formatted_text += \"\"\n                else:\n                    formatted_text += \"\"\n                close = not close\n                ldx = ldx + 1\n            formatted_text += c\n        if close is True:\n            formatted_text += \"\"\n        formatted_texts.append(formatted_text)\n    return formatted_texts\n","from IPython.display import Markdown, display\n\n# Dense search results\ndisplay(Markdown(\"**Dense Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, dense_results)\nfor result in dense_results:\n    display(Markdown(result))\n\n# Sparse search results\ndisplay(Markdown(\"\\n**Sparse Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, sparse_results)\nfor result in formatted_results:\n    display(Markdown(result))\n\n# Hybrid search results\ndisplay(Markdown(\"\\n**Hybrid Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, hybrid_results)\nfor result in formatted_results:\n    display(Markdown(result))\n"],"headingContent":"","anchorList":[{"label":"使用 Milvus 进行混合搜索","href":"Hybrid-Search-with-Milvus","type":1,"isActive":false}]}
      \ No newline at end of file
      +{"codeList":["$ pip install --upgrade pymilvus \"pymilvus[model]\"\n","# Run this cell to download the dataset\n$ wget http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv\n","import pandas as pd\n\nfile_path = \"quora_duplicate_questions.tsv\"\ndf = pd.read_csv(file_path, sep=\"\\t\")\nquestions = set()\nfor _, row in df.iterrows():\n    obj = row.to_dict()\n    questions.add(obj[\"question1\"][:512])\n    questions.add(obj[\"question2\"][:512])\n    if len(questions) > 500:  # Skip this if you want to use the full dataset\n        break\n\ndocs = list(questions)\n\n# example question\nprint(docs[0])\n","from milvus_model.hybrid import BGEM3EmbeddingFunction\n\nef = BGEM3EmbeddingFunction(use_fp16=False, device=\"cpu\")\ndense_dim = ef.dim[\"dense\"]\n\n# Generate embeddings using BGE-M3 model\ndocs_embeddings = ef(docs)\n","from pymilvus import (\n    connections,\n    utility,\n    FieldSchema,\n    CollectionSchema,\n    DataType,\n    Collection,\n)\n\n# Connect to Milvus given URI\nconnections.connect(uri=\"./milvus.db\")\n\n# Specify the data schema for the new Collection\nfields = [\n    # Use auto generated id as primary key\n    FieldSchema(\n        name=\"pk\", dtype=DataType.VARCHAR, is_primary=True, auto_id=True, max_length=100\n    ),\n    # Store the original text to retrieve based on semantically distance\n    FieldSchema(name=\"text\", dtype=DataType.VARCHAR, max_length=512),\n    # Milvus now supports both sparse and dense vectors,\n    # we can store each in a separate field to conduct hybrid search on both vectors\n    FieldSchema(name=\"sparse_vector\", dtype=DataType.SPARSE_FLOAT_VECTOR),\n    FieldSchema(name=\"dense_vector\", dtype=DataType.FLOAT_VECTOR, dim=dense_dim),\n]\nschema = CollectionSchema(fields)\n\n# Create collection (drop the old one if exists)\ncol_name = \"hybrid_demo\"\nif utility.has_collection(col_name):\n    Collection(col_name).drop()\ncol = Collection(col_name, schema, consistency_level=\"Strong\")\n\n# To make vector search efficient, we need to create indices for the vector fields\nsparse_index = {\"index_type\": \"SPARSE_INVERTED_INDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"sparse_vector\", sparse_index)\ndense_index = {\"index_type\": \"AUTOINDEX\", \"metric_type\": \"IP\"}\ncol.create_index(\"dense_vector\", dense_index)\ncol.load()\n","# For efficiency, we insert 50 records in each small batch\nfor i in range(0, len(docs), 50):\n    batched_entities = [\n        docs[i : i + 50],\n        docs_embeddings[\"sparse\"][i : i + 50],\n        docs_embeddings[\"dense\"][i : i + 50],\n    ]\n    col.insert(batched_entities)\nprint(\"Number of entities inserted:\", col.num_entities)\n","# Enter your search query\nquery = input(\"Enter your search query: \")\nprint(query)\n\n# Generate embeddings for the query\nquery_embeddings = ef([query])\n# print(query_embeddings)\n","from pymilvus import (\n    AnnSearchRequest,\n    WeightedRanker,\n)\n\n\ndef dense_search(col, query_dense_embedding, limit=10):\n    search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    res = col.search(\n        [query_dense_embedding],\n        anns_field=\"dense_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef sparse_search(col, query_sparse_embedding, limit=10):\n    search_params = {\n        \"metric_type\": \"IP\",\n        \"params\": {},\n    }\n    res = col.search(\n        [query_sparse_embedding],\n        
anns_field=\"sparse_vector\",\n        limit=limit,\n        output_fields=[\"text\"],\n        param=search_params,\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n\n\ndef hybrid_search(\n    col,\n    query_dense_embedding,\n    query_sparse_embedding,\n    sparse_weight=1.0,\n    dense_weight=1.0,\n    limit=10,\n):\n    dense_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    dense_req = AnnSearchRequest(\n        [query_dense_embedding], \"dense_vector\", dense_search_params, limit=limit\n    )\n    sparse_search_params = {\"metric_type\": \"IP\", \"params\": {}}\n    sparse_req = AnnSearchRequest(\n        [query_sparse_embedding], \"sparse_vector\", sparse_search_params, limit=limit\n    )\n    rerank = WeightedRanker(sparse_weight, dense_weight)\n    res = col.hybrid_search(\n        [sparse_req, dense_req], rerank=rerank, limit=limit, output_fields=[\"text\"]\n    )[0]\n    return [hit.get(\"text\") for hit in res]\n","dense_results = dense_search(col, query_embeddings[\"dense\"][0])\nsparse_results = sparse_search(col, query_embeddings[\"sparse\"]._getrow(0))\nhybrid_results = hybrid_search(\n    col,\n    query_embeddings[\"dense\"][0],\n    query_embeddings[\"sparse\"]._getrow(0),\n    sparse_weight=0.7,\n    dense_weight=1.0,\n)\n","def doc_text_formatting(ef, query, docs):\n    tokenizer = ef.model.tokenizer\n    query_tokens_ids = tokenizer.encode(query, return_offsets_mapping=True)\n    query_tokens = tokenizer.convert_ids_to_tokens(query_tokens_ids)\n    formatted_texts = []\n\n    for doc in docs:\n        ldx = 0\n        landmarks = []\n        encoding = tokenizer.encode_plus(doc, return_offsets_mapping=True)\n        tokens = tokenizer.convert_ids_to_tokens(encoding[\"input_ids\"])[1:-1]\n        offsets = encoding[\"offset_mapping\"][1:-1]\n        for token, (start, end) in zip(tokens, offsets):\n            if token in query_tokens:\n                if len(landmarks) != 0 and start == landmarks[-1]:\n                    landmarks[-1] = end\n                else:\n                    landmarks.append(start)\n                    landmarks.append(end)\n        close = False\n        formatted_text = \"\"\n        for i, c in enumerate(doc):\n            if ldx == len(landmarks):\n                pass\n            elif i == landmarks[ldx]:\n                if close:\n                    formatted_text += \"\"\n                else:\n                    formatted_text += \"\"\n                close = not close\n                ldx = ldx + 1\n            formatted_text += c\n        if close is True:\n            formatted_text += \"\"\n        formatted_texts.append(formatted_text)\n    return formatted_texts\n","from IPython.display import Markdown, display\n\n# Dense search results\ndisplay(Markdown(\"**Dense Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, dense_results)\nfor result in dense_results:\n    display(Markdown(result))\n\n# Sparse search results\ndisplay(Markdown(\"\\n**Sparse Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, sparse_results)\nfor result in formatted_results:\n    display(Markdown(result))\n\n# Hybrid search results\ndisplay(Markdown(\"\\n**Hybrid Search Results:**\"))\nformatted_results = doc_text_formatting(ef, query, hybrid_results)\nfor result in formatted_results:\n    display(Markdown(result))\n"],"headingContent":"Hybrid Search with Milvus","anchorList":[{"label":"使用 Milvus 进行混合搜索","href":"Hybrid-Search-with-Milvus","type":1,"isActive":false}]}
      \ No newline at end of file
      diff --git a/localization/v2.4.x/site/zh/tutorials/hybrid_search_with_milvus.md b/localization/v2.4.x/site/zh/tutorials/hybrid_search_with_milvus.md
      index 1dd4b04dc..5374b4d56 100644
      --- a/localization/v2.4.x/site/zh/tutorials/hybrid_search_with_milvus.md
      +++ b/localization/v2.4.x/site/zh/tutorials/hybrid_search_with_milvus.md
      @@ -18,9 +18,10 @@ title: 使用 Milvus 进行混合搜索
                 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
               >
             
-    [Open In Colab badge]
+    [Open In Colab badge]
+    [GitHub Repository badge]

-In this tutorial, we will demonstrate how to conduct hybrid search with Milvus and the BGE-M3 model. The BGE-M3 model can convert text into dense and sparse vectors. Milvus supports storing both kinds of vectors in one collection, enabling hybrid search that improves the relevance of search results.
+In this tutorial, we will demonstrate how to conduct hybrid search with Milvus and the BGE-M3 model. The BGE-M3 model can convert text into dense and sparse vectors. Milvus supports storing both kinds of vectors in one Collection, enabling hybrid search that improves the relevance of search results.
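To make the dual-vector idea above concrete, here is a minimal sketch that reuses the BGE-M3 embedding function ef defined in this tutorial's code; the sample sentence is a made-up stand-in, and the sparse output is assumed to be a SciPy sparse matrix (as the _getrow call later in this diff suggests):

    # One BGE-M3 call yields both vector types, keyed by "dense" and "sparse".
    embeddings = ef(["How do I learn the Java programming language?"])
    print(len(embeddings["dense"][0]))  # dense: a fixed-size float vector per text
    print(embeddings["sparse"].shape)   # sparse: one row of token weights per text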

Milvus supports dense, sparse, and hybrid retrieval methods:

• Dense retrieval: utilizes semantic context to understand the meaning behind the query.
• Sparse retrieval: emphasizes keyword matching, typically equivalent to full-text search.
• Hybrid retrieval: combines the dense and sparse approaches, capturing both semantic context and specific keywords.

@@ -67,10 +68,10 @@ docs_embeddings = ef(docs)
        Fetching 30 files: 100%|██████████| 30/30 [00:00<00:00, 302473.85it/s]
         Inference Embeddings: 100%|██████████| 32/32 [01:59<00:00,  3.74s/it]
         
-Set Up Milvus Collection and Index
-We will set up the Milvus collection and create indices for the vector fields.
+Set Up Milvus Collections and Index
+We will set up the Milvus Collections and create indices for the vector fields.

-• Setting the uri as a local file, e.g. "./milvus.db", is the most convenient method, as it automatically utilizes Milvus Lite to store all data in that file.
+• Setting the uri as a local file, e.g. "./milvus.db", is the most convenient method, as it automatically utilizes Milvus Lite to store all data in this file.
 • If you have large-scale data, say more than a million vectors, you can set up a more performant Milvus server on Docker or Kubernetes. In this setup, use the server uri, e.g. http://localhost:19530, as your uri.
 • If you want to use Zilliz Cloud, the fully managed cloud service for Milvus, adjust the uri and token, which correspond to the Public Endpoint and API key in Zilliz Cloud. (A connection sketch covering all three options follows this list.)
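As a sketch of the three connection options just listed (the server address and the Zilliz Cloud endpoint and token below are placeholders, not real credentials):

    from pymilvus import connections

    # Option 1: Milvus Lite, everything stored in a local file.
    connections.connect(uri="./milvus.db")

    # Option 2: a standalone Milvus server on Docker or Kubernetes.
    # connections.connect(uri="http://localhost:19530")

    # Option 3: Zilliz Cloud, using your Public Endpoint and API key.
    # connections.connect(uri="https://<public-endpoint>", token="<api-key>")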
@@ -115,7 +116,7 @@ dense_index = {"index_type": "AUTOINDEX", "metric_type": "IP"}
 col.create_index("dense_vector", dense_index)
 col.load()

-Insert Data into Milvus Dataset
-Insert the documents and their embeddings into the collection.
+Insert Data into Milvus Collections
+Insert the documents and their Embeddings into the Collections.

      # For efficiency, we insert 50 records in each small batch
       for i in range(0, len(docs), 50):
           batched_entities = [
      @@ -142,7 +143,7 @@ query_embeddings = ef([query])
       
• dense_search: only searches across the dense vector field
• sparse_search: only searches across the sparse vector field
-• hybrid_search: searches across both the dense and sparse vector fields with a weighted reranker
+• hybrid_search: searches across both the dense and sparse vector fields with a weighted Reranker
      from pymilvus import (
           AnnSearchRequest,
      @@ -201,11 +202,11 @@ def dense_search(col,
       

Let's run three different searches with the functions defined above:

      dense_results = dense_search(col, query_embeddings["dense"][0])
      -sparse_results = sparse_search(col, query_embeddings["sparse"][0])
      +sparse_results = sparse_search(col, query_embeddings["sparse"]._getrow(0))
       hybrid_results = hybrid_search(
           col,
           query_embeddings["dense"][0],
      -    query_embeddings["sparse"][0],
      +    query_embeddings["sparse"]._getrow(0),
           sparse_weight=0.7,
           dense_weight=1.0,
       )
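The [0] to ._getrow(0) change above appears to account for the BGE-M3 sparse output being a SciPy sparse matrix, where plain integer indexing does not reliably return a single 1 x n row. A minimal sketch of what the call returns, using a hypothetical matrix rather than the tutorial's data (note that _getrow is technically a private SciPy method, which this diff relies on):

    from scipy.sparse import csr_matrix

    sparse = csr_matrix([[0.0, 1.2, 0.0], [0.4, 0.0, 0.9]])  # one row per text
    row = sparse._getrow(0)  # a 1 x n sparse row, as the updated code expects
    print(row.toarray())     # [[0.  1.2 0. ]]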
      @@ -283,7 +284,7 @@ formatted_results = doc_text_formatting(ef, query, hybrid_results)
       

Sparse Search Results:

What is Java programming? How to learn the Java programming language?

What is the best way to start learning robotics?

-What is the alternative method of machine learning?

+What is the alternative to machine learning?

How to create a new Terminal and a new shell in Linux using C programming?

How to create a new shell in a new terminal (Linux terminal) using C programming?

Which line of work is better in Hyderabad?

      @@ -302,7 +303,7 @@ formatted_results = doc_text_formatting(ef, query, hybrid_results)

How do I learn computer security?

How do I start to learn information security?

How can I learn computer languages like Java?

-What is the alternative method of machine learning?

+What is the alternative to machine learning?

How to create a new Terminal and a new shell in Linux using C programming?

How to create a new shell in a new terminal (Linux terminal) using C programming?

Which line of work is better in Hyderabad?

      diff --git a/localization/v2.4.x/site/zh/tutorials/image_similarity_search.json b/localization/v2.4.x/site/zh/tutorials/image_similarity_search.json index 2bfc66330..9cfc3c0a9 100644 --- a/localization/v2.4.x/site/zh/tutorials/image_similarity_search.json +++ b/localization/v2.4.x/site/zh/tutorials/image_similarity_search.json @@ -1 +1 @@ -{"codeList":["!wget https://github.com/milvus-io/pymilvus-assets/releases/download/imagedata/reverse_image_search.zip\n!unzip -q -o reverse_image_search.zip\n","$ pip install pymilvus --upgrade\n$ pip install timm\n","import torch\nfrom PIL import Image\nimport timm\nfrom sklearn.preprocessing import normalize\nfrom timm.data import resolve_data_config\nfrom timm.data.transforms_factory import create_transform\n\n\nclass FeatureExtractor:\n def __init__(self, modelname):\n # Load the pre-trained model\n self.model = timm.create_model(\n modelname, pretrained=True, num_classes=0, global_pool=\"avg\"\n )\n self.model.eval()\n\n # Get the input size required by the model\n self.input_size = self.model.default_cfg[\"input_size\"]\n\n config = resolve_data_config({}, model=modelname)\n # Get the preprocessing function provided by TIMM for the model\n self.preprocess = create_transform(**config)\n\n def __call__(self, imagepath):\n # Preprocess the input image\n input_image = Image.open(imagepath).convert(\"RGB\") # Convert to RGB if needed\n input_image = self.preprocess(input_image)\n\n # Convert the image to a PyTorch tensor and add a batch dimension\n input_tensor = input_image.unsqueeze(0)\n\n # Perform inference\n with torch.no_grad():\n output = self.model(input_tensor)\n\n # Extract the feature vector\n feature_vector = output.squeeze().numpy()\n\n return normalize(feature_vector.reshape(1, -1), norm=\"l2\").flatten()\n","from pymilvus import MilvusClient\n\n# Set up a Milvus client\nclient = MilvusClient(uri=\"example.db\")\n# Create a collection in quick setup mode\nif client.has_collection(collection_name=\"image_embeddings\"):\n client.drop_collection(collection_name=\"image_embeddings\")\nclient.create_collection(\n collection_name=\"image_embeddings\",\n vector_field_name=\"vector\",\n dimension=512,\n auto_id=True,\n enable_dynamic_field=True,\n metric_type=\"COSINE\",\n)\n","import os\n\nextractor = FeatureExtractor(\"resnet34\")\n\nroot = \"./train\"\ninsert = True\nif insert is True:\n for dirpath, foldername, filenames in os.walk(root):\n for filename in filenames:\n if filename.endswith(\".JPEG\"):\n filepath = dirpath + \"/\" + filename\n image_embedding = extractor(filepath)\n client.insert(\n \"image_embeddings\",\n {\"vector\": image_embedding, \"filename\": filepath},\n )\n","from IPython.display import display\n\nquery_image = \"./test/Afghan_hound/n02088094_4261.JPEG\"\n\nresults = client.search(\n \"image_embeddings\",\n data=[extractor(query_image)],\n output_fields=[\"filename\"],\n search_params={\"metric_type\": \"COSINE\"},\n)\nimages = []\nfor result in results:\n for hit in result[:10]:\n filename = hit[\"entity\"][\"filename\"]\n img = Image.open(filename)\n img = img.resize((150, 150))\n images.append(img)\n\nwidth = 150 * 5\nheight = 150 * 2\nconcatenated_image = Image.new(\"RGB\", (width, height))\n\nfor idx, img in enumerate(images):\n x = idx % 5\n y = idx // 5\n concatenated_image.paste(img, (x * 150, y * 150))\ndisplay(\"query\")\ndisplay(Image.open(query_image).resize((150, 150)))\ndisplay(\"results\")\ndisplay(concatenated_image)\n"],"headingContent":"","anchorList":[{"label":"使用 Milvus 
搜索图像","href":"Image-Search-with-Milvus","type":1,"isActive":false},{"label":"数据集准备","href":"Dataset-Preparation","type":2,"isActive":false},{"label":"前提条件","href":"Prequisites","type":2,"isActive":false},{"label":"定义特征提取器","href":"Define-the-Feature-Extractor","type":2,"isActive":false},{"label":"创建 Milvus 集合","href":"Create-a-Milvus-Collection","type":2,"isActive":false},{"label":"将嵌入信息插入 Milvus","href":"Insert-the-Embeddings-to-Milvus","type":2,"isActive":false},{"label":"快速部署","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["!wget https://github.com/milvus-io/pymilvus-assets/releases/download/imagedata/reverse_image_search.zip\n!unzip -q -o reverse_image_search.zip\n","$ pip install pymilvus --upgrade\n$ pip install timm\n","import torch\nfrom PIL import Image\nimport timm\nfrom sklearn.preprocessing import normalize\nfrom timm.data import resolve_data_config\nfrom timm.data.transforms_factory import create_transform\n\n\nclass FeatureExtractor:\n def __init__(self, modelname):\n # Load the pre-trained model\n self.model = timm.create_model(\n modelname, pretrained=True, num_classes=0, global_pool=\"avg\"\n )\n self.model.eval()\n\n # Get the input size required by the model\n self.input_size = self.model.default_cfg[\"input_size\"]\n\n config = resolve_data_config({}, model=modelname)\n # Get the preprocessing function provided by TIMM for the model\n self.preprocess = create_transform(**config)\n\n def __call__(self, imagepath):\n # Preprocess the input image\n input_image = Image.open(imagepath).convert(\"RGB\") # Convert to RGB if needed\n input_image = self.preprocess(input_image)\n\n # Convert the image to a PyTorch tensor and add a batch dimension\n input_tensor = input_image.unsqueeze(0)\n\n # Perform inference\n with torch.no_grad():\n output = self.model(input_tensor)\n\n # Extract the feature vector\n feature_vector = output.squeeze().numpy()\n\n return normalize(feature_vector.reshape(1, -1), norm=\"l2\").flatten()\n","from pymilvus import MilvusClient\n\n# Set up a Milvus client\nclient = MilvusClient(uri=\"example.db\")\n# Create a collection in quick setup mode\nif client.has_collection(collection_name=\"image_embeddings\"):\n client.drop_collection(collection_name=\"image_embeddings\")\nclient.create_collection(\n collection_name=\"image_embeddings\",\n vector_field_name=\"vector\",\n dimension=512,\n auto_id=True,\n enable_dynamic_field=True,\n metric_type=\"COSINE\",\n)\n","import os\n\nextractor = FeatureExtractor(\"resnet34\")\n\nroot = \"./train\"\ninsert = True\nif insert is True:\n for dirpath, foldername, filenames in os.walk(root):\n for filename in filenames:\n if filename.endswith(\".JPEG\"):\n filepath = dirpath + \"/\" + filename\n image_embedding = extractor(filepath)\n client.insert(\n \"image_embeddings\",\n {\"vector\": image_embedding, \"filename\": filepath},\n )\n","from IPython.display import display\n\nquery_image = \"./test/Afghan_hound/n02088094_4261.JPEG\"\n\nresults = client.search(\n \"image_embeddings\",\n data=[extractor(query_image)],\n output_fields=[\"filename\"],\n search_params={\"metric_type\": \"COSINE\"},\n)\nimages = []\nfor result in results:\n for hit in result[:10]:\n filename = hit[\"entity\"][\"filename\"]\n img = Image.open(filename)\n img = img.resize((150, 150))\n images.append(img)\n\nwidth = 150 * 5\nheight = 150 * 2\nconcatenated_image = Image.new(\"RGB\", (width, height))\n\nfor idx, img in enumerate(images):\n x = idx % 5\n y = idx // 5\n concatenated_image.paste(img, (x * 
150, y * 150))\ndisplay(\"query\")\ndisplay(Image.open(query_image).resize((150, 150)))\ndisplay(\"results\")\ndisplay(concatenated_image)\n"],"headingContent":"Image Search with Milvus","anchorList":[{"label":"使用 Milvus 搜索图像","href":"Image-Search-with-Milvus","type":1,"isActive":false},{"label":"数据集准备","href":"Dataset-Preparation","type":2,"isActive":false},{"label":"前提条件","href":"Prequisites","type":2,"isActive":false},{"label":"定义特征提取器","href":"Define-the-Feature-Extractor","type":2,"isActive":false},{"label":"创建 Milvus Collections","href":"Create-a-Milvus-Collection","type":2,"isActive":false},{"label":"将嵌入数据插入 Milvus","href":"Insert-the-Embeddings-to-Milvus","type":2,"isActive":false},{"label":"快速部署","href":"Quick-Deploy","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/tutorials/image_similarity_search.md b/localization/v2.4.x/site/zh/tutorials/image_similarity_search.md index 9f7c1e2a0..592423bb2 100644 --- a/localization/v2.4.x/site/zh/tutorials/image_similarity_search.md +++ b/localization/v2.4.x/site/zh/tutorials/image_similarity_search.md @@ -1,7 +1,7 @@ --- id: image_similarity_search.md summary: 使用 Milvus 进行图像搜索 -title: 使用 Milvus 进行图像搜索 +title: 使用 Milvus 搜索图像 ---

Image Search with Milvus

-[Open In Colab badge]
+[Open In Colab badge]
+[GitHub Repository badge]

In this notebook, we will show you how to use Milvus to search for similar images in a dataset. We will use a subset of the ImageNet dataset and then search for an image of an Afghan hound to demonstrate this.

Dataset Preparation

-Then, we need to define a feature extractor that extracts embeddings from images using timm's ResNet-34 model.
+Then, we need to define a feature extractor that extracts embedding information from images using timm's ResNet-34 model.

      import torch
       from PIL import Image
       import timm
      @@ -127,7 +128,7 @@ $ pip install timm
       
               return normalize(feature_vector.reshape(1, -1), norm="l2").flatten()
       
-Create a Milvus Collection
-Then, we need to create a Milvus collection to store the image embeddings.
+Create Milvus Collections
+Then, we need to create Milvus Collections to store the image embedding information.

      from pymilvus import MilvusClient
       
       # Set up a Milvus client
      @@ -167,7 +168,7 @@ client.create_collection(
       
• If you want to use Zilliz Cloud, the fully managed cloud service for Milvus, adjust the uri and token, which correspond to the Public Endpoint and API key in Zilliz Cloud.

Insert the Embeddings to Milvus

-We will extract the embedding information of each image using the ResNet34 model and insert the images from the training set into Milvus.
+We will extract the embedding of each image using the ResNet34 model and insert the images from the training set into Milvus.

    import os
     
     extractor = FeatureExtractor("resnet34")
    diff --git a/localization/v2.4.x/site/zh/tutorials/multimodal_rag_with_milvus.json b/localization/v2.4.x/site/zh/tutorials/multimodal_rag_with_milvus.json
    index 9606a8d47..bcf0a3ddc 100644
    --- a/localization/v2.4.x/site/zh/tutorials/multimodal_rag_with_milvus.json
    +++ b/localization/v2.4.x/site/zh/tutorials/multimodal_rag_with_milvus.json
    @@ -1 +1 @@
    -{"codeList":["$ pip install --upgrade pymilvus openai datasets opencv-python timm einops ftfy peft tqdm\n","$ git clone https://github.com/FlagOpen/FlagEmbedding.git\n$ pip install -e FlagEmbedding\n","$ wget https://github.com/milvus-io/bootcamp/releases/download/data/amazon_reviews_2023_subset.tar.gz\n$ tar -xzf amazon_reviews_2023_subset.tar.gz\n","$ wget https://huggingface.co/BAAI/bge-visualized/resolve/main/Visualized_base_en_v1.5.pth\n","import torch\nfrom FlagEmbedding.visual.modeling import Visualized_BGE\n\n\nclass Encoder:\n    def __init__(self, model_name: str, model_path: str):\n        self.model = Visualized_BGE(model_name_bge=model_name, model_weight=model_path)\n        self.model.eval()\n\n    def encode_query(self, image_path: str, text: str) -> list[float]:\n        with torch.no_grad():\n            query_emb = self.model.encode(image=image_path, text=text)\n        return query_emb.tolist()[0]\n\n    def encode_image(self, image_path: str) -> list[float]:\n        with torch.no_grad():\n            query_emb = self.model.encode(image=image_path)\n        return query_emb.tolist()[0]\n\n\nmodel_name = \"BAAI/bge-base-en-v1.5\"\nmodel_path = \"./Visualized_base_en_v1.5.pth\"  # Change to your own value if using a different model path\nencoder = Encoder(model_name, model_path)\n","import os\nfrom tqdm import tqdm\nfrom glob import glob\n\n\n# Generate embeddings for the image dataset\ndata_dir = (\n    \"./images_folder\"  # Change to your own value if using a different data directory\n)\nimage_list = glob(\n    os.path.join(data_dir, \"images\", \"*.jpg\")\n)  # We will only use images ending with \".jpg\"\nimage_dict = {}\nfor image_path in tqdm(image_list, desc=\"Generating image embeddings: \"):\n    try:\n        image_dict[image_path] = encoder.encode_image(image_path)\n    except Exception as e:\n        print(f\"Failed to generate embedding for {image_path}. 
Skipped.\")\n        continue\nprint(\"Number of encoded images:\", len(image_dict))\n","from pymilvus import MilvusClient\n\n\ndim = len(list(image_dict.values())[0])\ncollection_name = \"multimodal_rag_demo\"\n\n# Connect to Milvus client given URI\nmilvus_client = MilvusClient(uri=\"./milvus_demo.db\")\n\n# Create Milvus Collection\n# By default, vector field name is \"vector\"\nmilvus_client.create_collection(\n    collection_name=collection_name,\n    auto_id=True,\n    dimension=dim,\n    enable_dynamic_field=True,\n)\n\n# Insert data into collection\nmilvus_client.insert(\n    collection_name=collection_name,\n    data=[{\"image_path\": k, \"vector\": v} for k, v in image_dict.items()],\n)\n","query_image = os.path.join(\n    data_dir, \"leopard.jpg\"\n)  # Change to your own query image path\nquery_text = \"phone case with this image theme\"\n\n# Generate query embedding given image and text instructions\nquery_vec = encoder.encode_query(image_path=query_image, text=query_text)\n\nsearch_results = milvus_client.search(\n    collection_name=collection_name,\n    data=[query_vec],\n    output_fields=[\"image_path\"],\n    limit=9,  # Max number of search results to return\n    search_params={\"metric_type\": \"COSINE\", \"params\": {}},  # Search parameters\n)[0]\n\nretrieved_images = [hit.get(\"entity\").get(\"image_path\") for hit in search_results]\nprint(retrieved_images)\n","import numpy as np\nimport cv2\n\nimg_height = 300\nimg_width = 300\nrow_count = 3\n\n\ndef create_panoramic_view(query_image_path: str, retrieved_images: list) -> np.ndarray:\n    \"\"\"\n    creates a 5x5 panoramic view image from a list of images\n\n    args:\n        images: list of images to be combined\n\n    returns:\n        np.ndarray: the panoramic view image\n    \"\"\"\n    panoramic_width = img_width * row_count\n    panoramic_height = img_height * row_count\n    panoramic_image = np.full(\n        (panoramic_height, panoramic_width, 3), 255, dtype=np.uint8\n    )\n\n    # create and resize the query image with a blue border\n    query_image_null = np.full((panoramic_height, img_width, 3), 255, dtype=np.uint8)\n    query_image = Image.open(query_image_path).convert(\"RGB\")\n    query_array = np.array(query_image)[:, :, ::-1]\n    resized_image = cv2.resize(query_array, (img_width, img_height))\n\n    border_size = 10\n    blue = (255, 0, 0)  # blue color in BGR\n    bordered_query_image = cv2.copyMakeBorder(\n        resized_image,\n        border_size,\n        border_size,\n        border_size,\n        border_size,\n        cv2.BORDER_CONSTANT,\n        value=blue,\n    )\n\n    query_image_null[img_height * 2 : img_height * 3, 0:img_width] = cv2.resize(\n        bordered_query_image, (img_width, img_height)\n    )\n\n    # add text \"query\" below the query image\n    text = \"query\"\n    font_scale = 1\n    font_thickness = 2\n    text_org = (10, img_height * 3 + 30)\n    cv2.putText(\n        query_image_null,\n        text,\n        text_org,\n        cv2.FONT_HERSHEY_SIMPLEX,\n        font_scale,\n        blue,\n        font_thickness,\n        cv2.LINE_AA,\n    )\n\n    # combine the rest of the images into the panoramic view\n    retrieved_imgs = [\n        np.array(Image.open(img).convert(\"RGB\"))[:, :, ::-1] for img in retrieved_images\n    ]\n    for i, image in enumerate(retrieved_imgs):\n        image = cv2.resize(image, (img_width - 4, img_height - 4))\n        row = i // row_count\n        col = i % row_count\n        start_row = row * img_height\n        start_col = col * 
img_width\n\n        border_size = 2\n        bordered_image = cv2.copyMakeBorder(\n            image,\n            border_size,\n            border_size,\n            border_size,\n            border_size,\n            cv2.BORDER_CONSTANT,\n            value=(0, 0, 0),\n        )\n        panoramic_image[\n            start_row : start_row + img_height, start_col : start_col + img_width\n        ] = bordered_image\n\n        # add red index numbers to each image\n        text = str(i)\n        org = (start_col + 50, start_row + 30)\n        (font_width, font_height), baseline = cv2.getTextSize(\n            text, cv2.FONT_HERSHEY_SIMPLEX, 1, 2\n        )\n\n        top_left = (org[0] - 48, start_row + 2)\n        bottom_right = (org[0] - 48 + font_width + 5, org[1] + baseline + 5)\n\n        cv2.rectangle(\n            panoramic_image, top_left, bottom_right, (255, 255, 255), cv2.FILLED\n        )\n        cv2.putText(\n            panoramic_image,\n            text,\n            (start_col + 10, start_row + 30),\n            cv2.FONT_HERSHEY_SIMPLEX,\n            1,\n            (0, 0, 255),\n            2,\n            cv2.LINE_AA,\n        )\n\n    # combine the query image with the panoramic view\n    panoramic_image = np.hstack([query_image_null, panoramic_image])\n    return panoramic_image\n","from PIL import Image\n\ncombined_image_path = os.path.join(data_dir, \"combined_image.jpg\")\npanoramic_image = create_panoramic_view(query_image, retrieved_images)\ncv2.imwrite(combined_image_path, panoramic_image)\n\ncombined_image = Image.open(combined_image_path)\nshow_combined_image = combined_image.resize((300, 300))\nshow_combined_image.show()\n","import requests\nimport base64\n\nopenai_api_key = \"sk-***\"  # Change to your OpenAI API Key\n\n\ndef generate_ranking_explanation(\n    combined_image_path: str, caption: str, infos: dict = None\n) -> tuple[list[int], str]:\n    with open(combined_image_path, \"rb\") as image_file:\n        base64_image = base64.b64encode(image_file.read()).decode(\"utf-8\")\n\n    information = (\n        \"You are responsible for ranking results for a Composed Image Retrieval. \"\n        \"The user retrieves an image with an 'instruction' indicating their retrieval intent. \"\n        \"For example, if the user queries a red car with the instruction 'change this car to blue,' a similar type of car in blue would be ranked higher in the results. \"\n        \"Now you would receive instruction and query image with blue border. Every item has its red index number in its top left. Do not misunderstand it. \"\n        f\"User instruction: {caption} \\n\\n\"\n    )\n\n    # add additional information for each image\n    if infos:\n        for i, info in enumerate(infos[\"product\"]):\n            information += f\"{i}. {info}\\n\"\n\n    information += (\n        \"Provide a new ranked list of indices from most suitable to least suitable, followed by an explanation for the top 1 most suitable item only. 
\"\n        \"The format of the response has to be 'Ranked list: []' with the indices in brackets as integers, followed by 'Reasons:' plus the explanation why this most fit user's query intent.\"\n    )\n\n    headers = {\n        \"Content-Type\": \"application/json\",\n        \"Authorization\": f\"Bearer {openai_api_key}\",\n    }\n\n    payload = {\n        \"model\": \"gpt-4o\",\n        \"messages\": [\n            {\n                \"role\": \"user\",\n                \"content\": [\n                    {\"type\": \"text\", \"text\": information},\n                    {\n                        \"type\": \"image_url\",\n                        \"image_url\": {\"url\": f\"data:image/jpeg;base64,{base64_image}\"},\n                    },\n                ],\n            }\n        ],\n        \"max_tokens\": 300,\n    }\n\n    response = requests.post(\n        \"https://api.openai.com/v1/chat/completions\", headers=headers, json=payload\n    )\n    result = response.json()[\"choices\"][0][\"message\"][\"content\"]\n\n    # parse the ranked indices from the response\n    start_idx = result.find(\"[\")\n    end_idx = result.find(\"]\")\n    ranked_indices_str = result[start_idx + 1 : end_idx].split(\",\")\n    ranked_indices = [int(index.strip()) for index in ranked_indices_str]\n\n    # extract explanation\n    explanation = result[end_idx + 1 :].strip()\n\n    return ranked_indices, explanation\n","ranked_indices, explanation = generate_ranking_explanation(\n    combined_image_path, query_text\n)\n","print(explanation)\n\nbest_index = ranked_indices[0]\nbest_img = Image.open(retrieved_images[best_index])\nbest_img = best_img.resize((150, 150))\nbest_img.show()\n"],"headingContent":"","anchorList":[{"label":"使用 Milvus 的多模式 RAG","href":"Multimodal-RAG-with-Milvus","type":1,"isActive":false},{"label":"准备工作","href":"Preparation","type":2,"isActive":false},{"label":"加载数据","href":"Load-Data","type":2,"isActive":false},{"label":"使用生成式重排器进行多模态搜索","href":"Multimodal-Search-with-Generative-Reranker","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["$ pip install --upgrade pymilvus openai datasets opencv-python timm einops ftfy peft tqdm\n","$ git clone https://github.com/FlagOpen/FlagEmbedding.git\n$ pip install -e FlagEmbedding\n","$ wget https://github.com/milvus-io/bootcamp/releases/download/data/amazon_reviews_2023_subset.tar.gz\n$ tar -xzf amazon_reviews_2023_subset.tar.gz\n","$ wget https://huggingface.co/BAAI/bge-visualized/resolve/main/Visualized_base_en_v1.5.pth\n","import torch\nfrom FlagEmbedding.visual.modeling import Visualized_BGE\n\n\nclass Encoder:\n    def __init__(self, model_name: str, model_path: str):\n        self.model = Visualized_BGE(model_name_bge=model_name, model_weight=model_path)\n        self.model.eval()\n\n    def encode_query(self, image_path: str, text: str) -> list[float]:\n        with torch.no_grad():\n            query_emb = self.model.encode(image=image_path, text=text)\n        return query_emb.tolist()[0]\n\n    def encode_image(self, image_path: str) -> list[float]:\n        with torch.no_grad():\n            query_emb = self.model.encode(image=image_path)\n        return query_emb.tolist()[0]\n\n\nmodel_name = \"BAAI/bge-base-en-v1.5\"\nmodel_path = \"./Visualized_base_en_v1.5.pth\"  # Change to your own value if using a different model path\nencoder = Encoder(model_name, model_path)\n","import os\nfrom tqdm import tqdm\nfrom glob import glob\n\n\n# Generate embeddings for the image dataset\ndata_dir = (\n    \"./images_folder\"  # Change to your own value if using a different data directory\n)\nimage_list = glob(\n    os.path.join(data_dir, \"images\", \"*.jpg\")\n)  # We will only use images ending with \".jpg\"\nimage_dict = {}\nfor image_path in tqdm(image_list, desc=\"Generating image embeddings: \"):\n    try:\n        image_dict[image_path] = encoder.encode_image(image_path)\n    except Exception as e:\n        print(f\"Failed to generate embedding for {image_path}. 
Skipped.\")\n        continue\nprint(\"Number of encoded images:\", len(image_dict))\n","from pymilvus import MilvusClient\n\n\ndim = len(list(image_dict.values())[0])\ncollection_name = \"multimodal_rag_demo\"\n\n# Connect to Milvus client given URI\nmilvus_client = MilvusClient(uri=\"./milvus_demo.db\")\n\n# Create Milvus Collection\n# By default, vector field name is \"vector\"\nmilvus_client.create_collection(\n    collection_name=collection_name,\n    auto_id=True,\n    dimension=dim,\n    enable_dynamic_field=True,\n)\n\n# Insert data into collection\nmilvus_client.insert(\n    collection_name=collection_name,\n    data=[{\"image_path\": k, \"vector\": v} for k, v in image_dict.items()],\n)\n","query_image = os.path.join(\n    data_dir, \"leopard.jpg\"\n)  # Change to your own query image path\nquery_text = \"phone case with this image theme\"\n\n# Generate query embedding given image and text instructions\nquery_vec = encoder.encode_query(image_path=query_image, text=query_text)\n\nsearch_results = milvus_client.search(\n    collection_name=collection_name,\n    data=[query_vec],\n    output_fields=[\"image_path\"],\n    limit=9,  # Max number of search results to return\n    search_params={\"metric_type\": \"COSINE\", \"params\": {}},  # Search parameters\n)[0]\n\nretrieved_images = [hit.get(\"entity\").get(\"image_path\") for hit in search_results]\nprint(retrieved_images)\n","import numpy as np\nimport cv2\n\nimg_height = 300\nimg_width = 300\nrow_count = 3\n\n\ndef create_panoramic_view(query_image_path: str, retrieved_images: list) -> np.ndarray:\n    \"\"\"\n    creates a 5x5 panoramic view image from a list of images\n\n    args:\n        images: list of images to be combined\n\n    returns:\n        np.ndarray: the panoramic view image\n    \"\"\"\n    panoramic_width = img_width * row_count\n    panoramic_height = img_height * row_count\n    panoramic_image = np.full(\n        (panoramic_height, panoramic_width, 3), 255, dtype=np.uint8\n    )\n\n    # create and resize the query image with a blue border\n    query_image_null = np.full((panoramic_height, img_width, 3), 255, dtype=np.uint8)\n    query_image = Image.open(query_image_path).convert(\"RGB\")\n    query_array = np.array(query_image)[:, :, ::-1]\n    resized_image = cv2.resize(query_array, (img_width, img_height))\n\n    border_size = 10\n    blue = (255, 0, 0)  # blue color in BGR\n    bordered_query_image = cv2.copyMakeBorder(\n        resized_image,\n        border_size,\n        border_size,\n        border_size,\n        border_size,\n        cv2.BORDER_CONSTANT,\n        value=blue,\n    )\n\n    query_image_null[img_height * 2 : img_height * 3, 0:img_width] = cv2.resize(\n        bordered_query_image, (img_width, img_height)\n    )\n\n    # add text \"query\" below the query image\n    text = \"query\"\n    font_scale = 1\n    font_thickness = 2\n    text_org = (10, img_height * 3 + 30)\n    cv2.putText(\n        query_image_null,\n        text,\n        text_org,\n        cv2.FONT_HERSHEY_SIMPLEX,\n        font_scale,\n        blue,\n        font_thickness,\n        cv2.LINE_AA,\n    )\n\n    # combine the rest of the images into the panoramic view\n    retrieved_imgs = [\n        np.array(Image.open(img).convert(\"RGB\"))[:, :, ::-1] for img in retrieved_images\n    ]\n    for i, image in enumerate(retrieved_imgs):\n        image = cv2.resize(image, (img_width - 4, img_height - 4))\n        row = i // row_count\n        col = i % row_count\n        start_row = row * img_height\n        start_col = col * 
img_width\n\n        border_size = 2\n        bordered_image = cv2.copyMakeBorder(\n            image,\n            border_size,\n            border_size,\n            border_size,\n            border_size,\n            cv2.BORDER_CONSTANT,\n            value=(0, 0, 0),\n        )\n        panoramic_image[\n            start_row : start_row + img_height, start_col : start_col + img_width\n        ] = bordered_image\n\n        # add red index numbers to each image\n        text = str(i)\n        org = (start_col + 50, start_row + 30)\n        (font_width, font_height), baseline = cv2.getTextSize(\n            text, cv2.FONT_HERSHEY_SIMPLEX, 1, 2\n        )\n\n        top_left = (org[0] - 48, start_row + 2)\n        bottom_right = (org[0] - 48 + font_width + 5, org[1] + baseline + 5)\n\n        cv2.rectangle(\n            panoramic_image, top_left, bottom_right, (255, 255, 255), cv2.FILLED\n        )\n        cv2.putText(\n            panoramic_image,\n            text,\n            (start_col + 10, start_row + 30),\n            cv2.FONT_HERSHEY_SIMPLEX,\n            1,\n            (0, 0, 255),\n            2,\n            cv2.LINE_AA,\n        )\n\n    # combine the query image with the panoramic view\n    panoramic_image = np.hstack([query_image_null, panoramic_image])\n    return panoramic_image\n","from PIL import Image\n\ncombined_image_path = os.path.join(data_dir, \"combined_image.jpg\")\npanoramic_image = create_panoramic_view(query_image, retrieved_images)\ncv2.imwrite(combined_image_path, panoramic_image)\n\ncombined_image = Image.open(combined_image_path)\nshow_combined_image = combined_image.resize((300, 300))\nshow_combined_image.show()\n","import requests\nimport base64\n\nopenai_api_key = \"sk-***\"  # Change to your OpenAI API Key\n\n\ndef generate_ranking_explanation(\n    combined_image_path: str, caption: str, infos: dict = None\n) -> tuple[list[int], str]:\n    with open(combined_image_path, \"rb\") as image_file:\n        base64_image = base64.b64encode(image_file.read()).decode(\"utf-8\")\n\n    information = (\n        \"You are responsible for ranking results for a Composed Image Retrieval. \"\n        \"The user retrieves an image with an 'instruction' indicating their retrieval intent. \"\n        \"For example, if the user queries a red car with the instruction 'change this car to blue,' a similar type of car in blue would be ranked higher in the results. \"\n        \"Now you would receive instruction and query image with blue border. Every item has its red index number in its top left. Do not misunderstand it. \"\n        f\"User instruction: {caption} \\n\\n\"\n    )\n\n    # add additional information for each image\n    if infos:\n        for i, info in enumerate(infos[\"product\"]):\n            information += f\"{i}. {info}\\n\"\n\n    information += (\n        \"Provide a new ranked list of indices from most suitable to least suitable, followed by an explanation for the top 1 most suitable item only. 
\"\n        \"The format of the response has to be 'Ranked list: []' with the indices in brackets as integers, followed by 'Reasons:' plus the explanation why this most fit user's query intent.\"\n    )\n\n    headers = {\n        \"Content-Type\": \"application/json\",\n        \"Authorization\": f\"Bearer {openai_api_key}\",\n    }\n\n    payload = {\n        \"model\": \"gpt-4o\",\n        \"messages\": [\n            {\n                \"role\": \"user\",\n                \"content\": [\n                    {\"type\": \"text\", \"text\": information},\n                    {\n                        \"type\": \"image_url\",\n                        \"image_url\": {\"url\": f\"data:image/jpeg;base64,{base64_image}\"},\n                    },\n                ],\n            }\n        ],\n        \"max_tokens\": 300,\n    }\n\n    response = requests.post(\n        \"https://api.openai.com/v1/chat/completions\", headers=headers, json=payload\n    )\n    result = response.json()[\"choices\"][0][\"message\"][\"content\"]\n\n    # parse the ranked indices from the response\n    start_idx = result.find(\"[\")\n    end_idx = result.find(\"]\")\n    ranked_indices_str = result[start_idx + 1 : end_idx].split(\",\")\n    ranked_indices = [int(index.strip()) for index in ranked_indices_str]\n\n    # extract explanation\n    explanation = result[end_idx + 1 :].strip()\n\n    return ranked_indices, explanation\n","ranked_indices, explanation = generate_ranking_explanation(\n    combined_image_path, query_text\n)\n","print(explanation)\n\nbest_index = ranked_indices[0]\nbest_img = Image.open(retrieved_images[best_index])\nbest_img = best_img.resize((150, 150))\nbest_img.show()\n"],"headingContent":"Multimodal RAG with Milvus","anchorList":[{"label":"使用 Milvus 的多模式 RAG","href":"Multimodal-RAG-with-Milvus","type":1,"isActive":false},{"label":"准备工作","href":"Preparation","type":2,"isActive":false},{"label":"加载数据","href":"Load-Data","type":2,"isActive":false},{"label":"使用生成式 Reranker 进行多模态搜索","href":"Multimodal-Search-with-Generative-Reranker","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/zh/tutorials/multimodal_rag_with_milvus.md b/localization/v2.4.x/site/zh/tutorials/multimodal_rag_with_milvus.md
    index 351c3b44a..7f63cc6c4 100644
    --- a/localization/v2.4.x/site/zh/tutorials/multimodal_rag_with_milvus.md
    +++ b/localization/v2.4.x/site/zh/tutorials/multimodal_rag_with_milvus.md
    @@ -18,10 +18,11 @@ title: 使用 Milvus 的多模式 RAG
               d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"
             >
           
-    [Open In Colab badge]
+    [Open In Colab badge]
+    [GitHub Repository badge]

-This tutorial showcases multimodal RAG powered by Milvus, the Visualized BGE model, and GPT-4o. With this system, users can upload an image and edit text instructions, which are processed by BGE's composed retrieval model to search for candidate images. GPT-4o then acts as a reranker, selecting the most suitable image and providing the rationale behind the choice. This powerful combination leverages Milvus for efficient retrieval, the BGE model for precise image processing and matching, and GPT-4o for advanced reranking, delivering a seamless and intuitive image search experience.
+This tutorial showcases multimodal RAG powered by Milvus, the Visualized BGE model, and GPT-4o. With this system, users can upload an image and edit text instructions, which are processed by BGE's composed retrieval model to search for candidate images. GPT-4o then acts as a Reranker, selecting the most suitable image and providing the rationale behind the choice. This powerful combination delivers a seamless and intuitive image search experience, leveraging Milvus for efficient retrieval, the BGE model for precise image processing and matching, and GPT-4o for advanced Reranking.
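Sketched end to end under the names defined in this tutorial's code (encoder, milvus_client, collection_name, generate_ranking_explanation; the query image path and instruction are the tutorial's own examples), the pipeline looks roughly like this:

    # 1. Compose a multimodal query embedding from an image plus a text instruction.
    query_vec = encoder.encode_query(image_path="./images_folder/leopard.jpg",
                                     text="phone case with this image theme")

    # 2. Retrieve candidate images from Milvus by vector similarity.
    hits = milvus_client.search(collection_name=collection_name, data=[query_vec],
                                output_fields=["image_path"], limit=9,
                                search_params={"metric_type": "COSINE", "params": {}})[0]

    # 3. Have GPT-4o rerank the candidates and explain its top pick.
    # ranked_indices, explanation = generate_ranking_explanation(combined_image_path, query_text)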

Preparation

Load the Embedding Model
-We will use the Visualized BGE model "bge-visualized-base-en-v1.5" to generate embeddings for images and text.
+We will use the 可视化 BGE model "bge-visualized-base-en-v1.5" to generate embeddings for images and text.

1. Download the weights

    $ wget https://huggingface.co/BAAI/bge-visualized/resolve/main/Visualized_base_en_v1.5.pth
     
@@ -124,7 +125,7 @@ image_dict = {}
 Number of encoded images: 900
-Insert into Milvus
-Insert the images with their corresponding paths and embedding information into the Milvus image library.
+Insert into Milvus
+Insert the images with their corresponding paths and embedding information into the Milvus Collections.

As for the arguments of MilvusClient:

@@ -161,7 +162,7 @@ milvus_client.insert(
 'ids': [451537887696781312, 451537887696781313, ..., 451537887696782211], 'cost': 0}
-Multimodal Search with Generative Reranker
-In this section, we will first search for relevant images via a multimodal query, then use an LLM service to rerank the results and find the best one, along with an explanation.
+Multimodal Search with Generative Reranker
+In this section, we will first search for relevant images via a multimodal query, then use an LLM service to Rerank the results and find the best one, along with an explanation.

Now we are ready to perform the advanced image search using query data composed of an image and text instructions.

    query_image = os.path.join(
         data_dir, "leopard.jpg"
    @@ -331,7 +332,7 @@ show_combined_image.show()
       
Create a panoramic view

-2. Rerank and explain
+2. Rerankers and explain

We will send the combined image to the multimodal LLM service, along with appropriate prompts, to rank the retrieved results and provide explanations. To enable GPT-4o as the LLM, you need to prepare your OpenAI API key.

    import requests
     import base64
    diff --git a/localization/v2.4.x/site/zh/tutorials/tutorials-overview.json b/localization/v2.4.x/site/zh/tutorials/tutorials-overview.json
    index 3ee03e61b..5411b6aea 100644
    --- a/localization/v2.4.x/site/zh/tutorials/tutorials-overview.json
    +++ b/localization/v2.4.x/site/zh/tutorials/tutorials-overview.json
    @@ -1 +1 @@
    -{"codeList":[],"headingContent":"","anchorList":[{"label":"教程概览","href":"Tutorials-Overview","type":1,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":[],"headingContent":"Tutorials Overview","anchorList":[{"label":"教程概览","href":"Tutorials-Overview","type":1,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/zh/tutorials/tutorials-overview.md b/localization/v2.4.x/site/zh/tutorials/tutorials-overview.md
    index fec3f9965..578f5c9f8 100644
    --- a/localization/v2.4.x/site/zh/tutorials/tutorials-overview.md
    +++ b/localization/v2.4.x/site/zh/tutorials/tutorials-overview.md
    @@ -1,7 +1,7 @@
     ---
     id: tutorials-overview.md
summary: This page provides you with a list of tutorials on interacting with Milvus.
    -title: 教程概述
    +title: 教程概览
     ---
     

Tutorials Overview

-Clustering compaction is designed to improve search performance and reduce costs in large collections. This guide will help you understand clustering compaction and how this feature improves search performance.
+Clustering compaction is designed to improve search performance and reduce costs for large Collections. This guide will help you understand clustering compaction and how this feature improves search performance.

Overview

-Milvus stores incoming entities in segments within a collection and seals a segment when it is full. If this happens, a new segment is created to accommodate additional entities. As a result, entities are arbitrarily distributed across segments. This distribution requires Milvus to search multiple segments to find the nearest neighbors to a given query vector.
+Milvus stores incoming entities in segments within Collections and seals a segment when it is full. If this happens, a new segment is created to accommodate additional entities. As a result, entities are arbitrarily distributed across segments. This distribution requires Milvus to search multiple segments to find the nearest neighbors to a given query vector.

Without Clustering Compaction

-If Milvus can allocate entities to network segments based on the values in a particular field, the search scope can be restricted within one network segment, thereby improving search performance.
+If Milvus can distribute entities across segments based on the values in a particular field, the search scope can be restricted within one segment, thereby improving search performance.

-Clustering Compaction is a feature in Milvus that redistributes entities across segments in a collection based on values in a scalar field. To enable this feature, you first need to select a scalar field as the clustering key. This allows Milvus to redistribute entities into a segment when their clustering key values fall within a specific range. When you trigger clustering compaction, Milvus generates/updates a global index called PartitionStats, which records the mapping relationship between segments and clustering key values.
+Clustering Compaction is a feature in Milvus that redistributes entities across segments in a Collection based on values in a scalar field. To enable this feature, you first need to select a scalar field as the clustering key. This allows Milvus to redistribute entities into a segment when their clustering key values fall within a specific range. When you trigger clustering compaction, Milvus generates/updates a global index called PartitionStats, which records the mapping relationship between segments and clustering key values.
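As a minimal sketch of using the feature (reusing the coll1 object and the state/wait calls that appear later on this page; compact(is_clustering=True) as the trigger is an assumption based on the same API family):

    coll1.compact(is_clustering=True)                        # trigger clustering compaction
    coll1.get_compaction_state(is_clustering=True)           # poll its state
    coll1.wait_for_compaction_completed(is_clustering=True)  # block until it finishes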

With Clustering Compaction

@@ -77,16 +76,15 @@ summary: Clustering compaction is designed to improve search performance and reduce costs in large collections.
   timeout: 7200
 queryNode:
-  enableSegmentPrune: true
+  enableSegmentPrune: true
 datanode:
-clusteringCompaction:
-memoryBufferRatio: 0.1
-workPoolSize: 8
+  clusteringCompaction:
+    memoryBufferRatio: 0.1
+    workPoolSize: 8
 common:
-usePartitionKeyAsClusteringKey: true
+  usePartitionKeyAsClusteringKey: true

• dataCoord.compaction.clustering

@@ -94,12 +92,12 @@ usePartitionKeyAsClusteringKey: true

 Configuration item | Description | Default value
-enable | Specifies whether to enable clustering compaction. Set this to true if you need to enable this feature for every collection that has a clustering key. | false
-autoEnable | Specifies whether to enable automatically triggered compaction. Setting this to true indicates that Milvus compacts collections that have a clustering key at the specified intervals. | false
+enable | Specifies whether to enable clustering compaction. Set this to true if you need to enable this feature for every Collection that has a clustering key. | false
+autoEnable | Specifies whether to enable automatically triggered compaction. Setting this to true indicates that Milvus compacts Collections that have a clustering key at the specified intervals. | false
 triggerInterval | Specifies the interval in milliseconds at which Milvus starts clustering compaction. This parameter is valid only when autoEnable is set to true. | -
-minInterval | Specifies the minimum interval in milliseconds. This parameter is valid only when autoEnable is set to true. Setting it to an integer greater than triggerInterval helps avoid repeated compactions within a short period. | -
-maxInterval | Specifies the maximum interval in milliseconds. This parameter is valid only when autoEnable is set to true. Once Milvus detects that a collection's clustering compaction has lasted longer than this value, it forces a clustering compaction. | -
-newDataSizeThreshold | Specifies the upper threshold to trigger clustering compaction. This parameter is valid only when autoEnable is set to true. Once Milvus detects that the data volume in a dataset exceeds this value, it initiates a clustering compaction process. | -
+minInterval | Specifies the minimum interval in seconds. This parameter is valid only when autoEnable is set to true. Setting it to an integer greater than triggerInterval helps avoid repeated compactions within a short period. | -
+maxInterval | Specifies the maximum interval in seconds. This parameter is valid only when autoEnable is set to true. Once Milvus detects that a Collection's clustering compaction has lasted longer than this value, it forces a clustering compaction. | -
+newDataSizeThreshold | Specifies the upper threshold to trigger clustering compaction. This parameter is valid only when autoEnable is set to true. Once Milvus detects that the data volume in a Collection exceeds this value, it initiates a clustering compaction process. | -
 timeout | Specifies the timeout duration for clustering compaction. A clustering compaction fails if its execution time exceeds this value. | -
@@ -131,28 +129,13 @@ usePartitionKeyAsClusteringKey: true

 Configuration item | Description | Default value
-usePartitionKeyAsClusteringKey | Specifies whether to use the partition key in a collection as the clustering key. Setting it to true indicates that the partition key is used as the clustering key. You can override this setting in a collection by explicitly setting a clustering key. | false
+usePartitionKeyAsClusteringKey | Specifies whether to use the partition key in a Collection as the clustering key. Setting it to true indicates that the partition key is used as the clustering key. You can override this setting in a Collection by explicitly setting a clustering key. | false
-To apply the above changes to your Milvus cluster, follow the steps in Configure Milvus with Helm and Configure Milvus with Milvus Operators.
-Cluster Configuration
-To conduct clustering compaction in a specific collection, select a scalar field from the collection as the clustering key.
+To apply the above changes to your Milvus cluster, follow the steps in Configure Milvus with Helm and Configure Milvus with Milvus Operator.
+Collection Configuration
+To conduct clustering compaction in a specific Collection, select a scalar field from the Collection as the clustering key.

    default_fields = [
         FieldSchema(name="id", dtype=DataType.INT64, is_primary=True),
         FieldSchema(name="key", dtype=DataType.INT64, is_clustering_key=True),
    @@ -161,13 +144,12 @@ usePartitionKeyAsClusteringKey: true
     ]
     
     default_schema = CollectionSchema(
    -fields=default_fields,
    -description="test clustering-key collection"
    +    fields=default_fields, 
    +    description="test clustering-key collection"
     )
     
     coll1 = Collection(name="clustering_test", schema=default_schema)
     
    -

Scalar fields of the following data types can be used as the clustering key: Int8, Int16, Int32, Int64, Float, Double, and VarChar.
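For illustration, a hypothetical variant of the schema above with a VarChar clustering key (the tenant field name and the dimension are made up for this sketch):

    fields = [
        FieldSchema(name="id", dtype=DataType.INT64, is_primary=True),
        # A string field, e.g. a tenant name, can also serve as the clustering key.
        FieldSchema(name="tenant", dtype=DataType.VARCHAR, max_length=64, is_clustering_key=True),
        FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=128),
    ]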

@@ -192,7 +174,7 @@ coll1.get_compaction_state(is_clustering=True)
 coll1.wait_for_compaction_completed(is_clustering=True)

Benchmark Test

The volume of data and the query patterns jointly determine the performance improvement that clustering compaction can bring. An internal benchmark test shows that clustering compaction yields up to a 25-fold improvement in queries per second (QPS).

-The benchmark targets a collection containing entities from a 20-million, 768-dimensional LAION dataset, with the key field designated as the clustering key. After clustering compaction is triggered in the collection, concurrent searches are sent until CPU usage reaches a high watermark.
+The benchmark is conducted on a Collection containing entities from a 20-million, 768-dimensional LAION dataset, with the key field designated as the clustering key. After clustering compaction is triggered in the Collection, concurrent searches are sent until CPU usage reaches a high watermark.

@@ -262,7 +244,7 @@ coll1.wait_for_compaction_completed(is_clustering=True)

-As the search range narrows in the search filter, the prune ratio increases. This means more entities are skipped during the search. Comparing the statistics in the first and last rows, you can see that searches without clustering compaction need to scan the entire collection. On the other hand, searches with clustering compaction using a specific key achieve up to a 25-fold improvement.
+As the search range narrows in the search filter, the prune ratio increases. This means more entities are skipped during the search. Comparing the statistics in the first and last rows, you can see that searches without clustering compaction need to scan the entire Collection. On the other hand, searches with clustering compaction using a specific key can achieve up to a 25-fold improvement.

Best Practices

Here are some tips for using clustering compaction effectively:

-• Enable this feature for datasets with a large data volume. The larger the data volume in a dataset, the greater the search performance gain. It is a good choice to enable this feature for collections with more than 1 million entities.
-• Choose a proper clustering key: you can use scalar fields that commonly serve as filtering conditions as the clustering key. For a collection that contains data from multiple tenants, you can use the field that distinguishes one tenant from another as the clustering key.
-• Use the partition key as the clustering key. If you want to enable this feature for all collections in your Milvus instance, or if you still face performance issues in a large collection that uses a partition key, you can set common.usePartitionKeyAsClusteringKey to true. This way, when you choose a scalar field in a collection as the partition key, you will have a clustering key and a partition key.
+• Enable this feature for Collections with a large data volume. The larger the data volume in a Collection, the greater the search performance gain. It is a good choice to enable this feature for collections with more than 1 million entities.
+• Choose a proper clustering key: you can use scalar fields that commonly serve as filtering conditions as the clustering key. For Collections that hold data from multiple tenants, you can utilize the field that distinguishes one tenant from another as the clustering key.
+• Use the partition key as the clustering key. If you want to enable this feature for all Collections in your Milvus instance, or if you still face performance issues in a large Collection that uses a partition key, you can set common.usePartitionKeyAsClusteringKey to true. By doing so, when you choose a scalar field in a Collection as the partition key, you will have a clustering key and a partition key (see the sketch below).

Note that this setting does not prevent you from choosing another scalar field as the clustering key. An explicitly designated clustering key always takes precedence.
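A minimal schema sketch for that last tip, assuming common.usePartitionKeyAsClusteringKey is set to true in milvus.yaml (the field names and the dimension are hypothetical):

    from pymilvus import CollectionSchema, FieldSchema, DataType

    fields = [
        FieldSchema(name="id", dtype=DataType.INT64, is_primary=True),
        # The partition key; with the config above it also acts as the clustering key.
        FieldSchema(name="tenant_id", dtype=DataType.INT64, is_partition_key=True),
        FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=768),
    ]
    schema = CollectionSchema(fields=fields, description="partition key doubles as clustering key")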

    diff --git a/localization/v2.4.x/site/zh/userGuide/insert-update-delete.json b/localization/v2.4.x/site/zh/userGuide/insert-update-delete.json index 392751259..8de2e9e07 100644 --- a/localization/v2.4.x/site/zh/userGuide/insert-update-delete.json +++ b/localization/v2.4.x/site/zh/userGuide/insert-update-delete.json @@ -1 +1 @@ -{"codeList":["from pymilvus import MilvusClient\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n metric_type=\"IP\"\n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .metricType(\"IP\")\n .build();\n\nclient.createCollection(quickSetupReq);\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n metric_type: \"IP\"\n}); \n","# 3. Insert some data\ndata=[\n {\"id\": 0, \"vector\": [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592], \"color\": \"pink_8682\"},\n {\"id\": 1, \"vector\": [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104], \"color\": \"red_7025\"},\n {\"id\": 2, \"vector\": [0.43742130801983836, -0.5597502546264526, 0.6457887650909682, 0.7894058910881185, 0.20785793220625592], \"color\": \"orange_6781\"},\n {\"id\": 3, \"vector\": [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345], \"color\": \"pink_9298\"},\n {\"id\": 4, \"vector\": [0.4452349528804562, -0.8757026943054742, 0.8220779437047674, 0.46406290649483184, 0.30337481143159106], \"color\": \"red_4794\"},\n {\"id\": 5, \"vector\": [0.985825131989184, -0.8144651566660419, 0.6299267002202009, 0.1206906911183383, -0.1446277761879955], \"color\": \"yellow_4222\"},\n {\"id\": 6, \"vector\": [0.8371977790571115, -0.015764369584852833, -0.31062937026679327, -0.562666951622192, -0.8984947637863987], \"color\": \"red_9392\"},\n {\"id\": 7, \"vector\": [-0.33445148015177995, -0.2567135004164067, 0.8987539745369246, 0.9402995886420709, 0.5378064918413052], \"color\": \"grey_8510\"},\n {\"id\": 8, \"vector\": [0.39524717779832685, 0.4000257286739164, -0.5890507376891594, -0.8650502298996872, -0.6140360785406336], \"color\": \"white_9381\"},\n {\"id\": 9, \"vector\": [0.5718280481994695, 0.24070317428066512, -0.3737913482606834, -0.06726932177492717, -0.6980531615588608], \"color\": \"purple_4976\"}\n]\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 10,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9\n# ]\n# }\n","import java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport 
io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp;\n\n// 3. Insert some data\nList data = Arrays.asList(\n new JSONObject(Map.of(\"id\", 0L, \"vector\", Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f), \"color\", \"pink_8682\")),\n new JSONObject(Map.of(\"id\", 1L, \"vector\", Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f), \"color\", \"red_7025\")),\n new JSONObject(Map.of(\"id\", 2L, \"vector\", Arrays.asList(0.43742130801983836f, -0.5597502546264526f, 0.6457887650909682f, 0.7894058910881185f, 0.20785793220625592f), \"color\", \"orange_6781\")),\n new JSONObject(Map.of(\"id\", 3L, \"vector\", Arrays.asList(0.3172005263489739f, 0.9719044792798428f, -0.36981146090600725f, -0.4860894583077995f, 0.95791889146345f), \"color\", \"pink_9298\")),\n new JSONObject(Map.of(\"id\", 4L, \"vector\", Arrays.asList(0.4452349528804562f, -0.8757026943054742f, 0.8220779437047674f, 0.46406290649483184f, 0.30337481143159106f), \"color\", \"red_4794\")),\n new JSONObject(Map.of(\"id\", 5L, \"vector\", Arrays.asList(0.985825131989184f, -0.8144651566660419f, 0.6299267002202009f, 0.1206906911183383f, -0.1446277761879955f), \"color\", \"yellow_4222\")),\n new JSONObject(Map.of(\"id\", 6L, \"vector\", Arrays.asList(0.8371977790571115f, -0.015764369584852833f, -0.31062937026679327f, -0.562666951622192f, -0.8984947637863987f), \"color\", \"red_9392\")),\n new JSONObject(Map.of(\"id\", 7L, \"vector\", Arrays.asList(-0.33445148015177995f, -0.2567135004164067f, 0.8987539745369246f, 0.9402995886420709f, 0.5378064918413052f), \"color\", \"grey_8510\")),\n new JSONObject(Map.of(\"id\", 8L, \"vector\", Arrays.asList(0.39524717779832685f, 0.4000257286739164f, -0.5890507376891594f, -0.8650502298996872f, -0.6140360785406336f), \"color\", \"white_9381\")),\n new JSONObject(Map.of(\"id\", 9L, \"vector\", Arrays.asList(0.5718280481994695f, 0.24070317428066512f, -0.3737913482606834f, -0.06726932177492717f, -0.6980531615588608f), \"color\", \"purple_4976\"))\n);\n\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 10}\n","// 3. 
Insert some data\n\nvar data = [\n {id: 0, vector: [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592], color: \"pink_8682\"},\n {id: 1, vector: [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104], color: \"red_7025\"},\n {id: 2, vector: [0.43742130801983836, -0.5597502546264526, 0.6457887650909682, 0.7894058910881185, 0.20785793220625592], color: \"orange_6781\"},\n {id: 3, vector: [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345], color: \"pink_9298\"},\n {id: 4, vector: [0.4452349528804562, -0.8757026943054742, 0.8220779437047674, 0.46406290649483184, 0.30337481143159106], color: \"red_4794\"},\n {id: 5, vector: [0.985825131989184, -0.8144651566660419, 0.6299267002202009, 0.1206906911183383, -0.1446277761879955], color: \"yellow_4222\"},\n {id: 6, vector: [0.8371977790571115, -0.015764369584852833, -0.31062937026679327, -0.562666951622192, -0.8984947637863987], color: \"red_9392\"},\n {id: 7, vector: [-0.33445148015177995, -0.2567135004164067, 0.8987539745369246, 0.9402995886420709, 0.5378064918413052], color: \"grey_8510\"},\n {id: 8, vector: [0.39524717779832685, 0.4000257286739164, -0.5890507376891594, -0.8650502298996872, -0.6140360785406336], color: \"white_9381\"},\n {id: 9, vector: [0.5718280481994695, 0.24070317428066512, -0.3737913482606834, -0.06726932177492717, -0.6980531615588608], color: \"purple_4976\"} \n]\n\nvar res = await client.insert({\n collection_name: \"quick_setup\",\n data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 10\n// \n","# 4. Insert some more data into a specific partition\ndata=[\n {\"id\": 10, \"vector\": [-0.5570353903748935, -0.8997887893201304, -0.7123782431855732, -0.6298990746450119, 0.6699215060604258], \"color\": \"red_1202\"},\n {\"id\": 11, \"vector\": [0.6319019033373907, 0.6821488267878275, 0.8552303045704168, 0.36929791364943054, -0.14152860714878068], \"color\": \"blue_4150\"},\n {\"id\": 12, \"vector\": [0.9483947484855766, -0.32294203351925344, 0.9759290319978025, 0.8262982148666174, -0.8351194181285713], \"color\": \"orange_4590\"},\n {\"id\": 13, \"vector\": [-0.5449109892498731, 0.043511240563786524, -0.25105249484790804, -0.012030655265886425, -0.0010987671273892108], \"color\": \"pink_9619\"},\n {\"id\": 14, \"vector\": [0.6603339372951424, -0.10866551787442225, -0.9435597754324891, 0.8230244263466688, -0.7986720938400362], \"color\": \"orange_4863\"},\n {\"id\": 15, \"vector\": [-0.8825129181091456, -0.9204557711667729, -0.935350065513425, 0.5484069690287079, 0.24448151140671204], \"color\": \"orange_7984\"},\n {\"id\": 16, \"vector\": [0.6285586391568163, 0.5389064528263487, -0.3163366239905099, 0.22036279378888013, 0.15077052220816167], \"color\": \"blue_9010\"},\n {\"id\": 17, \"vector\": [-0.20151825016059233, -0.905239387635804, 0.6749305353372479, -0.7324272081377843, -0.33007998971889263], \"color\": \"blue_4521\"},\n {\"id\": 18, \"vector\": [0.2432286610792349, 0.01785636564206139, -0.651356982731391, -0.35848148851027895, -0.7387383128324057], \"color\": \"orange_2529\"},\n {\"id\": 19, \"vector\": [0.055512329053363674, 0.7100266349039421, 0.4956956543575197, 0.24541352586717702, 0.4209030729923515], \"color\": \"red_9437\"}\n]\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data,\n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# 
Output\n#\n# {\n# \"insert_count\": 10,\n# \"ids\": [\n# 10,\n# 11,\n# 12,\n# 13,\n# 14,\n# 15,\n# 16,\n# 17,\n# 18,\n# 19\n# ]\n# }\n","// 4. Insert some more data into a specific partition\ndata = Arrays.asList(\n new JSONObject(Map.of(\"id\", 10L, \"vector\", Arrays.asList(-0.5570353903748935f, -0.8997887893201304f, -0.7123782431855732f, -0.6298990746450119f, 0.6699215060604258f), \"color\", \"red_1202\")),\n new JSONObject(Map.of(\"id\", 11L, \"vector\", Arrays.asList(0.6319019033373907f, 0.6821488267878275f, 0.8552303045704168f, 0.36929791364943054f, -0.14152860714878068f), \"color\", \"blue_4150\")),\n new JSONObject(Map.of(\"id\", 12L, \"vector\", Arrays.asList(0.9483947484855766f, -0.32294203351925344f, 0.9759290319978025f, 0.8262982148666174f, -0.8351194181285713f), \"color\", \"orange_4590\")),\n new JSONObject(Map.of(\"id\", 13L, \"vector\", Arrays.asList(-0.5449109892498731f, 0.043511240563786524f, -0.25105249484790804f, -0.012030655265886425f, -0.0010987671273892108f), \"color\", \"pink_9619\")),\n new JSONObject(Map.of(\"id\", 14L, \"vector\", Arrays.asList(0.6603339372951424f, -0.10866551787442225f, -0.9435597754324891f, 0.8230244263466688f, -0.7986720938400362f), \"color\", \"orange_4863\")),\n new JSONObject(Map.of(\"id\", 15L, \"vector\", Arrays.asList(-0.8825129181091456f, -0.9204557711667729f, -0.935350065513425f, 0.5484069690287079f, 0.24448151140671204f), \"color\", \"orange_7984\")),\n new JSONObject(Map.of(\"id\", 16L, \"vector\", Arrays.asList(0.6285586391568163f, 0.5389064528263487f, -0.3163366239905099f, 0.22036279378888013f, 0.15077052220816167f), \"color\", \"blue_9010\")),\n new JSONObject(Map.of(\"id\", 17L, \"vector\", Arrays.asList(-0.20151825016059233f, -0.905239387635804f, 0.6749305353372479f, -0.7324272081377843f, -0.33007998971889263f), \"color\", \"blue_4521\")),\n new JSONObject(Map.of(\"id\", 18L, \"vector\", Arrays.asList(0.2432286610792349f, 0.01785636564206139f, -0.651356982731391f, -0.35848148851027895f, -0.7387383128324057f), \"color\", \"orange_2529\")),\n new JSONObject(Map.of(\"id\", 19L, \"vector\", Arrays.asList(0.055512329053363674f, 0.7100266349039421f, 0.4956956543575197f, 0.24541352586717702f, 0.4209030729923515f), \"color\", \"red_9437\"))\n);\n\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"partitionA\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 10}\n","// 4. 
Insert some more data into a specific partition\ndata = [\n {id: 10, vector: [-0.5570353903748935, -0.8997887893201304, -0.7123782431855732, -0.6298990746450119, 0.6699215060604258], color: \"red_1202\"},\n {id: 11, vector: [0.6319019033373907, 0.6821488267878275, 0.8552303045704168, 0.36929791364943054, -0.14152860714878068], color: \"blue_4150\"},\n {id: 12, vector: [0.9483947484855766, -0.32294203351925344, 0.9759290319978025, 0.8262982148666174, -0.8351194181285713], color: \"orange_4590\"},\n {id: 13, vector: [-0.5449109892498731, 0.043511240563786524, -0.25105249484790804, -0.012030655265886425, -0.0010987671273892108], color: \"pink_9619\"},\n {id: 14, vector: [0.6603339372951424, -0.10866551787442225, -0.9435597754324891, 0.8230244263466688, -0.7986720938400362], color: \"orange_4863\"},\n {id: 15, vector: [-0.8825129181091456, -0.9204557711667729, -0.935350065513425, 0.5484069690287079, 0.24448151140671204], color: \"orange_7984\"},\n {id: 16, vector: [0.6285586391568163, 0.5389064528263487, -0.3163366239905099, 0.22036279378888013, 0.15077052220816167], color: \"blue_9010\"},\n {id: 17, vector: [-0.20151825016059233, -0.905239387635804, 0.6749305353372479, -0.7324272081377843, -0.33007998971889263], color: \"blue_4521\"},\n {id: 18, vector: [0.2432286610792349, 0.01785636564206139, -0.651356982731391, -0.35848148851027895, -0.7387383128324057], color: \"orange_2529\"},\n {id: 19, vector: [0.055512329053363674, 0.7100266349039421, 0.4956956543575197, 0.24541352586717702, 0.4209030729923515], color: \"red_9437\"}\n]\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: data,\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 10\n// \n","# 5. 
Upsert some data\ndata=[\n {\"id\": 0, \"vector\": [-0.619954382375778, 0.4479436794798608, -0.17493894838751745, -0.4248030059917294, -0.8648452746018911], \"color\": \"black_9898\"},\n {\"id\": 1, \"vector\": [0.4762662251462588, -0.6942502138717026, -0.4490002642657902, -0.628696575798281, 0.9660395877041965], \"color\": \"red_7319\"},\n {\"id\": 2, \"vector\": [-0.8864122635045097, 0.9260170474445351, 0.801326976181461, 0.6383943392381306, 0.7563037341572827], \"color\": \"white_6465\"},\n {\"id\": 3, \"vector\": [0.14594326235891586, -0.3775407299900644, -0.3765479013078812, 0.20612075380355122, 0.4902678929632145], \"color\": \"orange_7580\"},\n {\"id\": 4, \"vector\": [0.4548498669607359, -0.887610217681605, 0.5655081329910452, 0.19220509387904117, 0.016513983433433577], \"color\": \"red_3314\"},\n {\"id\": 5, \"vector\": [0.11755001847051827, -0.7295149788999611, 0.2608115847524266, -0.1719167007897875, 0.7417611743754855], \"color\": \"black_9955\"},\n {\"id\": 6, \"vector\": [0.9363032158314308, 0.030699901477745373, 0.8365910312319647, 0.7823840208444011, 0.2625222076909237], \"color\": \"yellow_2461\"},\n {\"id\": 7, \"vector\": [0.0754823906014721, -0.6390658668265143, 0.5610517334334937, -0.8986261118798251, 0.9372056764266794], \"color\": \"white_5015\"},\n {\"id\": 8, \"vector\": [-0.3038434006935904, 0.1279149203380523, 0.503958664270957, -0.2622661156746988, 0.7407627307791929], \"color\": \"purple_6414\"},\n {\"id\": 9, \"vector\": [-0.7125086947677588, -0.8050968321012257, -0.32608864121785786, 0.3255654958645424, 0.26227968923834233], \"color\": \"brown_7231\"}\n]\n\nres = client.upsert(\n collection_name='quick_setup',\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"upsert_count\": 10\n# }\n","// 5. Upsert some data\ndata = Arrays.asList(\n new JSONObject(Map.of(\"id\", 0L, \"vector\", Arrays.asList(-0.619954382375778f, 0.4479436794798608f, -0.17493894838751745f, -0.4248030059917294f, -0.8648452746018911f), \"color\", \"black_9898\")),\n new JSONObject(Map.of(\"id\", 1L, \"vector\", Arrays.asList(0.4762662251462588f, -0.6942502138717026f, -0.4490002642657902f, -0.628696575798281f, 0.9660395877041965f), \"color\", \"red_7319\")),\n new JSONObject(Map.of(\"id\", 2L, \"vector\", Arrays.asList(-0.8864122635045097f, 0.9260170474445351f, 0.801326976181461f, 0.6383943392381306f, 0.7563037341572827f), \"color\", \"white_6465\")),\n new JSONObject(Map.of(\"id\", 3L, \"vector\", Arrays.asList(0.14594326235891586f, -0.3775407299900644f, -0.3765479013078812f, 0.20612075380355122f, 0.4902678929632145f), \"color\", \"orange_7580\")),\n new JSONObject(Map.of(\"id\", 4L, \"vector\", Arrays.asList(0.4548498669607359f, -0.887610217681605f, 0.5655081329910452f, 0.19220509387904117f, 0.016513983433433577f), \"color\", \"red_3314\")),\n new JSONObject(Map.of(\"id\", 5L, \"vector\", Arrays.asList(0.11755001847051827f, -0.7295149788999611f, 0.2608115847524266f, -0.1719167007897875f, 0.7417611743754855f), \"color\", \"black_9955\")),\n new JSONObject(Map.of(\"id\", 6L, \"vector\", Arrays.asList(0.9363032158314308f, 0.030699901477745373f, 0.8365910312319647f, 0.7823840208444011f, 0.2625222076909237f), \"color\", \"yellow_2461\")),\n new JSONObject(Map.of(\"id\", 7L, \"vector\", Arrays.asList(0.0754823906014721f, -0.6390658668265143f, 0.5610517334334937f, -0.8986261118798251f, 0.9372056764266794f), \"color\", \"white_5015\")),\n new JSONObject(Map.of(\"id\", 8L, \"vector\", Arrays.asList(-0.3038434006935904f, 0.1279149203380523f, 0.503958664270957f, -0.2622661156746988f, 
0.7407627307791929f), \"color\", \"purple_6414\")),\n new JSONObject(Map.of(\"id\", 9L, \"vector\", Arrays.asList(-0.7125086947677588f, -0.8050968321012257f, -0.32608864121785786f, 0.3255654958645424f, 0.26227968923834233f), \"color\", \"brown_7231\"))\n);\n\nUpsertReq upsertReq = UpsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nUpsertResp upsertResp = client.upsert(upsertReq);\n\nSystem.out.println(JSONObject.toJSON(upsertResp));\n\n// Output:\n// {\"upsertCnt\": 10}\n","// 5. Upsert some data\ndata = [\n {id: 0, vector: [-0.619954382375778, 0.4479436794798608, -0.17493894838751745, -0.4248030059917294, -0.8648452746018911], color: \"black_9898\"},\n {id: 1, vector: [0.4762662251462588, -0.6942502138717026, -0.4490002642657902, -0.628696575798281, 0.9660395877041965], color: \"red_7319\"},\n {id: 2, vector: [-0.8864122635045097, 0.9260170474445351, 0.801326976181461, 0.6383943392381306, 0.7563037341572827], color: \"white_6465\"},\n {id: 3, vector: [0.14594326235891586, -0.3775407299900644, -0.3765479013078812, 0.20612075380355122, 0.4902678929632145], color: \"orange_7580\"},\n {id: 4, vector: [0.4548498669607359, -0.887610217681605, 0.5655081329910452, 0.19220509387904117, 0.016513983433433577], color: \"red_3314\"},\n {id: 5, vector: [0.11755001847051827, -0.7295149788999611, 0.2608115847524266, -0.1719167007897875, 0.7417611743754855], color: \"black_9955\"},\n {id: 6, vector: [0.9363032158314308, 0.030699901477745373, 0.8365910312319647, 0.7823840208444011, 0.2625222076909237], color: \"yellow_2461\"},\n {id: 7, vector: [0.0754823906014721, -0.6390658668265143, 0.5610517334334937, -0.8986261118798251, 0.9372056764266794], color: \"white_5015\"},\n {id: 8, vector: [-0.3038434006935904, 0.1279149203380523, 0.503958664270957, -0.2622661156746988, 0.7407627307791929], color: \"purple_6414\"},\n {id: 9, vector: [-0.7125086947677588, -0.8050968321012257, -0.32608864121785786, 0.3255654958645424, 0.26227968923834233], color: \"brown_7231\"}\n]\n\nres = await client.upsert({\n collection_name: \"quick_setup\",\n data: data,\n})\n\nconsole.log(res.upsert_cnt)\n\n// Output\n// \n// 10\n// \n","# 6. 
Upsert data in partitions\ndata=[\n {\"id\": 10, \"vector\": [0.06998888224297328, 0.8582816610326578, -0.9657938677934292, 0.6527905683627726, -0.8668460657158576], \"color\": \"black_3651\"},\n {\"id\": 11, \"vector\": [0.6060703043917468, -0.3765080534566074, -0.7710758854987239, 0.36993888322346136, 0.5507513364206531], \"color\": \"grey_2049\"},\n {\"id\": 12, \"vector\": [-0.9041813104515337, -0.9610546012461163, 0.20033003106083358, 0.11842506351635174, 0.8327356724591011], \"color\": \"blue_6168\"},\n {\"id\": 13, \"vector\": [0.3202914977909075, -0.7279137773695252, -0.04747830871620273, 0.8266053056909548, 0.8277957187455489], \"color\": \"blue_1672\"},\n {\"id\": 14, \"vector\": [0.2975811497890859, 0.2946936202691086, 0.5399463833894609, 0.8385334966677529, -0.4450543984655133], \"color\": \"pink_1601\"},\n {\"id\": 15, \"vector\": [-0.04697464305600074, -0.08509022265734134, 0.9067184632552001, -0.2281912685064822, -0.9747503428652762], \"color\": \"yellow_9925\"},\n {\"id\": 16, \"vector\": [-0.9363075919673911, -0.8153981031085669, 0.7943039120490902, -0.2093886809842529, 0.0771191335807897], \"color\": \"orange_9872\"},\n {\"id\": 17, \"vector\": [-0.050451522820639916, 0.18931572752321935, 0.7522886192190488, -0.9071793089474034, 0.6032647330692296], \"color\": \"red_6450\"},\n {\"id\": 18, \"vector\": [-0.9181544231141592, 0.6700755998126806, -0.014174674636136642, 0.6325780463623432, -0.49662222164032976], \"color\": \"purple_7392\"},\n {\"id\": 19, \"vector\": [0.11426945899602536, 0.6089190684002581, -0.5842735738352236, 0.057050610092692855, -0.035163433018196244], \"color\": \"pink_4996\"}\n]\n\nres = client.upsert(\n collection_name=\"quick_setup\",\n data=data,\n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"upsert_count\": 10\n# }\n","import io.milvus.v2.service.vector.request.UpsertReq;\nimport io.milvus.v2.service.vector.response.UpsertResp;\n\n// 6. 
Upsert data in parition\n\ndata = Arrays.asList(\n new JSONObject(Map.of(\"id\", 10L, \"vector\", Arrays.asList(0.06998888224297328f, 0.8582816610326578f, -0.9657938677934292f, 0.6527905683627726f, -0.8668460657158576f), \"color\", \"black_3651\")),\n new JSONObject(Map.of(\"id\", 11L, \"vector\", Arrays.asList(0.6060703043917468f, -0.3765080534566074f, -0.7710758854987239f, 0.36993888322346136f, 0.5507513364206531f), \"color\", \"grey_2049\")),\n new JSONObject(Map.of(\"id\", 12L, \"vector\", Arrays.asList(-0.9041813104515337f, -0.9610546012461163f, 0.20033003106083358f, 0.11842506351635174f, 0.8327356724591011f), \"color\", \"blue_6168\")),\n new JSONObject(Map.of(\"id\", 13L, \"vector\", Arrays.asList(0.3202914977909075f, -0.7279137773695252f, -0.04747830871620273f, 0.8266053056909548f, 0.8277957187455489f), \"color\", \"blue_1672\")),\n new JSONObject(Map.of(\"id\", 14L, \"vector\", Arrays.asList(0.2975811497890859f, 0.2946936202691086f, 0.5399463833894609f, 0.8385334966677529f, -0.4450543984655133f), \"color\", \"pink_1601\")),\n new JSONObject(Map.of(\"id\", 15L, \"vector\", Arrays.asList(-0.04697464305600074f, -0.08509022265734134f, 0.9067184632552001f, -0.2281912685064822f, -0.9747503428652762f), \"color\", \"yellow_9925\")),\n new JSONObject(Map.of(\"id\", 16L, \"vector\", Arrays.asList(-0.9363075919673911f, -0.8153981031085669f, 0.7943039120490902f, -0.2093886809842529f, 0.0771191335807897f), \"color\", \"orange_9872\")),\n new JSONObject(Map.of(\"id\", 17L, \"vector\", Arrays.asList(-0.050451522820639916f, 0.18931572752321935f, 0.7522886192190488f, -0.9071793089474034f, 0.6032647330692296f), \"color\", \"red_6450\")),\n new JSONObject(Map.of(\"id\", 18L, \"vector\", Arrays.asList(-0.9181544231141592f, 0.6700755998126806f, -0.014174674636136642f, 0.6325780463623432f, -0.49662222164032976f), \"color\", \"purple_7392\")),\n new JSONObject(Map.of(\"id\", 19L, \"vector\", Arrays.asList(0.11426945899602536f, 0.6089190684002581f, -0.5842735738352236f, 0.057050610092692855f, -0.035163433018196244f), \"color\", \"pink_4996\"))\n);\n\nupsertReq = UpsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"partitionA\")\n .build();\n\nupsertResp = client.upsert(upsertReq);\n\nSystem.out.println(JSONObject.toJSON(upsertResp));\n\n// Output:\n// {\"upsertCnt\": 10}\n","// 6. 
Upsert data in partitions\ndata = [\n {id: 10, vector: [0.06998888224297328, 0.8582816610326578, -0.9657938677934292, 0.6527905683627726, -0.8668460657158576], color: \"black_3651\"},\n {id: 11, vector: [0.6060703043917468, -0.3765080534566074, -0.7710758854987239, 0.36993888322346136, 0.5507513364206531], color: \"grey_2049\"},\n {id: 12, vector: [-0.9041813104515337, -0.9610546012461163, 0.20033003106083358, 0.11842506351635174, 0.8327356724591011], color: \"blue_6168\"},\n {id: 13, vector: [0.3202914977909075, -0.7279137773695252, -0.04747830871620273, 0.8266053056909548, 0.8277957187455489], color: \"blue_1672\"},\n {id: 14, vector: [0.2975811497890859, 0.2946936202691086, 0.5399463833894609, 0.8385334966677529, -0.4450543984655133], color: \"pink_1601\"},\n {id: 15, vector: [-0.04697464305600074, -0.08509022265734134, 0.9067184632552001, -0.2281912685064822, -0.9747503428652762], color: \"yellow_9925\"},\n {id: 16, vector: [-0.9363075919673911, -0.8153981031085669, 0.7943039120490902, -0.2093886809842529, 0.0771191335807897], color: \"orange_9872\"},\n {id: 17, vector: [-0.050451522820639916, 0.18931572752321935, 0.7522886192190488, -0.9071793089474034, 0.6032647330692296], color: \"red_6450\"},\n {id: 18, vector: [-0.9181544231141592, 0.6700755998126806, -0.014174674636136642, 0.6325780463623432, -0.49662222164032976], color: \"purple_7392\"},\n {id: 19, vector: [0.11426945899602536, 0.6089190684002581, -0.5842735738352236, 0.057050610092692855, -0.035163433018196244], color: \"pink_4996\"}\n]\n\nres = await client.upsert({\n collection_name: \"quick_setup\",\n data: data,\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.upsert_cnt)\n\n// Output\n// \n// 10\n// \n","# 7. Delete entities\nres = client.delete(\n collection_name=\"quick_setup\",\n filter=\"id in [4,5,6]\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"delete_count\": 3\n# }\n","import io.milvus.v2.service.vector.request.DeleteReq;\nimport io.milvus.v2.service.vector.response.DeleteResp;\n\n\n// 7. Delete entities\n\nDeleteReq deleteReq = DeleteReq.builder()\n .collectionName(\"quick_setup\")\n .filter(\"id in [4, 5, 6]\")\n .build();\n\nDeleteResp deleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 3}\n","// 7. 
Delete entities\nres = await client.delete({\n collection_name: \"quick_setup\",\n filter: \"id in [4,5,6]\"\n})\n\nconsole.log(res.delete_cnt)\n\n// Output\n// \n// 3\n// \n","res = client.delete(\n collection_name=\"quick_setup\",\n ids=[18, 19],\n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"delete_count\": 2\n# }\n","deleteReq = DeleteReq.builder()\n .collectionName(\"quick_setup\")\n .ids(Arrays.asList(18L, 19L))\n .partitionName(\"partitionA\")\n .build();\n\ndeleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 2}\n","res = await client.delete({\n collection_name: \"quick_setup\",\n ids: [18, 19],\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.delete_cnt)\n\n// Output\n// \n// 2\n// \n","res = client.delete(\ncollection_name='quick_setup',\npartition_name='partitionA',\nfilter='color like \"blue%\"'\n)\n\nprint(\"Entities deleted from partitionA: \", res['delete_count'])\n\n# Output:\n# Entities deleted from partitionA: 3\n","deleteReq = DeleteReq.builder()\n .collectionName(\"quick_setup\")\n .filter('color like \"blue%\"')\n .partitionName(\"partitionA\")\n .build();\n\ndeleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 3}\n","res = await client.delete({\ncollection_name: \"quick_setup\",\npartition_name: \"partitionA\",\nfilter: 'color like \"blue%\"'\n})\n\nconsole.log(\"Entities deleted from partitionA: \" + res.delete_cnt)\n\n// Output:\n// Entities deleted from partitionA: 3\n"],"headingContent":"","anchorList":[{"label":"插入、上插和删除","href":"Insert-Upsert--Delete","type":1,"isActive":false},{"label":"开始之前","href":"Before-you-start","type":2,"isActive":false},{"label":"概述","href":"Overview","type":2,"isActive":false},{"label":"准备工作","href":"Preparations","type":2,"isActive":false},{"label":"插入实体","href":"Insert-entities","type":2,"isActive":false},{"label":"倒插实体","href":"Upsert-entities","type":2,"isActive":false},{"label":"删除实体","href":"Delete-entities","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["from pymilvus import MilvusClient\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n metric_type=\"IP\"\n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .metricType(\"IP\")\n .build();\n\nclient.createCollection(quickSetupReq);\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n metric_type: \"IP\"\n}); \n","# 3. 
Insert some data\ndata=[\n {\"id\": 0, \"vector\": [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592], \"color\": \"pink_8682\"},\n {\"id\": 1, \"vector\": [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104], \"color\": \"red_7025\"},\n {\"id\": 2, \"vector\": [0.43742130801983836, -0.5597502546264526, 0.6457887650909682, 0.7894058910881185, 0.20785793220625592], \"color\": \"orange_6781\"},\n {\"id\": 3, \"vector\": [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345], \"color\": \"pink_9298\"},\n {\"id\": 4, \"vector\": [0.4452349528804562, -0.8757026943054742, 0.8220779437047674, 0.46406290649483184, 0.30337481143159106], \"color\": \"red_4794\"},\n {\"id\": 5, \"vector\": [0.985825131989184, -0.8144651566660419, 0.6299267002202009, 0.1206906911183383, -0.1446277761879955], \"color\": \"yellow_4222\"},\n {\"id\": 6, \"vector\": [0.8371977790571115, -0.015764369584852833, -0.31062937026679327, -0.562666951622192, -0.8984947637863987], \"color\": \"red_9392\"},\n {\"id\": 7, \"vector\": [-0.33445148015177995, -0.2567135004164067, 0.8987539745369246, 0.9402995886420709, 0.5378064918413052], \"color\": \"grey_8510\"},\n {\"id\": 8, \"vector\": [0.39524717779832685, 0.4000257286739164, -0.5890507376891594, -0.8650502298996872, -0.6140360785406336], \"color\": \"white_9381\"},\n {\"id\": 9, \"vector\": [0.5718280481994695, 0.24070317428066512, -0.3737913482606834, -0.06726932177492717, -0.6980531615588608], \"color\": \"purple_4976\"}\n]\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 10,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9\n# ]\n# }\n","import java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp;\n\n// 3. 
Insert some data\nList data = Arrays.asList(\n new JSONObject(Map.of(\"id\", 0L, \"vector\", Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f), \"color\", \"pink_8682\")),\n new JSONObject(Map.of(\"id\", 1L, \"vector\", Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f), \"color\", \"red_7025\")),\n new JSONObject(Map.of(\"id\", 2L, \"vector\", Arrays.asList(0.43742130801983836f, -0.5597502546264526f, 0.6457887650909682f, 0.7894058910881185f, 0.20785793220625592f), \"color\", \"orange_6781\")),\n new JSONObject(Map.of(\"id\", 3L, \"vector\", Arrays.asList(0.3172005263489739f, 0.9719044792798428f, -0.36981146090600725f, -0.4860894583077995f, 0.95791889146345f), \"color\", \"pink_9298\")),\n new JSONObject(Map.of(\"id\", 4L, \"vector\", Arrays.asList(0.4452349528804562f, -0.8757026943054742f, 0.8220779437047674f, 0.46406290649483184f, 0.30337481143159106f), \"color\", \"red_4794\")),\n new JSONObject(Map.of(\"id\", 5L, \"vector\", Arrays.asList(0.985825131989184f, -0.8144651566660419f, 0.6299267002202009f, 0.1206906911183383f, -0.1446277761879955f), \"color\", \"yellow_4222\")),\n new JSONObject(Map.of(\"id\", 6L, \"vector\", Arrays.asList(0.8371977790571115f, -0.015764369584852833f, -0.31062937026679327f, -0.562666951622192f, -0.8984947637863987f), \"color\", \"red_9392\")),\n new JSONObject(Map.of(\"id\", 7L, \"vector\", Arrays.asList(-0.33445148015177995f, -0.2567135004164067f, 0.8987539745369246f, 0.9402995886420709f, 0.5378064918413052f), \"color\", \"grey_8510\")),\n new JSONObject(Map.of(\"id\", 8L, \"vector\", Arrays.asList(0.39524717779832685f, 0.4000257286739164f, -0.5890507376891594f, -0.8650502298996872f, -0.6140360785406336f), \"color\", \"white_9381\")),\n new JSONObject(Map.of(\"id\", 9L, \"vector\", Arrays.asList(0.5718280481994695f, 0.24070317428066512f, -0.3737913482606834f, -0.06726932177492717f, -0.6980531615588608f), \"color\", \"purple_4976\"))\n);\n\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 10}\n","// 3. 
Insert some data\n\nvar data = [\n {id: 0, vector: [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592], color: \"pink_8682\"},\n {id: 1, vector: [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104], color: \"red_7025\"},\n {id: 2, vector: [0.43742130801983836, -0.5597502546264526, 0.6457887650909682, 0.7894058910881185, 0.20785793220625592], color: \"orange_6781\"},\n {id: 3, vector: [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345], color: \"pink_9298\"},\n {id: 4, vector: [0.4452349528804562, -0.8757026943054742, 0.8220779437047674, 0.46406290649483184, 0.30337481143159106], color: \"red_4794\"},\n {id: 5, vector: [0.985825131989184, -0.8144651566660419, 0.6299267002202009, 0.1206906911183383, -0.1446277761879955], color: \"yellow_4222\"},\n {id: 6, vector: [0.8371977790571115, -0.015764369584852833, -0.31062937026679327, -0.562666951622192, -0.8984947637863987], color: \"red_9392\"},\n {id: 7, vector: [-0.33445148015177995, -0.2567135004164067, 0.8987539745369246, 0.9402995886420709, 0.5378064918413052], color: \"grey_8510\"},\n {id: 8, vector: [0.39524717779832685, 0.4000257286739164, -0.5890507376891594, -0.8650502298996872, -0.6140360785406336], color: \"white_9381\"},\n {id: 9, vector: [0.5718280481994695, 0.24070317428066512, -0.3737913482606834, -0.06726932177492717, -0.6980531615588608], color: \"purple_4976\"} \n]\n\nvar res = await client.insert({\n collection_name: \"quick_setup\",\n data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 10\n// \n","# 4. Insert some more data into a specific partition\ndata=[\n {\"id\": 10, \"vector\": [-0.5570353903748935, -0.8997887893201304, -0.7123782431855732, -0.6298990746450119, 0.6699215060604258], \"color\": \"red_1202\"},\n {\"id\": 11, \"vector\": [0.6319019033373907, 0.6821488267878275, 0.8552303045704168, 0.36929791364943054, -0.14152860714878068], \"color\": \"blue_4150\"},\n {\"id\": 12, \"vector\": [0.9483947484855766, -0.32294203351925344, 0.9759290319978025, 0.8262982148666174, -0.8351194181285713], \"color\": \"orange_4590\"},\n {\"id\": 13, \"vector\": [-0.5449109892498731, 0.043511240563786524, -0.25105249484790804, -0.012030655265886425, -0.0010987671273892108], \"color\": \"pink_9619\"},\n {\"id\": 14, \"vector\": [0.6603339372951424, -0.10866551787442225, -0.9435597754324891, 0.8230244263466688, -0.7986720938400362], \"color\": \"orange_4863\"},\n {\"id\": 15, \"vector\": [-0.8825129181091456, -0.9204557711667729, -0.935350065513425, 0.5484069690287079, 0.24448151140671204], \"color\": \"orange_7984\"},\n {\"id\": 16, \"vector\": [0.6285586391568163, 0.5389064528263487, -0.3163366239905099, 0.22036279378888013, 0.15077052220816167], \"color\": \"blue_9010\"},\n {\"id\": 17, \"vector\": [-0.20151825016059233, -0.905239387635804, 0.6749305353372479, -0.7324272081377843, -0.33007998971889263], \"color\": \"blue_4521\"},\n {\"id\": 18, \"vector\": [0.2432286610792349, 0.01785636564206139, -0.651356982731391, -0.35848148851027895, -0.7387383128324057], \"color\": \"orange_2529\"},\n {\"id\": 19, \"vector\": [0.055512329053363674, 0.7100266349039421, 0.4956956543575197, 0.24541352586717702, 0.4209030729923515], \"color\": \"red_9437\"}\n]\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"partitionA\"\n)\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data,\n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# 
Output\n#\n# {\n# \"insert_count\": 10,\n# \"ids\": [\n# 10,\n# 11,\n# 12,\n# 13,\n# 14,\n# 15,\n# 16,\n# 17,\n# 18,\n# 19\n# ]\n# }\n","// 4. Insert some more data into a specific partition\ndata = Arrays.asList(\n new JSONObject(Map.of(\"id\", 10L, \"vector\", Arrays.asList(-0.5570353903748935f, -0.8997887893201304f, -0.7123782431855732f, -0.6298990746450119f, 0.6699215060604258f), \"color\", \"red_1202\")),\n new JSONObject(Map.of(\"id\", 11L, \"vector\", Arrays.asList(0.6319019033373907f, 0.6821488267878275f, 0.8552303045704168f, 0.36929791364943054f, -0.14152860714878068f), \"color\", \"blue_4150\")),\n new JSONObject(Map.of(\"id\", 12L, \"vector\", Arrays.asList(0.9483947484855766f, -0.32294203351925344f, 0.9759290319978025f, 0.8262982148666174f, -0.8351194181285713f), \"color\", \"orange_4590\")),\n new JSONObject(Map.of(\"id\", 13L, \"vector\", Arrays.asList(-0.5449109892498731f, 0.043511240563786524f, -0.25105249484790804f, -0.012030655265886425f, -0.0010987671273892108f), \"color\", \"pink_9619\")),\n new JSONObject(Map.of(\"id\", 14L, \"vector\", Arrays.asList(0.6603339372951424f, -0.10866551787442225f, -0.9435597754324891f, 0.8230244263466688f, -0.7986720938400362f), \"color\", \"orange_4863\")),\n new JSONObject(Map.of(\"id\", 15L, \"vector\", Arrays.asList(-0.8825129181091456f, -0.9204557711667729f, -0.935350065513425f, 0.5484069690287079f, 0.24448151140671204f), \"color\", \"orange_7984\")),\n new JSONObject(Map.of(\"id\", 16L, \"vector\", Arrays.asList(0.6285586391568163f, 0.5389064528263487f, -0.3163366239905099f, 0.22036279378888013f, 0.15077052220816167f), \"color\", \"blue_9010\")),\n new JSONObject(Map.of(\"id\", 17L, \"vector\", Arrays.asList(-0.20151825016059233f, -0.905239387635804f, 0.6749305353372479f, -0.7324272081377843f, -0.33007998971889263f), \"color\", \"blue_4521\")),\n new JSONObject(Map.of(\"id\", 18L, \"vector\", Arrays.asList(0.2432286610792349f, 0.01785636564206139f, -0.651356982731391f, -0.35848148851027895f, -0.7387383128324057f), \"color\", \"orange_2529\")),\n new JSONObject(Map.of(\"id\", 19L, \"vector\", Arrays.asList(0.055512329053363674f, 0.7100266349039421f, 0.4956956543575197f, 0.24541352586717702f, 0.4209030729923515f), \"color\", \"red_9437\"))\n);\n\nCreatePartitionReq createPartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"partitionA\")\n .build();\n\nclient.createPartition(createPartitionReq);\n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"partitionA\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 10}\n","// 4. 
Insert some more data into a specific partition\ndata = [\n {id: 10, vector: [-0.5570353903748935, -0.8997887893201304, -0.7123782431855732, -0.6298990746450119, 0.6699215060604258], color: \"red_1202\"},\n {id: 11, vector: [0.6319019033373907, 0.6821488267878275, 0.8552303045704168, 0.36929791364943054, -0.14152860714878068], color: \"blue_4150\"},\n {id: 12, vector: [0.9483947484855766, -0.32294203351925344, 0.9759290319978025, 0.8262982148666174, -0.8351194181285713], color: \"orange_4590\"},\n {id: 13, vector: [-0.5449109892498731, 0.043511240563786524, -0.25105249484790804, -0.012030655265886425, -0.0010987671273892108], color: \"pink_9619\"},\n {id: 14, vector: [0.6603339372951424, -0.10866551787442225, -0.9435597754324891, 0.8230244263466688, -0.7986720938400362], color: \"orange_4863\"},\n {id: 15, vector: [-0.8825129181091456, -0.9204557711667729, -0.935350065513425, 0.5484069690287079, 0.24448151140671204], color: \"orange_7984\"},\n {id: 16, vector: [0.6285586391568163, 0.5389064528263487, -0.3163366239905099, 0.22036279378888013, 0.15077052220816167], color: \"blue_9010\"},\n {id: 17, vector: [-0.20151825016059233, -0.905239387635804, 0.6749305353372479, -0.7324272081377843, -0.33007998971889263], color: \"blue_4521\"},\n {id: 18, vector: [0.2432286610792349, 0.01785636564206139, -0.651356982731391, -0.35848148851027895, -0.7387383128324057], color: \"orange_2529\"},\n {id: 19, vector: [0.055512329053363674, 0.7100266349039421, 0.4956956543575197, 0.24541352586717702, 0.4209030729923515], color: \"red_9437\"}\n]\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"partitionA\"\n})\n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: data,\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 10\n// \n","# 5. 
Upsert some data\ndata=[\n {\"id\": 0, \"vector\": [-0.619954382375778, 0.4479436794798608, -0.17493894838751745, -0.4248030059917294, -0.8648452746018911], \"color\": \"black_9898\"},\n {\"id\": 1, \"vector\": [0.4762662251462588, -0.6942502138717026, -0.4490002642657902, -0.628696575798281, 0.9660395877041965], \"color\": \"red_7319\"},\n {\"id\": 2, \"vector\": [-0.8864122635045097, 0.9260170474445351, 0.801326976181461, 0.6383943392381306, 0.7563037341572827], \"color\": \"white_6465\"},\n {\"id\": 3, \"vector\": [0.14594326235891586, -0.3775407299900644, -0.3765479013078812, 0.20612075380355122, 0.4902678929632145], \"color\": \"orange_7580\"},\n {\"id\": 4, \"vector\": [0.4548498669607359, -0.887610217681605, 0.5655081329910452, 0.19220509387904117, 0.016513983433433577], \"color\": \"red_3314\"},\n {\"id\": 5, \"vector\": [0.11755001847051827, -0.7295149788999611, 0.2608115847524266, -0.1719167007897875, 0.7417611743754855], \"color\": \"black_9955\"},\n {\"id\": 6, \"vector\": [0.9363032158314308, 0.030699901477745373, 0.8365910312319647, 0.7823840208444011, 0.2625222076909237], \"color\": \"yellow_2461\"},\n {\"id\": 7, \"vector\": [0.0754823906014721, -0.6390658668265143, 0.5610517334334937, -0.8986261118798251, 0.9372056764266794], \"color\": \"white_5015\"},\n {\"id\": 8, \"vector\": [-0.3038434006935904, 0.1279149203380523, 0.503958664270957, -0.2622661156746988, 0.7407627307791929], \"color\": \"purple_6414\"},\n {\"id\": 9, \"vector\": [-0.7125086947677588, -0.8050968321012257, -0.32608864121785786, 0.3255654958645424, 0.26227968923834233], \"color\": \"brown_7231\"}\n]\n\nres = client.upsert(\n collection_name='quick_setup',\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"upsert_count\": 10\n# }\n","// 5. Upsert some data\ndata = Arrays.asList(\n new JSONObject(Map.of(\"id\", 0L, \"vector\", Arrays.asList(-0.619954382375778f, 0.4479436794798608f, -0.17493894838751745f, -0.4248030059917294f, -0.8648452746018911f), \"color\", \"black_9898\")),\n new JSONObject(Map.of(\"id\", 1L, \"vector\", Arrays.asList(0.4762662251462588f, -0.6942502138717026f, -0.4490002642657902f, -0.628696575798281f, 0.9660395877041965f), \"color\", \"red_7319\")),\n new JSONObject(Map.of(\"id\", 2L, \"vector\", Arrays.asList(-0.8864122635045097f, 0.9260170474445351f, 0.801326976181461f, 0.6383943392381306f, 0.7563037341572827f), \"color\", \"white_6465\")),\n new JSONObject(Map.of(\"id\", 3L, \"vector\", Arrays.asList(0.14594326235891586f, -0.3775407299900644f, -0.3765479013078812f, 0.20612075380355122f, 0.4902678929632145f), \"color\", \"orange_7580\")),\n new JSONObject(Map.of(\"id\", 4L, \"vector\", Arrays.asList(0.4548498669607359f, -0.887610217681605f, 0.5655081329910452f, 0.19220509387904117f, 0.016513983433433577f), \"color\", \"red_3314\")),\n new JSONObject(Map.of(\"id\", 5L, \"vector\", Arrays.asList(0.11755001847051827f, -0.7295149788999611f, 0.2608115847524266f, -0.1719167007897875f, 0.7417611743754855f), \"color\", \"black_9955\")),\n new JSONObject(Map.of(\"id\", 6L, \"vector\", Arrays.asList(0.9363032158314308f, 0.030699901477745373f, 0.8365910312319647f, 0.7823840208444011f, 0.2625222076909237f), \"color\", \"yellow_2461\")),\n new JSONObject(Map.of(\"id\", 7L, \"vector\", Arrays.asList(0.0754823906014721f, -0.6390658668265143f, 0.5610517334334937f, -0.8986261118798251f, 0.9372056764266794f), \"color\", \"white_5015\")),\n new JSONObject(Map.of(\"id\", 8L, \"vector\", Arrays.asList(-0.3038434006935904f, 0.1279149203380523f, 0.503958664270957f, -0.2622661156746988f, 
0.7407627307791929f), \"color\", \"purple_6414\")),\n new JSONObject(Map.of(\"id\", 9L, \"vector\", Arrays.asList(-0.7125086947677588f, -0.8050968321012257f, -0.32608864121785786f, 0.3255654958645424f, 0.26227968923834233f), \"color\", \"brown_7231\"))\n);\n\nUpsertReq upsertReq = UpsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nUpsertResp upsertResp = client.upsert(upsertReq);\n\nSystem.out.println(JSONObject.toJSON(upsertResp));\n\n// Output:\n// {\"upsertCnt\": 10}\n","// 5. Upsert some data\ndata = [\n {id: 0, vector: [-0.619954382375778, 0.4479436794798608, -0.17493894838751745, -0.4248030059917294, -0.8648452746018911], color: \"black_9898\"},\n {id: 1, vector: [0.4762662251462588, -0.6942502138717026, -0.4490002642657902, -0.628696575798281, 0.9660395877041965], color: \"red_7319\"},\n {id: 2, vector: [-0.8864122635045097, 0.9260170474445351, 0.801326976181461, 0.6383943392381306, 0.7563037341572827], color: \"white_6465\"},\n {id: 3, vector: [0.14594326235891586, -0.3775407299900644, -0.3765479013078812, 0.20612075380355122, 0.4902678929632145], color: \"orange_7580\"},\n {id: 4, vector: [0.4548498669607359, -0.887610217681605, 0.5655081329910452, 0.19220509387904117, 0.016513983433433577], color: \"red_3314\"},\n {id: 5, vector: [0.11755001847051827, -0.7295149788999611, 0.2608115847524266, -0.1719167007897875, 0.7417611743754855], color: \"black_9955\"},\n {id: 6, vector: [0.9363032158314308, 0.030699901477745373, 0.8365910312319647, 0.7823840208444011, 0.2625222076909237], color: \"yellow_2461\"},\n {id: 7, vector: [0.0754823906014721, -0.6390658668265143, 0.5610517334334937, -0.8986261118798251, 0.9372056764266794], color: \"white_5015\"},\n {id: 8, vector: [-0.3038434006935904, 0.1279149203380523, 0.503958664270957, -0.2622661156746988, 0.7407627307791929], color: \"purple_6414\"},\n {id: 9, vector: [-0.7125086947677588, -0.8050968321012257, -0.32608864121785786, 0.3255654958645424, 0.26227968923834233], color: \"brown_7231\"}\n]\n\nres = await client.upsert({\n collection_name: \"quick_setup\",\n data: data,\n})\n\nconsole.log(res.upsert_cnt)\n\n// Output\n// \n// 10\n// \n","# 6. 
Upsert data in partitions\ndata=[\n {\"id\": 10, \"vector\": [0.06998888224297328, 0.8582816610326578, -0.9657938677934292, 0.6527905683627726, -0.8668460657158576], \"color\": \"black_3651\"},\n {\"id\": 11, \"vector\": [0.6060703043917468, -0.3765080534566074, -0.7710758854987239, 0.36993888322346136, 0.5507513364206531], \"color\": \"grey_2049\"},\n {\"id\": 12, \"vector\": [-0.9041813104515337, -0.9610546012461163, 0.20033003106083358, 0.11842506351635174, 0.8327356724591011], \"color\": \"blue_6168\"},\n {\"id\": 13, \"vector\": [0.3202914977909075, -0.7279137773695252, -0.04747830871620273, 0.8266053056909548, 0.8277957187455489], \"color\": \"blue_1672\"},\n {\"id\": 14, \"vector\": [0.2975811497890859, 0.2946936202691086, 0.5399463833894609, 0.8385334966677529, -0.4450543984655133], \"color\": \"pink_1601\"},\n {\"id\": 15, \"vector\": [-0.04697464305600074, -0.08509022265734134, 0.9067184632552001, -0.2281912685064822, -0.9747503428652762], \"color\": \"yellow_9925\"},\n {\"id\": 16, \"vector\": [-0.9363075919673911, -0.8153981031085669, 0.7943039120490902, -0.2093886809842529, 0.0771191335807897], \"color\": \"orange_9872\"},\n {\"id\": 17, \"vector\": [-0.050451522820639916, 0.18931572752321935, 0.7522886192190488, -0.9071793089474034, 0.6032647330692296], \"color\": \"red_6450\"},\n {\"id\": 18, \"vector\": [-0.9181544231141592, 0.6700755998126806, -0.014174674636136642, 0.6325780463623432, -0.49662222164032976], \"color\": \"purple_7392\"},\n {\"id\": 19, \"vector\": [0.11426945899602536, 0.6089190684002581, -0.5842735738352236, 0.057050610092692855, -0.035163433018196244], \"color\": \"pink_4996\"}\n]\n\nres = client.upsert(\n collection_name=\"quick_setup\",\n data=data,\n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"upsert_count\": 10\n# }\n","import io.milvus.v2.service.vector.request.UpsertReq;\nimport io.milvus.v2.service.vector.response.UpsertResp;\n\n// 6. 
Upsert data in parition\n\ndata = Arrays.asList(\n new JSONObject(Map.of(\"id\", 10L, \"vector\", Arrays.asList(0.06998888224297328f, 0.8582816610326578f, -0.9657938677934292f, 0.6527905683627726f, -0.8668460657158576f), \"color\", \"black_3651\")),\n new JSONObject(Map.of(\"id\", 11L, \"vector\", Arrays.asList(0.6060703043917468f, -0.3765080534566074f, -0.7710758854987239f, 0.36993888322346136f, 0.5507513364206531f), \"color\", \"grey_2049\")),\n new JSONObject(Map.of(\"id\", 12L, \"vector\", Arrays.asList(-0.9041813104515337f, -0.9610546012461163f, 0.20033003106083358f, 0.11842506351635174f, 0.8327356724591011f), \"color\", \"blue_6168\")),\n new JSONObject(Map.of(\"id\", 13L, \"vector\", Arrays.asList(0.3202914977909075f, -0.7279137773695252f, -0.04747830871620273f, 0.8266053056909548f, 0.8277957187455489f), \"color\", \"blue_1672\")),\n new JSONObject(Map.of(\"id\", 14L, \"vector\", Arrays.asList(0.2975811497890859f, 0.2946936202691086f, 0.5399463833894609f, 0.8385334966677529f, -0.4450543984655133f), \"color\", \"pink_1601\")),\n new JSONObject(Map.of(\"id\", 15L, \"vector\", Arrays.asList(-0.04697464305600074f, -0.08509022265734134f, 0.9067184632552001f, -0.2281912685064822f, -0.9747503428652762f), \"color\", \"yellow_9925\")),\n new JSONObject(Map.of(\"id\", 16L, \"vector\", Arrays.asList(-0.9363075919673911f, -0.8153981031085669f, 0.7943039120490902f, -0.2093886809842529f, 0.0771191335807897f), \"color\", \"orange_9872\")),\n new JSONObject(Map.of(\"id\", 17L, \"vector\", Arrays.asList(-0.050451522820639916f, 0.18931572752321935f, 0.7522886192190488f, -0.9071793089474034f, 0.6032647330692296f), \"color\", \"red_6450\")),\n new JSONObject(Map.of(\"id\", 18L, \"vector\", Arrays.asList(-0.9181544231141592f, 0.6700755998126806f, -0.014174674636136642f, 0.6325780463623432f, -0.49662222164032976f), \"color\", \"purple_7392\")),\n new JSONObject(Map.of(\"id\", 19L, \"vector\", Arrays.asList(0.11426945899602536f, 0.6089190684002581f, -0.5842735738352236f, 0.057050610092692855f, -0.035163433018196244f), \"color\", \"pink_4996\"))\n);\n\nupsertReq = UpsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"partitionA\")\n .build();\n\nupsertResp = client.upsert(upsertReq);\n\nSystem.out.println(JSONObject.toJSON(upsertResp));\n\n// Output:\n// {\"upsertCnt\": 10}\n","// 6. 
Upsert data in partitions\ndata = [\n {id: 10, vector: [0.06998888224297328, 0.8582816610326578, -0.9657938677934292, 0.6527905683627726, -0.8668460657158576], color: \"black_3651\"},\n {id: 11, vector: [0.6060703043917468, -0.3765080534566074, -0.7710758854987239, 0.36993888322346136, 0.5507513364206531], color: \"grey_2049\"},\n {id: 12, vector: [-0.9041813104515337, -0.9610546012461163, 0.20033003106083358, 0.11842506351635174, 0.8327356724591011], color: \"blue_6168\"},\n {id: 13, vector: [0.3202914977909075, -0.7279137773695252, -0.04747830871620273, 0.8266053056909548, 0.8277957187455489], color: \"blue_1672\"},\n {id: 14, vector: [0.2975811497890859, 0.2946936202691086, 0.5399463833894609, 0.8385334966677529, -0.4450543984655133], color: \"pink_1601\"},\n {id: 15, vector: [-0.04697464305600074, -0.08509022265734134, 0.9067184632552001, -0.2281912685064822, -0.9747503428652762], color: \"yellow_9925\"},\n {id: 16, vector: [-0.9363075919673911, -0.8153981031085669, 0.7943039120490902, -0.2093886809842529, 0.0771191335807897], color: \"orange_9872\"},\n {id: 17, vector: [-0.050451522820639916, 0.18931572752321935, 0.7522886192190488, -0.9071793089474034, 0.6032647330692296], color: \"red_6450\"},\n {id: 18, vector: [-0.9181544231141592, 0.6700755998126806, -0.014174674636136642, 0.6325780463623432, -0.49662222164032976], color: \"purple_7392\"},\n {id: 19, vector: [0.11426945899602536, 0.6089190684002581, -0.5842735738352236, 0.057050610092692855, -0.035163433018196244], color: \"pink_4996\"}\n]\n\nres = await client.upsert({\n collection_name: \"quick_setup\",\n data: data,\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.upsert_cnt)\n\n// Output\n// \n// 10\n// \n","# 7. Delete entities\nres = client.delete(\n collection_name=\"quick_setup\",\n filter=\"id in [4,5,6]\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"delete_count\": 3\n# }\n","import io.milvus.v2.service.vector.request.DeleteReq;\nimport io.milvus.v2.service.vector.response.DeleteResp;\n\n\n// 7. Delete entities\n\nDeleteReq deleteReq = DeleteReq.builder()\n .collectionName(\"quick_setup\")\n .filter(\"id in [4, 5, 6]\")\n .build();\n\nDeleteResp deleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 3}\n","// 7. 
Delete entities\nres = await client.delete({\n collection_name: \"quick_setup\",\n filter: \"id in [4,5,6]\"\n})\n\nconsole.log(res.delete_cnt)\n\n// Output\n// \n// 3\n// \n","res = client.delete(\n collection_name=\"quick_setup\",\n ids=[18, 19],\n partition_name=\"partitionA\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"delete_count\": 2\n# }\n","deleteReq = DeleteReq.builder()\n .collectionName(\"quick_setup\")\n .ids(Arrays.asList(18L, 19L))\n .partitionName(\"partitionA\")\n .build();\n\ndeleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 2}\n","res = await client.delete({\n collection_name: \"quick_setup\",\n ids: [18, 19],\n partition_name: \"partitionA\"\n})\n\nconsole.log(res.delete_cnt)\n\n// Output\n// \n// 2\n// \n","res = client.delete(\ncollection_name='quick_setup',\npartition_name='partitionA',\nfilter='color like \"blue%\"'\n)\n\nprint(\"Entities deleted from partitionA: \", res['delete_count'])\n\n# Output:\n# Entities deleted from partitionA: 3\n","deleteReq = DeleteReq.builder()\n .collectionName(\"quick_setup\")\n .filter('color like \"blue%\"')\n .partitionName(\"partitionA\")\n .build();\n\ndeleteResp = client.delete(deleteReq);\n\nSystem.out.println(JSONObject.toJSON(deleteResp));\n\n// Output:\n// {\"deleteCnt\": 3}\n","res = await client.delete({\ncollection_name: \"quick_setup\",\npartition_name: \"partitionA\",\nfilter: 'color like \"blue%\"'\n})\n\nconsole.log(\"Entities deleted from partitionA: \" + res.delete_cnt)\n\n// Output:\n// Entities deleted from partitionA: 3\n"],"headingContent":"Insert, Upsert & Delete","anchorList":[{"label":"插入、上插和删除","href":"Insert-Upsert--Delete","type":1,"isActive":false},{"label":"开始之前","href":"Before-you-start","type":2,"isActive":false},{"label":"概述","href":"Overview","type":2,"isActive":false},{"label":"准备工作","href":"Preparations","type":2,"isActive":false},{"label":"插入实体","href":"Insert-entities","type":2,"isActive":false},{"label":"倒插实体","href":"Upsert-entities","type":2,"isActive":false},{"label":"删除实体","href":"Delete-entities","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/userGuide/insert-update-delete.md b/localization/v2.4.x/site/zh/userGuide/insert-update-delete.md index 9f30d93c2..89126859b 100644 --- a/localization/v2.4.x/site/zh/userGuide/insert-update-delete.md +++ b/localization/v2.4.x/site/zh/userGuide/insert-update-delete.md @@ -1,9 +1,8 @@ --- id: insert-update-delete.md -summary: 本指南将引导您了解集合中的数据操作,包括插入、向上插入和删除。 -title: 插入、倒置和删除 +summary: 本指南将向您介绍 Collections 中的数据操作符操作,包括插入、向上插入和删除。 +title: 插入、上插和删除 --- -

    插入、上插和删除

    本指南将指导你在集合中进行数据操作,包括插入、上载和删除。

    +

    本指南将向您介绍 Collections 中的数据操作符,包括插入、向上插入和删除。

    开始之前

    • 您已安装了所选的 SDK。要安装 SDK,请参阅安装 SDK

    • -
    • 您已创建了一个数据集。要创建数据集,请参阅管理数据集。

    • +
    • 您已创建了一个 Collections。要创建一个Collections,请参阅管理 Collections

    • 要插入大量数据,建议使用数据导入

    概述

    在 Milvus 数据集中,实体是一个数据集中可识别的单一实例。它代表特定类别中的一个独特成员,无论是图书馆中的一本书、基因组中的一个基因,还是任何其他可识别的实体。

    -

    集合中的实体共享一组共同的属性,称为模式,概述了每个实体必须遵守的结构,包括字段名称、数据类型和任何其他限制。

    -

    要将实体成功插入到集合中,所提供的数据必须包含目标集合的所有模式定义字段。此外,只有启用了Dynamic Field,才能包含非模式定义的字段。有关详细信息,请参阅启用Dynamic Field

    +

    实体,在 Milvus 集合的上下文中,是集合中的一个单一的、可识别的实例。它代表特定类别中的一个独特成员,无论是图书馆中的一本书、基因组中的一个基因,还是其他任何可识别的实体。

    +

    Collections 中的实体共享一组共同的属性(称为 Schema),这些属性概述了每个实体必须遵守的结构,包括字段名称、数据类型和其他限制条件。

    +

    要将实体成功插入 Collections,所提供的数据必须包含目标 Collections 的所有 Schema 定义字段。此外,只有启用了动态字段,才能包含非 Schema 定义的字段。有关详细信息,请参阅启用动态字段
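A minimal, hedged sketch of the schema requirement above (it assumes a Milvus server at http://localhost:19530 and uses a hypothetical collection name, dynamic_demo): a quick-setup collection enables the dynamic field by default, so a non-schema key such as "tag" is accepted and stored in the reserved $meta JSON field.

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")

# Quick-setup mode defines only "id" and "vector"; enable_dynamic_field
# defaults to True, so extra keys are routed into the reserved $meta field.
client.create_collection(
    collection_name="dynamic_demo",  # hypothetical name for this sketch
    dimension=5,
)

# "tag" is not a schema-defined field; it is only accepted here because
# the dynamic field is enabled on this collection.
client.insert(
    collection_name="dynamic_demo",
    data=[{"id": 0, "vector": [0.1, 0.2, 0.3, 0.4, 0.5], "tag": "demo"}],
)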

    准备工作

    下面的代码片段重新利用了现有代码,以建立与 Milvus 集群的连接,并快速建立一个数据集。

    +

    下面的代码片段重新利用了现有代码,以建立与 Milvus 集群的连接,并快速设置一个 Collections。

    -

    对于准备工作,使用 MilvusClient连接到 Milvus 服务器,并使用 create_collection()在快速设置模式下创建集合。

    +

    对于准备工作,使用 MilvusClient连接到 Milvus 服务器,并使用 create_collection()以快速设置模式创建 Collections。

    -

    对于准备工作,使用 MilvusClientV2连接到 Milvus 服务器 createCollection()以快速设置模式创建采集。

    +

    对于准备工作,使用 MilvusClientV2连接到 Milvus 服务器,并使用 createCollection()以快速设置模式创建 Collections。

    -

    对于准备工作,使用 MilvusClient连接到 Milvus 服务器 createCollection()以快速设置模式创建集合。

    +

    对于准备工作,使用 MilvusClient连接到 Milvus 服务器,并使用 createCollection()以快速设置模式创建 Collections。
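For quick reference, a consolidated, runnable version of the Python preparation code (with the indentation fix from the hunk below already applied; it assumes a Milvus server reachable at http://localhost:19530):

from pymilvus import MilvusClient

# 1. Set up a Milvus client
client = MilvusClient(
    uri="http://localhost:19530"
)

# 2. Create a collection in quick setup mode, with a 5-dimensional vector
# field and inner product (IP) as the similarity metric
client.create_collection(
    collection_name="quick_setup",
    dimension=5,
    metric_type="IP"
)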

@@ -89,17 +88,16 @@ title: 插入、倒置和删除
 # 1. Set up a Milvus client
 client = MilvusClient(
-uri="http://localhost:19530"
+    uri="http://localhost:19530"
 )
 
 # 2. Create a collection
 client.create_collection(
-collection_name="quick_setup",
-dimension=5,
-metric_type="IP"
+    collection_name="quick_setup",
+    dimension=5,
+    metric_type="IP"
 )
    -
    import io.milvus.v2.client.ConnectConfig;
     import io.milvus.v2.client.MilvusClientV2;
     import io.milvus.v2.service.collection.request.CreateCollectionReq;
    @@ -138,10 +136,10 @@ client = new M
     

    注释

    -

    上述代码生成的集合只包含两个字段:id (作为主键)和vector (作为向量字段),默认启用auto_idenable_dynamic_field 设置。插入数据时、

    +

    上述代码生成的 Collections 只包含两个字段:id (作为主键)和vector (作为向量字段),默认启用auto_idenable_dynamic_field 设置。插入数据时、

      -
    • 无需在要插入的数据中包含id,因为主字段会在插入数据时自动递增。

    • -
    • 非模式定义的字段将以键值对的形式保存在名为$meta 的预留 JSON 字段中。

    • +
    • 无需在要插入的数据中包含id,因为主字段会在插入数据时自动递增。

    • +
    • 非 Schema 定义的字段将以键值对的形式保存在名为$meta 的预留 JSON 字段中。
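A short, hedged illustration of the $meta behavior in the note above (it assumes the quick_setup collection from this guide, already populated and loaded): dynamic-field keys can be filtered and returned like ordinary fields, with Milvus resolving them from $meta.

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")

# "color" is not schema-defined; Milvus resolves it from the reserved
# $meta JSON field when filtering and when listed in output_fields.
res = client.query(
    collection_name="quick_setup",
    filter='color like "pink%"',
    output_fields=["color"],
)
print(res)  # e.g. [{'color': 'pink_8682', 'id': 0}, ...]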

    插入实体

    要插入实体,需要将数据组织成一个字典列表,其中每个字典代表一个实体。每个字典都包含与目标集合中预定义字段和Dynamic Field相对应的键。

    +

    要插入实体,需要将数据组织成一个字典列表,其中每个字典代表一个实体。每个字典都包含与目标 Collections 中预定义字段和动态字段相对应的键。

    -

    要在集合中插入实体,请使用 insert()方法。

    +

    要将实体插入 Collections,请使用 insert()方法。

    -

    要将实体插入集合,请使用 insert()方法。

    +

    要将实体插入 Collections,请使用 insert()方法。

    -

    要将实体插入集合,请使用 insert()方法。

    +

    要将实体插入 Collections,请使用 insert()方法。

@@ -186,8 +184,8 @@ data=[
 ]
 
 res = client.insert(
-collection_name="quick_setup",
-data=data
+    collection_name="quick_setup",
+    data=data
 )
 
 print(res)
@@ -195,22 +193,21 @@ data=data
 # Output
 #
 # {
-# "insert_count": 10,
-# "ids": [
-# 0,
-# 1,
-# 2,
-# 3,
-# 4,
-# 5,
-# 6,
-# 7,
-# 8,
-# 9
-# ]
+#     "insert_count": 10,
+#     "ids": [
+#         0,
+#         1,
+#         2,
+#         3,
+#         4,
+#         5,
+#         6,
+#         7,
+#         8,
+#         9
+#     ]
 # }
    -
    import java.util.Arrays;
     import java.util.List;
     import java.util.Map;
    @@ -288,14 +285,14 @@ data=[
     ]
     
     client.create_partition(
    -collection_name="quick_setup",
    -partition_name="partitionA"
    +    collection_name="quick_setup",
    +    partition_name="partitionA"
     )
     
     res = client.insert(
    -collection_name="quick_setup",
    -data=data,
    -partition_name="partitionA"
    +    collection_name="quick_setup",
    +    data=data,
    +    partition_name="partitionA"
     )
     
     print(res)
    @@ -303,22 +300,21 @@ partition_name="partitionA"
     # Output
     #
     # {
    -# "insert_count": 10,
    -# "ids": [
    -# 10,
    -# 11,
    -# 12,
    -# 13,
    -# 14,
    -# 15,
    -# 16,
    -# 17,
    -# 18,
    -# 19
    -# ]
    +#     "insert_count": 10,
    +#     "ids": [
    +#         10,
    +#         11,
    +#         12,
    +#         13,
    +#         14,
    +#         15,
    +#         16,
    +#         17,
    +#         18,
    +#         19
    +#     ]
     # }
     
    -
    // 4. Insert some more data into a specific partition
     data = Arrays.asList(
         new JSONObject(Map.of("id", 10L, "vector", Arrays.asList(-0.5570353903748935f, -0.8997887893201304f, -0.7123782431855732f, -0.6298990746450119f, 0.6699215060604258f), "color", "red_1202")),
    @@ -401,16 +397,15 @@ res = await client.
           
    -    

    向上插入数据是更新和插入操作的组合。在 Milvus 中,upsert 操作会根据实体的主键是否已存在于集合中,执行插入或更新实体的数据级操作。具体来说

    +

    向上插入数据是更新和插入操作的组合。在 Milvus 中,upsert 操作执行数据级操作,根据实体的主键是否已存在于 Collections 中来插入或更新实体。具体来说

      -
    • 如果实体的主键已存在于集合中,现有实体将被覆盖。

    • +
    • 如果实体的主键已经存在于 Collections 中,现有实体将被覆盖。

    • 如果集合中不存在主键,则会插入一个新实体。

      -
    • Upsert 操作不会更新主键。
    • -
    • Upsert 操作不支持已启用autoID 的集合。
    • -
    • 如果计划在大规模数据摄取(如数百万向量)时使用upsert 操作而不是insert ,请注意这会导致 Milvus 数据节点的内存消耗过高。
    • +
    • 向上插入操作不会更新主键。
    • +
    • 如果计划在大规模数据摄取(如数百万向量)时使用upsert 操作符而不是insert ,请注意这会导致 Milvus 数据节点的内存消耗过高。
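The overwrite branch of these semantics can be checked with a small, hedged sketch (it assumes the quick_setup collection from this guide, with entity 0 already present):

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")

# Upserting an existing primary key replaces that entity in place
# rather than adding a second entity with the same id.
client.upsert(
    collection_name="quick_setup",
    data=[{"id": 0, "vector": [0.0, 0.1, 0.2, 0.3, 0.4], "color": "green_0000"}],
)

res = client.query(
    collection_name="quick_setup",
    filter="id == 0",
    output_fields=["color"],
)
print(res)  # expected: a single entity whose color is now "green_0000"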
@@ -439,8 +434,8 @@ data=[
 ]
 
 res = client.upsert(
-collection_name='quick_setup',
-data=data
+    collection_name='quick_setup',
+    data=data
 )
 
 print(res)
@@ -448,10 +443,9 @@ data=data
 # Output
 #
 # {
-# "upsert_count": 10
+#     "upsert_count": 10
 # }
    -
    // 5. Upsert some data
     data = Arrays.asList(
         new JSONObject(Map.of("id", 0L, "vector", Arrays.asList(-0.619954382375778f, 0.4479436794798608f, -0.17493894838751745f, -0.4248030059917294f, -0.8648452746018911f), "color", "black_9898")),
    @@ -522,9 +516,9 @@ data=[
     ]
     
     res = client.upsert(
    -collection_name="quick_setup",
    -data=data,
    -partition_name="partitionA"
    +    collection_name="quick_setup",
    +    data=data,
    +    partition_name="partitionA"
     )
     
     print(res)
    @@ -532,10 +526,9 @@ partition_name="partitionA"
     # Output
     #
     # {
    -# "upsert_count": 10
    +#     "upsert_count": 10
     # }
     
    -
    import io.milvus.v2.service.vector.request.UpsertReq;
     import io.milvus.v2.service.vector.response.UpsertResp;
     
    @@ -611,19 +604,19 @@ res = await client.
-If an entity is no longer needed, you can remove it by using delete().

+If an entity is no longer needed, you can remove it through the use of delete().

-If an entity is no longer needed, you can remove it by using delete().

+If an entity is no longer needed, you can remove it through the use of delete().

-If entities are no longer needed, you can remove them by using delete().

+If an entity is no longer needed, you can remove it by using delete().

-Milvus provides two ways to determine the entities to delete.

+Milvus offers two ways for you to determine the entities to delete.

• Delete entities by filter.

  -When deleting entities with a filter expression, make sure the collection has been loaded. Otherwise, Milvus returns an error message.

  +When deleting entities with a filter expression, make sure the collection has been loaded. Otherwise, Milvus returns an error.

@@ -638,10 +631,9 @@ res = client.delete(
 # Output
 #
 # {
-# "delete_count": 3
+#     "delete_count": 3
 # }
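[Editor's note] For reference, a minimal sketch of the two deletion paths described above. This is an editor's illustration, not part of the patch; it assumes the same local instance and the quick_setup collection, and the filter value and ids are made up.

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")  # assumed local instance

# Option 1: delete by filter expression. The collection must be loaded
# first, or Milvus returns an error.
client.load_collection(collection_name="quick_setup")

res = client.delete(
    collection_name="quick_setup",
    filter='color like "red%"'
)
print(res["delete_count"])

# Option 2: delete by primary keys.
res = client.delete(
    collection_name="quick_setup",
    ids=[0, 1]
)
print(res["delete_count"])
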
    -
    import io.milvus.v2.service.vector.request.DeleteReq;
     import io.milvus.v2.service.vector.response.DeleteResp;
     
@@ -688,10 +680,9 @@ res = await client.
 # Output
     #
     # {
    -# "delete_count": 2
    +#     "delete_count": 2
     # }
     
    -
    deleteReq = DeleteReq.builder()
         .collectionName("quick_setup")
         .ids(Arrays.asList(18L, 19L))
    @@ -732,9 +723,8 @@ partition_name='partitionA',
     print("Entities deleted from partitionA: ", res['delete_count'])
     
     # Output:
    -# Entities deleted from partitionA: 3
    +# Entities deleted from partitionA:  3
     
    -
    deleteReq = DeleteReq.builder()
         .collectionName("quick_setup")
         .filter('color like "blue%"')
    diff --git a/localization/v2.4.x/site/zh/userGuide/manage-collections.json b/localization/v2.4.x/site/zh/userGuide/manage-collections.json
    index 0251fce39..992f8fbfc 100644
    --- a/localization/v2.4.x/site/zh/userGuide/manage-collections.json
    +++ b/localization/v2.4.x/site/zh/userGuide/manage-collections.json
    @@ -1,122 +1 @@
    -{
    -	"codeList": [
    -		"from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n    uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection in quick setup mode\nclient.create_collection(\n    collection_name=\"quick_setup\",\n    dimension=5\n)\n\nres = client.get_load_state(\n    collection_name=\"quick_setup\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"state\": \"\"\n# }\n",
    -		"import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n    .uri(CLUSTER_ENDPOINT)\n    .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n    .collectionName(\"quick_setup\")\n    .dimension(5)\n    .build();\n\nclient.createCollection(quickSetupReq);\n\n// Thread.sleep(5000);\n\nGetLoadStateReq quickSetupLoadStateReq = GetLoadStateReq.builder()\n    .collectionName(\"quick_setup\")\n    .build();\n\nBoolean res = client.getLoadState(quickSetupLoadStateReq);\n\nSystem.out.println(res);\n\n// Output:\n// true\n",
    -		"address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nlet res = await client.createCollection({\n    collection_name: \"quick_setup\",\n    dimension: 5,\n});  \n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.getLoadState({\n    collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n",
    -		"$ export MILVUS_URI=\"localhost:19530\"\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n  \"collectionName\": \"quick_setup\",\n  \"dimension\": 5\n}'\n\n# Output\n#\n# {\n#     \"code\": 0,\n#     \"data\": {},\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n  \"collectionName\": \"quick_setup\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": {\n#         \"loadProgress\": 100,\n#         \"loadState\": \"LoadStateLoaded\"\n#     }\n# }\n",
    -		"# 3. Create a collection in customized setup mode\n\n# 3.1. Create schema\nschema = MilvusClient.create_schema(\n    auto_id=False,\n    enable_dynamic_field=True,\n)\n\n# 3.2. Add fields to schema\nschema.add_field(field_name=\"my_id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"my_vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\n",
    -		"import io.milvus.v2.common.DataType;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\n// 3. Create a collection in customized setup mode\n\n// 3.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 3.2 Add fields to schema\nschema.addField(AddFieldReq.builder()\n    .fieldName(\"my_id\")\n    .dataType(DataType.Int64)\n    .isPrimaryKey(true)\n    .autoID(false)\n    .build());\n\nschema.addField(AddFieldReq.builder()\n    .fieldName(\"my_vector\")\n    .dataType(DataType.FloatVector)\n    .dimension(5)\n    .build());\n",
    -		"// 3. Create a collection in customized setup mode\n// 3.1 Define fields\nconst fields = [\n    {\n        name: \"my_id\",\n        data_type: DataType.Int64,\n        is_primary_key: true,\n        auto_id: false\n    },\n    {\n        name: \"my_vector\",\n        data_type: DataType.FloatVector,\n        dim: 5\n    },\n]\n",
    -		"export fields='[{ \\\n    \"fieldName\": \"my_id\", \\\n    \"dataType\": \"Int64\", \\\n    \"isPrimary\": true \\\n}, \\\n{ \\\n    \"fieldName\": \"my_vector\", \\\n    \"dataType\": \"FloatVector\", \\\n    \"elementTypeParams\": { \\\n        \"dim\": 5 \\\n    } \\\n}]'\n",
    -		"# 3.3. Prepare index parameters\nindex_params = client.prepare_index_params()\n\n# 3.4. Add indexes\nindex_params.add_index(\n    field_name=\"my_id\",\n    index_type=\"STL_SORT\"\n)\n\nindex_params.add_index(\n    field_name=\"my_vector\", \n    index_type=\"IVF_FLAT\",\n    metric_type=\"IP\",\n    params={ \"nlist\": 128 }\n)\n",
    -		"import io.milvus.v2.common.IndexParam;\n\n// 3.3 Prepare index parameters\nIndexParam indexParamForIdField = IndexParam.builder()\n    .fieldName(\"my_id\")\n    .indexType(IndexParam.IndexType.STL_SORT)\n    .build();\n\nIndexParam indexParamForVectorField = IndexParam.builder()\n    .fieldName(\"my_vector\")\n    .indexType(IndexParam.IndexType.IVF_FLAT)\n    .metricType(IndexParam.MetricType.L2)\n    .extraParams(Map.of(\"nlist\", 1024))\n    .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForIdField);\nindexParams.add(indexParamForVectorField);\n",
    -		"// 3.2 Prepare index parameters\nconst index_params = [{\n    field_name: \"my_id\",\n    index_type: \"STL_SORT\"\n},{\n    field_name: \"my_vector\",\n    index_type: \"IVF_FLAT\",\n    metric_type: \"IP\",\n    params: { nlist: 1024}\n}]\n",
    -		"export indexParams='[{ \\\n    \"fieldName\": \"my_id\", \\\n    \"indexName\": \"my_id\", \\\n    \"params\": { \\\n        \"index_type\": \"SLT_SORT\" \\\n  } \\\n}, { \\\n    \"fieldName\": \"my_vector\", \\\n    \"metricType\": \"COSINE\", \\\n    \"indexName\": \"my_vector\", \\\n    \"params\": { \\\n        \"index_type\": \"IVF_FLAT\", \\\n        \"nlist\": 1024 \\\n  } \\\n}]'\n",
    -		"# 3.5. Create a collection with the index loaded simultaneously\nclient.create_collection(\n    collection_name=\"customized_setup_1\",\n    schema=schema,\n    index_params=index_params\n)\n\ntime.sleep(5)\n\nres = client.get_load_state(\n    collection_name=\"customized_setup_1\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"state\": \"\"\n# }\n",
    -		"import io.milvus.v2.service.collection.request.CreateCollectionReq;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\n\n// 3.4 Create a collection with schema and index parameters\nCreateCollectionReq customizedSetupReq1 = CreateCollectionReq.builder()\n    .collectionName(\"customized_setup_1\")\n    .collectionSchema(schema)\n    .indexParams(indexParams)\n    .build();\n\nclient.createCollection(customizedSetupReq1);\n\n// Thread.sleep(5000);\n\n// 3.5 Get load state of the collection\nGetLoadStateReq customSetupLoadStateReq1 = GetLoadStateReq.builder()\n    .collectionName(\"customized_setup_1\")\n    .build();\n\nres = client.getLoadState(customSetupLoadStateReq1);\n\nSystem.out.println(res);\n\n// Output:\n// true\n",
    -		"// 3.3 Create a collection with fields and index parameters\nres = await client.createCollection({\n    collection_name: \"customized_setup_1\",\n    fields: fields,\n    index_params: index_params,\n})\n\nconsole.log(res.error_code)  \n\n// Output\n// \n// Success\n// \n\nres = await client.getLoadState({\n    collection_name: \"customized_setup_1\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n//   \n",
    -		"$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_1\",\n    \"schema\": {\n        \"autoId\": false,\n        \"enabledDynamicField\": false,\n        \"fields\": [\n            {\n                \"fieldName\": \"my_id\",\n                \"dataType\": \"Int64\",\n                \"isPrimary\": true\n            },\n            {\n                \"fieldName\": \"my_vector\",\n                \"dataType\": \"FloatVector\",\n                \"elementTypeParams\": {\n                    \"dim\": \"5\"\n                }\n            }\n        ]\n    },\n    \"indexParams\": [\n        {\n            \"fieldName\": \"my_vector\",\n            \"metricType\": \"COSINE\",\n            \"indexName\": \"my_vector\",\n            \"params\": {\n                \"index_type\": \"IVF_FLAT\",\n                \"nlist\": \"1024\"\n            }\n        },\n        {\n            \"fieldName\": \"my_id\",\n            \"indexName\": \"my_id\",\n            \"params\": {\n                \"index_type\": \"STL_SORT\"\n            }            \n        }\n    ]\n}'\n\n# Output\n#\n# {\n#     \"code\": 0,\n#     \"data\": {},\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_1\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": {\n#         \"loadProgress\": 100,\n#         \"loadState\": \"LoadStateLoaded\"\n#     }\n# }\n",
    -		"# 3.6. Create a collection and index it separately\nclient.create_collection(\n    collection_name=\"customized_setup_2\",\n    schema=schema,\n)\n\nres = client.get_load_state(\n    collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"state\": \"\"\n# }\n",
    -		"// 3.6 Create a collection and index it separately\nCreateCollectionReq customizedSetupReq2 = CreateCollectionReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .collectionSchema(schema)\n    .build();\n\nclient.createCollection(customizedSetupReq2);\n",
    -		"// 3.4 Create a collection and index it seperately\nres = await client.createCollection({\n    collection_name: \"customized_setup_2\",\n    fields: fields,\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.getLoadState({\n    collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n",
    -		"$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\",\n    \"schema\": {\n        \"autoId\": false,\n        \"enabledDynamicField\": false,\n        \"fields\": [\n            {\n                \"fieldName\": \"my_id\",\n                \"dataType\": \"Int64\",\n                \"isPrimary\": true\n            },\n            {\n                \"fieldName\": \"my_vector\",\n                \"dataType\": \"FloatVector\",\n                \"elementTypeParams\": {\n                    \"dim\": \"5\"\n                }\n            }\n        ]\n        \n    }\n}'\n\n# Output\n#\n# {\n#     \"code\": 0,\n#     \"data\": {},\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": {\n#         \"loadState\": \"LoadStateNotLoaded\"\n#     }\n# }\n",
    -		"# 3.6 Create index\nclient.create_index(\n    collection_name=\"customized_setup_2\",\n    index_params=index_params\n)\n\nres = client.get_load_state(\n    collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"state\": \"\"\n# }\n",
    -		"CreateIndexReq  createIndexReq = CreateIndexReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .indexParams(indexParams)\n    .build();\n\nclient.createIndex(createIndexReq);\n\n// Thread.sleep(1000);\n\n// 3.7 Get load state of the collection\nGetLoadStateReq customSetupLoadStateReq2 = GetLoadStateReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .build();\n\nres = client.getLoadState(customSetupLoadStateReq2);\n\nSystem.out.println(res);\n\n// Output:\n// false\n",
    -		"// 3.5 Create index\nres = await client.createIndex({\n    collection_name: \"customized_setup_2\",\n    field_name: \"my_vector\",\n    index_type: \"IVF_FLAT\",\n    metric_type: \"IP\",\n    params: { nlist: 1024}\n})\n\nres = await client.getLoadState({\n    collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n//\n",
    -		"$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/indexes/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\",\n    \"indexParams\": [\n        {\n            \"metricType\": \"L2\",\n            \"fieldName\": \"my_vector\",\n            \"indexName\": \"my_vector\",\n            \"indexConfig\": {\n                \"index_type\": \"IVF_FLAT\",\n                \"nlist\": \"1024\"\n            }\n        }\n    ]\n}'\n\n# Output\n#\n# {\n#     \"code\": 0,\n#     \"data\": {},\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": {\n#         \"loadState\": \"LoadStateNotLoaded\"\n#     }\n# }\n",
    -		"# 5. View Collections\nres = client.describe_collection(\n    collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"collection_name\": \"customized_setup_2\",\n#     \"auto_id\": false,\n#     \"num_shards\": 1,\n#     \"description\": \"\",\n#     \"fields\": [\n#         {\n#             \"field_id\": 100,\n#             \"name\": \"my_id\",\n#             \"description\": \"\",\n#             \"type\": 5,\n#             \"params\": {},\n#             \"element_type\": 0,\n#             \"is_primary\": true\n#         },\n#         {\n#             \"field_id\": 101,\n#             \"name\": \"my_vector\",\n#             \"description\": \"\",\n#             \"type\": 101,\n#             \"params\": {\n#                 \"dim\": 5\n#             },\n#             \"element_type\": 0\n#         }\n#     ],\n#     \"aliases\": [],\n#     \"collection_id\": 448143479230158446,\n#     \"consistency_level\": 2,\n#     \"properties\": {},\n#     \"num_partitions\": 1,\n#     \"enable_dynamic_field\": true\n# }\n\n",
    -		"import io.milvus.v2.service.collection.request.DescribeCollectionReq;\nimport io.milvus.v2.service.collection.response.DescribeCollectionResp;\n\n// 4. View collections\nDescribeCollectionReq describeCollectionReq = DescribeCollectionReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .build();\n\nDescribeCollectionResp describeCollectionRes = client.describeCollection(describeCollectionReq);\n\nSystem.out.println(JSONObject.toJSON(describeCollectionRes));\n\n// Output:\n// {\n//     \"createTime\": 449005822816026627,\n//     \"collectionSchema\": {\"fieldSchemaList\": [\n//         {\n//             \"autoID\": false,\n//             \"dataType\": \"Int64\",\n//             \"name\": \"my_id\",\n//             \"description\": \"\",\n//             \"isPrimaryKey\": true,\n//             \"maxLength\": 65535,\n//             \"isPartitionKey\": false\n//         },\n//         {\n//             \"autoID\": false,\n//             \"dataType\": \"FloatVector\",\n//             \"name\": \"my_vector\",\n//             \"description\": \"\",\n//             \"isPrimaryKey\": false,\n//             \"dimension\": 5,\n//             \"maxLength\": 65535,\n//             \"isPartitionKey\": false\n//         }\n//     ]},\n//     \"vectorFieldName\": [\"my_vector\"],\n//     \"autoID\": false,\n//     \"fieldNames\": [\n//         \"my_id\",\n//         \"my_vector\"\n//     ],\n//     \"description\": \"\",\n//     \"numOfPartitions\": 1,\n//     \"primaryFieldName\": \"my_id\",\n//     \"enableDynamicField\": true,\n//     \"collectionName\": \"customized_setup_2\"\n// }\n",
    -		"// 5. View Collections\nres = await client.describeCollection({\n    collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// {\n//   virtual_channel_names: [ 'by-dev-rootcoord-dml_13_449007919953017716v0' ],\n//   physical_channel_names: [ 'by-dev-rootcoord-dml_13' ],\n//   aliases: [],\n//   start_positions: [],\n//   properties: [],\n//   status: {\n//     extra_info: {},\n//     error_code: 'Success',\n//     reason: '',\n//     code: 0,\n//     retriable: false,\n//     detail: ''\n//   },\n//   schema: {\n//     fields: [ [Object], [Object] ],\n//     properties: [],\n//     name: 'customized_setup_2',\n//     description: '',\n//     autoID: false,\n//     enable_dynamic_field: false\n//   },\n//   collectionID: '449007919953017716',\n//   created_timestamp: '449024569603784707',\n//   created_utc_timestamp: '1712892797866',\n//   shards_num: 1,\n//   consistency_level: 'Bounded',\n//   collection_name: 'customized_setup_2',\n//   db_name: 'default',\n//   num_partitions: '1'\n// }\n// \n",
    -		"curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/describe\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"dbName\": \"default\",\n    \"collectionName\": \"test_collection\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": {\n#         \"aliases\": [],\n#         \"autoId\": false,\n#         \"collectionID\": 448707763883002014,\n#         \"collectionName\": \"test_collection\",\n#         \"consistencyLevel\": \"Bounded\",\n#         \"description\": \"\",\n#         \"enableDynamicField\": true,\n#         \"fields\": [\n#             {\n#                 \"autoId\": false,\n#                 \"description\": \"\",\n#                 \"id\": 100,\n#                 \"name\": \"id\",\n#                 \"partitionKey\": false,\n#                 \"primaryKey\": true,\n#                 \"type\": \"Int64\"\n#             },\n#             {\n#                 \"autoId\": false,\n#                 \"description\": \"\",\n#                 \"id\": 101,\n#                 \"name\": \"vector\",\n#                 \"params\": [\n#                     {\n#                         \"key\": \"dim\",\n#                         \"value\": \"5\"\n#                     }\n#                 ],\n#                 \"partitionKey\": false,\n#                 \"primaryKey\": false,\n#                 \"type\": \"FloatVector\"\n#             }\n#         ],\n#         \"indexes\": [\n#             {\n#                 \"fieldName\": \"vector\",\n#                 \"indexName\": \"vector\",\n#                 \"metricType\": \"COSINE\"\n#             }\n#         ],\n#         \"load\": \"LoadStateLoaded\",\n#         \"partitionsNum\": 1,\n#         \"properties\": [],\n#         \"shardsNum\": 1\n#     }\n# }\n",
    -		"# 6. List all collection names\nres = client.list_collections()\n\nprint(res)\n\n# Output\n#\n# [\n#     \"customized_setup_2\",\n#     \"quick_setup\",\n#     \"customized_setup_1\"\n# ]\n",
    -		"import io.milvus.v2.service.collection.response.ListCollectionsResp;\n\n// 5. List all collection names\nListCollectionsResp listCollectionsRes = client.listCollections();\n\nSystem.out.println(listCollectionsRes.getCollectionNames());\n\n// Output:\n// [\n//     \"customized_setup_2\",\n//     \"quick_setup\",\n//     \"customized_setup_1\"\n// ]\n",
    -		"// 5. List all collection names\nListCollectionsResp listCollectionsRes = client.listCollections();\n\nSystem.out.println(listCollectionsRes.getCollectionNames());\n\n// Output:\n// [\n//     \"customized_setup_1\",\n//     \"quick_setup\",\n//     \"customized_setup_2\"\n// ]\n",
    -		"$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/list\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"dbName\": \"default\"\n}'\n\n# {\n#   \"code\": 0,\n#   \"data\": [\n#     \"quick_setup\",\n#     \"customized_setup_1\",\n#     \"customized_setup_2\"\n#   ]\n# }\n",
    -		"# 7. Load the collection\nclient.load_collection(\n    collection_name=\"customized_setup_2\",\n    replica_number=1 # Number of replicas to create on query nodes. Max value is 1 for Milvus Standalone, and no greater than `queryNode.replicas` for Milvus Cluster.\n)\n\nres = client.get_load_state(\n    collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"state\": \"\"\n# }\n",
    -		"import io.milvus.v2.service.collection.request.LoadCollectionReq;\n\n// 6. Load the collection\nLoadCollectionReq loadCollectionReq = LoadCollectionReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .build();\n\nclient.loadCollection(loadCollectionReq);\n\n// Thread.sleep(5000);\n\n// 7. Get load state of the collection\nGetLoadStateReq loadStateReq = GetLoadStateReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .build();\n\nres = client.getLoadState(loadStateReq);\n\nSystem.out.println(res);\n\n// Output:\n// true\n",
    -		"// 7. Load the collection\nres = await client.loadCollection({\n    collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nawait sleep(3000)\n\nres = await client.getLoadState({\n    collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n",
    -		"$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/load\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\"\n}'\n\n# Output\n#\n# {\n#     \"code\": 0,\n#     \"data\": {},\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n  \"collectionName\": \"customized_setup_2\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": {\n#         \"loadProgress\": 100,\n#         \"loadState\": \"LoadStateLoaded\"\n#     }\n# }\n",
    -		"# 8. Release the collection\nclient.release_collection(\n    collection_name=\"customized_setup_2\"\n)\n\nres = client.get_load_state(\n    collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"state\": \"\"\n# }\n",
    -		"import io.milvus.v2.service.collection.request.ReleaseCollectionReq;\n\n// 8. Release the collection\nReleaseCollectionReq releaseCollectionReq = ReleaseCollectionReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .build();\n\nclient.releaseCollection(releaseCollectionReq);\n\n// Thread.sleep(1000);\n\nres = client.getLoadState(loadStateReq);\n\nSystem.out.println(res);\n\n// Output:\n// false\n",
    -		"// 8. Release the collection\nres = await client.releaseCollection({\n    collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.getLoadState({\n    collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n",
    -		"$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/release\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\"\n}'\n\n# Output\n#\n# {\n#     \"code\": 0,\n#     \"data\": {},\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n  \"collectionName\": \"customized_setup_2\"\n}'\n\n\n# {\n#     \"code\": 0,\n#     \"data\": {\n#         \"loadState\": \"LoadStateNotLoad\"\n#     }\n# }\n",
    -		"# 9.1. Create aliases\nclient.create_alias(\n    collection_name=\"customized_setup_2\",\n    alias=\"bob\"\n)\n\nclient.create_alias(\n    collection_name=\"customized_setup_2\",\n    alias=\"alice\"\n)\n",
    -		"import io.milvus.v2.service.utility.request.CreateAliasReq;\n\n// 9. Manage aliases\n\n// 9.1 Create alias\nCreateAliasReq createAliasReq = CreateAliasReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .alias(\"bob\")\n    .build();\n\nclient.createAlias(createAliasReq);\n\ncreateAliasReq = CreateAliasReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .alias(\"alice\")\n    .build();\n\nclient.createAlias(createAliasReq);\n",
    -		"// 9. Manage aliases\n// 9.1 Create aliases\nres = await client.createAlias({\n    collection_name: \"customized_setup_2\",\n    alias: \"bob\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.createAlias({\n    collection_name: \"customized_setup_2\",\n    alias: \"alice\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n",
    -		"$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\",\n    \"aliasName\": \"bob\"\n}'\n\n# Output\n#\n# {\n#     \"code\": 0,\n#     \"data\": {}\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\",\n    \"aliasName\": \"alice\"\n}'\n\n# Output\n#\n# {\n#     \"code\": 0,\n#     \"data\": {}\n# }\n",
    -		"# 9.2. List aliases\nres = client.list_aliases(\n    collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"aliases\": [\n#         \"bob\",\n#         \"alice\"\n#     ],\n#     \"collection_name\": \"customized_setup_2\",\n#     \"db_name\": \"default\"\n# }\n",
    -		"import io.milvus.v2.service.utility.request.ListAliasesReq;\nimport io.milvus.v2.service.utility.response.ListAliasResp;\n\n// 9.2 List alises\nListAliasesReq listAliasesReq = ListAliasesReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .build();\n\nListAliasResp listAliasRes = client.listAliases(listAliasesReq);\n\nSystem.out.println(listAliasRes.getAlias());\n\n// Output:\n// [\n//     \"bob\",\n//     \"alice\"\n// ]\n",
    -		"// 9.2 List aliases\nres = await client.listAliases({\n    collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.aliases)\n\n// Output\n// \n// [ 'bob', 'alice' ]\n// \n",
    -		"$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/list\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": [\n#         \"bob\",\n#         \"alice\"\n#     ]\n# }\n",
    -		"# 9.3. Describe aliases\nres = client.describe_alias(\n    alias=\"bob\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"alias\": \"bob\",\n#     \"collection_name\": \"customized_setup_2\",\n#     \"db_name\": \"default\"\n# }\n",
    -		"import io.milvus.v2.service.utility.request.DescribeAliasReq;\nimport io.milvus.v2.service.utility.response.DescribeAliasResp;\n\n// 9.3 Describe alias\nDescribeAliasReq describeAliasReq = DescribeAliasReq.builder()\n    .alias(\"bob\")\n    .build();\n\nDescribeAliasResp describeAliasRes = client.describeAlias(describeAliasReq);\n\nSystem.out.println(JSONObject.toJSON(describeAliasRes));\n\n// Output:\n// {\n//     \"alias\": \"bob\",\n//     \"collectionName\": \"customized_setup_2\"\n// }\n",
    -		"// 9.3 Describe aliases\nres = await client.describeAlias({\n    collection_name: \"customized_setup_2\",\n    alias: \"bob\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// {\n//   status: {\n//     extra_info: {},\n//     error_code: 'Success',\n//     reason: '',\n//     code: 0,\n//     retriable: false,\n//     detail: ''\n//   },\n//   db_name: 'default',\n//   alias: 'bob',\n//   collection: 'customized_setup_2'\n// }\n// \n",
    -		"$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/describe\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"aliasName\": \"bob\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": {\n#         \"aliasName\": \"bob\",\n#         \"collectionName\": \"quick_setup\",\n#         \"dbName\": \"default\"\n#     }\n# }\n",
    -		"# 9.4 Reassign aliases to other collections\nclient.alter_alias(\n    collection_name=\"customized_setup_1\",\n    alias=\"alice\"\n)\n\nres = client.list_aliases(\n    collection_name=\"customized_setup_1\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"aliases\": [\n#         \"alice\"\n#     ],\n#     \"collection_name\": \"customized_setup_1\",\n#     \"db_name\": \"default\"\n# }\n\nres = client.list_aliases(\n    collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"aliases\": [\n#         \"bob\"\n#     ],\n#     \"collection_name\": \"customized_setup_2\",\n#     \"db_name\": \"default\"\n# }\n",
    -		"import io.milvus.v2.service.utility.request.AlterAliasReq;\n\n// 9.4 Reassign alias to other collections\nAlterAliasReq alterAliasReq = AlterAliasReq.builder()\n    .collectionName(\"customized_setup_1\")\n    .alias(\"alice\")\n    .build();\n\nclient.alterAlias(alterAliasReq);\n\nlistAliasesReq = ListAliasesReq.builder()\n    .collectionName(\"customized_setup_1\")\n    .build();\n\nlistAliasRes = client.listAliases(listAliasesReq);\n\nSystem.out.println(listAliasRes.getAlias());\n\n// Output:\n// [\"alice\"]\n\nlistAliasesReq = ListAliasesReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .build();\n\nlistAliasRes = client.listAliases(listAliasesReq);\n\nSystem.out.println(listAliasRes.getAlias());\n\n// Output:\n// [\"bob\"]\n",
    -		"// 9.4 Reassign aliases to other collections\nres = await client.alterAlias({\n    collection_name: \"customized_setup_1\",\n    alias: \"alice\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.listAliases({\n    collection_name: \"customized_setup_1\"\n})\n\nconsole.log(res.aliases)\n\n// Output\n// \n// [ 'alice' ]\n// \n\nres = await client.listAliases({\n    collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.aliases)\n\n// Output\n// \n// [ 'bob' ]\n// \n",
    -		"$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/alter\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n     \"collectionName\": \"customized_setup_1\",\n     \"aliasName\": \"alice\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": {}\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/list\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_1\"\n}'\n\n\n# {\n#     \"code\": 0,\n#     \"data\": [\n#         \"alice\"\n#     ]\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/list\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\"\n}'\n\n\n# {\n#     \"code\": 0,\n#     \"data\": [\n#         \"bob\"\n#     ]\n# }\n",
    -		"# 9.5 Drop aliases\nclient.drop_alias(\n    alias=\"bob\"\n)\n\nclient.drop_alias(\n    alias=\"alice\"\n)\n",
    -		"import io.milvus.v2.service.utility.request.DropAliasReq;\n\n// 9.5 Drop alias\nDropAliasReq dropAliasReq = DropAliasReq.builder()\n    .alias(\"bob\")\n    .build();\n\nclient.dropAlias(dropAliasReq);\n\ndropAliasReq = DropAliasReq.builder()\n    .alias(\"alice\")\n    .build();\n\nclient.dropAlias(dropAliasReq);\n",
    -		"// 9.5 Drop aliases\nres = await client.dropAlias({\n    alias: \"bob\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.dropAlias({\n    alias: \"alice\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n",
    -		"$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/drop\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"aliasName\": \"bob\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": {}\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/drop\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"aliasName\": \"alice\"\n}'\n\n\n# {\n#     \"code\": 0,\n#     \"data\": {}\n# }\n",
    -		"from pymilvus import Collection, connections\n\n# Connect to Milvus server\nconnections.connect(host=\"localhost\", port=\"19530\") # Change to your Milvus server IP and port\n\n# Get existing collection\ncollection = Collection(\"quick_setup\")\n\n# Set the TTL for the data in the collection\ncollection.set_properties(\n    properties={\n        \"collection.ttl.seconds\": 60\n    }\n)\n",
    -		"from pymilvus import Collection, connections\n\n# Connect to Milvus server\nconnections.connect(host=\"localhost\", port=\"19530\") # Change to your Milvus server IP and port\n\n# Get existing collection\ncollection = Collection(\"quick_setup\")\n\n# Before setting memory mapping property, we need to release the collection first.\ncollection.release()\n\n# Set memory mapping property to True or Flase\ncollection.set_properties(\n    properties={\n        \"mmap.enabled\": True\n    }\n)\n",
    -		"# 10. Drop the collections\nclient.drop_collection(\n    collection_name=\"quick_setup\"\n)\n\nclient.drop_collection(\n    collection_name=\"customized_setup_1\"\n)\n\nclient.drop_collection(\n    collection_name=\"customized_setup_2\"\n)\n",
    -		"import io.milvus.v2.service.collection.request.DropCollectionReq;\n\n// 10. Drop collections\n\nDropCollectionReq dropQuickSetupParam = DropCollectionReq.builder()\n    .collectionName(\"quick_setup\")\n    .build();\n\nclient.dropCollection(dropQuickSetupParam);\n\nDropCollectionReq dropCustomizedSetupParam = DropCollectionReq.builder()\n    .collectionName(\"customized_setup_1\")\n    .build();\n\nclient.dropCollection(dropCustomizedSetupParam);\n\ndropCustomizedSetupParam = DropCollectionReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .build();\n\nclient.dropCollection(dropCustomizedSetupParam);\n",
    -		"// 10. Drop the collection\nres = await client.dropCollection({\n    collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.dropCollection({\n    collection_name: \"customized_setup_1\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.dropCollection({\n    collection_name: \"quick_setup\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n",
    -		"$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/drop\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"quick_setup\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": {}\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/drop\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_1\"\n}'\n\n\n# {\n#     \"code\": 0,\n#     \"data\": {}\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/drop\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\"\n}'\n\n\n# {\n#     \"code\": 0,\n#     \"data\": {}\n# }\n"
    -	],
    -	"headingContent": "",
    -	"anchorList": [
    -		{
    -			"label": "管理 Collections",
    -			"href": "Manage-Collections",
    -			"type": 1,
    -			"isActive": false
    -		},
    -		{
    -			"label": "开始之前",
    -			"href": "Before-you-start",
    -			"type": 2,
    -			"isActive": false
    -		},
    -		{ "label": "概述", "href": "Overview", "type": 2, "isActive": false },
    -		{
    -			"label": "创建 Collections",
    -			"href": "Create-Collection",
    -			"type": 2,
    -			"isActive": false
    -		},
    -		{
    -			"label": "查看集合",
    -			"href": "View-Collections",
    -			"type": 2,
    -			"isActive": false
    -		},
    -		{
    -			"label": "加载和释放 Collections",
    -			"href": "Load--Release-Collection",
    -			"type": 2,
    -			"isActive": false
    -		},
    -		{
    -			"label": "设置别名",
    -			"href": "Set-up-aliases",
    -			"type": 2,
    -			"isActive": false
    -		},
    -		{
    -			"label": "设置属性",
    -			"href": "Set-Properties",
    -			"type": 2,
    -			"isActive": false
    -		},
    -		{
    -			"label": "放弃收藏",
    -			"href": "Drop-a-Collection",
    -			"type": 2,
    -			"isActive": false
    -		}
    -	]
    -}
    +{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n    uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection in quick setup mode\nclient.create_collection(\n    collection_name=\"quick_setup\",\n    dimension=5\n)\n\nres = client.get_load_state(\n    collection_name=\"quick_setup\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"state\": \"\"\n# }\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n    .uri(CLUSTER_ENDPOINT)\n    .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n    .collectionName(\"quick_setup\")\n    .dimension(5)\n    .build();\n\nclient.createCollection(quickSetupReq);\n\n// Thread.sleep(5000);\n\nGetLoadStateReq quickSetupLoadStateReq = GetLoadStateReq.builder()\n    .collectionName(\"quick_setup\")\n    .build();\n\nBoolean res = client.getLoadState(quickSetupLoadStateReq);\n\nSystem.out.println(res);\n\n// Output:\n// true\n","address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nlet res = await client.createCollection({\n    collection_name: \"quick_setup\",\n    dimension: 5,\n});  \n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.getLoadState({\n    collection_name: \"quick_setup\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n","$ export MILVUS_URI=\"localhost:19530\"\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n  \"collectionName\": \"quick_setup\",\n  \"dimension\": 5\n}'\n\n# Output\n#\n# {\n#     \"code\": 0,\n#     \"data\": {},\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n  \"collectionName\": \"quick_setup\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": {\n#         \"loadProgress\": 100,\n#         \"loadState\": \"LoadStateLoaded\"\n#     }\n# }\n","# 3. Create a collection in customized setup mode\n\n# 3.1. Create schema\nschema = MilvusClient.create_schema(\n    auto_id=False,\n    enable_dynamic_field=True,\n)\n\n# 3.2. Add fields to schema\nschema.add_field(field_name=\"my_id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"my_vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\n","import io.milvus.v2.common.DataType;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\n// 3. Create a collection in customized setup mode\n\n// 3.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 3.2 Add fields to schema\nschema.addField(AddFieldReq.builder()\n    .fieldName(\"my_id\")\n    .dataType(DataType.Int64)\n    .isPrimaryKey(true)\n    .autoID(false)\n    .build());\n\nschema.addField(AddFieldReq.builder()\n    .fieldName(\"my_vector\")\n    .dataType(DataType.FloatVector)\n    .dimension(5)\n    .build());\n","// 3. 
Create a collection in customized setup mode\n// 3.1 Define fields\nconst fields = [\n    {\n        name: \"my_id\",\n        data_type: DataType.Int64,\n        is_primary_key: true,\n        auto_id: false\n    },\n    {\n        name: \"my_vector\",\n        data_type: DataType.FloatVector,\n        dim: 5\n    },\n]\n","export fields='[{ \\\n    \"fieldName\": \"my_id\", \\\n    \"dataType\": \"Int64\", \\\n    \"isPrimary\": true \\\n}, \\\n{ \\\n    \"fieldName\": \"my_vector\", \\\n    \"dataType\": \"FloatVector\", \\\n    \"elementTypeParams\": { \\\n        \"dim\": 5 \\\n    } \\\n}]'\n","# 3.3. Prepare index parameters\nindex_params = client.prepare_index_params()\n\n# 3.4. Add indexes\nindex_params.add_index(\n    field_name=\"my_id\",\n    index_type=\"STL_SORT\"\n)\n\nindex_params.add_index(\n    field_name=\"my_vector\", \n    index_type=\"IVF_FLAT\",\n    metric_type=\"IP\",\n    params={ \"nlist\": 128 }\n)\n","import io.milvus.v2.common.IndexParam;\n\n// 3.3 Prepare index parameters\nIndexParam indexParamForIdField = IndexParam.builder()\n    .fieldName(\"my_id\")\n    .indexType(IndexParam.IndexType.STL_SORT)\n    .build();\n\nIndexParam indexParamForVectorField = IndexParam.builder()\n    .fieldName(\"my_vector\")\n    .indexType(IndexParam.IndexType.IVF_FLAT)\n    .metricType(IndexParam.MetricType.L2)\n    .extraParams(Map.of(\"nlist\", 1024))\n    .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForIdField);\nindexParams.add(indexParamForVectorField);\n","// 3.2 Prepare index parameters\nconst index_params = [{\n    field_name: \"my_id\",\n    index_type: \"STL_SORT\"\n},{\n    field_name: \"my_vector\",\n    index_type: \"IVF_FLAT\",\n    metric_type: \"IP\",\n    params: { nlist: 1024}\n}]\n","export indexParams='[{ \\\n    \"fieldName\": \"my_id\", \\\n    \"indexName\": \"my_id\", \\\n    \"params\": { \\\n        \"index_type\": \"SLT_SORT\" \\\n  } \\\n}, { \\\n    \"fieldName\": \"my_vector\", \\\n    \"metricType\": \"COSINE\", \\\n    \"indexName\": \"my_vector\", \\\n    \"params\": { \\\n        \"index_type\": \"IVF_FLAT\", \\\n        \"nlist\": 1024 \\\n  } \\\n}]'\n","# 3.5. 
Create a collection with the index loaded simultaneously\nclient.create_collection(\n    collection_name=\"customized_setup_1\",\n    schema=schema,\n    index_params=index_params\n)\n\ntime.sleep(5)\n\nres = client.get_load_state(\n    collection_name=\"customized_setup_1\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"state\": \"\"\n# }\n","import io.milvus.v2.service.collection.request.CreateCollectionReq;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\n\n// 3.4 Create a collection with schema and index parameters\nCreateCollectionReq customizedSetupReq1 = CreateCollectionReq.builder()\n    .collectionName(\"customized_setup_1\")\n    .collectionSchema(schema)\n    .indexParams(indexParams)\n    .build();\n\nclient.createCollection(customizedSetupReq1);\n\n// Thread.sleep(5000);\n\n// 3.5 Get load state of the collection\nGetLoadStateReq customSetupLoadStateReq1 = GetLoadStateReq.builder()\n    .collectionName(\"customized_setup_1\")\n    .build();\n\nres = client.getLoadState(customSetupLoadStateReq1);\n\nSystem.out.println(res);\n\n// Output:\n// true\n","// 3.3 Create a collection with fields and index parameters\nres = await client.createCollection({\n    collection_name: \"customized_setup_1\",\n    fields: fields,\n    index_params: index_params,\n})\n\nconsole.log(res.error_code)  \n\n// Output\n// \n// Success\n// \n\nres = await client.getLoadState({\n    collection_name: \"customized_setup_1\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n//   \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_1\",\n    \"schema\": {\n        \"autoId\": false,\n        \"enabledDynamicField\": false,\n        \"fields\": [\n            {\n                \"fieldName\": \"my_id\",\n                \"dataType\": \"Int64\",\n                \"isPrimary\": true\n            },\n            {\n                \"fieldName\": \"my_vector\",\n                \"dataType\": \"FloatVector\",\n                \"elementTypeParams\": {\n                    \"dim\": \"5\"\n                }\n            }\n        ]\n    },\n    \"indexParams\": [\n        {\n            \"fieldName\": \"my_vector\",\n            \"metricType\": \"COSINE\",\n            \"indexName\": \"my_vector\",\n            \"params\": {\n                \"index_type\": \"IVF_FLAT\",\n                \"nlist\": \"1024\"\n            }\n        },\n        {\n            \"fieldName\": \"my_id\",\n            \"indexName\": \"my_id\",\n            \"params\": {\n                \"index_type\": \"STL_SORT\"\n            }            \n        }\n    ]\n}'\n\n# Output\n#\n# {\n#     \"code\": 0,\n#     \"data\": {},\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_1\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": {\n#         \"loadProgress\": 100,\n#         \"loadState\": \"LoadStateLoaded\"\n#     }\n# }\n","# 3.6. 
Create a collection and index it separately\nclient.create_collection(\n    collection_name=\"customized_setup_2\",\n    schema=schema,\n)\n\nres = client.get_load_state(\n    collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"state\": \"\"\n# }\n","// 3.6 Create a collection and index it separately\nCreateCollectionReq customizedSetupReq2 = CreateCollectionReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .collectionSchema(schema)\n    .build();\n\nclient.createCollection(customizedSetupReq2);\n","// 3.4 Create a collection and index it seperately\nres = await client.createCollection({\n    collection_name: \"customized_setup_2\",\n    fields: fields,\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.getLoadState({\n    collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\",\n    \"schema\": {\n        \"autoId\": false,\n        \"enabledDynamicField\": false,\n        \"fields\": [\n            {\n                \"fieldName\": \"my_id\",\n                \"dataType\": \"Int64\",\n                \"isPrimary\": true\n            },\n            {\n                \"fieldName\": \"my_vector\",\n                \"dataType\": \"FloatVector\",\n                \"elementTypeParams\": {\n                    \"dim\": \"5\"\n                }\n            }\n        ]\n        \n    }\n}'\n\n# Output\n#\n# {\n#     \"code\": 0,\n#     \"data\": {},\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": {\n#         \"loadState\": \"LoadStateNotLoaded\"\n#     }\n# }\n","# 3.6 Create index\nclient.create_index(\n    collection_name=\"customized_setup_2\",\n    index_params=index_params\n)\n\nres = client.get_load_state(\n    collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"state\": \"\"\n# }\n","CreateIndexReq  createIndexReq = CreateIndexReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .indexParams(indexParams)\n    .build();\n\nclient.createIndex(createIndexReq);\n\n// Thread.sleep(1000);\n\n// 3.7 Get load state of the collection\nGetLoadStateReq customSetupLoadStateReq2 = GetLoadStateReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .build();\n\nres = client.getLoadState(customSetupLoadStateReq2);\n\nSystem.out.println(res);\n\n// Output:\n// false\n","// 3.5 Create index\nres = await client.createIndex({\n    collection_name: \"customized_setup_2\",\n    field_name: \"my_vector\",\n    index_type: \"IVF_FLAT\",\n    metric_type: \"IP\",\n    params: { nlist: 1024}\n})\n\nres = await client.getLoadState({\n    collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n//\n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/indexes/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\",\n    \"indexParams\": [\n        {\n            \"metricType\": \"L2\",\n            \"fieldName\": \"my_vector\",\n            \"indexName\": \"my_vector\",\n            \"indexConfig\": {\n                \"index_type\": \"IVF_FLAT\",\n           
     \"nlist\": \"1024\"\n            }\n        }\n    ]\n}'\n\n# Output\n#\n# {\n#     \"code\": 0,\n#     \"data\": {},\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": {\n#         \"loadState\": \"LoadStateNotLoaded\"\n#     }\n# }\n","# 5. View Collections\nres = client.describe_collection(\n    collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"collection_name\": \"customized_setup_2\",\n#     \"auto_id\": false,\n#     \"num_shards\": 1,\n#     \"description\": \"\",\n#     \"fields\": [\n#         {\n#             \"field_id\": 100,\n#             \"name\": \"my_id\",\n#             \"description\": \"\",\n#             \"type\": 5,\n#             \"params\": {},\n#             \"element_type\": 0,\n#             \"is_primary\": true\n#         },\n#         {\n#             \"field_id\": 101,\n#             \"name\": \"my_vector\",\n#             \"description\": \"\",\n#             \"type\": 101,\n#             \"params\": {\n#                 \"dim\": 5\n#             },\n#             \"element_type\": 0\n#         }\n#     ],\n#     \"aliases\": [],\n#     \"collection_id\": 448143479230158446,\n#     \"consistency_level\": 2,\n#     \"properties\": {},\n#     \"num_partitions\": 1,\n#     \"enable_dynamic_field\": true\n# }\n\n","import io.milvus.v2.service.collection.request.DescribeCollectionReq;\nimport io.milvus.v2.service.collection.response.DescribeCollectionResp;\n\n// 4. View collections\nDescribeCollectionReq describeCollectionReq = DescribeCollectionReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .build();\n\nDescribeCollectionResp describeCollectionRes = client.describeCollection(describeCollectionReq);\n\nSystem.out.println(JSONObject.toJSON(describeCollectionRes));\n\n// Output:\n// {\n//     \"createTime\": 449005822816026627,\n//     \"collectionSchema\": {\"fieldSchemaList\": [\n//         {\n//             \"autoID\": false,\n//             \"dataType\": \"Int64\",\n//             \"name\": \"my_id\",\n//             \"description\": \"\",\n//             \"isPrimaryKey\": true,\n//             \"maxLength\": 65535,\n//             \"isPartitionKey\": false\n//         },\n//         {\n//             \"autoID\": false,\n//             \"dataType\": \"FloatVector\",\n//             \"name\": \"my_vector\",\n//             \"description\": \"\",\n//             \"isPrimaryKey\": false,\n//             \"dimension\": 5,\n//             \"maxLength\": 65535,\n//             \"isPartitionKey\": false\n//         }\n//     ]},\n//     \"vectorFieldName\": [\"my_vector\"],\n//     \"autoID\": false,\n//     \"fieldNames\": [\n//         \"my_id\",\n//         \"my_vector\"\n//     ],\n//     \"description\": \"\",\n//     \"numOfPartitions\": 1,\n//     \"primaryFieldName\": \"my_id\",\n//     \"enableDynamicField\": true,\n//     \"collectionName\": \"customized_setup_2\"\n// }\n","// 5. 
View Collections\nres = await client.describeCollection({\n    collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// {\n//   virtual_channel_names: [ 'by-dev-rootcoord-dml_13_449007919953017716v0' ],\n//   physical_channel_names: [ 'by-dev-rootcoord-dml_13' ],\n//   aliases: [],\n//   start_positions: [],\n//   properties: [],\n//   status: {\n//     extra_info: {},\n//     error_code: 'Success',\n//     reason: '',\n//     code: 0,\n//     retriable: false,\n//     detail: ''\n//   },\n//   schema: {\n//     fields: [ [Object], [Object] ],\n//     properties: [],\n//     name: 'customized_setup_2',\n//     description: '',\n//     autoID: false,\n//     enable_dynamic_field: false\n//   },\n//   collectionID: '449007919953017716',\n//   created_timestamp: '449024569603784707',\n//   created_utc_timestamp: '1712892797866',\n//   shards_num: 1,\n//   consistency_level: 'Bounded',\n//   collection_name: 'customized_setup_2',\n//   db_name: 'default',\n//   num_partitions: '1'\n// }\n// \n","curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/describe\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"dbName\": \"default\",\n    \"collectionName\": \"test_collection\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": {\n#         \"aliases\": [],\n#         \"autoId\": false,\n#         \"collectionID\": 448707763883002014,\n#         \"collectionName\": \"test_collection\",\n#         \"consistencyLevel\": \"Bounded\",\n#         \"description\": \"\",\n#         \"enableDynamicField\": true,\n#         \"fields\": [\n#             {\n#                 \"autoId\": false,\n#                 \"description\": \"\",\n#                 \"id\": 100,\n#                 \"name\": \"id\",\n#                 \"partitionKey\": false,\n#                 \"primaryKey\": true,\n#                 \"type\": \"Int64\"\n#             },\n#             {\n#                 \"autoId\": false,\n#                 \"description\": \"\",\n#                 \"id\": 101,\n#                 \"name\": \"vector\",\n#                 \"params\": [\n#                     {\n#                         \"key\": \"dim\",\n#                         \"value\": \"5\"\n#                     }\n#                 ],\n#                 \"partitionKey\": false,\n#                 \"primaryKey\": false,\n#                 \"type\": \"FloatVector\"\n#             }\n#         ],\n#         \"indexes\": [\n#             {\n#                 \"fieldName\": \"vector\",\n#                 \"indexName\": \"vector\",\n#                 \"metricType\": \"COSINE\"\n#             }\n#         ],\n#         \"load\": \"LoadStateLoaded\",\n#         \"partitionsNum\": 1,\n#         \"properties\": [],\n#         \"shardsNum\": 1\n#     }\n# }\n","# 6. List all collection names\nres = client.list_collections()\n\nprint(res)\n\n# Output\n#\n# [\n#     \"customized_setup_2\",\n#     \"quick_setup\",\n#     \"customized_setup_1\"\n# ]\n","import io.milvus.v2.service.collection.response.ListCollectionsResp;\n\n// 5. List all collection names\nListCollectionsResp listCollectionsRes = client.listCollections();\n\nSystem.out.println(listCollectionsRes.getCollectionNames());\n\n// Output:\n// [\n//     \"customized_setup_2\",\n//     \"quick_setup\",\n//     \"customized_setup_1\"\n// ]\n","// 5. 
List all collection names\nListCollectionsResp listCollectionsRes = client.listCollections();\n\nSystem.out.println(listCollectionsRes.getCollectionNames());\n\n// Output:\n// [\n//     \"customized_setup_1\",\n//     \"quick_setup\",\n//     \"customized_setup_2\"\n// ]\n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/list\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"dbName\": \"default\"\n}'\n\n# {\n#   \"code\": 0,\n#   \"data\": [\n#     \"quick_setup\",\n#     \"customized_setup_1\",\n#     \"customized_setup_2\"\n#   ]\n# }\n","# 7. Load the collection\nclient.load_collection(\n    collection_name=\"customized_setup_2\",\n    replica_number=1 # Number of replicas to create on query nodes. Max value is 1 for Milvus Standalone, and no greater than `queryNode.replicas` for Milvus Cluster.\n)\n\nres = client.get_load_state(\n    collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"state\": \"\"\n# }\n","import io.milvus.v2.service.collection.request.LoadCollectionReq;\n\n// 6. Load the collection\nLoadCollectionReq loadCollectionReq = LoadCollectionReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .build();\n\nclient.loadCollection(loadCollectionReq);\n\n// Thread.sleep(5000);\n\n// 7. Get load state of the collection\nGetLoadStateReq loadStateReq = GetLoadStateReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .build();\n\nres = client.getLoadState(loadStateReq);\n\nSystem.out.println(res);\n\n// Output:\n// true\n","// 7. Load the collection\nres = await client.loadCollection({\n    collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nawait sleep(3000)\n\nres = await client.getLoadState({\n    collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateLoaded\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/load\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\"\n}'\n\n# Output\n#\n# {\n#     \"code\": 0,\n#     \"data\": {},\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n  \"collectionName\": \"customized_setup_2\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": {\n#         \"loadProgress\": 100,\n#         \"loadState\": \"LoadStateLoaded\"\n#     }\n# }\n","# 7. Load the collection\nclient.load_collection(\n    collection_name=\"customized_setup_2\",\n    load_fields=[\"my_id\", \"my_vector\"] # Load only the specified fields\n    skip_load_dynamic_field=True # Skip loading the dynamic field\n)\n\nres = client.get_load_state(\n    collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"state\": \"\"\n# }\n","# 8. Release the collection\nclient.release_collection(\n    collection_name=\"customized_setup_2\"\n)\n\nres = client.get_load_state(\n    collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"state\": \"\"\n# }\n","import io.milvus.v2.service.collection.request.ReleaseCollectionReq;\n\n// 8. Release the collection\nReleaseCollectionReq releaseCollectionReq = ReleaseCollectionReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .build();\n\nclient.releaseCollection(releaseCollectionReq);\n\n// Thread.sleep(1000);\n\nres = client.getLoadState(loadStateReq);\n\nSystem.out.println(res);\n\n// Output:\n// false\n","// 8. 
Release the collection\nres = await client.releaseCollection({\n    collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.getLoadState({\n    collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.state)\n\n// Output\n// \n// LoadStateNotLoad\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/release\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\"\n}'\n\n# Output\n#\n# {\n#     \"code\": 0,\n#     \"data\": {},\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/get_load_state\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n  \"collectionName\": \"customized_setup_2\"\n}'\n\n\n# {\n#     \"code\": 0,\n#     \"data\": {\n#         \"loadState\": \"LoadStateNotLoad\"\n#     }\n# }\n","# 9.1. Create aliases\nclient.create_alias(\n    collection_name=\"customized_setup_2\",\n    alias=\"bob\"\n)\n\nclient.create_alias(\n    collection_name=\"customized_setup_2\",\n    alias=\"alice\"\n)\n","import io.milvus.v2.service.utility.request.CreateAliasReq;\n\n// 9. Manage aliases\n\n// 9.1 Create alias\nCreateAliasReq createAliasReq = CreateAliasReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .alias(\"bob\")\n    .build();\n\nclient.createAlias(createAliasReq);\n\ncreateAliasReq = CreateAliasReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .alias(\"alice\")\n    .build();\n\nclient.createAlias(createAliasReq);\n","// 9. Manage aliases\n// 9.1 Create aliases\nres = await client.createAlias({\n    collection_name: \"customized_setup_2\",\n    alias: \"bob\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.createAlias({\n    collection_name: \"customized_setup_2\",\n    alias: \"alice\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\",\n    \"aliasName\": \"bob\"\n}'\n\n# Output\n#\n# {\n#     \"code\": 0,\n#     \"data\": {}\n# }\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/create\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\",\n    \"aliasName\": \"alice\"\n}'\n\n# Output\n#\n# {\n#     \"code\": 0,\n#     \"data\": {}\n# }\n","# 9.2. 
List aliases\nres = client.list_aliases(\n    collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"aliases\": [\n#         \"bob\",\n#         \"alice\"\n#     ],\n#     \"collection_name\": \"customized_setup_2\",\n#     \"db_name\": \"default\"\n# }\n","import io.milvus.v2.service.utility.request.ListAliasesReq;\nimport io.milvus.v2.service.utility.response.ListAliasResp;\n\n// 9.2 List alises\nListAliasesReq listAliasesReq = ListAliasesReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .build();\n\nListAliasResp listAliasRes = client.listAliases(listAliasesReq);\n\nSystem.out.println(listAliasRes.getAlias());\n\n// Output:\n// [\n//     \"bob\",\n//     \"alice\"\n// ]\n","// 9.2 List aliases\nres = await client.listAliases({\n    collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.aliases)\n\n// Output\n// \n// [ 'bob', 'alice' ]\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/list\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": [\n#         \"bob\",\n#         \"alice\"\n#     ]\n# }\n","# 9.3. Describe aliases\nres = client.describe_alias(\n    alias=\"bob\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"alias\": \"bob\",\n#     \"collection_name\": \"customized_setup_2\",\n#     \"db_name\": \"default\"\n# }\n","import io.milvus.v2.service.utility.request.DescribeAliasReq;\nimport io.milvus.v2.service.utility.response.DescribeAliasResp;\n\n// 9.3 Describe alias\nDescribeAliasReq describeAliasReq = DescribeAliasReq.builder()\n    .alias(\"bob\")\n    .build();\n\nDescribeAliasResp describeAliasRes = client.describeAlias(describeAliasReq);\n\nSystem.out.println(JSONObject.toJSON(describeAliasRes));\n\n// Output:\n// {\n//     \"alias\": \"bob\",\n//     \"collectionName\": \"customized_setup_2\"\n// }\n","// 9.3 Describe aliases\nres = await client.describeAlias({\n    collection_name: \"customized_setup_2\",\n    alias: \"bob\"\n})\n\nconsole.log(res)\n\n// Output\n// \n// {\n//   status: {\n//     extra_info: {},\n//     error_code: 'Success',\n//     reason: '',\n//     code: 0,\n//     retriable: false,\n//     detail: ''\n//   },\n//   db_name: 'default',\n//   alias: 'bob',\n//   collection: 'customized_setup_2'\n// }\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/describe\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"aliasName\": \"bob\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": {\n#         \"aliasName\": \"bob\",\n#         \"collectionName\": \"quick_setup\",\n#         \"dbName\": \"default\"\n#     }\n# }\n","# 9.4 Reassign aliases to other collections\nclient.alter_alias(\n    collection_name=\"customized_setup_1\",\n    alias=\"alice\"\n)\n\nres = client.list_aliases(\n    collection_name=\"customized_setup_1\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"aliases\": [\n#         \"alice\"\n#     ],\n#     \"collection_name\": \"customized_setup_1\",\n#     \"db_name\": \"default\"\n# }\n\nres = client.list_aliases(\n    collection_name=\"customized_setup_2\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"aliases\": [\n#         \"bob\"\n#     ],\n#     \"collection_name\": \"customized_setup_2\",\n#     \"db_name\": \"default\"\n# }\n","import io.milvus.v2.service.utility.request.AlterAliasReq;\n\n// 9.4 Reassign alias to other collections\nAlterAliasReq alterAliasReq = AlterAliasReq.builder()\n    .collectionName(\"customized_setup_1\")\n    
.alias(\"alice\")\n    .build();\n\nclient.alterAlias(alterAliasReq);\n\nlistAliasesReq = ListAliasesReq.builder()\n    .collectionName(\"customized_setup_1\")\n    .build();\n\nlistAliasRes = client.listAliases(listAliasesReq);\n\nSystem.out.println(listAliasRes.getAlias());\n\n// Output:\n// [\"alice\"]\n\nlistAliasesReq = ListAliasesReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .build();\n\nlistAliasRes = client.listAliases(listAliasesReq);\n\nSystem.out.println(listAliasRes.getAlias());\n\n// Output:\n// [\"bob\"]\n","// 9.4 Reassign aliases to other collections\nres = await client.alterAlias({\n    collection_name: \"customized_setup_1\",\n    alias: \"alice\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.listAliases({\n    collection_name: \"customized_setup_1\"\n})\n\nconsole.log(res.aliases)\n\n// Output\n// \n// [ 'alice' ]\n// \n\nres = await client.listAliases({\n    collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.aliases)\n\n// Output\n// \n// [ 'bob' ]\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/alter\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n     \"collectionName\": \"customized_setup_1\",\n     \"aliasName\": \"alice\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": {}\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/list\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_1\"\n}'\n\n\n# {\n#     \"code\": 0,\n#     \"data\": [\n#         \"alice\"\n#     ]\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/list\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\"\n}'\n\n\n# {\n#     \"code\": 0,\n#     \"data\": [\n#         \"bob\"\n#     ]\n# }\n","# 9.5 Drop aliases\nclient.drop_alias(\n    alias=\"bob\"\n)\n\nclient.drop_alias(\n    alias=\"alice\"\n)\n","import io.milvus.v2.service.utility.request.DropAliasReq;\n\n// 9.5 Drop alias\nDropAliasReq dropAliasReq = DropAliasReq.builder()\n    .alias(\"bob\")\n    .build();\n\nclient.dropAlias(dropAliasReq);\n\ndropAliasReq = DropAliasReq.builder()\n    .alias(\"alice\")\n    .build();\n\nclient.dropAlias(dropAliasReq);\n","// 9.5 Drop aliases\nres = await client.dropAlias({\n    alias: \"bob\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.dropAlias({\n    alias: \"alice\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/drop\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"aliasName\": \"bob\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": {}\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/aliases/drop\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"aliasName\": \"alice\"\n}'\n\n\n# {\n#     \"code\": 0,\n#     \"data\": {}\n# }\n","from pymilvus import Collection, connections\n\n# Connect to Milvus server\nconnections.connect(host=\"localhost\", port=\"19530\") # Change to your Milvus server IP and port\n\n# Get existing collection\ncollection = Collection(\"quick_setup\")\n\n# Set the TTL for the data in the collection\ncollection.set_properties(\n    properties={\n        \"collection.ttl.seconds\": 60\n    }\n)\n","from pymilvus import Collection, connections\n\n# Connect to Milvus server\nconnections.connect(host=\"localhost\", port=\"19530\") # Change to your Milvus server IP and port\n\n# Get existing 
collection\ncollection = Collection(\"quick_setup\")\n\n# Before setting memory mapping property, we need to release the collection first.\ncollection.release()\n\n# Set memory mapping property to True or Flase\ncollection.set_properties(\n    properties={\n        \"mmap.enabled\": True\n    }\n)\n","# 10. Drop the collections\nclient.drop_collection(\n    collection_name=\"quick_setup\"\n)\n\nclient.drop_collection(\n    collection_name=\"customized_setup_1\"\n)\n\nclient.drop_collection(\n    collection_name=\"customized_setup_2\"\n)\n","import io.milvus.v2.service.collection.request.DropCollectionReq;\n\n// 10. Drop collections\n\nDropCollectionReq dropQuickSetupParam = DropCollectionReq.builder()\n    .collectionName(\"quick_setup\")\n    .build();\n\nclient.dropCollection(dropQuickSetupParam);\n\nDropCollectionReq dropCustomizedSetupParam = DropCollectionReq.builder()\n    .collectionName(\"customized_setup_1\")\n    .build();\n\nclient.dropCollection(dropCustomizedSetupParam);\n\ndropCustomizedSetupParam = DropCollectionReq.builder()\n    .collectionName(\"customized_setup_2\")\n    .build();\n\nclient.dropCollection(dropCustomizedSetupParam);\n","// 10. Drop the collection\nres = await client.dropCollection({\n    collection_name: \"customized_setup_2\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.dropCollection({\n    collection_name: \"customized_setup_1\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n\nres = await client.dropCollection({\n    collection_name: \"quick_setup\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/drop\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"quick_setup\"\n}'\n\n# {\n#     \"code\": 0,\n#     \"data\": {}\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/drop\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_1\"\n}'\n\n\n# {\n#     \"code\": 0,\n#     \"data\": {}\n# }\n\n\n$ curl -X POST \"http://${MILVUS_URI}/v2/vectordb/collections/drop\" \\\n-H \"Content-Type: application/json\" \\\n-d '{\n    \"collectionName\": \"customized_setup_2\"\n}'\n\n\n# {\n#     \"code\": 0,\n#     \"data\": {}\n# }\n"],"headingContent":"Manage Collections","anchorList":[{"label":"管理收藏集","href":"Manage-Collections","type":1,"isActive":false},{"label":"开始之前","href":"Before-you-start","type":2,"isActive":false},{"label":"概述","href":"Overview","type":2,"isActive":false},{"label":"创建 Collections","href":"Create-Collection","type":2,"isActive":false},{"label":"查看集合","href":"View-Collections","type":2,"isActive":false},{"label":"加载和释放 Collections","href":"Load--Release-Collection","type":2,"isActive":false},{"label":"设置别名","href":"Set-up-aliases","type":2,"isActive":false},{"label":"设置属性","href":"Set-Properties","type":2,"isActive":false},{"label":"放弃收藏","href":"Drop-a-Collection","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/zh/userGuide/manage-collections.md b/localization/v2.4.x/site/zh/userGuide/manage-collections.md
    index 3a87850ba..a6c221e10 100644
    --- a/localization/v2.4.x/site/zh/userGuide/manage-collections.md
    +++ b/localization/v2.4.x/site/zh/userGuide/manage-collections.md
    @@ -1,9 +1,8 @@
     ---
     id: manage-collections.md
    -title: 管理 Collections
    +title: 管理收藏集
     ---
    -
    -

    管理 Collections

    -
    import io.milvus.v2.client.ConnectConfig;
     import io.milvus.v2.client.MilvusClientV2;
     import io.milvus.v2.service.collection.request.GetLoadStateReq;
    @@ -247,15 +245,14 @@ $ curl -X POST "http://# 3.1. Create schema
     schema = MilvusClient.create_schema(
    -auto_id=False,
    -enable_dynamic_field=True,
    +    auto_id=False,
    +    enable_dynamic_field=True,
     )
     
     # 3.2. Add fields to schema
     schema.add_field(field_name="my_id", datatype=DataType.INT64, is_primary=True)
     schema.add_field(field_name="my_vector", datatype=DataType.FLOAT_VECTOR, dim=5)
     
    -
    import io.milvus.v2.common.DataType;
     import io.milvus.v2.service.collection.request.CreateCollectionReq;
     
    @@ -321,7 +318,7 @@ schema.addField(AddFieldReq.builder()
         
         
           enable_dynamic_field
    -      如果插入到目标 Collections 的数据包括未在 Collections 模式中定义的字段,则决定 Milvus 是否将未定义字段的值保存在动态字段中。
    设置为"true "时,Milvus 将创建一个名为$meta的字段,用于保存插入数据中的任何未定义字段及其值。 + 如果插入到目标 Collections 的数据包括未在 Collections 模式中定义的字段,则决定 Milvus 是否将未定义字段的值保存在动态字段中。
    设置为 "true"时,Milvus 将创建一个名为$meta的字段,用于保存插入数据中的任何未定义字段及其值。 field_name @@ -337,7 +334,7 @@ schema.addField(AddFieldReq.builder() dim - 向量 Embeddings 的维数。
    对于DataType.FLOAT_VECTORDataType .BINARY _ VECTORDataType. FLOAT16_VECTORDataType.BFLOAT16_VECTOR类型的字段,这是必填项。如果使用DataType.SPARSE_FLOAT_VECTOR,请省略此参数。 + 向量 Embeddings 的维数。
    对于DataType.FLOAT_VECTORDataType.BINARY _VECTORDataType .FLOAT16_VECTORDataType.BFLOAT16_VECTOR类型的字段,这是必填项。如果使用DataType.SPARSE_FLOAT_VECTOR,请省略此参数。 @@ -367,7 +364,7 @@ schema.addField(AddFieldReq.builder() dimension - 向量 Embeddings 的维数。
    对于DataType.FloatVectorDataType .BinaryVectorDataType.Float16VectorDataType.BFloat16Vector类型的字段,此项为必填项。 + 向量 Embeddings 的维数。
    对于DataType.FloatVectorDataType.BinaryVectorDataType .Float16VectorDataType.BFloat16Vector类型的字段,此项为必填项。 @@ -451,18 +448,17 @@ index_params = client.prepare_index_params() # 3.4. Add indexes index_params.add_index( -field_name="my_id", -index_type="STL_SORT" + field_name="my_id", + index_type="STL_SORT" ) index_params.add_index( -field_name="my_vector", -index_type="IVF_FLAT", -metric_type="IP", -params={ "nlist": 128 } + field_name="my_vector", + index_type="IVF_FLAT", + metric_type="IP", + params={ "nlist": 128 } )
    -
    import io.milvus.v2.common.IndexParam;
     
     // 3.3 Prepare index parameters
    @@ -624,7 +620,7 @@ indexParams.add(indexParamForVectorField);
     

    上面的代码片段演示了如何分别为向量场和标量场设置索引参数。对于向量场,同时设置度量类型和索引类型。对于标量字段,只需设置索引类型。建议为向量场和任何经常用于筛选的标量场创建索引。

    第 3 步:创建 Collections

    你可以选择分别创建 Collections 和索引文件,或者在创建 Collections 时同时加载索引。

    -

    使用create_collection()以指定的 Schema 和索引参数创建集合,并使用get_load_state()检查集合的加载状态。

    +

    使用create_collection()以指定的 Schema 和索引参数创建集合,并使用get_load_state()检查集合的加载状态。

    使用createCollection()以指定的 Schema 和索引参数创建集合,并使用getLoadState()检查集合的加载状态。

@@ -646,7 +642,7 @@ client.create_collection(
 time.sleep(5)
 
 res = client.get_load_state(
-collection_name="customized_setup_1"
+    collection_name="customized_setup_1"
 )
 
 print(res)
@@ -654,10 +650,9 @@ collection_name="customized_setup_1"
 # Output
 #
 # {
-# "state": "<LoadState: Loaded>"
+#     "state": "<LoadState: Loaded>"
 # }
    -
    import io.milvus.v2.service.collection.request.CreateCollectionReq;
     import io.milvus.v2.service.collection.request.GetLoadStateReq;
     
    @@ -772,7 +767,7 @@ $ curl -X POST "http://#     }
     # }
     
    -

    上面创建的 Collection 会自动加载。要了解加载和释放集合的更多信息,请参阅加载释放集合

    +

    上面创建的 Collection 会自动加载。要了解加载和释放集合的更多信息,请参阅加载和释放集合

  • 分别创建 Collections 和索引文件。

@@ -783,7 +778,7 @@ client.create_collection(
 )
 
 res = client.get_load_state(
-collection_name="customized_setup_2"
+    collection_name="customized_setup_2"
 )
 
 print(res)
@@ -791,10 +786,9 @@ collection_name="customized_setup_2"
 # Output
 #
 # {
-# "state": "<LoadState: NotLoad>"
+#     "state": "<LoadState: NotLoad>"
 # }
  • -
    // 3.6 Create a collection and index it separately
     CreateCollectionReq customizedSetupReq2 = CreateCollectionReq.builder()
         .collectionName("customized_setup_2")
    @@ -887,7 +881,7 @@ $ curl -X POST "http://schema
-此 Collection 的Schema。设置为 "无 "表示将使用默认设置创建此 Collection。要使用自定义Schema设置 Collection,需要创建一个CollectionSchema对象并在此处引用它。在这种情况下,Milvus 会忽略请求中携带的所有其他 Schema 相关设置。
+此 Collection 的 Schema。设置为 "无 "表示将使用默认设置创建此 Collection。要使用自定义Schema设置 Collection,需要创建一个CollectionSchema对象并在此处引用它。在这种情况下,Milvus 会忽略请求中携带的所有其他 Schema 相关设置。
       index_params
@@ -913,7 +907,7 @@ $ curl -X POST "http://indexParams
-在此 Collection 中建立向量场索引的参数。要使用自定义Schema设置集合并自动将集合加载到内存中,需要创建一个包含IndexParam对象列表的IndexParams对象,并在此处引用该对象。
+在此 Collection 中建立向量场索引的参数。要使用自定义 Schema 设置集合并自动将集合加载到内存中,需要创建一个包含 IndexParam 对象列表的IndexParams对象,并在此处引用该对象。

    @@ -981,7 +975,7 @@ $ curl -X POST "http://fields.elementTypeParams
-额外字段参数。
+额外的字段参数。
       fields.elementTypeParams.dim
@@ -999,7 +993,7 @@ client.create_index(
 )
 
 res = client.get_load_state(
-collection_name="customized_setup_2"
+    collection_name="customized_setup_2"
 )
 
 print(res)
@@ -1007,10 +1001,9 @@ collection_name="customized_setup_2"
 # Output
 #
 # {
-# "state": "<LoadState: NotLoad>"
+#     "state": "<LoadState: NotLoad>"
 # }
    -
    CreateIndexReq  createIndexReq = CreateIndexReq.builder()
         .collectionName("customized_setup_2")
         .indexParams(indexParams)
    @@ -1234,41 +1227,40 @@ res = client.describe_collection(
     # Output
     #
     # {
    -# "collection_name": "customized_setup_2",
    -# "auto_id": false,
    -# "num_shards": 1,
    -# "description": "",
    -# "fields": [
    -# {
    -# "field_id": 100,
    -# "name": "my_id",
    -# "description": "",
    -# "type": 5,
    -# "params": {},
    -# "element_type": 0,
    -# "is_primary": true
    -# },
    -# {
    -# "field_id": 101,
    -# "name": "my_vector",
    -# "description": "",
    -# "type": 101,
    -# "params": {
    -# "dim": 5
    -# },
    -# "element_type": 0
    -# }
    -# ],
    -# "aliases": [],
    -# "collection_id": 448143479230158446,
    -# "consistency_level": 2,
    -# "properties": {},
    -# "num_partitions": 1,
    -# "enable_dynamic_field": true
    +#     "collection_name": "customized_setup_2",
    +#     "auto_id": false,
    +#     "num_shards": 1,
    +#     "description": "",
    +#     "fields": [
    +#         {
    +#             "field_id": 100,
    +#             "name": "my_id",
    +#             "description": "",
    +#             "type": 5,
    +#             "params": {},
    +#             "element_type": 0,
    +#             "is_primary": true
    +#         },
    +#         {
    +#             "field_id": 101,
    +#             "name": "my_vector",
    +#             "description": "",
    +#             "type": 101,
    +#             "params": {
    +#                 "dim": 5
    +#             },
    +#             "element_type": 0
    +#         }
    +#     ],
    +#     "aliases": [],
    +#     "collection_id": 448143479230158446,
    +#     "consistency_level": 2,
    +#     "properties": {},
    +#     "num_partitions": 1,
    +#     "enable_dynamic_field": true
     # }
     
     
    -
    import io.milvus.v2.service.collection.request.DescribeCollectionReq;
     import io.milvus.v2.service.collection.response.DescribeCollectionResp;
     
    @@ -1428,12 +1420,11 @@ res = client.list_collections()
     # Output
     #
     # [
    -# "customized_setup_2",
    -# "quick_setup",
    -# "customized_setup_1"
    +#     "customized_setup_2",
    +#     "quick_setup",
    +#     "customized_setup_1"
     # ]
     
    -
    import io.milvus.v2.service.collection.response.ListCollectionsResp;
     
     // 5. List all collection names
    @@ -1516,7 +1507,7 @@ client.load_collection(
     )
     
     res = client.get_load_state(
    -collection_name="customized_setup_2"
    +    collection_name="customized_setup_2"
     )
     
     print(res)
    @@ -1524,10 +1515,9 @@ collection_name="customized_setup_2"
     # Output
     #
     # {
    -# "state": "<LoadState: Loaded>"
    +#     "state": "<LoadState: Loaded>"
     # }
     
    -
    import io.milvus.v2.service.collection.request.LoadCollectionReq;
     
     // 6. Load the collection
    @@ -1603,7 +1593,34 @@ $ curl -X POST "http://#     }
     # }
     
    -

    发布 Collections

    +

    部分加载 Collections(公开预览版)

    +

    此功能目前处于公开预览阶段。应用程序接口和功能将来可能会更改。

    +
    +

    收到加载请求后,Milvus 会将所有向量字段索引和所有标量字段数据加载到内存中。如果某些字段不参与搜索和查询,您可以将其排除在加载之外,以减少内存使用,提高搜索性能。

    +
    +
    # 7. Load the collection
    +client.load_collection(
    +    collection_name="customized_setup_2",
+    load_fields=["my_id", "my_vector"], # Load only the specified fields
    +    skip_load_dynamic_field=True # Skip loading the dynamic field
    +)
    +
    +res = client.get_load_state(
    +    collection_name="customized_setup_2"
    +)
    +
    +print(res)
    +
    +# Output
    +#
    +# {
    +#     "state": "<LoadState: Loaded>"
    +# }
    +
    +

    请注意,只有load_fields 中列出的字段才能用作搜索和查询的筛选条件和输出字段。在列表中应始终包含主键。不加载的字段名将不能用于筛选或输出。

    +

    可以使用skip_load_dynamic_field=True 跳过加载动态字段。Milvus 将动态字段视为单个字段,因此动态字段中的所有键将一起被包含或排除。
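To make the two notes above concrete, here is a minimal Python sketch (not part of the original page) that queries the partially loaded collection; it assumes the customized_setup_2 collection and client from the earlier snippets, with my_id as the loaded primary key field.

# Only fields named in load_fields can appear in filters or output_fields;
# my_id and my_vector were loaded above, so this query is valid.
res = client.query(
    collection_name="customized_setup_2",
    filter="my_id >= 0",       # filter on a loaded field
    output_fields=["my_id"],   # output a loaded field
    limit=3
)
print(res)
# Referencing a field that was skipped during loading here would fail.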

    +
    +

    释放 Collections

    要释放一个 Collections,请使用 release_collection()方法,并指定 Collections 名称。

@@ -1623,7 +1640,7 @@ client.release_collection(
 )
 
 res = client.get_load_state(
-collection_name="customized_setup_2"
+    collection_name="customized_setup_2"
 )
 
 print(res)
@@ -1631,10 +1648,9 @@ collection_name="customized_setup_2"
 # Output
 #
 # {
-# "state": "<LoadState: NotLoad>"
+#     "state": "<LoadState: NotLoad>"
 # }
    -
    import io.milvus.v2.service.collection.request.ReleaseCollectionReq;
     
     // 8. Release the collection
    @@ -1741,11 +1757,10 @@ client.create_alias(
     )
     
     client.create_alias(
    -collection_name="customized_setup_2",
    -alias="alice"
    +    collection_name="customized_setup_2",
    +    alias="alice"
     )
     
    -
    import io.milvus.v2.service.utility.request.CreateAliasReq;
     
     // 9. Manage aliases
    @@ -1915,15 +1930,14 @@ res = client.list_aliases(
     # Output
     #
     # {
    -# "aliases": [
    -# "bob",
    -# "alice"
    -# ],
    -# "collection_name": "customized_setup_2",
    -# "db_name": "default"
    +#     "aliases": [
    +#         "bob",
    +#         "alice"
    +#     ],
    +#     "collection_name": "customized_setup_2",
    +#     "db_name": "default"
     # }
     
    -
    import io.milvus.v2.service.utility.request.ListAliasesReq;
     import io.milvus.v2.service.utility.response.ListAliasResp;
     
    @@ -1992,12 +2006,11 @@ res = client.describe_alias(
     # Output
     #
     # {
    -# "alias": "bob",
    -# "collection_name": "customized_setup_2",
    -# "db_name": "default"
    +#     "alias": "bob",
    +#     "collection_name": "customized_setup_2",
    +#     "db_name": "default"
     # }
     
    -
    import io.milvus.v2.service.utility.request.DescribeAliasReq;
     import io.milvus.v2.service.utility.response.DescribeAliasResp;
     
    @@ -2077,7 +2090,7 @@ client.alter_alias(
     )
     
     res = client.list_aliases(
    -collection_name="customized_setup_1"
    +    collection_name="customized_setup_1"
     )
     
     print(res)
    @@ -2085,15 +2098,15 @@ collection_name="customized_setup_1"
     # Output
     #
     # {
    -# "aliases": [
    -# "alice"
    -# ],
    -# "collection_name": "customized_setup_1",
    -# "db_name": "default"
    +#     "aliases": [
    +#         "alice"
    +#     ],
    +#     "collection_name": "customized_setup_1",
    +#     "db_name": "default"
     # }
     
     res = client.list_aliases(
    -collection_name="customized_setup_2"
    +    collection_name="customized_setup_2"
     )
     
     print(res)
    @@ -2101,14 +2114,13 @@ collection_name="customized_setup_2"
     # Output
     #
     # {
    -# "aliases": [
    -# "bob"
    -# ],
    -# "collection_name": "customized_setup_2",
    -# "db_name": "default"
    +#     "aliases": [
    +#         "bob"
    +#     ],
    +#     "collection_name": "customized_setup_2",
    +#     "db_name": "default"
     # }
     
    -
    import io.milvus.v2.service.utility.request.AlterAliasReq;
     
     // 9.4 Reassign alias to other collections
    @@ -2238,10 +2250,9 @@ client.drop_alias(
     )
     
     client.drop_alias(
    -alias="alice"
    +    alias="alice"
     )
     
    -
    import io.milvus.v2.service.utility.request.DropAliasReq;
     
     // 9.5 Drop alias
    @@ -2334,13 +2345,12 @@ collection = Collection("quick_setup"
     
     # Set the TTL for the data in the collection
     collection.set_properties(
    -properties={
    -"collection.ttl.seconds": 60
    -}
    +    properties={
    +        "collection.ttl.seconds": 60
    +    }
     )
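A hedged follow-up sketch (an assumption, not from the original page): Milvus treats a TTL of 0 as the default "never expire" setting, so writing the same property key again should effectively turn expiration back off.

# Assumption: setting "collection.ttl.seconds" to 0 restores the default
# behavior in which entities do not expire.
collection.set_properties(
    properties={
        "collection.ttl.seconds": 0
    }
)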
     
-
-

    设置 MMAP

    为 Collections 配置内存映射 (MMAP) 属性,该属性决定数据是否映射到内存中以提高查询性能。有关详细信息,请参阅配置内存映射 。

    +

    设置 MMAP

    为 Collections 配置内存映射 (MMAP) 属性,该属性决定数据是否映射到内存中以提高查询性能。有关详细信息,请参阅配置内存映射

    在设置 MMAP 属性之前,请先释放 Collection。否则会出错。

@@ -2357,12 +2367,11 @@ collection.release()
 
 # Set memory mapping property to True or False
 collection.set_properties(
-properties={
-"mmap.enabled": True
-}
+    properties={
+        "mmap.enabled": True
+    }
 )
    -

    放弃收藏

    -
    import io.milvus.v2.service.collection.request.DropCollectionReq;
     
     // 10. Drop collections
    diff --git a/localization/v2.4.x/site/zh/userGuide/manage-indexes/index-vector-fields.json b/localization/v2.4.x/site/zh/userGuide/manage-indexes/index-vector-fields.json
    index cbd17d3b1..162e8466b 100644
    --- a/localization/v2.4.x/site/zh/userGuide/manage-indexes/index-vector-fields.json
    +++ b/localization/v2.4.x/site/zh/userGuide/manage-indexes/index-vector-fields.json
    @@ -1 +1 @@
    -{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n    uri=\"http://localhost:19530\"\n)\n\n# 2. Create schema\n# 2.1. Create schema\nschema = MilvusClient.create_schema(\n    auto_id=False,\n    enable_dynamic_field=True,\n)\n\n# 2.2. Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\n\n# 3. Create collection\nclient.create_collection(\n    collection_name=\"customized_setup\", \n    schema=schema, \n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n    .uri(CLUSTER_ENDPOINT)\n    .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder().fieldName(\"id\").dataType(DataType.Int64).isPrimaryKey(true).autoID(false).build());\nschema.addField(AddFieldReq.builder().fieldName(\"vector\").dataType(DataType.FloatVector).dimension(5).build());\n\n// 3 Create a collection without schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n.collectionName(\"customized_setup\")\n.collectionSchema(schema)\n.build();\n\nclient.createCollection(customizedSetupReq);\n","// 1. Set up a Milvus Client\nclient = new MilvusClient({address, token});\n\n// 2. Define fields for the collection\nconst fields = [\n    {\n        name: \"id\",\n        data_type: DataType.Int64,\n        is_primary_key: true,\n        autoID: false\n    },\n    {\n        name: \"vector\",\n        data_type: DataType.FloatVector,\n        dim: 5\n    },\n]\n\n// 3. Create a collection\nres = await client.createCollection({\n    collection_name: \"customized_setup\",\n    fields: fields,\n})\n\nconsole.log(res.error_code)  \n\n// Output\n// \n// Success\n// \n","# 4.1. Set up the index parameters\nindex_params = MilvusClient.prepare_index_params()\n\n# 4.2. Add an index on the vector field.\nindex_params.add_index(\n    field_name=\"vector\",\n    metric_type=\"COSINE\",\n    index_type=\"IVF_FLAT\",\n    index_name=\"vector_index\",\n    params={ \"nlist\": 128 }\n)\n\n# 4.3. Create an index file\nclient.create_index(\n    collection_name=\"customized_setup\",\n    index_params=index_params\n)\n","import io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.index.request.CreateIndexReq;\n\n// 4 Prepare index parameters\n\n// 4.2 Add an index for the vector field \"vector\"\nIndexParam indexParamForVectorField = IndexParam.builder()\n    .fieldName(\"vector\")\n    .indexName(\"vector_index\")\n    .indexType(IndexParam.IndexType.IVF_FLAT)\n    .metricType(IndexParam.MetricType.COSINE)\n    .extraParams(Map.of(\"nlist\", 128))\n    .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n\n// 4.3 Crate an index file\nCreateIndexReq createIndexReq = CreateIndexReq.builder()\n    .collectionName(\"customized_setup\")\n    .indexParams(indexParams)\n    .build();\n\nclient.createIndex(createIndexReq);\n","// 4. Set up index for the collection\n// 4.1. 
Set up the index parameters\nres = await client.createIndex({\n    collection_name: \"customized_setup\",\n    field_name: \"vector\",\n    index_type: \"AUTOINDEX\",\n    metric_type: \"COSINE\",   \n    index_name: \"vector_index\",\n    params: { \"nlist\": 128 }\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","# 5. Describe index\nres = client.list_indexes(\n    collection_name=\"customized_setup\"\n)\n\nprint(res)\n\n# Output\n#\n# [\n#     \"vector_index\",\n# ]\n\nres = client.describe_index(\n    collection_name=\"customized_setup\",\n    index_name=\"vector_index\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"index_type\": ,\n#     \"metric_type\": \"COSINE\",\n#     \"field_name\": \"vector\",\n#     \"index_name\": \"vector_index\"\n# }\n","import io.milvus.v2.service.index.request.DescribeIndexReq;\nimport io.milvus.v2.service.index.response.DescribeIndexResp;\n\n// 5. Describe index\n// 5.1 List the index names\nListIndexesReq listIndexesReq = ListIndexesReq.builder()\n    .collectionName(\"customized_setup\")\n    .build();\n\nList indexNames = client.listIndexes(listIndexesReq);\n\nSystem.out.println(indexNames);\n\n// Output:\n// [\n//     \"vector_index\"\n// ]\n\n// 5.2 Describe an index\nDescribeIndexReq describeIndexReq = DescribeIndexReq.builder()\n    .collectionName(\"customized_setup\")\n    .indexName(\"vector_index\")\n    .build();\n\nDescribeIndexResp describeIndexResp = client.describeIndex(describeIndexReq);\n\nSystem.out.println(JSONObject.toJSON(describeIndexResp));\n\n// Output:\n// {\n//     \"metricType\": \"COSINE\",\n//     \"indexType\": \"AUTOINDEX\",\n//     \"fieldName\": \"vector\",\n//     \"indexName\": \"vector_index\"\n// }\n","// 5. Describe the index\nres = await client.describeIndex({\n    collection_name: \"customized_setup\",\n    index_name: \"vector_index\"\n})\n\nconsole.log(JSON.stringify(res.index_descriptions, null, 2))\n\n// Output\n// \n// [\n//   {\n//     \"params\": [\n//       {\n//         \"key\": \"index_type\",\n//         \"value\": \"AUTOINDEX\"\n//       },\n//       {\n//         \"key\": \"metric_type\",\n//         \"value\": \"COSINE\"\n//       }\n//     ],\n//     \"index_name\": \"vector_index\",\n//     \"indexID\": \"449007919953063141\",\n//     \"field_name\": \"vector\",\n//     \"indexed_rows\": \"0\",\n//     \"total_rows\": \"0\",\n//     \"state\": \"Finished\",\n//     \"index_state_fail_reason\": \"\",\n//     \"pending_index_rows\": \"0\"\n//   }\n// ]\n// \n","# 6. Drop index\nclient.drop_index(\n    collection_name=\"customized_setup\",\n    index_name=\"vector_index\"\n)\n","// 6. Drop index\n\nDropIndexReq dropIndexReq = DropIndexReq.builder()\n    .collectionName(\"customized_setup\")\n    .indexName(\"vector_index\")\n    .build();\n\nclient.dropIndex(dropIndexReq);\n","// 6. Drop the index\nres = await client.dropIndex({\n    collection_name: \"customized_setup\",\n    index_name: \"vector_index\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n"],"headingContent":"","anchorList":[{"label":"索引向量字段","href":"Index-Vector-Fields","type":1,"isActive":false},{"label":"索引概述","href":"Overview","type":2,"isActive":false},{"label":"准备工作","href":"Preparations","type":2,"isActive":false},{"label":"为数据集建立索引","href":"Index-a-Collection","type":2,"isActive":false},{"label":"检查索引详细信息","href":"Check-Index-Details","type":2,"isActive":false},{"label":"删除索引","href":"Drop-an-Index","type":2,"isActive":false}]}
    \ No newline at end of file
    +{"codeList":["from pymilvus import MilvusClient, DataType\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n    uri=\"http://localhost:19530\"\n)\n\n# 2. Create schema\n# 2.1. Create schema\nschema = MilvusClient.create_schema(\n    auto_id=False,\n    enable_dynamic_field=True,\n)\n\n# 2.2. Add fields to schema\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\n\n# 3. Create collection\nclient.create_collection(\n    collection_name=\"customized_setup\", \n    schema=schema, \n)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n    .uri(CLUSTER_ENDPOINT)\n    .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder().fieldName(\"id\").dataType(DataType.Int64).isPrimaryKey(true).autoID(false).build());\nschema.addField(AddFieldReq.builder().fieldName(\"vector\").dataType(DataType.FloatVector).dimension(5).build());\n\n// 3 Create a collection without schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n.collectionName(\"customized_setup\")\n.collectionSchema(schema)\n.build();\n\nclient.createCollection(customizedSetupReq);\n","// 1. Set up a Milvus Client\nclient = new MilvusClient({address, token});\n\n// 2. Define fields for the collection\nconst fields = [\n    {\n        name: \"id\",\n        data_type: DataType.Int64,\n        is_primary_key: true,\n        autoID: false\n    },\n    {\n        name: \"vector\",\n        data_type: DataType.FloatVector,\n        dim: 5\n    },\n]\n\n// 3. Create a collection\nres = await client.createCollection({\n    collection_name: \"customized_setup\",\n    fields: fields,\n})\n\nconsole.log(res.error_code)  \n\n// Output\n// \n// Success\n// \n","# 4.1. Set up the index parameters\nindex_params = MilvusClient.prepare_index_params()\n\n# 4.2. Add an index on the vector field.\nindex_params.add_index(\n    field_name=\"vector\",\n    metric_type=\"COSINE\",\n    index_type=\"IVF_FLAT\",\n    index_name=\"vector_index\",\n    params={ \"nlist\": 128 }\n)\n\n# 4.3. Create an index file\nclient.create_index(\n    collection_name=\"customized_setup\",\n    index_params=index_params,\n    sync=False # Whether to wait for index creation to complete before returning. 
Defaults to True.\n)\n","import io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.index.request.CreateIndexReq;\n\n// 4 Prepare index parameters\n\n// 4.2 Add an index for the vector field \"vector\"\nIndexParam indexParamForVectorField = IndexParam.builder()\n    .fieldName(\"vector\")\n    .indexName(\"vector_index\")\n    .indexType(IndexParam.IndexType.IVF_FLAT)\n    .metricType(IndexParam.MetricType.COSINE)\n    .extraParams(Map.of(\"nlist\", 128))\n    .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n\n// 4.3 Crate an index file\nCreateIndexReq createIndexReq = CreateIndexReq.builder()\n    .collectionName(\"customized_setup\")\n    .indexParams(indexParams)\n    .build();\n\nclient.createIndex(createIndexReq);\n","// 4. Set up index for the collection\n// 4.1. Set up the index parameters\nres = await client.createIndex({\n    collection_name: \"customized_setup\",\n    field_name: \"vector\",\n    index_type: \"AUTOINDEX\",\n    metric_type: \"COSINE\",   \n    index_name: \"vector_index\",\n    params: { \"nlist\": 128 }\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n","# 5. Describe index\nres = client.list_indexes(\n    collection_name=\"customized_setup\"\n)\n\nprint(res)\n\n# Output\n#\n# [\n#     \"vector_index\",\n# ]\n\nres = client.describe_index(\n    collection_name=\"customized_setup\",\n    index_name=\"vector_index\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n#     \"index_type\": ,\n#     \"metric_type\": \"COSINE\",\n#     \"field_name\": \"vector\",\n#     \"index_name\": \"vector_index\"\n# }\n","import io.milvus.v2.service.index.request.DescribeIndexReq;\nimport io.milvus.v2.service.index.response.DescribeIndexResp;\n\n// 5. Describe index\n// 5.1 List the index names\nListIndexesReq listIndexesReq = ListIndexesReq.builder()\n    .collectionName(\"customized_setup\")\n    .build();\n\nList indexNames = client.listIndexes(listIndexesReq);\n\nSystem.out.println(indexNames);\n\n// Output:\n// [\n//     \"vector_index\"\n// ]\n\n// 5.2 Describe an index\nDescribeIndexReq describeIndexReq = DescribeIndexReq.builder()\n    .collectionName(\"customized_setup\")\n    .indexName(\"vector_index\")\n    .build();\n\nDescribeIndexResp describeIndexResp = client.describeIndex(describeIndexReq);\n\nSystem.out.println(JSONObject.toJSON(describeIndexResp));\n\n// Output:\n// {\n//     \"metricType\": \"COSINE\",\n//     \"indexType\": \"AUTOINDEX\",\n//     \"fieldName\": \"vector\",\n//     \"indexName\": \"vector_index\"\n// }\n","// 5. Describe the index\nres = await client.describeIndex({\n    collection_name: \"customized_setup\",\n    index_name: \"vector_index\"\n})\n\nconsole.log(JSON.stringify(res.index_descriptions, null, 2))\n\n// Output\n// \n// [\n//   {\n//     \"params\": [\n//       {\n//         \"key\": \"index_type\",\n//         \"value\": \"AUTOINDEX\"\n//       },\n//       {\n//         \"key\": \"metric_type\",\n//         \"value\": \"COSINE\"\n//       }\n//     ],\n//     \"index_name\": \"vector_index\",\n//     \"indexID\": \"449007919953063141\",\n//     \"field_name\": \"vector\",\n//     \"indexed_rows\": \"0\",\n//     \"total_rows\": \"0\",\n//     \"state\": \"Finished\",\n//     \"index_state_fail_reason\": \"\",\n//     \"pending_index_rows\": \"0\"\n//   }\n// ]\n// \n","# 6. Drop index\nclient.drop_index(\n    collection_name=\"customized_setup\",\n    index_name=\"vector_index\"\n)\n","// 6. 
Drop index\n\nDropIndexReq dropIndexReq = DropIndexReq.builder()\n    .collectionName(\"customized_setup\")\n    .indexName(\"vector_index\")\n    .build();\n\nclient.dropIndex(dropIndexReq);\n","// 6. Drop the index\nres = await client.dropIndex({\n    collection_name: \"customized_setup\",\n    index_name: \"vector_index\"\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n// \n"],"headingContent":"Index Vector Fields","anchorList":[{"label":"索引向量字段","href":"Index-Vector-Fields","type":1,"isActive":false},{"label":"索引概述","href":"Overview","type":2,"isActive":false},{"label":"准备工作","href":"Preparations","type":2,"isActive":false},{"label":"索引一个 Collection","href":"Index-a-Collection","type":2,"isActive":false},{"label":"检查索引详细信息","href":"Check-Index-Details","type":2,"isActive":false},{"label":"删除索引","href":"Drop-an-Index","type":2,"isActive":false}]}
    \ No newline at end of file
    diff --git a/localization/v2.4.x/site/zh/userGuide/manage-indexes/index-vector-fields.md b/localization/v2.4.x/site/zh/userGuide/manage-indexes/index-vector-fields.md
    index 35a6a598a..9a2cd934b 100644
    --- a/localization/v2.4.x/site/zh/userGuide/manage-indexes/index-vector-fields.md
    +++ b/localization/v2.4.x/site/zh/userGuide/manage-indexes/index-vector-fields.md
    @@ -1,8 +1,8 @@
     ---
     id: index-vector-fields.md
     order: 1
    -summary: 本指南将指导您完成创建和管理集合中向量字段索引的基本操作。
    -title: 索引向量场
    +summary: 本指南将指导您完成创建和管理 Collections 中向量字段索引的基本操作。
    +title: 索引向量字段
     ---
     

    索引向量字段

    本指南将指导您完成在集合中创建和管理向量字段索引的基本操作。

    +

    本指南将指导您完成在 Collections 中创建和管理向量字段索引的基本操作。

    索引概述

    利用存储在索引文件中的元数据,Milvus 以专门的结构组织数据,便于在搜索或查询过程中快速检索所需的信息。

    -

    Milvus 提供了多种索引类型和指标,可对字段值进行排序,以实现高效的相似性搜索。下表列出了不同向量字段类型所支持的索引类型和度量。有关详情,请参阅内存索引相似性度量

    +

    Milvus 提供多种索引类型和度量标准,对字段值进行排序,以实现高效的相似性搜索。下表列出了不同向量字段类型所支持的索引类型和度量。有关详情,请参阅内存索引相似性度量

@@ -103,20 +103,20 @@ title: 索引向量场

    正如 "管理集合 "中所述,如果在创建集合请求中指定了以下条件,Milvus 会自动生成索引并将其加载到内存中:

    +

    正如管理 Collections 中所解释的,如果在创建 Collections 请求中指定了以下任一条件,Milvus 会在创建 Collections 时自动生成索引并将其加载到内存中:

    • 向量场的维度和度量类型,或

    • -
    • 模式和索引参数。

    • +
    • Schema 和索引参数。

    -

    下面的代码片段重新利用了现有代码,以建立与 Milvus 实例的连接,并在不指定索引参数的情况下创建集合。在这种情况下,该集合缺乏索引并保持未加载状态。

    +

    下面的代码片段对现有代码进行了重新利用,以建立与 Milvus 实例的连接,并在不指定其索引参数的情况下创建 Collections。在这种情况下,Collection 缺乏索引并保持未加载状态。

    -

    要准备索引,请使用 MilvusClient连接到 Milvus 服务器,并使用 create_schema(), add_field()create_collection().

    +

    要准备索引,请使用 MilvusClient连接到 Milvus 服务器,并通过使用 create_schema(), add_field()create_collection().

    要准备索引,使用 MilvusClientV2连接到 Milvus 服务器,并通过使用 createSchema(), addField()createCollection().

    -

    要准备索引,使用 MilvusClient连接到 Milvus 服务器,并使用 createCollection().

    +

    要准备索引,使用 MilvusClient连接到 Milvus 服务器,并通过使用 createCollection().

    @@ -206,7 +206,7 @@ res = await client.// Success //
    -

    为数据集建立索引

    import io.milvus.v2.common.IndexParam;
    @@ -323,12 +324,16 @@ res = await client.collection_name
    -      现有集合的名称。
    +      现有 Collections 的名称。
         
         
           index_params
           包含IndexParam对象列表的IndexParams对象。
         
    +    
    +      sync
    +      控制与客户端请求相关的索引构建方式。有效值:
    • True (默认):客户端等待索引完全建立后才返回。这意味着在该过程完成之前不会收到响应。
    • False:客户端收到请求后立即返回,索引正在后台建立。要了解索引创建是否已完成,请使用describe_index()方法。
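A rough Python sketch of the sync behavior described above (not text from the original page; it reuses the customized_setup collection and the index_params prepared earlier in this guide):

# sync=False returns immediately; the index keeps building in the background.
client.create_index(
    collection_name="customized_setup",
    index_params=index_params,
    sync=False
)

# Later, call describe_index() to see whether building has finished.
res = client.describe_index(
    collection_name="customized_setup",
    index_name="vector_index"
)
print(res)  # inspect the returned details, e.g. indexed versus total rows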
+
@@ -371,7 +376,7 @@ res = await client.collection_name
-
+
@@ -397,7 +402,7 @@ res = await client.

    备注

    -

    目前,只能为集合中的每个字段创建一个索引文件。

    +

    目前,只能为 Collections 中的每个字段创建一个索引文件。

检查索引详细信息

-

    您可以检查为特定字段创建的索引文件,并收集使用该索引文件索引的行数统计。

    +

    您可以检查在特定字段上创建的索引文件,并收集使用该索引文件索引的行数统计。
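For reference, the Python calls this section relies on look roughly as follows (a sketch reusing the client and the customized_setup collection from the preparation step; the exact output shape is shown in the code listings above):

# List the index names created on the collection
res = client.list_indexes(
    collection_name="customized_setup"
)
print(res)  # e.g. ["vector_index"]

# Inspect one index file, including its row-count statistics
res = client.describe_index(
    collection_name="customized_setup",
    index_name="vector_index"
)
print(res)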

    删除索引

    本指南将指导您如何在集合中创建和管理分区。

    +

    本指南将指导您如何创建和管理 Collections 中的分区。

    分区概述

    Milvus 中的分区表示对集合的细分。该功能允许将集合的物理存储划分为多个部分,通过将焦点缩小到较小的数据子集而不是整个集合,从而提高查询性能。

    -

    创建数据集时,至少会自动创建一个名为_default的默认分区。在一个数据集中最多可以创建 1,024 个分区。

    +

    在 Milvus 中,一个分区代表一个 Collections 的子分区。此功能允许将 Collections 的物理存储划分为多个部分,通过将关注点缩小到较小的数据子集而不是整个集合,有助于提高查询性能。

    +

    创建 Collections 时,至少会自动创建一个名为_default的默认分区。在一个 Collection 中最多可以创建 1,024 个分区。

    注释

    -

    Milvus 引入了一项名为 "Partition Key"(Partition Key)的功能,可利用底层分区根据特定字段的散列值来存储实体。该功能有助于实现多租户,提高搜索性能。有关详细信息,请阅读使用Partition Key

    -

    如果在集合中开启了Partition Key功能,Milvus 就会负责管理所有分区,从而减轻你的责任。

    +

    Milvus 引入了一项名为Partition Key 的功能,利用底层分区,根据特定字段的散列值来存储实体。该功能有助于实现多租户,提高搜索性能。有关详细信息,请阅读使用分区密钥

    +

    如果在 Collections 中开启了Partition Key功能,Milvus 会负责管理所有分区,从而减轻你的责任。

    准备工作

    下面的代码片段是对现有代码的重新利用,以建立与 Milvus 的连接,并在快速设置模式下创建数据集,表明数据集在创建时已加载。

    +

    下面的代码片段对现有代码进行了重新利用,以建立与 Milvus 的连接,并在快速设置模式下创建一个 Collection,表示创建时已加载该 Collection。

    -

    对于准备工作,使用 MilvusClient连接到 Milvus,并使用 create_collection()在快速设置模式下创建程序集。

    +

    对于准备工作,使用 MilvusClient连接到 Milvus,并使用 create_collection()以快速设置模式创建 Collections。

    -

    对于准备工作,使用 MilvusClientV2连接到 Milvus,并使用 createCollection()以快速设置模式创建采集。

    +

    对于准备工作,使用 MilvusClientV2连接 Milvus 并使用 createCollection()以快速设置模式创建 Collections。

    -

    对于准备工作,使用 MilvusClient连接到 Milvus createCollection()以快速设置模式创建集合。

    +

    对于准备工作,使用 MilvusClient连接到 Milvus,并使用 createCollection()以快速设置模式创建 Collections。

@@ -73,17 +71,16 @@ summary: ""
 # 1. Set up a Milvus client
 client = MilvusClient(
-uri="http://localhost:19530"
+    uri="http://localhost:19530"
 )
 
 # 2. Create a collection
 client.create_collection(
-collection_name="quick_setup",
-dimension=5,
+    collection_name="quick_setup",
+    dimension=5,
 )
 
-
    import io.milvus.v2.client.ConnectConfig;
     import io.milvus.v2.client.MilvusClientV2;
     import io.milvus.v2.service.collection.request.CreateCollectionReq;
    @@ -118,7 +115,7 @@ client = new M
     

    注释

    -

    在上述代码片段中,集合的索引已与集合一起创建,表明集合在创建时已加载。

    +

    在上述代码片段中,Collection 的索引已与 Collection 一同创建,表明创建时已加载 Collection。

    列出分区

    一旦集合准备就绪,就可以列出其分区。

    +

    一旦集合就绪,就可以列出它的分区。

    要列出分区,请使用 list_partitions().

@@ -155,7 +152,6 @@ res = client.list_partitions(collection_name="qui
 #
 # ["_default"]
 
-
    import io.milvus.v2.service.partition.request.ListPartitionsReq;
     
     // 3. List all partitions in the collection
    @@ -182,10 +178,10 @@ res = await client.// [ '_default' ]
     // 
     
    -

    上述代码片段的输出包括指定集合中分区的名称。

    +

    上述代码片段的输出包括指定 Collections 内分区的名称。

    注释

    -

    如果在集合中将某个字段设为分区键,Milvus 会在创建集合的同时创建至少64 个分区。在列出分区时,结果可能与上述代码片段的输出不同。

    +

    如果在集合中设置了字段作为分区键,Milvus 会随集合创建至少64 个分区。在列出分区时,结果可能与上述代码片段的输出不同。

    详情请参阅使用分区键

    创建分区

    您可以向集合添加更多分区。一个数据集最多可有 1,024 个分区。

    +

    您可以向 Collection 添加更多分区。一个 Collection 最多可以有 1,024 个分区。

    要创建分区,请使用 create_partition().

@@ -222,8 +218,8 @@ client.create_partition(
 )
 
 client.create_partition(
-collection_name="quick_setup",
-partition_name="partitionB"
+    collection_name="quick_setup",
+    partition_name="partitionB"
 )
 
 res = client.list_partitions(collection_name="quick_setup")
@@ -233,7 +229,6 @@ res = client.list_partitions(collection_name="qui
 #
 # ["_default", "partitionA", "partitionB"]
 
-
    import io.milvus.v2.service.partition.request.CreatePartitionReq;
     
     // 4. Create more partitions
    @@ -288,11 +283,11 @@ res = await client.// [ '_default', 'partitionA', 'partitionB' ]
     // 
     
    -

    上面的代码片段在一个集合中创建了一个分区,并列出了该集合的分区。

    +

    上面的代码片段在一个 Collection 中创建了一个分区,并列出了该 Collection 的分区。

    注释

    -

    如果将某个字段设置为集合中的分区键,Milvus 将负责管理集合中的分区。因此,在尝试创建分区时可能会遇到提示错误。

    -

    有关详细信息,请参阅使用Partition Key

    +

    如果你在一个 Collection 中设置了一个字段作为分区键,Milvus 会负责管理 Collection 中的分区。因此,在尝试创建分区时可能会遇到提示错误。

    +

    有关详情,请参阅使用分区密钥

检查特定分区

-
    import io.milvus.v2.service.partition.request.HasPartitionReq;
     
     // 5. Check whether a partition exists
    @@ -395,7 +389,7 @@ res = await client.// false
     // 
     
    -

    上面的代码片段检查集合是否有名为partitionApartitionC 的分区。

    +

    上面的代码片段会检查 Collection 是否有名为partitionApartitionC 的分区。
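The Python counterpart is not visible in this hunk; a minimal sketch (assuming pymilvus's MilvusClient.has_partition and the quick_setup collection above) would be:

res = client.has_partition(
    collection_name="quick_setup",
    partition_name="partitionA"
)
print(res)  # True

res = client.has_partition(
    collection_name="quick_setup",
    partition_name="partitionC"
)
print(res)  # False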

    加载和释放分区

    -

    要检查集合及其分区的加载状态,请使用 get_load_state().

    +

    要检查 Collections 及其分区的加载状态,请使用 get_load_state().

    -

    要检查某个集合及其分区的加载状态,请使用 getLoadState().

    +

    要检查 Collections 及其分区的加载状态,请使用 getLoadState().

    -

    要检查数据集及其分区的负载状态,请使用 getLoadState().

    +

    要检查 Collections 及其分区的负载状态,请使用 getLoadState().

@@ -433,12 +427,12 @@ res = client.get_load_state(collection_name="quic
 # Output
 #
 # {
-# "state": "<LoadState: Loaded>"
+#     "state": "<LoadState: Loaded>"
 # }
 
 res = client.get_load_state(
-collection_name="quick_setup",
-partition_name="partitionA"
+    collection_name="quick_setup",
+    partition_name="partitionA"
 )
 
 print(res)
@@ -446,12 +440,12 @@ partition_name="partitionA"
 # Output
 #
 # {
-# "state": "<LoadState: Loaded>"
+#     "state": "<LoadState: Loaded>"
 # }
 
 res = client.get_load_state(
-collection_name="quick_setup",
-partition_name="partitionB"
+    collection_name="quick_setup",
+    partition_name="partitionB"
 )
 
 print(res)
@@ -459,11 +453,10 @@ partition_name="partitionB"
 # Output
 #
 # {
-# "state": "<LoadState: NotLoad>"
+#     "state": "<LoadState: NotLoad>"
 # }
 
-
    import io.milvus.v2.service.collection.request.GetLoadStateReq;
     import io.milvus.v2.service.collection.request.ReleaseCollectionReq;
     import io.milvus.v2.service.partition.request.LoadPartitionsReq;
    @@ -584,20 +577,20 @@ res = await client.Loaded

    +

    如果一个 Collection 中至少有一个分区已加载,则标记为Loaded

  • 未加载

    -

    如果一个集合的任何分区都未加载,则该集合被标记为NotLoad

  • +

    如果一个 Collection 的任何分区都未加载,则将其标记为NotLoad

  • 加载中

    -

    如果集合中至少有一个分区处于加载过程中,则该集合被标记为正在加载。

  • +

    如果集合中至少有一个分区处于加载过程中,则该集合会被标记为正在加载。

    加载分区

    -

    要加载集合的所有分区,只需调用 load_collection().要加载集合的特定分区,可使用 load_partitions().

    +

    要加载 Collections 的所有分区,只需调用 load_collection().要加载 Collections 的特定分区,可使用 load_partitions().

    -

    要加载集合的所有分区,只需调用 loadCollection().要加载集合的特定分区,可使用 loadPartitions().

    +

    要加载 Collections 的所有分区,只需调用 loadCollection().要加载 Collections 的特定分区,可使用 loadPartitions().

    -

    要加载集合的所有分区,只需调用 loadCollection().要加载集合的特定分区,可使用 loadPartitions().

    +

要加载 Collections 的所有分区,只需调用 loadCollection().要加载 Collections 的特定分区,可使用 loadPartitions().

@@ -612,10 +605,9 @@ res = client.get_load_state(collection_name="quic
 # Output
 #
 # {
-# "state": "<LoadState: Loaded>"
+#     "state": "<LoadState: Loaded>"
 # }
    -
    LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()
         .collectionName("quick_setup")
         .partitionNames(List.of("partitionA"))
@@ -661,28 +653,27 @@ res = await client.
-collection_name="quick_setup",
    -partition_name="partitionA"
    +    collection_name="quick_setup",
    +    partition_name="partitionA"
     )
     
     # Output
     #
     # {
    -# "state": "<LoadState: Loaded>"
    +#     "state": "<LoadState: Loaded>"
     # }
     
 res = client.get_load_state(
    -collection_name="quick_setup",
    -partition_name="partitionB"
    +    collection_name="quick_setup",
    +    partition_name="partitionB"
     )
     
     # Output
     #
     # {
    -# "state": "<LoadState: Loaded>"
    +#     "state": "<LoadState: Loaded>"
     # }
     
    -
    LoadPartitionsReq loadPartitionsReq = LoadPartitionsReq.builder()
         .collectionName("quick_setup")
         .partitionNames(List.of("partitionA", "partitionB"))
    @@ -743,14 +734,24 @@ res = await client.// LoadStateLoaded
     // 
     
    +

    要加载一个或多个分区中的指定字段,请执行以下操作:

    +
    client.load_partitions(
    +    collection_name="quick_setup",
    +    partition_names=["partitionA"],
    +    load_fields=["id", "vector"],
    +    skip_load_dynamic_field=True
    +)
    +
    +

    请注意,只有load_fields 中列出的字段才能用作搜索和查询中的筛选条件和输出字段。列表中应始终包含主键。不加载的字段名将不能用于筛选或输出。

    +

    可以使用skip_load_dynamic_field=True 跳过加载动态字段。Milvus 将动态字段视为单个字段,因此动态字段中的所有键将一起被包含或排除。

    释放分区

    -

    要释放集合的所有分区,只需调用 release_collection().要释放集合的特定分区,可使用 release_partitions().

    +

    要释放 Collections 的所有分区,只需调用 release_collection().要释放 Collections 的特定分区,可使用 release_partitions().

    -

    要释放集合的所有分区,只需调用 releaseCollection().要释放集合的特定分区,可使用 releasePartitions().

    +

    要释放 Collections 的所有分区,只需调用 releaseCollection().要释放集合的特定分区,可使用 releasePartitions().

    -

    要释放集合的所有分区,只需调用 releaseCollection().要释放集合的特定分区,可使用 releasePartitions().

    +

要释放 Collection 的所有分区,只需调用 releaseCollection().要释放 Collection 的特定分区,可使用 releasePartitions().

@@ -761,8 +762,8 @@ client.release_partitions(
 )
 
 res = client.get_load_state(
-collection_name="quick_setup",
-partition_name="partitionA"
+    collection_name="quick_setup",
+    partition_name="partitionA"
 )
 
 print(res)
@@ -770,11 +771,10 @@ partition_name="partitionA"
 # Output
 #
 # {
-# "state": "<LoadState: NotLoad>"
+#     "state": "<LoadState: NotLoad>"
 # }
 
-
    import io.milvus.v2.service.partition.request.ReleasePartitionsReq;
     
     // 7. Release a partition
@@ -821,16 +821,15 @@ res = await client.
-collection_name="quick_setup",
    +    collection_name="quick_setup",
     )
     
     # Output
     #
     # {
    -# "state": "<LoadState: NotLoad>"
    +#     "state": "<LoadState: NotLoad>"
     # }
     
    -

丢弃分区

-
    import io.milvus.v2.service.partition.request.ReleasePartitionsReq;
     
     ReleasePartitionsReq releasePartitionsReq = ReleasePartitionsReq.builder()
    @@ -941,10 +939,10 @@ res = await client.rootCoord.maxPartitionNum 来调整分区的最大数量。有关详情,请参阅系统配置

    -
  • 如何区分分区和Partition Key?

    -

    分区是物理存储单元,而Partition Key是逻辑概念,可根据指定列自动将数据分配到特定分区。

    -

    例如,在 Milvus 中,如果你有一个定义了分区键为color 字段的集合,系统会根据每个实体的color 字段的散列值自动将数据分配到分区。这一自动化流程免除了用户在插入或搜索数据时手动指定分区的责任。

    -

    另一方面,在手动创建分区时,需要根据分区键的标准为每个分区分配数据。如果你有一个带有color 字段的集合,你需要手动将color 值为red 的实体分配到partition A ,将color 值为blue 的实体分配到partition B 。这种手动管理需要更多的精力。

    -

    总之,分区和分区键都是用来优化数据计算和提高查询效率的。必须认识到,启用Partition Key意味着放弃对分区数据插入和加载的人工管理控制,因为这些过程完全由 Milvus 自动处理。

  • +

    默认情况下,Milvus 最多允许创建 1,024 个分区。您可以通过配置rootCoord.maxPartitionNum 来调整分区的最大数量。有关详情,请参阅系统配置

    +
  • 如何区分分区和分区 Key?

    +

    分区是物理存储单元,而分区密钥是逻辑概念,可根据指定列自动将数据分配到特定分区。

    +

    例如,在 Milvus 中,如果你的 Collections 的分区键定义为color 字段,系统会根据每个实体的color 字段的散列值自动将数据分配到分区。这一自动化流程免除了用户在插入或搜索数据时手动指定分区的责任。

    +

    另一方面,在手动创建分区时,需要根据分区 Key 的标准为每个分区分配数据。如果你有一个带有color 字段的 Collections,你会手动将color 值为red 的实体分配到partition A ,将color 值为blue 的实体分配到partition B 。这种手动管理需要更多的精力。

    +

    总之,分区和分区 Key 都是用来优化数据计算和提高查询效率的。必须认识到,启用分区键意味着放弃对分区数据插入和加载的人工管理控制,因为这些过程完全由 Milvus 自动处理。
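To ground the comparison above, here is a minimal sketch (not from the original page) of declaring a partition key at schema-definition time; the collection name and endpoint are illustrative.

from pymilvus import MilvusClient, DataType

client = MilvusClient(uri="http://localhost:19530")  # illustrative endpoint

schema = MilvusClient.create_schema(auto_id=False, enable_dynamic_field=True)
schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
schema.add_field(field_name="vector", datatype=DataType.FLOAT_VECTOR, dim=5)
# With is_partition_key=True, Milvus hashes each entity's "color" value and
# routes the entity to an underlying partition automatically, so no manual
# create_partition or partition_name on insert is needed.
schema.add_field(
    field_name="color",
    datatype=DataType.VARCHAR,
    max_length=64,
    is_partition_key=True,
)

client.create_collection(
    collection_name="partition_key_demo",
    schema=schema,
)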

  • diff --git a/localization/v2.4.x/site/zh/userGuide/search-query-get/single-vector-search.json b/localization/v2.4.x/site/zh/userGuide/search-query-get/single-vector-search.json index 8e3dec09d..536c160fa 100644 --- a/localization/v2.4.x/site/zh/userGuide/search-query-get/single-vector-search.json +++ b/localization/v2.4.x/site/zh/userGuide/search-query-get/single-vector-search.json @@ -1 +1 @@ -{"codeList":["# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=CLUSTER_ENDPOINT,\n token=TOKEN \n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n metric_type=\"IP\"\n)\n\n# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"color_tag\": f\"{current_color}_{str(random.randint(1000, 9999))}\"\n })\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n\n# 6.1 Create partitions \nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"red\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"blue\"\n)\n\n# 6.1 Insert data into partitions\nred_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"red\", \"color_tag\": f\"red_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\nblue_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"blue\", \"color_tag\": f\"blue_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=red_data,\n partition_name=\"red\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=blue_data,\n partition_name=\"blue\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Random;\n\nimport com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp; \n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig); \n\n// 2. 
Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .metricType(\"IP\")\n .build();\n\nclient.createCollection(quickSetupReq);\n\nGetLoadStateReq loadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(loadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\n// 3. Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n\n// 6.1. Create a partition\nCreatePartitionReq partitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"red\")\n .build();\n\nclient.createPartition(partitionReq);\n\npartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"blue\")\n .build();\n\nclient.createPartition(partitionReq);\n\n// 6.2 Insert data into the partition\ndata = new ArrayList<>();\n\nfor (int i=1000; i<1500; i++) {\n Random rand = new Random();\n String current_color = \"red\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n} \n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"red\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n\ndata = new ArrayList<>();\n\nfor (int i=1500; i<2000; i++) {\n Random rand = new Random();\n String current_color = \"blue\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"blue\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. 
Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n metric_type: \"IP\"\n}); \n\n// 3. Insert randomly generated vectors\nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor (let i = 0; i < 1000; i++) {\n current_color = colors[Math.floor(Math.random() * colors.length)]\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n color_tag: `${current_color}_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nvar res = await client.insert({\n collection_name: \"quick_setup\",\n data: data\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"red\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"blue\"\n})\n\n// 6.1 Insert data into partitions\nvar red_data = []\nvar blue_data = []\n\nfor (let i = 1000; i < 1500; i++) {\n red_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"red\",\n color_tag: `red_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nfor (let i = 1500; i < 2000; i++) {\n blue_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"blue\",\n color_tag: `blue_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: red_data,\n partition_name: \"red\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: blue_data,\n partition_name: \"blue\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n","# Single vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n # Replace with your query vector\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\n# Convert the output to a formatted JSON string\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 4. Single vector search\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(3) // The number of results to return\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 4. 
Single vector search\nvar query_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 3, // The number of results to return\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {}\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {}\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {}\n },\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {}\n },\n {\n \"id\": 2,\n \"distance\": 0.5928734540939331,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [[\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\n \"vector\": [\n 0.87928146,\n 0.05324632,\n 0.6312755,\n 0.28005534,\n 0.9542448\n ],\n \"id\": 455\n }\n }\n]]}\n","[\n { score: 1.7463608980178833, id: '854' },\n { score: 1.744946002960205, id: '425' },\n { score: 1.7258622646331787, id: '718' }\n]\n","# Bulk-vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104],\n [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345]\n ], # Replace with your query vectors\n limit=2, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 5. Batch vector search\nquery_vectors = Arrays.asList(\n Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f),\n Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f)\n);\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(2)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 5. 
Batch vector search\nvar query_vectors = [\n [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104]\n]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: query_vectors,\n limit: 2,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 1,\n \"distance\": 1.3017789125442505,\n \"entity\": {}\n },\n {\n \"id\": 7,\n \"distance\": 1.2419954538345337,\n \"entity\": {}\n }\n ], # Result set 1\n [\n {\n \"id\": 3,\n \"distance\": 2.3358664512634277,\n \"entity\": {}\n },\n {\n \"id\": 8,\n \"distance\": 0.5642921924591064,\n \"entity\": {}\n }\n ] # Result set 2\n]\n","// Two sets of vectors are returned as expected\n\n{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n }\n ],\n [\n {\n \"score\": 1.8654699,\n \"fields\": {\n \"vector\": [\n 0.4671427,\n 0.8378432,\n 0.98844475,\n 0.82763994,\n 0.9729997\n ],\n \"id\": 638\n }\n },\n {\n \"score\": 1.8581753,\n \"fields\": {\n \"vector\": [\n 0.735541,\n 0.60140246,\n 0.86730254,\n 0.93152493,\n 0.98603314\n ],\n \"id\": 855\n }\n }\n ]\n]}\n","[\n [\n { score: 2.3590476512908936, id: '854' },\n { score: 2.2896690368652344, id: '59' }\n [\n { score: 2.664059638977051, id: '59' },\n { score: 2.59483003616333, id: '854' }\n ]\n]\n","# 6.2 Search within a partition\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"red\"]\n)\n\nprint(res)\n","// 6.3 Search within partitions\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"red\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 6.2 Search within partitions\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"red\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 16,\n \"distance\": 0.9200337529182434,\n \"entity\": {}\n },\n {\n \"id\": 14,\n \"distance\": 0.4505271911621094,\n \"entity\": {}\n },\n {\n \"id\": 15,\n \"distance\": 0.19924677908420563,\n \"entity\": {}\n },\n {\n \"id\": 17,\n \"distance\": 0.0075093843042850494,\n \"entity\": {}\n },\n {\n \"id\": 13,\n \"distance\": -0.14609718322753906,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1677284,\n \"fields\": {\n \"vector\": [\n 0.9986977,\n 0.17964739,\n 0.49086612,\n 0.23155272,\n 0.98438674\n ],\n \"id\": 1435\n }\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\n \"vector\": [\n 0.6952647,\n 0.13417172,\n 0.91045254,\n 0.119336545,\n 0.9338931\n ],\n \"id\": 1291\n }\n },\n {\n \"score\": 
1.0969629,\n \"fields\": {\n \"vector\": [\n 0.3363194,\n 0.028906643,\n 0.6675426,\n 0.030419827,\n 0.9735209\n ],\n \"id\": 1168\n }\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\n \"vector\": [\n 0.9980543,\n 0.36063594,\n 0.66427994,\n 0.17359233,\n 0.94954175\n ],\n \"id\": 1164\n }\n },\n {\n \"score\": 1.0584627,\n \"fields\": {\n \"vector\": [\n 0.7187005,\n 0.12674773,\n 0.987718,\n 0.3110777,\n 0.86093885\n ],\n \"id\": 1085\n }\n }\n ],\n [\n {\n \"score\": 1.8030131,\n \"fields\": {\n \"vector\": [\n 0.59726167,\n 0.7054632,\n 0.9573117,\n 0.94529945,\n 0.8664103\n ],\n \"id\": 1203\n }\n },\n {\n \"score\": 1.7728865,\n \"fields\": {\n \"vector\": [\n 0.6672442,\n 0.60448086,\n 0.9325822,\n 0.80272985,\n 0.8861626\n ],\n \"id\": 1448\n }\n },\n {\n \"score\": 1.7536311,\n \"fields\": {\n \"vector\": [\n 0.59663296,\n 0.77831805,\n 0.8578314,\n 0.88818026,\n 0.9030075\n ],\n \"id\": 1010\n }\n },\n {\n \"score\": 1.7520742,\n \"fields\": {\n \"vector\": [\n 0.854198,\n 0.72294194,\n 0.9245805,\n 0.86126596,\n 0.7969224\n ],\n \"id\": 1219\n }\n },\n {\n \"score\": 1.7452049,\n \"fields\": {\n \"vector\": [\n 0.96419,\n 0.943535,\n 0.87611496,\n 0.8268136,\n 0.79786557\n ],\n \"id\": 1149\n }\n }\n ]\n]}\n","[\n { score: 3.0258803367614746, id: '1201' },\n { score: 3.004319190979004, id: '1458' },\n { score: 2.880324363708496, id: '1187' },\n { score: 2.8246407508850098, id: '1347' },\n { score: 2.797295093536377, id: '1406' }\n]\n","res = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"blue\"]\n)\n\nprint(res)\n","searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"blue\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","res = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"blue\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 20,\n \"distance\": 2.363696813583374,\n \"entity\": {}\n },\n {\n \"id\": 26,\n \"distance\": 1.0665391683578491,\n \"entity\": {}\n },\n {\n \"id\": 23,\n \"distance\": 1.066049575805664,\n \"entity\": {}\n },\n {\n \"id\": 29,\n \"distance\": 0.8353596925735474,\n \"entity\": {}\n },\n {\n \"id\": 28,\n \"distance\": 0.7484277486801147,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1628494,\n \"fields\": {\n \"vector\": [\n 0.7442872,\n 0.046407282,\n 0.71031404,\n 0.3544345,\n 0.9819991\n ],\n \"id\": 1992\n }\n },\n {\n \"score\": 1.1470042,\n \"fields\": {\n \"vector\": [\n 0.5505825,\n 0.04367262,\n 0.9985836,\n 0.18922359,\n 0.93255126\n ],\n \"id\": 1977\n }\n },\n {\n \"score\": 1.1450152,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.1439825,\n \"fields\": {\n \"vector\": [\n 0.9253267,\n 0.15890503,\n 0.7999555,\n 0.19126713,\n 0.898583\n ],\n \"id\": 1552\n }\n },\n {\n \"score\": 1.1029172,\n \"fields\": {\n \"vector\": [\n 0.95661926,\n 0.18777144,\n 0.38115507,\n 0.14323527,\n 0.93137646\n ],\n \"id\": 1823\n }\n }\n ],\n [\n {\n \"score\": 1.8005109,\n \"fields\": {\n \"vector\": [\n 0.5953582,\n 0.7794224,\n 0.9388869,\n 0.79825854,\n 0.9197286\n ],\n \"id\": 1888\n }\n },\n {\n \"score\": 1.7714822,\n \"fields\": {\n \"vector\": [\n 0.56805456,\n 
0.89422905,\n 0.88187534,\n 0.914824,\n 0.8944365\n ],\n \"id\": 1648\n }\n },\n {\n \"score\": 1.7561421,\n \"fields\": {\n \"vector\": [\n 0.83421993,\n 0.39865613,\n 0.92319834,\n 0.42695504,\n 0.96633124\n ],\n \"id\": 1688\n }\n },\n {\n \"score\": 1.7553532,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.7543385,\n \"fields\": {\n \"vector\": [\n 0.16542226,\n 0.38248396,\n 0.9888778,\n 0.80913955,\n 0.9501492\n ],\n \"id\": 1544\n }\n }\n ]\n]}\n","[\n { score: 2.8421106338500977, id: '1745' },\n { score: 2.838560104370117, id: '1782' },\n { score: 2.8134000301361084, id: '1511' },\n { score: 2.718268871307373, id: '1679' },\n { score: 2.7014894485473633, id: '1597' }\n]\n","# Search with output fields\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"] # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 7. Search with output fields\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 7. Search with output fields\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n output_fields: [\"color\"],\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {\n \"color\": \"pink_8682\"\n }\n },\n {\n \"id\": 16,\n \"distance\": 1.0159327983856201,\n \"entity\": {\n \"color\": \"yellow_1496\"\n }\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {}\n }\n ]\n]}\n","\n[\n { score: 3.036271572113037, id: '59', color: 'orange' },\n { score: 3.0267879962921143, id: '1745', color: 'blue' },\n { score: 3.0069446563720703, id: '854', color: 'black' },\n { score: 2.984386682510376, id: '718', color: 'black' },\n { score: 2.916019916534424, id: '425', color: 'purple' }\n]\n","# Search with filter\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. 
number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"red%\"'\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color_tag like \\\"red%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"red%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n },\n {\n \"id\": 6,\n \"distance\": -0.4113418459892273,\n \"entity\": {\n \"color\": \"red_9392\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1677284,\n \"fields\": {\"color_tag\": \"red_9030\"}\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\"color_tag\": \"red_3744\"}\n },\n {\n \"score\": 1.0969629,\n \"fields\": {\"color_tag\": \"red_4168\"}\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\"color_tag\": \"red_9678\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'red_8904' },\n { score: 2.491129159927368, id: '425', color_tag: 'purple_8212' },\n { score: 2.4889798164367676, id: '1458', color_tag: 'red_6891' },\n { score: 2.42964243888855, id: '724', color_tag: 'black_9885' },\n { score: 2.4004223346710205, id: '854', color_tag: 'black_5990' }\n]\n","# Infix match on color field\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"%ll%\"' # Filter on color field, infix match on \"ll\"\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color like \\\"%ll%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. 
Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"%ll%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {\n \"color\": \"yellow_4222\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"yellow_4222\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'yellow_4222' }\n]\n","# Conduct a range search\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\n \"radius\": 0.8, # Radius of the search circle\n \"range_filter\": 1.0 # Range filter to filter out vectors that are not within the search circle\n }\n}\n\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=3, # Max. number of search results to return\n search_params=search_params, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 9. Range search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .searchParams(Map.of(\"radius\", 0.1, \"range\", 1.0))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 9. 
Range search\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n params: {\n radius: 0.1,\n range: 1.0\n },\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\"color_tag\": \"green_2052\"}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\"color_tag\": \"purple_3709\"}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {\"color_tag\": \"black_1646\"}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {\"color_tag\": \"green_4853\"}\n }\n ]\n]}\n","[\n { score: 2.3387961387634277, id: '718', color_tag: 'black_7154' },\n { score: 2.3352415561676025, id: '1745', color_tag: 'blue_8741' },\n { score: 2.290485382080078, id: '1408', color_tag: 'red_2324' },\n { score: 2.285870313644409, id: '854', color_tag: 'black_5990' },\n { score: 2.2593345642089844, id: '1309', color_tag: 'red_8458' }\n]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Group search results\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=[[0.14529211512077012, 0.9147257273453546, 0.7965055218724449, 0.7009258593102812, 0.5605206522382088]], # Query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=10, # Max. number of search results to return\n group_by_field=\"doc_id\", # Group results by document ID\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\n\nprint(doc_ids)\n","[5, 10, 1, 7, 9, 6, 3, 4, 8, 2]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Search without `group_by_field`\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=query_passage_vector, # Replace with your query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=10, # Max. 
number of search results to return\n # group_by_field=\"doc_id\", # Group results by document ID\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\n\nprint(doc_ids)\n","[1, 10, 3, 10, 1, 9, 4, 4, 8, 6]\n","# In normal cases, you do not need to set search parameters manually\n# Except for range searches.\nsearch_parameters = {\n 'metric_type': 'L2',\n 'params': {\n 'nprobe': 10,\n 'level': 1,\n 'radius': 1.0\n 'range_filter': 0.8\n }\n}\n"],"headingContent":"","anchorList":[{"label":"单向量搜索","href":"Single-Vector-Search","type":1,"isActive":false},{"label":"搜索概述","href":"Overview","type":2,"isActive":false},{"label":"准备工作","href":"Preparations","type":2,"isActive":false},{"label":"基本搜索","href":"Basic-search","type":2,"isActive":false},{"label":"过滤搜索","href":"Filtered-search","type":2,"isActive":false},{"label":"范围搜索","href":"Range-search","type":2,"isActive":false},{"label":"分组搜索","href":"Grouping-search","type":2,"isActive":false},{"label":"搜索参数","href":"Search-parameters","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=CLUSTER_ENDPOINT,\n token=TOKEN \n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n metric_type=\"IP\"\n)\n\n# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"color_tag\": f\"{current_color}_{str(random.randint(1000, 9999))}\"\n })\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n\n# 6.1 Create partitions \nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"red\"\n)\n\nclient.create_partition(\n collection_name=\"quick_setup\",\n partition_name=\"blue\"\n)\n\n# 6.1 Insert data into partitions\nred_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"red\", \"color_tag\": f\"red_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\nblue_data = [ {\"id\": i, \"vector\": [ random.uniform(-1, 1) for _ in range(5) ], \"color\": \"blue\", \"color_tag\": f\"blue_{str(random.randint(1000, 9999))}\" } for i in range(500) ]\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=red_data,\n partition_name=\"red\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=blue_data,\n partition_name=\"blue\"\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 500,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(490 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Random;\n\nimport com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport 
io.milvus.v2.service.collection.request.CreateCollectionReq;\nimport io.milvus.v2.service.collection.request.GetLoadStateReq;\nimport io.milvus.v2.service.vector.request.InsertReq;\nimport io.milvus.v2.service.vector.response.InsertResp; \n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig); \n\n// 2. Create a collection in quick setup mode\nCreateCollectionReq quickSetupReq = CreateCollectionReq.builder()\n .collectionName(\"quick_setup\")\n .dimension(5)\n .metricType(\"IP\")\n .build();\n\nclient.createCollection(quickSetupReq);\n\nGetLoadStateReq loadStateReq = GetLoadStateReq.builder()\n .collectionName(\"quick_setup\")\n .build();\n\nboolean state = client.getLoadState(loadStateReq);\n\nSystem.out.println(state);\n\n// Output:\n// true\n\n// 3. Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n\n// 6.1. 
Create a partition\nCreatePartitionReq partitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"red\")\n .build();\n\nclient.createPartition(partitionReq);\n\npartitionReq = CreatePartitionReq.builder()\n .collectionName(\"quick_setup\")\n .partitionName(\"blue\")\n .build();\n\nclient.createPartition(partitionReq);\n\n// 6.2 Insert data into the partition\ndata = new ArrayList<>();\n\nfor (int i=1000; i<1500; i++) {\n Random rand = new Random();\n String current_color = \"red\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n} \n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"red\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n\ndata = new ArrayList<>();\n\nfor (int i=1500; i<2000; i++) {\n Random rand = new Random();\n String current_color = \"blue\";\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\ninsertReq = InsertReq.builder()\n .collectionName(\"quick_setup\")\n .data(data)\n .partitionName(\"blue\")\n .build();\n\ninsertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 500}\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address});\n\n// 2. Create a collection in quick setup mode\nawait client.createCollection({\n collection_name: \"quick_setup\",\n dimension: 5,\n metric_type: \"IP\"\n}); \n\n// 3. 
Insert randomly generated vectors\nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor (let i = 0; i < 1000; i++) {\n current_color = colors[Math.floor(Math.random() * colors.length)]\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n color_tag: `${current_color}_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nvar res = await client.insert({\n collection_name: \"quick_setup\",\n data: data\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"red\"\n})\n\nawait client.createPartition({\n collection_name: \"quick_setup\",\n partition_name: \"blue\"\n})\n\n// 6.1 Insert data into partitions\nvar red_data = []\nvar blue_data = []\n\nfor (let i = 1000; i < 1500; i++) {\n red_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"red\",\n color_tag: `red_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nfor (let i = 1500; i < 2000; i++) {\n blue_data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: \"blue\",\n color_tag: `blue_${Math.floor(Math.random() * 8999) + 1000}`\n })\n}\n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: red_data,\n partition_name: \"red\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n\nres = await client.insert({\n collection_name: \"quick_setup\",\n data: blue_data,\n partition_name: \"blue\"\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 500\n// \n","# Single vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n # Replace with your query vector\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\n# Convert the output to a formatted JSON string\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 4. Single vector search\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(3) // The number of results to return\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 4. 
Single vector search\nvar query_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 3, // The number of results to return\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {}\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {}\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {}\n },\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {}\n },\n {\n \"id\": 2,\n \"distance\": 0.5928734540939331,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [[\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\n \"vector\": [\n 0.87928146,\n 0.05324632,\n 0.6312755,\n 0.28005534,\n 0.9542448\n ],\n \"id\": 455\n }\n }\n]]}\n","[\n { score: 1.7463608980178833, id: '854' },\n { score: 1.744946002960205, id: '425' },\n { score: 1.7258622646331787, id: '718' }\n]\n","# Bulk-vector search\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104],\n [0.3172005263489739, 0.9719044792798428, -0.36981146090600725, -0.4860894583077995, 0.95791889146345]\n ], # Replace with your query vectors\n limit=2, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}} # Search parameters\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 5. Batch vector search\nquery_vectors = Arrays.asList(\n Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f),\n Arrays.asList(0.19886812562848388f, 0.06023560599112088f, 0.6976963061752597f, 0.2614474506242501f, 0.838729485096104f)\n);\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .topK(2)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 5. 
Batch vector search\nvar query_vectors = [\n [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592],\n [0.19886812562848388, 0.06023560599112088, 0.6976963061752597, 0.2614474506242501, 0.838729485096104]\n]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: query_vectors,\n limit: 2,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 1,\n \"distance\": 1.3017789125442505,\n \"entity\": {}\n },\n {\n \"id\": 7,\n \"distance\": 1.2419954538345337,\n \"entity\": {}\n }\n ], # Result set 1\n [\n {\n \"id\": 3,\n \"distance\": 2.3358664512634277,\n \"entity\": {}\n },\n {\n \"id\": 8,\n \"distance\": 0.5642921924591064,\n \"entity\": {}\n }\n ] # Result set 2\n]\n","// Two sets of vectors are returned as expected\n\n{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\n \"vector\": [\n 0.9533119,\n 0.02538395,\n 0.76714665,\n 0.35481733,\n 0.9845762\n ],\n \"id\": 740\n }\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\n \"vector\": [\n 0.7411156,\n 0.08687937,\n 0.8254139,\n 0.08370924,\n 0.99095553\n ],\n \"id\": 640\n }\n }\n ],\n [\n {\n \"score\": 1.8654699,\n \"fields\": {\n \"vector\": [\n 0.4671427,\n 0.8378432,\n 0.98844475,\n 0.82763994,\n 0.9729997\n ],\n \"id\": 638\n }\n },\n {\n \"score\": 1.8581753,\n \"fields\": {\n \"vector\": [\n 0.735541,\n 0.60140246,\n 0.86730254,\n 0.93152493,\n 0.98603314\n ],\n \"id\": 855\n }\n }\n ]\n]}\n","[\n [\n { score: 2.3590476512908936, id: '854' },\n { score: 2.2896690368652344, id: '59' }\n [\n { score: 2.664059638977051, id: '59' },\n { score: 2.59483003616333, id: '854' }\n ]\n]\n","# 6.2 Search within a partition\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"red\"]\n)\n\nprint(res)\n","// 6.3 Search within partitions\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"red\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 6.2 Search within partitions\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"red\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 16,\n \"distance\": 0.9200337529182434,\n \"entity\": {}\n },\n {\n \"id\": 14,\n \"distance\": 0.4505271911621094,\n \"entity\": {}\n },\n {\n \"id\": 15,\n \"distance\": 0.19924677908420563,\n \"entity\": {}\n },\n {\n \"id\": 17,\n \"distance\": 0.0075093843042850494,\n \"entity\": {}\n },\n {\n \"id\": 13,\n \"distance\": -0.14609718322753906,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1677284,\n \"fields\": {\n \"vector\": [\n 0.9986977,\n 0.17964739,\n 0.49086612,\n 0.23155272,\n 0.98438674\n ],\n \"id\": 1435\n }\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\n \"vector\": [\n 0.6952647,\n 0.13417172,\n 0.91045254,\n 0.119336545,\n 0.9338931\n ],\n \"id\": 1291\n }\n },\n {\n \"score\": 
1.0969629,\n \"fields\": {\n \"vector\": [\n 0.3363194,\n 0.028906643,\n 0.6675426,\n 0.030419827,\n 0.9735209\n ],\n \"id\": 1168\n }\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\n \"vector\": [\n 0.9980543,\n 0.36063594,\n 0.66427994,\n 0.17359233,\n 0.94954175\n ],\n \"id\": 1164\n }\n },\n {\n \"score\": 1.0584627,\n \"fields\": {\n \"vector\": [\n 0.7187005,\n 0.12674773,\n 0.987718,\n 0.3110777,\n 0.86093885\n ],\n \"id\": 1085\n }\n }\n ],\n [\n {\n \"score\": 1.8030131,\n \"fields\": {\n \"vector\": [\n 0.59726167,\n 0.7054632,\n 0.9573117,\n 0.94529945,\n 0.8664103\n ],\n \"id\": 1203\n }\n },\n {\n \"score\": 1.7728865,\n \"fields\": {\n \"vector\": [\n 0.6672442,\n 0.60448086,\n 0.9325822,\n 0.80272985,\n 0.8861626\n ],\n \"id\": 1448\n }\n },\n {\n \"score\": 1.7536311,\n \"fields\": {\n \"vector\": [\n 0.59663296,\n 0.77831805,\n 0.8578314,\n 0.88818026,\n 0.9030075\n ],\n \"id\": 1010\n }\n },\n {\n \"score\": 1.7520742,\n \"fields\": {\n \"vector\": [\n 0.854198,\n 0.72294194,\n 0.9245805,\n 0.86126596,\n 0.7969224\n ],\n \"id\": 1219\n }\n },\n {\n \"score\": 1.7452049,\n \"fields\": {\n \"vector\": [\n 0.96419,\n 0.943535,\n 0.87611496,\n 0.8268136,\n 0.79786557\n ],\n \"id\": 1149\n }\n }\n ]\n]}\n","[\n { score: 3.0258803367614746, id: '1201' },\n { score: 3.004319190979004, id: '1458' },\n { score: 2.880324363708496, id: '1187' },\n { score: 2.8246407508850098, id: '1347' },\n { score: 2.797295093536377, id: '1406' }\n]\n","res = client.search(\n collection_name=\"quick_setup\",\n data=[query_vector],\n limit=5,\n search_params={\"metric_type\": \"IP\", \"params\": {\"level\": 1}},\n partition_names=[\"blue\"]\n)\n\nprint(res)\n","searchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .partitionNames(Arrays.asList(\"blue\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","res = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n partition_names: [\"blue\"],\n limit: 5,\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 20,\n \"distance\": 2.363696813583374,\n \"entity\": {}\n },\n {\n \"id\": 26,\n \"distance\": 1.0665391683578491,\n \"entity\": {}\n },\n {\n \"id\": 23,\n \"distance\": 1.066049575805664,\n \"entity\": {}\n },\n {\n \"id\": 29,\n \"distance\": 0.8353596925735474,\n \"entity\": {}\n },\n {\n \"id\": 28,\n \"distance\": 0.7484277486801147,\n \"entity\": {}\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1628494,\n \"fields\": {\n \"vector\": [\n 0.7442872,\n 0.046407282,\n 0.71031404,\n 0.3544345,\n 0.9819991\n ],\n \"id\": 1992\n }\n },\n {\n \"score\": 1.1470042,\n \"fields\": {\n \"vector\": [\n 0.5505825,\n 0.04367262,\n 0.9985836,\n 0.18922359,\n 0.93255126\n ],\n \"id\": 1977\n }\n },\n {\n \"score\": 1.1450152,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.1439825,\n \"fields\": {\n \"vector\": [\n 0.9253267,\n 0.15890503,\n 0.7999555,\n 0.19126713,\n 0.898583\n ],\n \"id\": 1552\n }\n },\n {\n \"score\": 1.1029172,\n \"fields\": {\n \"vector\": [\n 0.95661926,\n 0.18777144,\n 0.38115507,\n 0.14323527,\n 0.93137646\n ],\n \"id\": 1823\n }\n }\n ],\n [\n {\n \"score\": 1.8005109,\n \"fields\": {\n \"vector\": [\n 0.5953582,\n 0.7794224,\n 0.9388869,\n 0.79825854,\n 0.9197286\n ],\n \"id\": 1888\n }\n },\n {\n \"score\": 1.7714822,\n \"fields\": {\n \"vector\": [\n 0.56805456,\n 
0.89422905,\n 0.88187534,\n 0.914824,\n 0.8944365\n ],\n \"id\": 1648\n }\n },\n {\n \"score\": 1.7561421,\n \"fields\": {\n \"vector\": [\n 0.83421993,\n 0.39865613,\n 0.92319834,\n 0.42695504,\n 0.96633124\n ],\n \"id\": 1688\n }\n },\n {\n \"score\": 1.7553532,\n \"fields\": {\n \"vector\": [\n 0.89994013,\n 0.052991092,\n 0.8645576,\n 0.6406729,\n 0.95679337\n ],\n \"id\": 1573\n }\n },\n {\n \"score\": 1.7543385,\n \"fields\": {\n \"vector\": [\n 0.16542226,\n 0.38248396,\n 0.9888778,\n 0.80913955,\n 0.9501492\n ],\n \"id\": 1544\n }\n }\n ]\n]}\n","[\n { score: 2.8421106338500977, id: '1745' },\n { score: 2.838560104370117, id: '1782' },\n { score: 2.8134000301361084, id: '1511' },\n { score: 2.718268871307373, id: '1679' },\n { score: 2.7014894485473633, id: '1597' }\n]\n","# Search with output fields\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"] # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 7. Search with output fields\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color\"))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 7. Search with output fields\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n output_fields: [\"color\"],\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 0,\n \"distance\": 1.4093276262283325,\n \"entity\": {\n \"color\": \"pink_8682\"\n }\n },\n {\n \"id\": 16,\n \"distance\": 1.0159327983856201,\n \"entity\": {\n \"color\": \"yellow_1496\"\n }\n },\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {}\n }\n ]\n]}\n","\n[\n { score: 3.036271572113037, id: '59', color: 'orange' },\n { score: 3.0267879962921143, id: '1745', color: 'blue' },\n { score: 3.0069446563720703, id: '854', color: 'black' },\n { score: 2.984386682510376, id: '718', color: 'black' },\n { score: 2.916019916534424, id: '425', color: 'purple' }\n]\n","# Search with filter\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. 
number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"red%\"'\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color_tag like \\\"red%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"red%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n },\n {\n \"id\": 6,\n \"distance\": -0.4113418459892273,\n \"entity\": {\n \"color\": \"red_9392\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1677284,\n \"fields\": {\"color_tag\": \"red_9030\"}\n },\n {\n \"score\": 1.1476475,\n \"fields\": {\"color_tag\": \"red_3744\"}\n },\n {\n \"score\": 1.0969629,\n \"fields\": {\"color_tag\": \"red_4168\"}\n },\n {\n \"score\": 1.0741848,\n \"fields\": {\"color_tag\": \"red_9678\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'red_8904' },\n { score: 2.491129159927368, id: '425', color_tag: 'purple_8212' },\n { score: 2.4889798164367676, id: '1458', color_tag: 'red_6891' },\n { score: 2.42964243888855, id: '724', color_tag: 'black_9885' },\n { score: 2.4004223346710205, id: '854', color_tag: 'black_5990' }\n]\n","# Infix match on color field\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=5, # Max. number of search results to return\n search_params={\"metric_type\": \"IP\", \"params\": {}}, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n filter='color like \"%ll%\"' # Filter on color field, infix match on \"ll\"\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 8. Filtered search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .filter(\"color like \\\"%ll%\\\"\")\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 8. 
Filtered search\n// 8.1 Filter with \"like\" operator and prefix wildcard\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n filters: \"color_tag like \\\"%ll%\\\"\",\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 5,\n \"distance\": 0.7972343564033508,\n \"entity\": {\n \"color\": \"yellow_4222\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"yellow_4222\"}\n }\n ]\n]}\n","[\n { score: 2.5080761909484863, id: '1201', color_tag: 'yellow_4222' }\n]\n","# Conduct a range search\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\n \"radius\": 0.8, # Radius of the search circle\n \"range_filter\": 1.0 # Range filter to filter out vectors that are not within the search circle\n }\n}\n\nres = client.search(\n collection_name=\"test_collection\", # Replace with the actual name of your collection\n data=[[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]],\n limit=3, # Max. number of search results to return\n search_params=search_params, # Search parameters\n output_fields=[\"color\"], # Output fields to return\n)\n\nresult = json.dumps(res, indent=4)\nprint(result)\n","// 9. Range search\nquery_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nsearchReq = SearchReq.builder()\n .collectionName(\"quick_setup\")\n .data(query_vectors)\n .outputFields(Arrays.asList(\"color_tag\"))\n .searchParams(Map.of(\"radius\", 0.1, \"range\", 1.0))\n .topK(5)\n .build();\n\nsearchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp));\n","// 9. 
Range search\nquery_vector = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"quick_setup\",\n data: [query_vector],\n limit: 5,\n params: {\n radius: 0.1,\n range: 1.0\n },\n output_fields: [\"color_tag\"]\n})\n\nconsole.log(res.results)\n","[\n [\n {\n \"id\": 4,\n \"distance\": 0.9902134537696838,\n \"entity\": {\n \"color\": \"red_4794\"\n }\n },\n {\n \"id\": 14,\n \"distance\": 0.9803846478462219,\n \"entity\": {\n \"color\": \"green_2899\"\n }\n },\n {\n \"id\": 1,\n \"distance\": 0.8519943356513977,\n \"entity\": {\n \"color\": \"red_7025\"\n }\n }\n ]\n]\n","{\"searchResults\": [\n [\n {\n \"score\": 1.263043,\n \"fields\": {\"color_tag\": \"green_2052\"}\n },\n {\n \"score\": 1.2377806,\n \"fields\": {\"color_tag\": \"purple_3709\"}\n },\n {\n \"score\": 1.1869997,\n \"fields\": {\"color_tag\": \"red_3026\"}\n },\n {\n \"score\": 1.1748955,\n \"fields\": {\"color_tag\": \"black_1646\"}\n },\n {\n \"score\": 1.1720343,\n \"fields\": {\"color_tag\": \"green_4853\"}\n }\n ]\n]}\n","[\n { score: 2.3387961387634277, id: '718', color_tag: 'black_7154' },\n { score: 2.3352415561676025, id: '1745', color_tag: 'blue_8741' },\n { score: 2.290485382080078, id: '1408', color_tag: 'red_2324' },\n { score: 2.285870313644409, id: '854', color_tag: 'black_5990' },\n { score: 2.2593345642089844, id: '1309', color_tag: 'red_8458' }\n]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Group search results\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=[[0.14529211512077012, 0.9147257273453546, 0.7965055218724449, 0.7009258593102812, 0.5605206522382088]], # Query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=5, # Max. number of groups to return\n group_by_field=\"doc_id\", # Group results by document ID\n group_size=2, # returned at most 2 passages per document, the default value is 1\n group_strict_size=True, # ensure every group contains exactly 3 passages\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\npassage_ids = [result['entity']['passage_id'] for result in res[0]]\n\nprint(doc_ids)\nprint(passage_ids)\n","[\"doc_11\", \"doc_11\", \"doc_7\", \"doc_7\", \"doc_3\", \"doc_3\", \"doc_2\", \"doc_2\", \"doc_8\", \"doc_8\"]\n[5, 10, 11, 10, 9, 6, 5, 4, 9, 2]\n","# Connect to Milvus\nclient = MilvusClient(uri='http://localhost:19530') # Milvus server address\n\n# Load data into collection\nclient.load_collection(\"group_search\") # Collection name\n\n# Search without `group_by_field`\nres = client.search(\n collection_name=\"group_search\", # Collection name\n data=query_passage_vector, # Replace with your query vector\n search_params={\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n }, # Search parameters\n limit=5, # Max. 
number of search results to return\n # group_by_field=\"doc_id\", # Group results by document ID\n # group_size=2, \n # group_strict_size=True,\n output_fields=[\"doc_id\", \"passage_id\"]\n)\n\n# Retrieve the values in the `doc_id` column\ndoc_ids = [result['entity']['doc_id'] for result in res[0]]\npassage_ids = [result['entity']['passage_id'] for result in res[0]]\n\nprint(doc_ids)\nprint(passage_ids)\n","[\"doc_11\", \"doc_11\", \"doc_11\", \"doc_11\", \"doc_11\"]\n[1, 10, 3, 12, 9]\n","# In normal cases, you do not need to set search parameters manually\n# Except for range searches.\nsearch_parameters = {\n 'metric_type': 'L2',\n 'params': {\n 'nprobe': 10,\n 'level': 1,\n 'radius': 1.0\n 'range_filter': 0.8\n }\n}\n"],"headingContent":"Single-Vector Search","anchorList":[{"label":"单向量搜索","href":"Single-Vector-Search","type":1,"isActive":false},{"label":"搜索概述","href":"Overview","type":2,"isActive":false},{"label":"准备工作","href":"Preparations","type":2,"isActive":false},{"label":"基本搜索","href":"Basic-search","type":2,"isActive":false},{"label":"过滤搜索","href":"Filtered-search","type":2,"isActive":false},{"label":"范围搜索","href":"Range-search","type":2,"isActive":false},{"label":"分组搜索","href":"Grouping-search","type":2,"isActive":false},{"label":"搜索参数","href":"Search-parameters","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/userGuide/search-query-get/single-vector-search.md b/localization/v2.4.x/site/zh/userGuide/search-query-get/single-vector-search.md index 8857d0e9a..98e9191f6 100644 --- a/localization/v2.4.x/site/zh/userGuide/search-query-get/single-vector-search.md +++ b/localization/v2.4.x/site/zh/userGuide/search-query-get/single-vector-search.md @@ -19,10 +19,10 @@ title: 单向量搜索 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

After inserting data, the next step is to perform a similarity search on your collection in Milvus.&#13;

-&#13;

Milvus lets you conduct two types of search, depending on the number of vector fields in your Collections:&#13;

+&#13;

After inserting data, the next step is to perform a similarity search on your Collections in Milvus.&#13;

+&#13;

Depending on the number of vector fields in your Collections, Milvus lets you conduct two types of search:&#13;

  -&#13;
• Single-vector search: If your Collections have only one vector field, use the search() method to find the most similar entities. This method compares your query vector with the existing vectors in the collection and returns the IDs of the closest matches along with the distances between them. Optionally, it can also return the vector values and metadata of the results.&#13;
• +&#13;
• Single-vector search: If your Collections have only one vector field, use the search() method to find the most similar entities. This method compares your query vector with the existing vectors in the Collections and returns the IDs of the closest matches along with the distances between them. Optionally, it can also return the vector values and metadata of the results.&#13;
• Hybrid search: For Collections with two or more vector fields, use the hybrid_search() method. This method performs multiple approximate nearest neighbor (ANN) search requests, combines the results after reranking, and returns the most relevant matches.&#13;

This guide focuses on how to perform a single-vector search in Milvus. For details on hybrid search, refer to Hybrid Search.&#13;
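
As a quick preview of the call shape described above, here is a minimal sketch of a single-vector search, assuming a running Milvus at localhost:19530 and the quick_setup collection with 5-dimensional vectors used throughout this guide:&#13;

from pymilvus import MilvusClient

# Connect to the Milvus server (assumed local deployment)
client = MilvusClient(uri="http://localhost:19530")

# A single-vector ANN search returns the IDs and distances of the closest
# matches, plus any requested output fields.
res = client.search(
    collection_name="quick_setup",          # assumed collection name
    data=[[0.35, -0.6, 0.18, -0.26, 0.9]],  # one query vector
    limit=5,                                # number of results to return
    output_fields=["color"],                # optional metadata to return
)

print(res)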

@@ -63,7 +63,7 @@ title: Single-Vector Search -&#13;

The following code snippet repurposes the existing code to establish a connection to Milvus and quickly set up a collection.&#13;

+&#13;

The following code snippet repurposes the existing code to establish a connection to Milvus and quickly set up a Collections.&#13;

    # 1. Set up a Milvus client
    @@ -500,7 +500,7 @@ res = await client.limit
    -      
    + @@ -526,7 +526,7 @@ res = await client.topK - +
The name of an existing collection.The name of an existing Collections.&#13;
field_nameThe total number of entities to return.&#13;
You can use this parameter in combination with offset in param to enable pagination.&#13;
The sum of this value and offset in param is less than 16,384.&#13;
The total number of entities to return.&#13;
You can use this parameter in combination with offset in param to enable pagination.&#13;
The sum of this value and offset in param should be less than 16,384.&#13;
search_paramsThe number of records to return in the search result. This parameter uses the same syntax as the limit parameter, so you should only set one of the two.&#13;
You can use this parameter in combination with offset in param to enable pagination.&#13;
The sum of this value and offset in param is less than 16,384.&#13;
The number of records to return in the search result. This parameter uses the same syntax as the limit parameter, so you should only set one of the two.&#13;
You can use this parameter in combination with offset in param to enable pagination.&#13;
The sum of this value and offset in param should be less than 16,384.&#13;
@@ -548,7 +548,7 @@ res = await client.limit - The total number of entities to return.&#13;
You can use this parameter in combination with offset in param to enable pagination.&#13;
The sum of this value and offset in param is less than 16,384. + The total number of entities to return.&#13;
You can use this parameter in combination with offset in param to enable pagination.&#13;
The sum of this value and offset in param should be less than 16,384. @@ -1699,7 +1699,7 @@ res = await client.HAMMINGThe smaller the Hamming distance, the higher the similarity. To exclude the closest vectors from the results, ensure that:&#13;
range_filter <= distance < radius -&#13;

To learn more about distance metric types, refer to Similarity Metrics.&#13;

+&#13;

To learn more about distance metric types, refer to "Similarity Metrics".&#13;
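
To make the radius and range_filter relationship concrete, here is a minimal sketch of a range search, assuming a local Milvus deployment and a test_collection whose vector field uses the L2 metric (smaller distance means higher similarity):&#13;

from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")

# With L2, radius bounds the search space from the outside and range_filter
# optionally bounds it from the inside: range_filter <= distance < radius.
res = client.search(
    collection_name="test_collection",      # assumed collection name
    data=[[0.35, -0.6, 0.18, -0.26, 0.9]],  # one query vector
    limit=3,
    search_params={
        "metric_type": "L2",
        "params": {
            "radius": 1.0,        # outer boundary of the search space
            "range_filter": 0.8,  # optional inner boundary
        },
    },
)

print(res)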

In Milvus, grouping the search by a specific field avoids redundant entries of the same field value in the results, letting you get a distinct set of results for that field.&#13;

-&#13;

Consider a Collections of documents, where each document splits into various passages. Each passage is represented by one vector Embeddings and belongs to one document. To find relevant documents instead of similar passages, you can include the group_by_field argument in the search() command to group results by document ID. This helps return the most relevant and unique documents, rather than separate passages from the same document.&#13;

-&#13;

Here is the example code to group search results by field:&#13;

+&#13;

In Milvus, grouping search is designed to improve the comprehensiveness and accuracy of search results.&#13;

+&#13;

Consider a scenario in RAG, where loads of documents are split into various passages, and each passage is represented by one vector embedding. Users want to find the most relevant passages to prompt the LLMs accurately. The ordinary Milvus search function can meet this requirement, but it may yield highly skewed and biased results: most of the passages come from only a few documents, and the comprehensiveness of the search results is very poor. This can seriously impair the accuracy or even correctness of the results the LLM gives and negatively affect the LLM users' experience.&#13;

+&#13;

Grouping search can effectively solve this problem. By passing group_by_field and group_size, Milvus users can divide the search results into several groups and ensure that the number of entities from each group does not exceed a specific group_size. This feature can significantly improve the comprehensiveness and fairness of search results, noticeably improving the quality of LLM output.&#13;

+&#13;

Here is the example code to group search results by field:&#13;

    # Connect to Milvus
     client = MilvusClient(uri='http://localhost:19530') # Milvus server address
     
    @@ -1732,21 +1733,26 @@ res = client.search(
         "metric_type": "L2",
         "params": {"nprobe": 10},
         }, # Search parameters
    -    limit=10, # Max. number of search results to return
    +    limit=5, # Max. number of groups to return
         group_by_field="doc_id", # Group results by document ID
+    group_size=2, # return at most 2 passages per document; the default value is 1
+    group_strict_size=True, # ensure every group contains exactly 2 passages
         output_fields=["doc_id", "passage_id"]
     )
     
     # Retrieve the values in the `doc_id` column
     doc_ids = [result['entity']['doc_id'] for result in res[0]]
    +passage_ids = [result['entity']['passage_id'] for result in res[0]]
     
     print(doc_ids)
    +print(passage_ids)
     

The output is similar to the following:&#13;

    -
    [5, 10, 1, 7, 9, 6, 3, 4, 8, 2]
    +
    ["doc_11", "doc_11", "doc_7", "doc_7", "doc_3", "doc_3", "doc_2", "doc_2", "doc_8", "doc_8"]
    +[5, 10, 11, 10, 9, 6, 5, 4, 9, 2]
     
    -

In the given output, you can see that the returned entities contain no duplicate doc_id values.&#13;

-&#13;

For comparison, let's comment out group_by_field and conduct a regular search:&#13;

+&#13;

In the given output, you can see that exactly two passages are retrieved for each document, and a total of 5 documents collectively make up the results.&#13;

+&#13;

For comparison, let's comment out the group-related parameters and conduct a regular search:&#13;

    # Connect to Milvus
     client = MilvusClient(uri='http://localhost:19530') # Milvus server address
     
    @@ -1761,27 +1767,33 @@ res = client.search(
         "metric_type": "L2",
         "params": {"nprobe": 10},
         }, # Search parameters
    -    limit=10, # Max. number of search results to return
    +    limit=5, # Max. number of search results to return
         # group_by_field="doc_id", # Group results by document ID
    +    # group_size=2, 
    +    # group_strict_size=True,
         output_fields=["doc_id", "passage_id"]
     )
     
     # Retrieve the values in the `doc_id` column
     doc_ids = [result['entity']['doc_id'] for result in res[0]]
    +passage_ids = [result['entity']['passage_id'] for result in res[0]]
     
     print(doc_ids)
    +print(passage_ids)
     

The output is similar to the following:&#13;

    -
    [1, 10, 3, 10, 1, 9, 4, 4, 8, 6]
    +
    ["doc_11", "doc_11", "doc_11", "doc_11", "doc_11"]
    +[1, 10, 3, 12, 9]
     
    -

In the given output, you can see that the returned entities contain duplicate doc_id values.&#13;

+&#13;

In the given output, it can be observed that "doc_11" completely dominates the search results, overshadowing the high-quality passages from other documents, which can be a poor prompt to the LLM.&#13;

+&#13;

One more point to note: by default, grouping search returns results instantly once it has enough groups, which may cause the number of results in each group to fall short of the group_size requirement. If you care about the number of results per group, set group_strict_size=True as shown in the code above. This makes Milvus strive to obtain enough results for each group, at a slight cost to performance.&#13;

Limits&#13;

  -&#13;
• Indexing: This grouping feature works only for Collections indexed with the HNSW, IVF_FLAT, or FLAT type. For detailed information, see In-memory Index.&#13;

• +&#13;
• Indexing: This grouping feature works only for Collections indexed with the HNSW, IVF_FLAT, or FLAT type. For more information, refer to In-memory Index.&#13;

• Vector: Currently, grouping search does not support a vector field of the BINARY_VECTOR type. For more information on data types, refer to Supported Data Types.&#13;

• Field: Currently, grouping search allows only a single column. You cannot specify multiple field names in the group_by_field config. Additionally, grouping search is incompatible with data types of JSON, FLOAT, DOUBLE, ARRAY, or vector fields.&#13;

• Performance Impact: Be aware that performance degrades with increasing query vector counts. Using a cluster with 2 CPU cores and 8 GB of memory as an example, the execution time of grouping search increases proportionally with the number of input query vectors.&#13;

• -&#13;
• Functionality: Currently, grouping search is not supported by range search, search iterators, or hybrid search&#13;

• +&#13;
• Functionality: Currently, grouping search is not supported by range search or search iterators.&#13;

Search parameters&#13;

The searches above use the default search parameters, except for the range search. In general cases, there is no need to manually set search parameters.&#13;

+&#13;

The searches above use the default search parameters, except for the range search. In normal cases, there is no need to manually set search parameters.&#13;

    # In normal cases, you do not need to set search parameters manually
     # Except for range searches.
     search_parameters = {
    @@ -1818,7 +1830,7 @@ search_parameters = {
     
     
 metric_typeHow to measure the similarity between vector Embeddings.&#13;
Possible values are IP, L2, COSINE, JACCARD, and HAMMING, and the default is that of the loaded index file. -params.nprobeThe number of units to query during the search.&#13;
The value falls in the range [1, nlist[1]]. +params.nprobeThe number of units to query during the search.&#13;
The value falls in the range [1, nlist[1]]. params.levelThe search precision level.&#13;
Possible values are 1, 2, and 3, and the default is 1. Higher values yield more accurate results but worse performance. params.radiusDefines the outer boundary of your search space. Only vectors whose distance to the query vector falls within this range are considered potential matches.&#13;
The value range is determined by the metric_type parameter. For instance, if metric_type is set to L2, the valid value range is [0, ∞]. If metric_type is set to COSINE, the valid value range is [-1, 1]. For more information, refer to "Similarity Metrics". params.range_filterWhile radius sets the outer limit of the search, range_filter can be optionally used to define an inner boundary, creating a distance range within which vectors must fall to be considered matches.&#13;
    值范围由metric_type 参数决定。例如,如果metric_type 设置为L2 ,则有效值范围为[0, ∞] 。如果metric_type 设置为COSINE ,则有效值范围为[-1, 1] 。更多信息,请参阅 "相似度指标"。 diff --git a/localization/v2.4.x/site/zh/userGuide/search-query-get/with-iterators.json b/localization/v2.4.x/site/zh/userGuide/search-query-get/with-iterators.json index b8c43c3b4..d5914902a 100644 --- a/localization/v2.4.x/site/zh/userGuide/search-query-get/with-iterators.json +++ b/localization/v2.4.x/site/zh/userGuide/search-query-get/with-iterators.json @@ -1 +1 @@ -{"codeList":["from pymilvus import MilvusClient\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n)\n","import io.milvus.client.MilvusServiceClient;\nimport io.milvus.param.ConnectParam;\nimport io.milvus.param.highlevel.collection.CreateSimpleCollectionParam;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectParam connectParam = ConnectParam.newBuilder()\n .withUri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusServiceClient client = new MilvusServiceClient(connectParam);\n\n// 2. Create a collection\nCreateSimpleCollectionParam createCollectionParam = CreateSimpleCollectionParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withDimension(5)\n .build();\n\nclient.createCollection(createCollectionParam);\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(10000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n\n# Output\n#\n# {\n# \"id\": 0,\n# \"vector\": [\n# -0.5705990742218152,\n# 0.39844925120642083,\n# -0.8791287928610869,\n# 0.024163154953680932,\n# 0.6837669917169638\n# ],\n# \"color\": \"purple\",\n# \"tag\": 7774,\n# \"color_tag\": \"purple_7774\"\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data,\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 10000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(9990 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Random;\n\nimport com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.param.R;\nimport io.milvus.param.dml.InsertParam;\nimport io.milvus.response.MutationResultWrapper;\nimport io.milvus.grpc.MutationResult;\n\n\n// 3. 
Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<10000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertParam insertParam = InsertParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withRows(data)\n .build();\n\nR insertRes = client.insert(insertParam);\n\nif (insertRes.getStatus() != R.Status.Success.getCode()) {\n System.err.println(insertRes.getMessage());\n}\n\nMutationResultWrapper wrapper = new MutationResultWrapper(insertRes.getData());\nSystem.out.println(wrapper.getInsertCount());\n","from pymilvus import Collection\n\n# 4. Search with iterator\nconnections.connect(host=\"127.0.0.1\", port=19530)\ncollection = Collection(\"quick_setup\")\n\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\"nprobe\": 10}\n}\n\niterator = collection.search_iterator(\n data=query_vectors,\n anns_field=\"vector\",\n batch_size=10,\n param=search_params,\n output_fields=[\"color_tag\"],\n limit=3\n)\n\nresults = []\n\nwhile True:\n result = iterator.next()\n if not result:\n iterator.close()\n break\n \n results.extend(result)\n \n for hit in result:\n results.append(hit.to_dict())\n\nprint(results)\n\n# Output\n#\n# [\n# {\n# \"id\": 1756,\n# \"distance\": 2.0642056465148926,\n# \"entity\": {\n# \"color_tag\": \"black_9109\"\n# }\n# },\n# {\n# \"id\": 6488,\n# \"distance\": 1.9437453746795654,\n# \"entity\": {\n# \"color_tag\": \"purple_8164\"\n# }\n# },\n# {\n# \"id\": 3338,\n# \"distance\": 1.9107104539871216,\n# \"entity\": {\n# \"color_tag\": \"brown_8121\"\n# }\n# }\n# ]\n","import io.milvus.param.dml.QueryIteratorParam;\nimport io.milvus.param.dml.SearchIteratorParam;\nimport io.milvus.response.QueryResultsWrapper;\nimport io.milvus.orm.iterator.SearchIterator;\n\n// 4. Search with iterators\nSearchIteratorParam iteratorParam = SearchIteratorParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withVectorFieldName(\"vector\")\n // Use withFloatVectors() in clusters compatible with Milvus 2.4.x\n .withVectors(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f))\n .withBatchSize(10L)\n .withParams(\"{\\\"metric_type\\\": \\\"COSINE\\\", \\\"params\\\": {\\\"level\\\": 1}}\")\n .build();\n \n\nR searchIteratorRes = client.searchIterator(iteratorParam);\n\nif (searchIteratorRes.getStatus() != R.Status.Success.getCode()) {\n System.err.println(searchIteratorRes.getMessage());\n}\n\nSearchIterator searchIterator = searchIteratorRes.getData();\nList results = new ArrayList<>();\n\nwhile (true) {\n List batchResults = searchIterator.next();\n if (batchResults.isEmpty()) {\n searchIterator.close();\n break;\n }\n for (QueryResultsWrapper.RowRecord rowRecord : batchResults) {\n results.add(rowRecord);\n }\n}\n\nSystem.out.println(results.size());\n","# 6. 
Query with iterator\niterator = collection.query_iterator(\n batch_size=10, # Controls the size of the return each time you call next()\n expr=\"color_tag like \\\"brown_8\\\"\",\n output_fields=[\"color_tag\"]\n)\n\nresults = []\n\nwhile True:\n result = iterator.next()\n if not result:\n iterator.close()\n break\n \n results.extend(result)\n \n# 8. Check the search results\nprint(len(results))\n\nprint(results[:3])\n\n# Output\n#\n# [\n# {\n# \"color_tag\": \"brown_8785\",\n# \"id\": 94\n# },\n# {\n# \"color_tag\": \"brown_8568\",\n# \"id\": 176\n# },\n# {\n# \"color_tag\": \"brown_8721\",\n# \"id\": 289\n# }\n# ]\n","import io.milvus.param.dml.QueryIteratorParam;\nimport io.milvus.orm.iterator.QueryIterator;\n\n// 5. Query with iterators\n\ntry {\n Files.write(Path.of(\"results.json\"), JSON.toJSONString(new ArrayList<>()).getBytes(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);\n} catch (Exception e) {\n // TODO: handle exception\n e.printStackTrace();\n}\n\nQueryIteratorParam queryIteratorParam = QueryIteratorParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withExpr(\"color_tag like \\\"brown_8%\\\"\")\n .withBatchSize(50L)\n .addOutField(\"vector\")\n .addOutField(\"color_tag\")\n .build();\n\nR queryIteratRes = client.queryIterator(queryIteratorParam);\n\nif (queryIteratRes.getStatus() != R.Status.Success.getCode()) {\n System.err.println(queryIteratRes.getMessage());\n}\n\nQueryIterator queryIterator = queryIteratRes.getData();\n\nwhile (true) {\n List batchResults = queryIterator.next();\n if (batchResults.isEmpty()) {\n queryIterator.close();\n break;\n }\n\n String jsonString = \"\";\n List jsonObject = new ArrayList<>();\n try {\n jsonString = Files.readString(Path.of(\"results.json\"));\n jsonObject = JSON.parseArray(jsonString).toJavaList(null);\n } catch (IOException e) {\n // TODO Auto-generated catch block\n e.printStackTrace();\n }\n\n for (QueryResultsWrapper.RowRecord queryResult : batchResults) {\n JSONObject row = new JSONObject();\n row.put(\"id\", queryResult.get(\"id\"));\n row.put(\"vector\", queryResult.get(\"vector\"));\n row.put(\"color_tag\", queryResult.get(\"color_tag\"));\n jsonObject.add(row);\n }\n\n try {\n Files.write(Path.of(\"results.json\"), JSON.toJSONString(jsonObject).getBytes(), StandardOpenOption.WRITE);\n } catch (IOException e) {\n // TODO Auto-generated catch block\n e.printStackTrace();\n }\n}\n"],"headingContent":"","anchorList":[{"label":"使用迭代器","href":"With-Iterators","type":1,"isActive":false},{"label":"概述","href":"Overview","type":2,"isActive":false},{"label":"准备工作","href":"Preparations","type":2,"isActive":false},{"label":"使用迭代器搜索","href":"Search-with-iterator","type":2,"isActive":false},{"label":"使用迭代器查询","href":"Query-with-an-iterator","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["from pymilvus import MilvusClient\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=\"http://localhost:19530\"\n)\n\n# 2. Create a collection\nclient.create_collection(\n collection_name=\"quick_setup\",\n dimension=5,\n)\n","import io.milvus.client.MilvusServiceClient;\nimport io.milvus.param.ConnectParam;\nimport io.milvus.param.highlevel.collection.CreateSimpleCollectionParam;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectParam connectParam = ConnectParam.newBuilder()\n .withUri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusServiceClient client = new MilvusServiceClient(connectParam);\n\n// 2. 
Create a collection\nCreateSimpleCollectionParam createCollectionParam = CreateSimpleCollectionParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withDimension(5)\n .build();\n\nclient.createCollection(createCollectionParam);\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(10000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n\n# Output\n#\n# {\n# \"id\": 0,\n# \"vector\": [\n# -0.5705990742218152,\n# 0.39844925120642083,\n# -0.8791287928610869,\n# 0.024163154953680932,\n# 0.6837669917169638\n# ],\n# \"color\": \"purple\",\n# \"tag\": 7774,\n# \"color_tag\": \"purple_7774\"\n# }\n\nres = client.insert(\n collection_name=\"quick_setup\",\n data=data,\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 10000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(9990 more items hidden)\"\n# ]\n# }\n","import java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.Random;\n\nimport com.alibaba.fastjson.JSONObject;\n\nimport io.milvus.param.R;\nimport io.milvus.param.dml.InsertParam;\nimport io.milvus.response.MutationResultWrapper;\nimport io.milvus.grpc.MutationResult;\n\n\n// 3. Insert randomly generated vectors into the collection\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<10000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nInsertParam insertParam = InsertParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withRows(data)\n .build();\n\nR insertRes = client.insert(insertParam);\n\nif (insertRes.getStatus() != R.Status.Success.getCode()) {\n System.err.println(insertRes.getMessage());\n}\n\nMutationResultWrapper wrapper = new MutationResultWrapper(insertRes.getData());\nSystem.out.println(wrapper.getInsertCount());\n","from pymilvus import Collection\n\n# 4. 
Search with iterator\nconnections.connect(host=\"127.0.0.1\", port=19530)\ncollection = Collection(\"quick_setup\")\n\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\nsearch_params = {\n \"metric_type\": \"IP\",\n \"params\": {\"nprobe\": 10}\n}\n\niterator = collection.search_iterator(\n data=query_vectors,\n anns_field=\"vector\",\n batch_size=10,\n param=search_params,\n output_fields=[\"color_tag\"],\n limit=300\n)\n# search 300 entities totally with 10 entities per page\n\nresults = []\n\nwhile True:\n result = iterator.next()\n if not result:\n iterator.close()\n break\n \n results.extend(result)\n \n for hit in result:\n results.append(hit.to_dict())\n\nprint(results)\n\n# Output\n#\n# [\n# {\n# \"id\": 1756,\n# \"distance\": 2.0642056465148926,\n# \"entity\": {\n# \"color_tag\": \"black_9109\"\n# }\n# },\n# {\n# \"id\": 6488,\n# \"distance\": 1.9437453746795654,\n# \"entity\": {\n# \"color_tag\": \"purple_8164\"\n# }\n# },\n# {\n# \"id\": 3338,\n# \"distance\": 1.9107104539871216,\n# \"entity\": {\n# \"color_tag\": \"brown_8121\"\n# }\n# }\n# ]\n","import io.milvus.param.dml.QueryIteratorParam;\nimport io.milvus.param.dml.SearchIteratorParam;\nimport io.milvus.response.QueryResultsWrapper;\nimport io.milvus.orm.iterator.SearchIterator;\n\n// 4. Search with iterators\nSearchIteratorParam iteratorParam = SearchIteratorParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withVectorFieldName(\"vector\")\n // Use withFloatVectors() in clusters compatible with Milvus 2.4.x\n .withVectors(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f))\n .withBatchSize(10L)\n .withParams(\"{\\\"metric_type\\\": \\\"COSINE\\\", \\\"params\\\": {\\\"level\\\": 1}}\")\n .build();\n \n\nR searchIteratorRes = client.searchIterator(iteratorParam);\n\nif (searchIteratorRes.getStatus() != R.Status.Success.getCode()) {\n System.err.println(searchIteratorRes.getMessage());\n}\n\nSearchIterator searchIterator = searchIteratorRes.getData();\nList results = new ArrayList<>();\n\nwhile (true) {\n List batchResults = searchIterator.next();\n if (batchResults.isEmpty()) {\n searchIterator.close();\n break;\n }\n for (QueryResultsWrapper.RowRecord rowRecord : batchResults) {\n results.add(rowRecord);\n }\n}\n\nSystem.out.println(results.size());\n","# 6. Query with iterator\niterator = collection.query_iterator(\n batch_size=10, # Controls the size of the return each time you call next()\n expr=\"color_tag like \\\"brown_8\\\"\",\n output_fields=[\"color_tag\"]\n)\n\nresults = []\n\nwhile True:\n result = iterator.next()\n if not result:\n iterator.close()\n break\n \n results.extend(result)\n \n# 8. Check the search results\nprint(len(results))\n\nprint(results[:3])\n\n# Output\n#\n# [\n# {\n# \"color_tag\": \"brown_8785\",\n# \"id\": 94\n# },\n# {\n# \"color_tag\": \"brown_8568\",\n# \"id\": 176\n# },\n# {\n# \"color_tag\": \"brown_8721\",\n# \"id\": 289\n# }\n# ]\n","import io.milvus.param.dml.QueryIteratorParam;\nimport io.milvus.orm.iterator.QueryIterator;\n\n// 5. 
Query with iterators\n\ntry {\n Files.write(Path.of(\"results.json\"), JSON.toJSONString(new ArrayList<>()).getBytes(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);\n} catch (Exception e) {\n // TODO: handle exception\n e.printStackTrace();\n}\n\nQueryIteratorParam queryIteratorParam = QueryIteratorParam.newBuilder()\n .withCollectionName(\"quick_setup\")\n .withExpr(\"color_tag like \\\"brown_8%\\\"\")\n .withBatchSize(50L)\n .addOutField(\"vector\")\n .addOutField(\"color_tag\")\n .build();\n\nR queryIteratRes = client.queryIterator(queryIteratorParam);\n\nif (queryIteratRes.getStatus() != R.Status.Success.getCode()) {\n System.err.println(queryIteratRes.getMessage());\n}\n\nQueryIterator queryIterator = queryIteratRes.getData();\n\nwhile (true) {\n List batchResults = queryIterator.next();\n if (batchResults.isEmpty()) {\n queryIterator.close();\n break;\n }\n\n String jsonString = \"\";\n List jsonObject = new ArrayList<>();\n try {\n jsonString = Files.readString(Path.of(\"results.json\"));\n jsonObject = JSON.parseArray(jsonString).toJavaList(null);\n } catch (IOException e) {\n // TODO Auto-generated catch block\n e.printStackTrace();\n }\n\n for (QueryResultsWrapper.RowRecord queryResult : batchResults) {\n JSONObject row = new JSONObject();\n row.put(\"id\", queryResult.get(\"id\"));\n row.put(\"vector\", queryResult.get(\"vector\"));\n row.put(\"color_tag\", queryResult.get(\"color_tag\"));\n jsonObject.add(row);\n }\n\n try {\n Files.write(Path.of(\"results.json\"), JSON.toJSONString(jsonObject).getBytes(), StandardOpenOption.WRITE);\n } catch (IOException e) {\n // TODO Auto-generated catch block\n e.printStackTrace();\n }\n}\n"],"headingContent":"With Iterators","anchorList":[{"label":"使用迭代器","href":"With-Iterators","type":1,"isActive":false},{"label":"概述","href":"Overview","type":2,"isActive":false},{"label":"准备工作","href":"Preparations","type":2,"isActive":false},{"label":"使用迭代器搜索","href":"Search-with-iterator","type":2,"isActive":false},{"label":"使用迭代器查询","href":"Query-with-an-iterator","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/userGuide/search-query-get/with-iterators.md b/localization/v2.4.x/site/zh/userGuide/search-query-get/with-iterators.md index 033c5b774..2a1ef05b3 100644 --- a/localization/v2.4.x/site/zh/userGuide/search-query-get/with-iterators.md +++ b/localization/v2.4.x/site/zh/userGuide/search-query-get/with-iterators.md @@ -19,7 +19,7 @@ title: 使用迭代器 d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z" > -

Milvus provides search and query iterators for iterating through the results of a large volume of entities. Since Milvus limits TopK to 16,384, users can use iterators to return large numbers of entities, or even all the entities in a collection, in batch mode.&#13;

+&#13;

Milvus provides search and query iterators for iterating through a large volume of entities. Since Milvus limits TopK to 16,384, users can use iterators to return large numbers of entities, or even entire Collections, in batch mode.&#13;

Overview&#13;

Iterators are powerful tools that help you iterate through a large volume of data or all the data within a collection using primary key values and boolean expressions. This can significantly improve the way you retrieve data. Unlike the traditional use of offset and limit parameters, which may lose efficiency over time, iterators offer a more scalable solution.&#13;

+&#13;

Iterators are an efficient tool for scanning an entire Collections or iterating through a large volume of entities by specifying primary key values or a filter expression. Compared to a search or query call with offset and limit parameters, using iterators is more efficient and scalable.&#13;
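
The batch-iteration pattern looks like the following minimal sketch, which mirrors the fuller Python example later on this page and assumes the quick_setup collection prepared below:&#13;

from pymilvus import connections, Collection

connections.connect(host="127.0.0.1", port=19530)
collection = Collection("quick_setup")  # assumed collection name

iterator = collection.search_iterator(
    data=[[0.35, -0.6, 0.18, -0.26, 0.9]],  # one query vector
    anns_field="vector",
    batch_size=10,   # entities returned per next() call
    param={"metric_type": "IP", "params": {"nprobe": 10}},
    limit=300,       # total number of entities to fetch
)

results = []
while True:
    batch = iterator.next()
    if not batch:    # an empty batch marks the end of the result set
        iterator.close()
        break
    results.extend(batch)

print(len(results))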

Benefits of using iterators&#13;

• Simplicity: Eliminates the complex offset and limit settings.&#13;

• Efficiency: Provides scalable data retrieval by fetching only the data in need.&#13;

• @@ -62,12 +62,12 @@ title: With Iterators -&#13;

  The following steps repurpose the code to connect to Milvus, quickly set up a collection, and insert over 10,000 randomly generated entities into it.&#13;

  -&#13;

  Step 1: Create a collection&#13;

  -&#13;

  Use MilvusClient to connect to the Milvus server and create_collection() to create a collection.&#13;

  +&#13;

  The following preparation steps connect to Milvus and insert randomly generated entities into a Collections.&#13;

  +&#13;

  Step 1: Create a Collections&#13;

  +&#13;

  Use MilvusClient to connect to the Milvus server and create_collection() to create a Collections.&#13;

  -&#13;

  Use MilvusClientV2 to connect to the Milvus server and createCollection() to create a collection.&#13;

  +&#13;

  Use MilvusClientV2 to connect to the Milvus server, and use createCollection() to create a Collections.&#13;

      @@ -106,10 +106,10 @@ client.create_collection( client.createCollection(createCollectionParam);

Step 2: Insert randomly generated entities&#13;

-&#13;

Use insert() to insert entities into the collection.&#13;

+&#13;

Use insert() to insert entities into the Collections.&#13;

-&#13;

Use insert() to insert entities into the collection.&#13;

+&#13;

Use insert() to insert entities into the Collections.&#13;
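
A minimal sketch of this step, mirroring the full example in this guide (it assumes the MilvusClient instance client and the quick_setup collection from step 1):&#13;

import random

# Generate 10,000 entities with 5-dimensional random vectors
data = [
    {
        "id": i,
        "vector": [random.uniform(-1, 1) for _ in range(5)],
        "color_tag": f"blue_{random.randint(1000, 9999)}",
    }
    for i in range(10000)
]

res = client.insert(collection_name="quick_setup", data=data)
print(res["insert_count"])  # expected: 10000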

@@ -264,8 +264,9 @@ iterator = collection.search_iterator( batch_size=10, param=search_params, output_fields=["color_tag"], - limit=3 + limit=300 ) +# search a total of 300 entities, 10 entities per page results = [] @@ -356,23 +357,23 @@ System.out.println(results.size()); data - A list of vector embeddings.&#13;
Milvus searches for the vector embeddings most similar to the specified ones. + A list of vector embeddings.&#13;
Milvus searches for the vector embeddings most similar to the specified ones. anns_field - The name of the vector field in the current collection. + The name of the vector field in the current Collections. batch_size - The number of entities to return each time you call next() on the current iterator.&#13;
The value defaults to 1000. Set it to a proper value to control the number of entities to return per iteration. + The number of entities to return each time you call next() on the current iterator.&#13;
The value defaults to 1000. Set it to a suitable value to control the number of entities to return per iteration. param - The parameter settings specific to this operation.&#13;
• metric_type: The metric type applied to this operation. It should be identical to the one used when you index the vector field specified above. Possible values are L2, IP, COSINE, JACCARD, HAMMING.&#13;
• params: Additional parameters. For details, refer to search_iterator().&#13;
+ The parameter settings specific to this operator.&#13;
• metric_type: The metric type applied to this operation. It should be identical to the one used when you index the vector field specified above. Possible values are L2, IP, COSINE, JACCARD, HAMMING.&#13;
• params: Additional parameters. For details, refer to search_iterator().&#13;
output_fields - A list of field names to include in each returned entity.&#13;
The value defaults to None. If left unspecified, only the primary field is included. + A list of field names to include in each returned entity.&#13;
The value defaults to None. If left unspecified, only the primary field is included. limit @@ -390,7 +391,7 @@ System.out.println(results.size()); withCollectionName - Sets the collection name. The collection name cannot be empty or null. + Sets the Collections name. The Collection name cannot be empty or null. withVectorFieldName @@ -551,7 +552,7 @@ R<QueryIterator> queryIteratRes = c output_fields - A list of field names to include in each returned entity.&#13;
The value defaults to None. If left unspecified, only the primary field is included. + A list of field names to include in each returned entity.&#13;
    该值默认为 "无"。如果未指定,则只包含主字段。 limit @@ -569,7 +570,7 @@ R<QueryIterator> queryIteratRes = c withCollectionName - 设置集合名称。集合名称不能为空或空值。 + 设置 Collections 名称。Collection 名称不能为空或 null。 withExpr diff --git a/localization/v2.4.x/site/zh/userGuide/tools/cli_commands.json b/localization/v2.4.x/site/zh/userGuide/tools/cli_commands.json index cf2e53f5a..00d9fec6f 100644 --- a/localization/v2.4.x/site/zh/userGuide/tools/cli_commands.json +++ b/localization/v2.4.x/site/zh/userGuide/tools/cli_commands.json @@ -1 +1 @@ -{"codeList":["clear\n","connect [-uri (text)] [-t (text)]\n","milvus_cli > connect -uri http://127.0.0.1:19530 \n","create database -db (text) \n","milvus_cli > create database -db testdb\n","use database -db (text) \n","milvus_cli > use database -db testdb\n","list databases \n","milvus_cli > list databases\n","delete database -db (text) \n","milvus_cli > delete database -db testdb\n","create user -u (text) -p (text)\n","milvus_cli > create user -u zilliz -p zilliz\n","create alias -c (text) -a (text) [-A] \n","milvus_cli > create alias -c car -a carAlias1\n","milvus_cli > create alias -c car2 -A -a carAlias1\n","create collection -c (text) -f (text) -p (text) [-a] [-d (text)]\n","## For array field: --schema-field support :::(:if Varchar)\n\nmilvus_cli > create collection -c car -f id:INT64:primary_field -f vector:FLOAT_VECTOR:128 -f color:INT64:color -f brand:ARRAY:64:VARCHAR:128 -p id -A -d 'car_collection'\n","create partition -c (text) -p (text) [-d (text)]\n","milvus_cli > create partition -c car -p new_partition -d test_add_partition\n","create index\n","milvus_cli > create index\n\nCollection name (car, car2): car2\n\nThe name of the field to create an index for (vector): vector\n\nIndex name: vectorIndex\n\n# Default is ''\nIndex type FLAT, IVF_FLAT, IVF_SQ8, IVF_PQ, RNSG, HNSW, ANNOY, AUTOINDEX, DISKANN, GPU_IVF_FLAT, GPU_IVF_PQ, SPARSE_INVERTED_INDEX, SPARSE_WAND, SCANN, STL_SORT, Trie, INVERTED, ) []: IVF_FLAT \n\n# Default is ''\nIndex metric type (L2, IP, HAMMING, TANIMOTO, COSINE, ) []: \n\nTimeout []:\n","delete user -u (text)\n","milvus_cli > delete user -u zilliz\n","delete alias -a (text) \n","delete collection -c (text) \n","milvus_cli > delete collection -c car\n","delete entities -c (text) -p (text) \n","milvus_cli > delete entities -c car\n\nThe expression to specify entities to be deleted, such as \"film_id in [ 0, 1 ]\": film_id in [ 0, 1 ]\n\nYou are trying to delete the entities of collection. This action cannot be undone!\n\nDo you want to continue? [y/N]: y\n","delete partition -c (text) -p (text)\n","milvus_cli > delete partition -c car -p new_partition\n","delete index -c (text) -in (text)\n","milvus_cli > delete index -c car -in indexName\n","show collection -c (text)\n","milvus_cli > show collection -c test_collection_insert\n","show partition -c (text) -p (text)\n","milvus_cli > show partition -c test_collection_insert -p _default\n","show index -c (text) -in (text)\n","milvus_cli > show index -c test_collection -in index_name\n","exit\n","help \n","import -c (text)[-p (text)] \n","milvus_cli > import -c car 'examples/import_csv/vectors.csv'\n\nReading csv file... 
[####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","milvus_cli > import -c car 'https://raw.githubusercontent.com/milvus-\nio/milvus_cli/main/examples/import_csv/vectors.csv'\n\nReading file from remote URL.\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","list users\n","list collections\n","list indexes -c (text)\n","list partitions -c (text)\n","load -c (text) [-p (text)]\n","query\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id in [ 428960801420883491, 428960801420883492,\n428960801420883493 ]\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: color, brand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id > 428960801420883491\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: id, color,\nbrand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","release -c (text) [-p (text)]\n","search\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file\nout headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. 
If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers):\n [[0.71, 0.76, 0.17, 0.13, 0.42, 0.07, 0.15, 0.67, 0.58, 0.02, 0.39, 0.47, 0.58, 0.88, 0.73, 0.31, 0.23, 0.57, 0.33, 0.2, 0.03, 0.43, 0.78, 0.49, 0.17, 0.56, 0.76, 0.54, 0.45, 0.46, 0.05, 0.1, 0.43, 0.63, 0.29, 0.44, 0.65, 0.01, 0.35, 0.46, 0.66, 0.7, 0.88, 0.07, 0.49, 0.92, 0.57, 0.5, 0.16, 0.77, 0.98, 0.1, 0.44, 0.88, 0.82, 0.16, 0.67, 0.63, 0.57, 0.55, 0.95, 0.13, 0.64, 0.43, 0.71, 0.81, 0.43, 0.65, 0.76, 0.7, 0.05, 0.24, 0.03, 0.9, 0.46, 0.28, 0.92, 0.25, 0.97, 0.79, 0.73, 0.97, 0.49, 0.28, 0.64, 0.19, 0.23, 0.51, 0.09, 0.1, 0.53, 0.03, 0.23, 0.94, 0.87, 0.14, 0.42, 0.82, 0.91, 0.11, 0.91, 0.37, 0.26, 0.6, 0.89, 0.6, 0.32, 0.11, 0.98, 0.67, 0.12, 0.66, 0.47, 0.02, 0.15, 0.6, 0.64, 0.57, 0.14, 0.81, 0.75, 0.11, 0.49, 0.78, 0.16, 0.63, 0.57, 0.18]]\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, car2): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []:\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []:\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. 
If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","list connections \n","show index_progress -c (text) [-i (text)]\n","show loading_progress -c (text) [-p (text)]\n","version\n","$ milvus_cli --version\nMilvus_CLI v0.4.0\n"],"headingContent":"","anchorList":[{"label":"Milvus_CLI 命令参考","href":"MilvusCLI-Command-Reference","type":1,"isActive":false},{"label":"清除","href":"clear","type":2,"isActive":false},{"label":"连接","href":"connect","type":2,"isActive":false},{"label":"创建数据库","href":"create-Database","type":2,"isActive":false},{"label":"使用数据库","href":"use-Database","type":2,"isActive":false},{"label":"列出数据库","href":"List-Databases","type":2,"isActive":false},{"label":"删除数据库","href":"Delete-Database","type":2,"isActive":false},{"label":"创建用户","href":"create-user","type":2,"isActive":false},{"label":"创建别名","href":"create-alias","type":2,"isActive":false},{"label":"创建集合","href":"create-collection","type":2,"isActive":false},{"label":"创建分区","href":"create-partition","type":2,"isActive":false},{"label":"创建索引","href":"create-index","type":2,"isActive":false},{"label":"删除用户","href":"delete-user","type":2,"isActive":false},{"label":"删除别名","href":"delete-alias","type":2,"isActive":false},{"label":"删除集合","href":"delete-collection","type":2,"isActive":false},{"label":"删除实体","href":"delete-entities","type":2,"isActive":false},{"label":"删除分区","href":"delete-partition","type":2,"isActive":false},{"label":"删除索引","href":"delete-index","type":2,"isActive":false},{"label":"show collection","href":"show-collection","type":2,"isActive":false},{"label":"show partition","href":"show-partition","type":2,"isActive":false},{"label":"show index","href":"show-index","type":2,"isActive":false},{"label":"退出","href":"exit","type":2,"isActive":false},{"label":"帮助","href":"help","type":2,"isActive":false},{"label":"导入","href":"import","type":2,"isActive":false},{"label":"列出用户","href":"list-users","type":2,"isActive":false},{"label":"list collections","href":"list-collections","type":2,"isActive":false},{"label":"列出索引","href":"list-indexes","type":2,"isActive":false},{"label":"列出分区","href":"list-partitions","type":2,"isActive":false},{"label":"加载","href":"load","type":2,"isActive":false},{"label":"查询","href":"query","type":2,"isActive":false},{"label":"释放","href":"release","type":2,"isActive":false},{"label":"搜索","href":"search","type":2,"isActive":false},{"label":"列出连接","href":"List-connection","type":2,"isActive":false},{"label":"show index_progress","href":"show-indexprogress","type":2,"isActive":false},{"label":"显示加载进度","href":"show-loadingprogress","type":2,"isActive":false},{"label":"版本","href":"version","type":2,"isActive":false}]} \ No newline at end of file +{"codeList":["clear\n","connect [-uri (text)] [-t (text)]\n","milvus_cli > connect -uri http://127.0.0.1:19530 \n","create database -db (text) \n","milvus_cli > create database -db testdb\n","use database -db (text) \n","milvus_cli > use database -db testdb\n","list databases \n","milvus_cli > list databases\n","delete database -db (text) \n","milvus_cli > delete database -db testdb\n","create user -u (text) -p (text)\n","milvus_cli > create user -u zilliz -p zilliz\n","create alias -c (text) -a (text) [-A] \n","milvus_cli > create alias -c car -a carAlias1\n","milvus_cli > create alias -c car2 -A -a carAlias1\n","create collection -c (text) -f (text) -p (text) [-a] [-d (text)]\n","## For array field: --schema-field support :::(:if Varchar)\n\nmilvus_cli > create collection -c car -f id:INT64:primary_field 
-f vector:FLOAT_VECTOR:128 -f color:INT64:color -f brand:ARRAY:64:VARCHAR:128 -p id -A -d 'car_collection'\n","create partition -c (text) -p (text) [-d (text)]\n","milvus_cli > create partition -c car -p new_partition -d test_add_partition\n","create index\n","milvus_cli > create index\n\nCollection name (car, car2): car2\n\nThe name of the field to create an index for (vector): vector\n\nIndex name: vectorIndex\n\n# Default is ''\nIndex type FLAT, IVF_FLAT, IVF_SQ8, IVF_PQ, RNSG, HNSW, ANNOY, AUTOINDEX, DISKANN, GPU_IVF_FLAT, GPU_IVF_PQ, SPARSE_INVERTED_INDEX, SPARSE_WAND, SCANN, STL_SORT, Trie, INVERTED, ) []: IVF_FLAT \n\n# Default is ''\nIndex metric type (L2, IP, HAMMING, TANIMOTO, COSINE, ) []: \n\nTimeout []:\n","delete user -u (text)\n","milvus_cli > delete user -u zilliz\n","delete alias -a (text) \n","delete collection -c (text) \n","milvus_cli > delete collection -c car\n","delete entities -c (text) -p (text) \n","milvus_cli > delete entities -c car\n\nThe expression to specify entities to be deleted, such as \"film_id in [ 0, 1 ]\": film_id in [ 0, 1 ]\n\nYou are trying to delete the entities of collection. This action cannot be undone!\n\nDo you want to continue? [y/N]: y\n","delete partition -c (text) -p (text)\n","milvus_cli > delete partition -c car -p new_partition\n","delete index -c (text) -in (text)\n","milvus_cli > delete index -c car -in indexName\n","show collection -c (text)\n","milvus_cli > show collection -c test_collection_insert\n","show partition -c (text) -p (text)\n","milvus_cli > show partition -c test_collection_insert -p _default\n","show index -c (text) -in (text)\n","milvus_cli > show index -c test_collection -in index_name\n","exit\n","help \n","import -c (text)[-p (text)] \n","milvus_cli > import -c car 'examples/import_csv/vectors.csv'\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","milvus_cli > import -c car 'https://raw.githubusercontent.com/milvus-\nio/milvus_cli/main/examples/import_csv/vectors.csv'\n\nReading file from remote URL.\n\nReading csv file... [####################################] 100%\n\nColumn names are ['vector', 'color', 'brand']\n\nProcessed 50001 lines.\n\nInserting ...\n\nInsert successfully.\n\n-------------------------- ------------------\nTotal insert entities: 50000\nTotal collection entities: 150000\nMilvus timestamp: 428849214449254403\n-------------------------- ------------------\n","list users\n","list collections\n","list indexes -c (text)\n","list partitions -c (text)\n","load -c (text) [-p (text)]\n","query\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id in [ 428960801420883491, 428960801420883492,\n428960801420883493 ]\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: color, brand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. 
This option is 5s by default if not set. [5]:\n","milvus_cli > query\n\nCollection name: car\n\nThe query expression: id > 428960801420883491\n\nName of partitions that contain entities(split by \",\" if multiple) []:\ndefault\n\nA list of fields to return(split by \",\" if multiple) []: id, color,\nbrand\n\ntimeout []:\n\nGuarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:\nGraceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:\n","release -c (text) [-p (text)]\n","search\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file\nout headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, test_collection): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers):\n [[0.71, 0.76, 0.17, 0.13, 0.42, 0.07, 0.15, 0.67, 0.58, 0.02, 0.39, 0.47, 0.58, 0.88, 0.73, 0.31, 0.23, 0.57, 0.33, 0.2, 0.03, 0.43, 0.78, 0.49, 0.17, 0.56, 0.76, 0.54, 0.45, 0.46, 0.05, 0.1, 0.43, 0.63, 0.29, 0.44, 0.65, 0.01, 0.35, 0.46, 0.66, 0.7, 0.88, 0.07, 0.49, 0.92, 0.57, 0.5, 0.16, 0.77, 0.98, 0.1, 0.44, 0.88, 0.82, 0.16, 0.67, 0.63, 0.57, 0.55, 0.95, 0.13, 0.64, 0.43, 0.71, 0.81, 0.43, 0.65, 0.76, 0.7, 0.05, 0.24, 0.03, 0.9, 0.46, 0.28, 0.92, 0.25, 0.97, 0.79, 0.73, 0.97, 0.49, 0.28, 0.64, 0.19, 0.23, 0.51, 0.09, 0.1, 0.53, 0.03, 0.23, 0.94, 0.87, 0.14, 0.42, 0.82, 0.91, 0.11, 0.91, 0.37, 0.26, 0.6, 0.89, 0.6, 0.32, 0.11, 0.98, 0.67, 0.12, 0.66, 0.47, 0.02, 0.15, 0.6, 0.64, 0.57, 0.14, 0.81, 0.75, 0.11, 0.49, 0.78, 0.16, 0.63, 0.57, 0.18]]\n\nThe vector field used to search of collection (vector): vector\n\nSearch parameter nprobe's value: 10\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []: id > 0\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []: _default\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","milvus_cli > search\n\nCollection name (car, car2): car\n\nThe vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. 
You can also import a csv file without headers): examples/import_csv/search_vectors.csv\n\nThe vector field used to search of collection (vector): vector\n\nThe specified number of decimal places of returned distance [-1]: 5\n\nThe max number of returned record, also known as topk: 2\n\nThe boolean expression used to filter attribute []:\n\nThe names of partitions to search (split by \",\" if multiple) ['_default'] []:\n\ntimeout []:\n\nGuarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:\n\n","list connections \n","show index_progress -c (text) [-i (text)]\n","show loading_progress -c (text) [-p (text)]\n","version\n","$ milvus_cli --version\nMilvus_CLI v0.4.0\n"],"headingContent":"Milvus_CLI Command Reference","anchorList":[{"label":"Milvus_CLI 命令参考","href":"MilvusCLI-Command-Reference","type":1,"isActive":false},{"label":"清除","href":"clear","type":2,"isActive":false},{"label":"连接","href":"connect","type":2,"isActive":false},{"label":"创建数据库","href":"create-Database","type":2,"isActive":false},{"label":"使用数据库","href":"use-Database","type":2,"isActive":false},{"label":"列出数据库","href":"list-Databases","type":2,"isActive":false},{"label":"删除数据库","href":"delete-Database","type":2,"isActive":false},{"label":"创建用户","href":"create-user","type":2,"isActive":false},{"label":"创建别名","href":"create-alias","type":2,"isActive":false},{"label":"创建 Collection","href":"create-collection","type":2,"isActive":false},{"label":"创建分区","href":"create-partition","type":2,"isActive":false},{"label":"创建索引","href":"create-index","type":2,"isActive":false},{"label":"删除用户","href":"delete-user","type":2,"isActive":false},{"label":"删除别名","href":"delete-alias","type":2,"isActive":false},{"label":"delete Collections","href":"delete-collection","type":2,"isActive":false},{"label":"删除实体","href":"delete-entities","type":2,"isActive":false},{"label":"删除分区","href":"delete-partition","type":2,"isActive":false},{"label":"删除索引","href":"delete-index","type":2,"isActive":false},{"label":"show Collections","href":"show-collection","type":2,"isActive":false},{"label":"show partition","href":"show-partition","type":2,"isActive":false},{"label":"show index","href":"show-index","type":2,"isActive":false},{"label":"退出","href":"exit","type":2,"isActive":false},{"label":"帮助","href":"help","type":2,"isActive":false},{"label":"导入","href":"import","type":2,"isActive":false},{"label":"列出用户","href":"list-users","type":2,"isActive":false},{"label":"list Collections","href":"list-collections","type":2,"isActive":false},{"label":"列出索引","href":"list-indexes","type":2,"isActive":false},{"label":"列出分区","href":"list-partitions","type":2,"isActive":false},{"label":"加载","href":"load","type":2,"isActive":false},{"label":"查询","href":"query","type":2,"isActive":false},{"label":"释放","href":"release","type":2,"isActive":false},{"label":"搜索","href":"search","type":2,"isActive":false},{"label":"列出连接","href":"list-connection","type":2,"isActive":false},{"label":"show index_progress","href":"show-indexprogress","type":2,"isActive":false},{"label":"显示加载进度","href":"show-loadingprogress","type":2,"isActive":false},{"label":"版本","href":"version","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/userGuide/tools/cli_commands.md b/localization/v2.4.x/site/zh/userGuide/tools/cli_commands.md index 22175b24a..a197d0e3e 100644 --- 
a/localization/v2.4.x/site/zh/userGuide/tools/cli_commands.md +++ b/localization/v2.4.x/site/zh/userGuide/tools/cli_commands.md @@ -1,9 +1,8 @@ --- id: cli_commands.md -summary: 使用命令与Milvus互动。 +summary: 使用命令与 Milvus 互动。 title: Milvus_CLI 命令参考 --- -

Milvus_CLI Command Reference&#13;

The Milvus Command-Line Interface (CLI) is a kind of command-line tool that supports database connection, data operations, as well as the import and export of data.&#13;

+&#13;

The Milvus Command-Line Interface (CLI) is a command-line tool that supports database connection, data operations, and data import and export.&#13;

This topic introduces all supported commands and their corresponding options. Some examples are also included for your reference.&#13;

clear&#13;

-&#13;

List Databases&#13;

Example 1&#13;

The following example lists the databases in Milvus.&#13;

    milvus_cli > list databases
     
    -

Delete Database&#13;

Specifies a unique alias for a collection.&#13;

-&#13;
A dataset can have multiple aliases. However, an alias corresponds to at most one dataset.&#13;
+&#13;

Specifies a unique alias for a Collections.&#13;

+&#13;
A Collection can have multiple aliases. However, an alias corresponds to at most one Collection.&#13;

Syntax&#13;

    create alias -c (text) -a (text) [-A] 
     
@@ -255,23 +254,23 @@ title: Milvus_CLI Command Reference OptionFull nameDescription --c-collection-nameThe collection name. +-c-collection-nameThe name of the Collections. -a-aliasThe alias. --A-alter(Optional) Flag to transfer the alias to a specified collection. +-A-alter(Optional) Flag to transfer the alias to a specified Collections. -helpn/aDisplays help for using the command.&#13;

Examples&#13;

Example 1&#13;

    -

The following example creates the carAlias1 and carAlias2 aliases for the car assembly.&#13;

+&#13;

The following example creates the carAlias1 and carAlias2 aliases for the car Collection.&#13;

    milvus_cli > create alias -c car -a carAlias1
     

Example 2&#13;

Example 2 is based on Example 1.&#13;
-&#13;

The following example transfers the carAlias1 alias from the car collection to the car2 collection.&#13;

+&#13;

The following example transfers the carAlias1 alias from the car Collection to the car2 Collection.&#13;

    milvus_cli > create alias -c car2 -A -a carAlias1
     
    -

Create Collection&#13;

Creates a collection.&#13;

+&#13;

Creates a Collection.&#13;

Syntax&#13;

    create collection -c (text) -f (text) -p (text) [-a] [-d (text)]
     
@@ -296,13 +295,13 @@ title: Milvus_CLI Command Reference OptionFull nameDescription --c-collection-nameThe name of the collection. --f-field-schema(Multiple) The field schema in the <fieldName>:<dataType>:<dimOfVector/desc> format. --p-primary-key-fieldThe name of the primary key field. --a-schema-auto-id(Optional) Flag to generate IDs automatically. --desc-schema-description(Optional) The description of the collection. +-c-collection-nameThe name of the Collections. +-f-schema-field(Multiple) The field schema in the <fieldName>:<dataType>:<dimOfVector/desc> format. +-p-schema-primary-fieldThe name of the primary key field. +-a-schema-auto-id(Optional) Flag to generate IDs automatically. +-desc-schema-description(Optional) The description of the Collections. -level-consistency-level(Optional) Consistency level: Bounded, Session, Strong, Eventual. --d-is-dynamic(Optional) Whether the Collection schema supports Dynamic Field. +-d-is-dynamic(Optional) Whether the Collections schema supports dynamic fields. -s-shards-num(Optional) The shard number. -helpn/aDisplays help for using the command. @@ -312,7 +311,6 @@ title: Milvus_CLI Command Reference milvus_cli > create collection -c car -f id:INT64:primary_field -f vector:FLOAT_VECTOR:128 -f color:INT64:color -f brand:ARRAY:64:VARCHAR:128 -p id -A -d 'car_collection'&#13;
    -

Create Partition&#13;

Creates an index for a field.&#13;

-&#13;
Currently, a collection supports at most one index.&#13;
+&#13;
Currently, a Collection supports at most one index.&#13;

Syntax&#13;

    create index
     
    @@ -387,14 +385,13 @@ The name of the field to create an index for ( Index name: vectorIndex # Default is '' -Index type FLAT, IVF_FLAT, IVF_SQ8, IVF_PQ, RNSG, HNSW, ANNOY, AUTOINDEX, DISKANN, GPU_IVF_FLAT, GPU_IVF_PQ, SPARSE_INVERTED_INDEX, SPARSE_WAND, SCANN, STL_SORT, Trie, INVERTED, ) []: IVF_FLAT +Index type FLAT, IVF_FLAT, IVF_SQ8, IVF_PQ, RNSG, HNSW, ANNOY, AUTOINDEX, DISKANN, GPU_IVF_FLAT, GPU_IVF_PQ, SPARSE_INVERTED_INDEX, SPARSE_WAND, SCANN, STL_SORT, Trie, INVERTED, ) []: IVF_FLAT # Default is '' -Index metric type (L2, IP, HAMMING, TANIMOTO, COSINE, ) []: +Index metric type (L2, IP, HAMMING, TANIMOTO, COSINE, ) []: Timeout []:
    -

Delete User&#13;

Deletes a collection.&#13;

+&#13;

Deletes a Collection.&#13;

Syntax&#13;

    delete collection -c (text) 
     
@@ -479,7 +476,7 @@ Timeout []: OptionFull nameDescription --c-collection-nameThe name of the collection to be deleted. +-c-collection-nameThe name of the Collection to be deleted. -helpn/aDisplays help for using the command. @@ -511,7 +508,7 @@ Timeout []: OptionFull nameDescription --c-collection-nameThe name of the collection to which the entities to be deleted belong. +-c-collection-nameThe name of the Collections to which the entities to be deleted belong. -p-partition(Optional) The name of the partition to be deleted. -helpn/aDisplays help for using the command. @@ -525,7 +522,6 @@ Timeout []: Do you want to continue? [y/N]: y&#13;
    -

Delete Partition&#13;

Deletes an index and the corresponding index files.&#13;

-&#13;
Currently, a dataset supports at most one index.&#13;
+&#13;
Currently, a Collection supports at most one index.&#13;

Syntax&#13;

    delete index -c (text) -in (text)
     
@@ -585,7 +581,7 @@ Timeout []: OptionFull nameDescription --c-collection-nameThe name of the collection. +-c-collection-nameThe name of the Collections. -in-index-nameThe name of the index. -helpn/aDisplays help for using the command. @@ -593,7 +589,7 @@ Timeout []:&#13;

    示例

    milvus_cli > delete index -c car -in indexName
     
    -
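The same operation in one MilvusClient call (sketch; names are copied from the CLI example):

```python
# Sketch: drop the index created earlier.
from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")
client.drop_index(collection_name="car", index_name="indexName")
```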

    show collection

    显示集合的详细信息。

    +

    显示 Collection 的详细信息。
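Programmatically, the same details come back from describe_collection (a sketch; URI assumed, syntax and options for the CLI command follow below):

```python
# Sketch: the programmatic counterpart of `show collection` in pymilvus.
from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")
print(client.describe_collection(collection_name="car"))
```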

    语法

    show collection -c (text)
     
@@ -618,7 +614,7 @@ Timeout []:

 选项 | 全名 | 说明
--c | -集合名称 | 集合名称。
+-c | -collection-名称 | Collections 的名称。
 -帮助 | 不适用 | 显示命令使用帮助。

@@ -650,7 +646,7 @@ Timeout []:

 选项 | 全名 | 说明
--c | -集合名称 | 分区所属集合的名称。
+-c | -Collection-名称 | 分区所属 Collections 的名称。
 -p | -分区 | 分区的名称。
 -help | 不适用 | 显示命令使用帮助。

@@ -683,7 +679,7 @@ Timeout []:

 选项 | 全名 | 说明
--c | -集合名称 | 集合名称。
+-c | -collection-名称 | Collections 的名称。
 -在 | -索引名称 | 索引名称。

@@ -746,18 +742,18 @@ Timeout []:

 清除 | 清除屏幕。
 连接 | 连接 Milvus。
-创建 | 创建数据集、数据库、分区、用户和索引。
-删除 | 删除数据集、数据库、分区、别名、用户或索引。
+创建 | 创建 Collections、数据库、分区、用户和索引。
+删除 | 删除 Collections、数据库、分区、别名、用户或索引。
 退出 | 关闭命令行窗口。
 帮助 | 显示命令使用帮助。
 插入 | 将数据导入分区。
-列表 | 列出集合、数据库、分区、用户或索引。
-加载 | 加载集合或分区。
+列表 | 列出 Collections、数据库、分区、用户或索引。
+加载 | 加载一个 Collection 或分区。
 查询 | 显示符合所有输入条件的查询结果。
-释放 | 释放数据集或分区。
+释放 | 释放一个 Collection 或分区。
 搜索 | 执行向量相似性搜索或混合搜索。
-显示 | 显示连接、数据库、数据集、加载进度或索引进度。
-重命名 | 重命名数据集
+显示 | 显示连接、数据库、Collection、加载进度或索引进度。
+重命名 | 重命名 Collections
 使用 | 使用数据库
 版本 | 显示 Milvus_CLI 的版本。

@@ -787,7 +783,7 @@ Timeout []:

 选项 | 全名 | 说明
--c | -数据集名称 | 插入数据的集合名称。
+-c | -collection-名称 | 插入数据的 Collection 的名称。
 -p | -分区 | (可选)插入数据的分区名称。不通过此分区选项表示选择"_默认 "分区。
 -help | 不适用 | 显示命令使用帮助。

@@ -796,7 +792,7 @@ Timeout []:

 下面的示例导入了一个本地 CSV 文件。

    milvus_cli > import -c car 'examples/import_csv/vectors.csv'
     
    -Reading csv file... [####################################] 100%
    +Reading csv file...  [####################################]  100%
     
     Column names are ['vector', 'color', 'brand']
     
    @@ -805,17 +801,12 @@ Processed 50001 lines.
     Inserting ...
     
     Insert successfully.
    -
    ----
    -
    -Total insert entities: 50000
    -Total collection entities: 150000
    -Milvus timestamp: 428849214449254403
    -
    ----
    -
    +--------------------------  ------------------
    +Total insert entities:                   50000
    +Total collection entities:              150000
    +Milvus timestamp:           428849214449254403
    +--------------------------  ------------------
     
    -

    示例 2

    下面的示例导入了一个远程 CSV 文件。

milvus_cli > import -c car 'https://raw.githubusercontent.com/milvus-io/milvus_cli/main/examples/import_csv/vectors.csv'

@@ -823,7 +814,7 @@ io/milvus_cli/main/examples/import_csv/vectors.csv'
     
     Reading file from remote URL.
     
    -Reading csv file... [####################################] 100%
    +Reading csv file...  [####################################]  100%
     
     Column names are ['vector', 'color', 'brand']
     
    @@ -833,16 +824,12 @@ Inserting ...
     
     Insert successfully.
     
    ----
    -
    -Total insert entities: 50000
    -Total collection entities: 150000
    -Milvus timestamp: 428849214449254403
    -
    ----
    -
    +--------------------------  ------------------
    +Total insert entities:                   50000
    +Total collection entities:              150000
    +Milvus timestamp:           428849214449254403
    +--------------------------  ------------------
     
    -
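Both import examples boil down to reading a headered CSV and batching the rows into an insert call. A minimal pandas-based sketch (this assumes, as in the sample file, that the vector column holds JSON-style lists such as "[0.1, 0.2, ...]"):

```python
# Sketch: roughly what `import -c car <file>` does under the hood.
import json

import pandas as pd
from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")

df = pd.read_csv("examples/import_csv/vectors.csv")  # columns: vector, color, brand
df["vector"] = df["vector"].map(json.loads)          # parse the stringified vectors

rows = json.loads(df.to_json(orient="records"))      # plain Python types for insert
res = client.insert(collection_name="car", data=rows)
print(res["insert_count"])
```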

    列出用户

    list users
     

    选项

| 选项 | 全名 | 说明 |
| --- | --- | --- |
| --help | n/a | 显示命令使用帮助。|

    -

    list collections

    列出所有集合。

    +

    列出所有 Collections。

    语法

    list collections
     
@@ -905,8 +892,8 @@ Milvus timestamp: 428849214449254403

    列出集合的所有索引。

    -
    目前,一个数据集最多支持一个索引。
    +

    列出 Collections 的所有索引。

    +
    目前,一个 Collection 最多支持一个索引。

    语法

    list indexes -c (text)
     
@@ -916,7 +903,7 @@ Milvus timestamp: 428849214449254403

 选项 | 全名 | 说明
--c | -集合名称 | 集合名称。
+-c | -collection-名称 | Collections 的名称。
 -帮助 | 不适用 | 显示命令使用帮助。

@@ -935,7 +922,7 @@ Milvus timestamp: 428849214449254403

    列出数据集中的所有分区。

    +

    列出 Collections 的所有分区。
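Syntax and options for this command follow below; as a quick reference, the matching MilvusClient calls for the two listing commands above look like this (sketch; URI and collection name assumed):

```python
# Sketch: list a collection's indexes and partitions with MilvusClient.
from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")
print(client.list_indexes(collection_name="car"))     # e.g. ['vectorIndex']
print(client.list_partitions(collection_name="car"))  # e.g. ['_default']
```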

    语法

    list partitions -c (text)
     
@@ -945,7 +932,7 @@ Milvus timestamp: 428849214449254403

 选项 | 全名 | 说明
--c | -集合名称 | 集合名称。
+-c | -Collection-名称 | Collections 的名称。
 -帮助 | 不适用 | 显示命令使用帮助。

@@ -964,7 +951,7 @@ Milvus timestamp: 428849214449254403

    将数据集或分区从硬盘空间加载到 RAM。

    +

    将一个 Collection 或分区从硬盘空间加载到 RAM。
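A MilvusClient sketch of the same operation (syntax and options follow below; URI and names are assumptions):

```python
# Sketch: load a collection, or only selected partitions, into memory.
from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")
client.load_collection(collection_name="car")
# Or load only selected partitions:
client.load_partitions(collection_name="car", partition_names=["_default"])
print(client.get_load_state(collection_name="car"))
```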

    语法

    load -c (text) [-p (text)]
     
@@ -974,7 +961,7 @@ Milvus timestamp: 428849214449254403

 选项 | 全名 | 说明
--c | -集合名称 | 分区所属集合的名称。
+-c | -Collection-名称 | 分区所属 Collections 的名称。
 -p | -分区 | (可选/多个)分区的名称。
 -help | 不适用 | 显示命令使用帮助。

@@ -1027,7 +1014,6 @@ timeout []:

 Guarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:

 Graceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:
    -

    例 2

    执行查询并提示输入所需内容:

    milvus_cli > query
    @@ -1047,7 +1033,6 @@ timeout []:
     Guarantee timestamp. This instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date. [0]:
     Graceful time. Only used in bounded consistency level. If graceful_time is set, PyMilvus will use current timestamp minus the graceful_time as the guarantee_timestamp. This option is 5s by default if not set. [5]:
     
    -
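Outside the interactive prompts, the same query can be issued in one call (a sketch; the collection, filter, and output fields echo the prompts above):

```python
# Sketch: a non-interactive version of the query walkthrough.
from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")
res = client.query(
    collection_name="car",
    filter="id > 0",
    output_fields=["id", "color"],
    limit=10,
)
print(res)
```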

    释放

    从 RAM 中释放数据集或分区。

    +

    从 RAM 中释放一个 Collection 或分区。
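This is the inverse of load; a MilvusClient sketch (syntax and options follow below; names assumed):

```python
# Sketch: release a partition, then the whole collection, from memory.
from pymilvus import MilvusClient

client = MilvusClient(uri="http://localhost:19530")
client.release_partitions(collection_name="car", partition_names=["_default"])
client.release_collection(collection_name="car")
```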

    语法

    release -c (text) [-p (text)]
     
@@ -1073,7 +1058,7 @@ Graceful time. Only used in bounded consistenc

 选项 | 全名 | 说明
--c | -集合名称 | 分区所属集合的名称。
+-c | -Collection-名称 | 分区所属 Collections 的名称。
 -p | -分区 | (可选/多个)分区的名称。
 -help | 不适用 | 显示命令使用帮助。

@@ -1124,22 +1109,21 @@ The max number of returned record, also known

 The boolean expression used to filter attribute []: id > 0

-The names of partitions to search (split by "," if multiple) ['_default'] []: \_default
+The names of partitions to search (split by "," if multiple) ['_default'] []: _default

 timeout []:

 Guarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:
    -

    -

    在索引库中执行搜索,并提示输入所需内容:

    +

    对索引 Collections 执行搜索,并提示输入所需内容:

    milvus_cli > search
     
     Collection name (car, test_collection): car
     
     The vectors of search data(the length of data is number of query (nq), the dim of every vector in data must be equal to vector field’s of collection. You can also import a csv file without headers):
    -[[0.71, 0.76, 0.17, 0.13, 0.42, 0.07, 0.15, 0.67, 0.58, 0.02, 0.39, 0.47, 0.58, 0.88, 0.73, 0.31, 0.23, 0.57, 0.33, 0.2, 0.03, 0.43, 0.78, 0.49, 0.17, 0.56, 0.76, 0.54, 0.45, 0.46, 0.05, 0.1, 0.43, 0.63, 0.29, 0.44, 0.65, 0.01, 0.35, 0.46, 0.66, 0.7, 0.88, 0.07, 0.49, 0.92, 0.57, 0.5, 0.16, 0.77, 0.98, 0.1, 0.44, 0.88, 0.82, 0.16, 0.67, 0.63, 0.57, 0.55, 0.95, 0.13, 0.64, 0.43, 0.71, 0.81, 0.43, 0.65, 0.76, 0.7, 0.05, 0.24, 0.03, 0.9, 0.46, 0.28, 0.92, 0.25, 0.97, 0.79, 0.73, 0.97, 0.49, 0.28, 0.64, 0.19, 0.23, 0.51, 0.09, 0.1, 0.53, 0.03, 0.23, 0.94, 0.87, 0.14, 0.42, 0.82, 0.91, 0.11, 0.91, 0.37, 0.26, 0.6, 0.89, 0.6, 0.32, 0.11, 0.98, 0.67, 0.12, 0.66, 0.47, 0.02, 0.15, 0.6, 0.64, 0.57, 0.14, 0.81, 0.75, 0.11, 0.49, 0.78, 0.16, 0.63, 0.57, 0.18]]
    +    [[0.71, 0.76, 0.17, 0.13, 0.42, 0.07, 0.15, 0.67, 0.58, 0.02, 0.39, 0.47, 0.58, 0.88, 0.73, 0.31, 0.23, 0.57, 0.33, 0.2, 0.03, 0.43, 0.78, 0.49, 0.17, 0.56, 0.76, 0.54, 0.45, 0.46, 0.05, 0.1, 0.43, 0.63, 0.29, 0.44, 0.65, 0.01, 0.35, 0.46, 0.66, 0.7, 0.88, 0.07, 0.49, 0.92, 0.57, 0.5, 0.16, 0.77, 0.98, 0.1, 0.44, 0.88, 0.82, 0.16, 0.67, 0.63, 0.57, 0.55, 0.95, 0.13, 0.64, 0.43, 0.71, 0.81, 0.43, 0.65, 0.76, 0.7, 0.05, 0.24, 0.03, 0.9, 0.46, 0.28, 0.92, 0.25, 0.97, 0.79, 0.73, 0.97, 0.49, 0.28, 0.64, 0.19, 0.23, 0.51, 0.09, 0.1, 0.53, 0.03, 0.23, 0.94, 0.87, 0.14, 0.42, 0.82, 0.91, 0.11, 0.91, 0.37, 0.26, 0.6, 0.89, 0.6, 0.32, 0.11, 0.98, 0.67, 0.12, 0.66, 0.47, 0.02, 0.15, 0.6, 0.64, 0.57, 0.14, 0.81, 0.75, 0.11, 0.49, 0.78, 0.16, 0.63, 0.57, 0.18]]
     
     The vector field used to search of collection (vector): vector
     
    @@ -1151,16 +1135,15 @@ The max number of returned record, also known
     
     The boolean expression used to filter attribute []: id > 0
     
    -The names of partitions to search (split by "," if multiple) ['_default'] []: \_default
    +The names of partitions to search (split by "," if multiple) ['_default'] []: _default
     
     timeout []:
     
     Guarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:
     
     
    -

    -

    在非索引集合上执行搜索,并提示输入所需内容:

    +

    在非索引 Collections 上执行搜索,并提示输入所需内容:

    milvus_cli > search
     
     Collection name (car, car2): car
    @@ -1182,8 +1165,7 @@ timeout []:
     Guarantee Timestamp(It instructs Milvus to see all operations performed before a provided timestamp. If no such timestamp is provided, then Milvus will search all operations performed to date) [0]:
     
     
    - -

    列出连接

    显示实体加载的进度。

    +

    显示 Collections 的加载进度。
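With the ORM-style pymilvus API, the same information is available via utility.loading_progress (a sketch; connection details and collection name are assumptions, CLI syntax follows below):

```python
# Sketch: query load progress programmatically.
from pymilvus import connections, utility

connections.connect(uri="http://localhost:19530")
print(utility.loading_progress("car"))  # e.g. {'loading_progress': '100%'}
```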

    语法

    show loading_progress -c (text) [-p (text)]
     

    选项

-选项 | 全名 | 说明
+选项 | 全称 | 说明
--c | -集合名称 | 实体所属集合的名称。
+-c | -collection-名称 | 实体所属 Collections 的名称。
 -p | -分区 | (可选/多个)加载分区的名称。
 -help | 不适用 | 显示命令使用帮助。

diff --git a/localization/v2.4.x/site/zh/userGuide/tools/milvus_backup_overview.json b/localization/v2.4.x/site/zh/userGuide/tools/milvus_backup_overview.json
index 92a0be27e..3afb4bf14 100644
--- a/localization/v2.4.x/site/zh/userGuide/tools/milvus_backup_overview.json
+++ b/localization/v2.4.x/site/zh/userGuide/tools/milvus_backup_overview.json
@@ -1 +1 @@
-{"codeList":[],"headingContent":"","anchorList":[{"label":"Milvus 备份","href":"Milvus-Backup","type":1,"isActive":false},{"label":"前提条件","href":"Prerequisites","type":2,"isActive":false},{"label":"架构","href":"Architecture","type":2,"isActive":false},{"label":"最新版本","href":"Latest-release","type":2,"isActive":false}]}
\ No newline at end of file
+{"codeList":[],"headingContent":"Milvus Backup","anchorList":[{"label":"Milvus 备份","href":"Milvus-Backup","type":1,"isActive":false},{"label":"前提条件","href":"Prerequisites","type":2,"isActive":false},{"label":"架构","href":"Architecture","type":2,"isActive":false},{"label":"最新版本","href":"Latest-release","type":2,"isActive":false}]}
\ No newline at end of file

diff --git a/localization/v2.4.x/site/zh/userGuide/tools/milvus_backup_overview.md b/localization/v2.4.x/site/zh/userGuide/tools/milvus_backup_overview.md
index b33b7e8a2..7595779e0 100644
--- a/localization/v2.4.x/site/zh/userGuide/tools/milvus_backup_overview.md
+++ b/localization/v2.4.x/site/zh/userGuide/tools/milvus_backup_overview.md
@@ -1,9 +1,8 @@
 ---
 id: milvus_backup_overview.md
 summary: Milvus-Backup 是一款允许用户备份和恢复 Milvus 数据的工具。
-title: Milvus备份
+title: Milvus 备份
 ---
-

    Milvus 备份

    diff --git a/localization/v2.4.x/site/zh/userGuide/use-partition-key.json b/localization/v2.4.x/site/zh/userGuide/use-partition-key.json index f4c454380..299c2558d 100644 --- a/localization/v2.4.x/site/zh/userGuide/use-partition-key.json +++ b/localization/v2.4.x/site/zh/userGuide/use-partition-key.json @@ -1,63 +1 @@ -{ - "codeList": [ - "import random, time\nfrom pymilvus import connections, MilvusClient, DataType\n\nSERVER_ADDR = \"http://localhost:19530\"\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=SERVER_ADDR\n)\n\n# 2. Create a collection\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n partition_key_field=\"color\",\n num_partitions=16 # Number of partitions. Defaults to 16.\n)\n\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\nschema.add_field(field_name=\"color\", datatype=DataType.VARCHAR, max_length=512)\n", - "import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.collection.request.AddFieldReq;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in customized setup mode\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder()\n .fieldName(\"id\")\n .dataType(DataType.Int64)\n .isPrimaryKey(true)\n .autoID(false)\n .build());\n\nschema.addField(AddFieldReq.builder()\n .fieldName(\"vector\")\n .dataType(DataType.FloatVector)\n .dimension(5)\n .build());\n \nschema.addField(AddFieldReq.builder()\n .fieldName(\"color\")\n .dataType(DataType.VarChar)\n .maxLength(512)\n .isPartitionKey(true)\n .build());\n", - "const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\nasync function main() {\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address}); \n\n// 2. 
Create a collection\n// 2.1 Define fields\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n auto_id: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n {\n name: \"color\",\n data_type: DataType.VarChar,\n max_length: 512,\n is_partition_key: true\n }\n]\n", - "index_params = MilvusClient.prepare_index_params()\n\nindex_params.add_index(\n field_name=\"id\",\n index_type=\"STL_SORT\"\n)\n\nindex_params.add_index(\n field_name=\"color\",\n index_type=\"Trie\"\n)\n\nindex_params.add_index(\n field_name=\"vector\",\n index_type=\"IVF_FLAT\",\n metric_type=\"L2\",\n params={\"nlist\": 1024}\n)\n", - "// 2.3 Prepare index parameters\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.IP)\n .extraParams(Map.of(\"nlist\", 1024))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n", - "// 2.2 Prepare index parameters\nconst index_params = [{\n field_name: \"color\",\n index_type: \"Trie\"\n},{\n field_name: \"id\",\n index_type: \"STL_SORT\"\n},{\n field_name: \"vector\",\n index_type: \"IVF_FLAT\",\n metric_type: \"IP\",\n params: { nlist: 1024}\n}]\n", - "client.create_collection(\n collection_name=\"test_collection\",\n schema=schema,\n index_params=index_params\n)\n", - "// 2.4 Create a collection with schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n .collectionName(\"test_collection\")\n .collectionSchema(schema)\n .indexParams(indexParams) \n .build();\n\nclient.createCollection(customizedSetupReq);\n", - "// 2.3 Create a collection with fields and index parameters\nres = await client.createCollection({\n collection_name: \"test_collection\",\n fields: fields, \n index_params: index_params,\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n//\n", - "# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n", - "// 3. Insert randomly generated vectors\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n int current_tag = rand.nextInt(8999) + 1000;\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"tag\", current_tag);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nSystem.out.println(JSONObject.toJSON(data.get(0))); \n", - "// 3. 
Insert randomly generated vectors \nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\nvar data = []\n\nfor (let i = 0; i < 1000; i++) {\n const current_color = colors[Math.floor(Math.random() * colors.length)]\n const current_tag = Math.floor(Math.random() * 8999 + 1000)\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n tag: current_tag,\n color_tag: `${current_color}_${current_tag}`\n })\n}\n\nconsole.log(data[0])\n", - "{\n id: 0,\n vector: [\n 0.1275656405044483,\n 0.47417858592773277,\n 0.13858264437643286,\n 0.2390904907020377,\n 0.8447862593689635\n ],\n color: 'blue',\n tag: 2064,\n color_tag: 'blue_2064'\n}\n", - "res = client.insert(\n collection_name=\"test_collection\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n", - "// 3.1 Insert data into the collection\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"test_collection\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n", - "res = await client.insert({\n collection_name: \"test_collection\",\n data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n", - "# 4. Search with partition key\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\n\nres = client.search(\n collection_name=\"test_collection\",\n data=query_vectors,\n filter=\"color == 'green'\",\n search_params={\"metric_type\": \"L2\", \"params\": {\"nprobe\": 10}},\n output_fields=[\"id\", \"color_tag\"],\n limit=3\n)\n\nprint(res)\n\n# Output\n#\n# [\n# [\n# {\n# \"id\": 970,\n# \"distance\": 0.5770174264907837,\n# \"entity\": {\n# \"id\": 970,\n# \"color_tag\": \"green_9828\"\n# }\n# },\n# {\n# \"id\": 115,\n# \"distance\": 0.6898155808448792,\n# \"entity\": {\n# \"id\": 115,\n# \"color_tag\": \"green_4073\"\n# }\n# },\n# {\n# \"id\": 899,\n# \"distance\": 0.7028976678848267,\n# \"entity\": {\n# \"id\": 899,\n# \"color_tag\": \"green_9897\"\n# }\n# }\n# ]\n# ]\n", - "// 4. Search with partition key\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"test_collection\")\n .data(query_vectors)\n .filter(\"color == \\\"green\\\"\")\n .topK(3)\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp)); \n\n// Output:\n// {\"searchResults\": [[\n// {\n// \"distance\": 1.0586997,\n// \"id\": 414,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.981384,\n// \"id\": 293,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.9548756,\n// \"id\": 325,\n// \"entity\": {}\n// }\n// ]]}\n", - "// 4. 
Search with partition key\nconst query_vectors = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"test_collection\",\n data: query_vectors,\n filter: \"color == 'green'\",\n output_fields: [\"color_tag\"],\n limit: 3\n})\n\nconsole.log(res.results)\n\n// Output\n// \n// [\n// { score: 2.402090549468994, id: '135', color_tag: 'green_2694' },\n// { score: 2.3938629627227783, id: '326', color_tag: 'green_7104' },\n// { score: 2.3235254287719727, id: '801', color_tag: 'green_3162' }\n// ]\n// \n" - ], - "headingContent": "", - "anchorList": [ - { - "label": "使用Partition Key", - "href": "Use-Partition-Key", - "type": 1, - "isActive": false - }, - { "label": "概述", "href": "Overview", "type": 2, "isActive": false }, - { - "label": "启用Partition Key", - "href": "Enable-partition-key", - "type": 2, - "isActive": false - }, - { - "label": "列出分区", - "href": "List-partitions", - "type": 2, - "isActive": false - }, - { - "label": "插入数据", - "href": "Insert-data", - "type": 2, - "isActive": false - }, - { - "label": "使用分区键", - "href": "Use-partition-key", - "type": 2, - "isActive": false - }, - { - "label": "典型用例", - "href": "Typical-use-cases", - "type": 2, - "isActive": false - } - ] -} +{"codeList":["import random, time\nfrom pymilvus import connections, MilvusClient, DataType\n\nSERVER_ADDR = \"http://localhost:19530\"\n\n# 1. Set up a Milvus client\nclient = MilvusClient(\n uri=SERVER_ADDR\n)\n\n# 2. Create a collection\nschema = MilvusClient.create_schema(\n auto_id=False,\n enable_dynamic_field=True,\n partition_key_field=\"color\",\n num_partitions=64 # Number of partitions. Defaults to 64.\n)\n\nschema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True)\nschema.add_field(field_name=\"vector\", datatype=DataType.FLOAT_VECTOR, dim=5)\nschema.add_field(field_name=\"color\", datatype=DataType.VARCHAR, max_length=512)\n","import io.milvus.v2.client.ConnectConfig;\nimport io.milvus.v2.client.MilvusClientV2;\nimport io.milvus.v2.common.DataType;\nimport io.milvus.v2.common.IndexParam;\nimport io.milvus.v2.service.collection.request.AddFieldReq;\nimport io.milvus.v2.service.collection.request.CreateCollectionReq;\n\nString CLUSTER_ENDPOINT = \"http://localhost:19530\";\n\n// 1. Connect to Milvus server\nConnectConfig connectConfig = ConnectConfig.builder()\n .uri(CLUSTER_ENDPOINT)\n .build();\n\nMilvusClientV2 client = new MilvusClientV2(connectConfig);\n\n// 2. Create a collection in customized setup mode\n\n// 2.1 Create schema\nCreateCollectionReq.CollectionSchema schema = client.createSchema();\n\n// 2.2 Add fields to schema\nschema.addField(AddFieldReq.builder()\n .fieldName(\"id\")\n .dataType(DataType.Int64)\n .isPrimaryKey(true)\n .autoID(false)\n .build());\n\nschema.addField(AddFieldReq.builder()\n .fieldName(\"vector\")\n .dataType(DataType.FloatVector)\n .dimension(5)\n .build());\n \nschema.addField(AddFieldReq.builder()\n .fieldName(\"color\")\n .dataType(DataType.VarChar)\n .maxLength(512)\n .isPartitionKey(true)\n .build());\n","const { MilvusClient, DataType, sleep } = require(\"@zilliz/milvus2-sdk-node\")\n\nconst address = \"http://localhost:19530\"\n\nasync function main() {\n// 1. Set up a Milvus Client\nclient = new MilvusClient({address}); \n\n// 2. 
Create a collection\n// 2.1 Define fields\nconst fields = [\n {\n name: \"id\",\n data_type: DataType.Int64,\n is_primary_key: true,\n auto_id: false\n },\n {\n name: \"vector\",\n data_type: DataType.FloatVector,\n dim: 5\n },\n {\n name: \"color\",\n data_type: DataType.VarChar,\n max_length: 512,\n is_partition_key: true\n }\n]\n","index_params = MilvusClient.prepare_index_params()\n\nindex_params.add_index(\n field_name=\"id\",\n index_type=\"STL_SORT\"\n)\n\nindex_params.add_index(\n field_name=\"color\",\n index_type=\"Trie\"\n)\n\nindex_params.add_index(\n field_name=\"vector\",\n index_type=\"IVF_FLAT\",\n metric_type=\"L2\",\n params={\"nlist\": 1024}\n)\n","// 2.3 Prepare index parameters\nIndexParam indexParamForVectorField = IndexParam.builder()\n .fieldName(\"vector\")\n .indexType(IndexParam.IndexType.IVF_FLAT)\n .metricType(IndexParam.MetricType.IP)\n .extraParams(Map.of(\"nlist\", 1024))\n .build();\n\nList indexParams = new ArrayList<>();\nindexParams.add(indexParamForVectorField);\n","// 2.2 Prepare index parameters\nconst index_params = [{\n field_name: \"color\",\n index_type: \"Trie\"\n},{\n field_name: \"id\",\n index_type: \"STL_SORT\"\n},{\n field_name: \"vector\",\n index_type: \"IVF_FLAT\",\n metric_type: \"IP\",\n params: { nlist: 1024}\n}]\n","client.create_collection(\n collection_name=\"test_collection\",\n schema=schema,\n index_params=index_params\n)\n","// 2.4 Create a collection with schema and index parameters\nCreateCollectionReq customizedSetupReq = CreateCollectionReq.builder()\n .collectionName(\"test_collection\")\n .collectionSchema(schema)\n .indexParams(indexParams) \n .build();\n\nclient.createCollection(customizedSetupReq);\n","// 2.3 Create a collection with fields and index parameters\nres = await client.createCollection({\n collection_name: \"test_collection\",\n fields: fields, \n index_params: index_params,\n})\n\nconsole.log(res.error_code)\n\n// Output\n// \n// Success\n//\n","# 3. Insert randomly generated vectors \ncolors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\ndata = []\n\nfor i in range(1000):\n current_color = random.choice(colors)\n current_tag = random.randint(1000, 9999)\n data.append({\n \"id\": i,\n \"vector\": [ random.uniform(-1, 1) for _ in range(5) ],\n \"color\": current_color,\n \"tag\": current_tag,\n \"color_tag\": f\"{current_color}_{str(current_tag)}\"\n })\n\nprint(data[0])\n","// 3. Insert randomly generated vectors\nList colors = Arrays.asList(\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\");\nList data = new ArrayList<>();\n\nfor (int i=0; i<1000; i++) {\n Random rand = new Random();\n String current_color = colors.get(rand.nextInt(colors.size()-1));\n int current_tag = rand.nextInt(8999) + 1000;\n JSONObject row = new JSONObject();\n row.put(\"id\", Long.valueOf(i));\n row.put(\"vector\", Arrays.asList(rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat(), rand.nextFloat()));\n row.put(\"color\", current_color);\n row.put(\"tag\", current_tag);\n row.put(\"color_tag\", current_color + \"_\" + String.valueOf(rand.nextInt(8999) + 1000));\n data.add(row);\n}\n\nSystem.out.println(JSONObject.toJSON(data.get(0))); \n","// 3. 
Insert randomly generated vectors \nconst colors = [\"green\", \"blue\", \"yellow\", \"red\", \"black\", \"white\", \"purple\", \"pink\", \"orange\", \"brown\", \"grey\"]\nvar data = []\n\nfor (let i = 0; i < 1000; i++) {\n const current_color = colors[Math.floor(Math.random() * colors.length)]\n const current_tag = Math.floor(Math.random() * 8999 + 1000)\n data.push({\n id: i,\n vector: [Math.random(), Math.random(), Math.random(), Math.random(), Math.random()],\n color: current_color,\n tag: current_tag,\n color_tag: `${current_color}_${current_tag}`\n })\n}\n\nconsole.log(data[0])\n","{\n id: 0,\n vector: [\n 0.1275656405044483,\n 0.47417858592773277,\n 0.13858264437643286,\n 0.2390904907020377,\n 0.8447862593689635\n ],\n color: 'blue',\n tag: 2064,\n color_tag: 'blue_2064'\n}\n","res = client.insert(\n collection_name=\"test_collection\",\n data=data\n)\n\nprint(res)\n\n# Output\n#\n# {\n# \"insert_count\": 1000,\n# \"ids\": [\n# 0,\n# 1,\n# 2,\n# 3,\n# 4,\n# 5,\n# 6,\n# 7,\n# 8,\n# 9,\n# \"(990 more items hidden)\"\n# ]\n# }\n","// 3.1 Insert data into the collection\nInsertReq insertReq = InsertReq.builder()\n .collectionName(\"test_collection\")\n .data(data)\n .build();\n\nInsertResp insertResp = client.insert(insertReq);\n\nSystem.out.println(JSONObject.toJSON(insertResp));\n\n// Output:\n// {\"insertCnt\": 1000}\n","res = await client.insert({\n collection_name: \"test_collection\",\n data: data,\n})\n\nconsole.log(res.insert_cnt)\n\n// Output\n// \n// 1000\n// \n","# 4. Search with partition key\nquery_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]\n\nres = client.search(\n collection_name=\"test_collection\",\n data=query_vectors,\n filter=\"color == 'green'\",\n search_params={\"metric_type\": \"L2\", \"params\": {\"nprobe\": 10}},\n output_fields=[\"id\", \"color_tag\"],\n limit=3\n)\n\nprint(res)\n\n# Output\n#\n# [\n# [\n# {\n# \"id\": 970,\n# \"distance\": 0.5770174264907837,\n# \"entity\": {\n# \"id\": 970,\n# \"color_tag\": \"green_9828\"\n# }\n# },\n# {\n# \"id\": 115,\n# \"distance\": 0.6898155808448792,\n# \"entity\": {\n# \"id\": 115,\n# \"color_tag\": \"green_4073\"\n# }\n# },\n# {\n# \"id\": 899,\n# \"distance\": 0.7028976678848267,\n# \"entity\": {\n# \"id\": 899,\n# \"color_tag\": \"green_9897\"\n# }\n# }\n# ]\n# ]\n","// 4. Search with partition key\nList> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));\n\nSearchReq searchReq = SearchReq.builder()\n .collectionName(\"test_collection\")\n .data(query_vectors)\n .filter(\"color == \\\"green\\\"\")\n .topK(3)\n .build();\n\nSearchResp searchResp = client.search(searchReq);\n\nSystem.out.println(JSONObject.toJSON(searchResp)); \n\n// Output:\n// {\"searchResults\": [[\n// {\n// \"distance\": 1.0586997,\n// \"id\": 414,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.981384,\n// \"id\": 293,\n// \"entity\": {}\n// },\n// {\n// \"distance\": 0.9548756,\n// \"id\": 325,\n// \"entity\": {}\n// }\n// ]]}\n","// 4. 
Search with partition key\nconst query_vectors = [0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]\n\nres = await client.search({\n collection_name: \"test_collection\",\n data: query_vectors,\n filter: \"color == 'green'\",\n output_fields: [\"color_tag\"],\n limit: 3\n})\n\nconsole.log(res.results)\n\n// Output\n// \n// [\n// { score: 2.402090549468994, id: '135', color_tag: 'green_2694' },\n// { score: 2.3938629627227783, id: '326', color_tag: 'green_7104' },\n// { score: 2.3235254287719727, id: '801', color_tag: 'green_3162' }\n// ]\n// \n"],"headingContent":"Use Partition Key","anchorList":[{"label":"使用分区密钥","href":"Use-Partition-Key","type":1,"isActive":false},{"label":"概述","href":"Overview","type":2,"isActive":false},{"label":"启用分区密钥","href":"Enable-partition-key","type":2,"isActive":false},{"label":"列出分区","href":"List-partitions","type":2,"isActive":false},{"label":"插入数据","href":"Insert-data","type":2,"isActive":false},{"label":"使用 Partition Key","href":"Use-partition-key","type":2,"isActive":false},{"label":"典型用例","href":"Typical-use-cases","type":2,"isActive":false}]} \ No newline at end of file diff --git a/localization/v2.4.x/site/zh/userGuide/use-partition-key.md b/localization/v2.4.x/site/zh/userGuide/use-partition-key.md index 85b7dca70..b1afe17fa 100644 --- a/localization/v2.4.x/site/zh/userGuide/use-partition-key.md +++ b/localization/v2.4.x/site/zh/userGuide/use-partition-key.md @@ -1,10 +1,8 @@ --- id: use-partition-key.md -title: 使用Partition Key -summary: "" +title: 使用分区密钥 --- - -

    使用Partition Key

    本指南将指导您使用Partition Key来加速从收藏中检索数据。

    +

    本指南将指导您使用分区密钥来加速从 Collections 中检索数据。

    概述

    你可以将集合中的一个特定字段设置为分区键,这样 Milvus 就会根据该字段中各自的分区值,将进入的实体分配到不同的分区中。这样,具有相同键值的实体就会被归类到一个分区中,从而在通过键字段进行过滤时,无需扫描无关的分区,从而加快了搜索性能。与传统的过滤方法相比,分区键可以大大提高查询性能。

    -

    您可以使用Partition Key实现多租户。有关多租户的详细信息,请阅读多租户

    -

    启用Partition Key

    你可以将 Collections 中的一个特定字段设置为分区键,这样 Milvus 就会根据这个字段中各自的分区值,将进入的实体分配到不同的分区中。这样,具有相同键值的实体就会被归类到一个分区中,通过键字段进行过滤时,就无需扫描不相关的分区,从而加快了搜索性能。与传统的过滤方法相比,分区键可以大大提高查询性能。

    +

    可以使用 Partition Key 实现多租户。有关多租户的详细信息,请阅读多租户

    +

    启用分区密钥

    要将某个字段设置为分区键,请在创建Collection Schema 时指定partition_key_field

    -

    在下面的示例代码中,num_partitions 决定将创建的分区数量。默认情况下,它被设置为16 。建议保留默认值。

    +

    要将某个字段设置为分区键,请在创建 Collections Schema 时指定partition_key_field

    +

    在下面的示例代码中,num_partitions 决定将创建的分区数量。默认情况下,它被设置为64 。建议保留默认值。
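Conceptually, Milvus routes each entity to one of the num_partitions buckets by hashing its partition-key value, so entities with equal keys always land in the same partition. A toy illustration of that shape (this is not the internal hash, just the idea):

```python
# Toy illustration of hash-based partition routing (not Milvus internals).
NUM_PARTITIONS = 64

def bucket_for(key_value: str) -> int:
    # Python's str hash is process-salted; within one run, equal keys
    # always map to the same bucket, which is the property that matters.
    return hash(key_value) % NUM_PARTITIONS

assert bucket_for("green") == bucket_for("green")  # same key, same partition
```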

有关参数的更多信息,请参阅 MilvusClient、create_schema() 和 add_field()。有关参数的更多信息,请参阅 SDK 参考资料。

@@ -72,22 +70,21 @@ SERVER_ADDR = "http://localhost:19530"

 # 1. Set up a Milvus client
 client = MilvusClient(
-uri=SERVER_ADDR
+    uri=SERVER_ADDR
 )

 # 2. Create a collection
 schema = MilvusClient.create_schema(
-auto_id=False,
-enable_dynamic_field=True,
-partition_key_field="color",
-num_partitions=16 # Number of partitions. Defaults to 16.
+    auto_id=False,
+    enable_dynamic_field=True,
+    partition_key_field="color",
+    num_partitions=64 # Number of partitions. Defaults to 64.
 )

 schema.add_field(field_name="id", datatype=DataType.INT64, is_primary=True)
 schema.add_field(field_name="vector", datatype=DataType.FLOAT_VECTOR, dim=5)
 schema.add_field(field_name="color", datatype=DataType.VARCHAR, max_length=512)
-
    import io.milvus.v2.client.ConnectConfig;
     import io.milvus.v2.client.MilvusClientV2;
     import io.milvus.v2.common.DataType;
    @@ -166,23 +163,22 @@ client = new M
     
    index_params = MilvusClient.prepare_index_params()
     
     index_params.add_index(
    -field_name="id",
    -index_type="STL_SORT"
    +    field_name="id",
    +    index_type="STL_SORT"
     )
     
     index_params.add_index(
    -field_name="color",
    -index_type="Trie"
    +    field_name="color",
    +    index_type="Trie"
     )
     
     index_params.add_index(
    -field_name="vector",
    -index_type="IVF_FLAT",
    -metric_type="L2",
    -params={"nlist": 1024}
    +    field_name="vector",
    +    index_type="IVF_FLAT",
    +    metric_type="L2",
    +    params={"nlist": 1024}
     )
     
    -
    // 2.3 Prepare index parameters
     IndexParam indexParamForVectorField = IndexParam.builder()
         .fieldName("vector")
    @@ -208,7 +204,7 @@ indexParams.add(indexParamForVectorFie
         params: { nlist: 1024}
     }]
     
    -

    最后,您可以创建一个集合。

    +

    最后,您可以创建一个 Collection。

    client.create_collection(
    @@ -226,7 +222,6 @@ indexParams.add(indexParamForVectorFie
     
     client.createCollection(customizedSetupReq);
     
    -
    // 2.3 Create a collection with fields and index parameters
     res = await client.createCollection({
         collection_name: "test_collection",
    @@ -256,8 +251,8 @@ res = await client.
           
    -    

    一旦某个集合的字段被用作分区键,Milvus 就会创建指定数量的分区,并代表你管理它们。因此,你不能再操作该集合中的分区。

    -

    下面的代码段演示了一旦某个字段被用作分区键,会在集合中创建 64 个分区。

    +

    一旦 Collection 的某个字段被用作分区 Key,Milvus 就会创建指定数量的分区,并代表你管理它们。因此,你不能再操作此 Collection 中的分区。

    +

    下面的代码段演示了一旦某个 Collection 的某个字段被用作分区键,该 Collection 中就会出现 64 个分区。

    插入数据

    集合准备就绪后,开始按如下步骤插入数据:

    +

    Collections 准备就绪后,按如下步骤开始插入数据:

    准备数据

    # 3. Insert randomly generated vectors 
@@ -281,19 +276,18 @@ colors = ["green",

 for i in range(1000):
-current_color = random.choice(colors)
-current_tag = random.randint(1000, 9999)
-data.append({
-"id": i,
-"vector": [ random.uniform(-1, 1) for _ in range(5) ],
-"color": current_color,
-"tag": current_tag,
-"color_tag": f"{current_color}_{str(current_tag)}"
-})
    +    current_color = random.choice(colors)
    +    current_tag = random.randint(1000, 9999)
    +    data.append({
    +        "id": i,
    +        "vector": [ random.uniform(-1, 1) for _ in range(5) ],
    +        "color": current_color,
    +        "tag": current_tag,
    +        "color_tag": f"{current_color}_{str(current_tag)}"
    +    })
     
     print(data[0])
     
    -
    // 3. Insert randomly generated vectors
     List<String> colors = Arrays.asList("green", "blue", "yellow", "red", "black", "white", "purple", "pink", "orange", "brown", "grey");
     List<JSONObject> data = new ArrayList<>();
    @@ -347,13 +341,13 @@ data.append({
     }
     

    插入数据

    -

    使用 insert()方法将数据插入集合。

    +

    使用 insert()方法将数据插入 Collections。

    -

    使用 insert()方法将数据插入数据集。

    +

    使用 insert()方法将数据插入 Collections。

    -

    使用 insert()方法将数据插入集合。

    +

    使用 insert()方法将数据插入 Collections。

@@ -367,23 +361,22 @@ data.append({

 # Output
 #
 # {
-# "insert_count": 1000,
-# "ids": [
-# 0,
-# 1,
-# 2,
-# 3,
-# 4,
-# 5,
-# 6,
-# 7,
-# 8,
-# 9,
-# "(990 more items hidden)"
-# ]
+#     "insert_count": 1000,
+#     "ids": [
+#         0,
+#         1,
+#         2,
+#         3,
+#         4,
+#         5,
+#         6,
+#         7,
+#         8,
+#         9,
+#         "(990 more items hidden)"
+#     ]
 # }
    -
    // 3.1 Insert data into the collection
     InsertReq insertReq = InsertReq.builder()
         .collectionName("test_collection")
    @@ -409,7 +402,7 @@ data.append({
     // 1000
     // 
     
    -

    使用分区键

    索引和加载集合并插入数据后,就可以使用分区键进行相似性搜索。

    +

    索引和加载集合以及插入数据后,就可以使用 Partition Key 进行相似性搜索。

    有关参数的更多信息,请参阅 search()中的

    @@ -436,12 +429,12 @@ data.append({

    备注

    -

    要使用分区键进行相似性搜索,应在搜索请求的布尔表达式中包含以下任一项:

    +

    要使用 Partition Key 进行相似性搜索,应在搜索请求的布尔表达式中包含以下任一项:

    • expr='<partition_key>=="xxxx"'

    • expr='<partition_key> in ["xxx", "xxx"]'

    -

    请将<partition_key> 替换为指定为分区键的字段名称。

    +

    请将<partition_key> 替换为指定为 Partition Key 的字段名称。

@@ -449,12 +442,12 @@ data.append({

 query_vectors = [[0.3580376395471989, -0.6023495712049978, 0.18414012509913835, -0.26286205330961354, 0.9029438446296592]]

 res = client.search(
-collection_name="test_collection",
-data=query_vectors,
-filter="color == 'green'",
-search_params={"metric_type": "L2", "params": {"nprobe": 10}},
-output_fields=["id", "color_tag"],
-limit=3
+    collection_name="test_collection",
+    data=query_vectors,
+    filter="color == 'green'",
+    search_params={"metric_type": "L2", "params": {"nprobe": 10}},
+    output_fields=["id", "color_tag"],
+    limit=3
 )

 print(res)

@@ -462,35 +455,34 @@ limit=3

 # Output
 #
 # [
-# [
-# {
-# "id": 970,
-# "distance": 0.5770174264907837,
-# "entity": {
-# "id": 970,
-# "color_tag": "green_9828"
-# }
-# },
-# {
-# "id": 115,
-# "distance": 0.6898155808448792,
-# "entity": {
-# "id": 115,
-# "color_tag": "green_4073"
-# }
-# },
-# {
-# "id": 899,
-# "distance": 0.7028976678848267,
-# "entity": {
-# "id": 899,
-# "color_tag": "green_9897"
-# }
-# }
-# ]
+#     [
+#         {
+#             "id": 970,
+#             "distance": 0.5770174264907837,
+#             "entity": {
+#                 "id": 970,
+#                 "color_tag": "green_9828"
+#             }
+#         },
+#         {
+#             "id": 115,
+#             "distance": 0.6898155808448792,
+#             "entity": {
+#                 "id": 115,
+#                 "color_tag": "green_4073"
+#             }
+#         },
+#         {
+#             "id": 899,
+#             "distance": 0.7028976678848267,
+#             "entity": {
+#                 "id": 899,
+#                 "color_tag": "green_9897"
+#             }
+#         }
+#     ]
 # ]
    -
    // 4. Search with partition key
     List<List<Float>> query_vectors = Arrays.asList(Arrays.asList(0.3580376395471989f, -0.6023495712049978f, 0.18414012509913835f, -0.26286205330961354f, 0.9029438446296592f));
     
    @@ -561,4 +553,4 @@ res = await client.
           
    -    

    您可以利用Partition Key功能实现更好的搜索性能并启用多租户功能。具体做法是为每个实体指定一个特定于租户的值作为分区键字段。在搜索或查询集合时,通过在布尔表达式中包含分区键字段,可以根据租户特定值过滤实体。这种方法可确保按租户进行数据隔离,并避免扫描不必要的分区。

    +

    您可以利用 Partition Key 功能实现更好的搜索性能并启用多租户功能。具体做法是为每个实体指定一个特定于租户的值作为分区键字段。在搜索或查询 Collections 时,可以通过在布尔表达式中包含 Partition Key 字段,根据租户特定值过滤实体。这种方法可确保按租户进行数据隔离,并避免扫描不必要的分区。

    diff --git a/tools/cache.json b/tools/cache.json index c17847bc2..8dddfd54c 100644 --- a/tools/cache.json +++ b/tools/cache.json @@ -1,6 +1,6 @@ { "v2.4.x": { - "v2.4.x/site/en/userGuide/clustering-compaction.md": "2024-08-29T11:27:05.390Z", + "v2.4.x/site/en/userGuide/clustering-compaction.md": "2024-10-22T09:21:03.679Z", "v2.4.x/site/en/about/comparison.md": "2024-08-30T01:59:59.593Z", "v2.4.x/site/en/about/limitations.md": "2024-08-30T01:59:59.686Z", "v2.4.x/site/en/about/milvus_adopters.md": "2024-08-30T01:59:59.692Z", @@ -12,24 +12,24 @@ "v2.4.x/site/en/adminGuide/clouds/aws/aws.md": "2024-08-30T01:59:59.959Z", "v2.4.x/site/en/adminGuide/clouds/aws/aws_layer7.md": "2024-08-30T01:59:59.974Z", "v2.4.x/site/en/adminGuide/clouds/aws/eks.md": "2024-08-30T02:00:00.074Z", - "v2.4.x/site/en/adminGuide/clouds/aws/s3.md": "2024-08-30T02:00:00.115Z", + "v2.4.x/site/en/adminGuide/clouds/aws/s3.md": "2024-10-22T09:10:25.421Z", "v2.4.x/site/en/adminGuide/clouds/azure/abs.md": "2024-08-30T02:00:00.150Z", "v2.4.x/site/en/adminGuide/clouds/azure/azure.md": "2024-08-30T02:00:00.228Z", "v2.4.x/site/en/adminGuide/clouds/azure/ingress.md": "2024-08-30T02:00:00.274Z", "v2.4.x/site/en/adminGuide/clouds/gcp/gcp.md": "2024-08-30T02:00:00.346Z", "v2.4.x/site/en/adminGuide/clouds/gcp/gcp_layer7.md": "2024-08-30T02:00:00.371Z", "v2.4.x/site/en/adminGuide/clouds/gcp/gcs.md": "2024-08-30T02:00:00.395Z", - "v2.4.x/site/en/adminGuide/clouds/openshift/openshift.md": "2024-08-30T02:00:00.428Z", - "v2.4.x/site/en/adminGuide/configure-docker.md": "2024-08-30T02:00:00.482Z", + "v2.4.x/site/en/adminGuide/clouds/openshift/openshift.md": "2024-10-22T09:10:34.193Z", + "v2.4.x/site/en/adminGuide/configure-docker.md": "2024-10-22T09:10:43.248Z", "v2.4.x/site/en/adminGuide/configure-helm.md": "2024-08-30T02:00:00.513Z", - "v2.4.x/site/en/adminGuide/configure-querynode-localdisk.md": "2024-08-30T02:00:00.581Z", + "v2.4.x/site/en/adminGuide/configure-querynode-localdisk.md": "2024-10-22T09:10:55.820Z", "v2.4.x/site/en/adminGuide/configure_access_logs.md": "2024-08-30T02:00:00.612Z", "v2.4.x/site/en/adminGuide/configure_operator.md": "2024-08-30T02:00:00.648Z", "v2.4.x/site/en/adminGuide/deploy_etcd.md": "2024-08-30T02:00:00.669Z", "v2.4.x/site/en/adminGuide/deploy_pulsar.md": "2024-08-30T02:00:00.715Z", "v2.4.x/site/en/adminGuide/deploy_s3.md": "2024-08-30T02:00:00.739Z", "v2.4.x/site/en/adminGuide/dynamic_config.md": "2024-08-30T02:00:00.793Z", - "v2.4.x/site/en/adminGuide/limit_collection_counts.md": "2024-08-30T02:00:00.812Z", + "v2.4.x/site/en/adminGuide/limit_collection_counts.md": "2024-10-22T09:11:05.158Z", "v2.4.x/site/en/adminGuide/message_storage_operator.md": "2024-08-30T02:00:00.867Z", "v2.4.x/site/en/adminGuide/meta_storage_operator.md": "2024-08-30T02:00:00.884Z", "v2.4.x/site/en/adminGuide/monitor/alert.md": "2024-08-30T02:00:00.904Z", @@ -39,71 +39,71 @@ "v2.4.x/site/en/adminGuide/monitor/monitor_overview.md": "2024-08-30T02:00:01.002Z", "v2.4.x/site/en/adminGuide/monitor/visualize.md": "2024-08-30T02:00:01.014Z", "v2.4.x/site/en/adminGuide/object_storage_operator.md": "2024-08-30T02:00:01.039Z", - "v2.4.x/site/en/adminGuide/rbac.md": "2024-08-30T02:00:01.087Z", - "v2.4.x/site/en/adminGuide/resource_group.md": "2024-08-30T02:00:01.162Z", + "v2.4.x/site/en/adminGuide/rbac.md": "2024-10-22T09:11:17.669Z", + "v2.4.x/site/en/adminGuide/resource_group.md": "2024-10-22T09:11:34.315Z", "v2.4.x/site/en/adminGuide/scale-dependencies.md": "2024-08-30T02:00:01.210Z", "v2.4.x/site/en/adminGuide/scaleout.md": 
"2024-08-30T02:00:01.236Z", - "v2.4.x/site/en/adminGuide/tls.md": "2024-08-30T02:00:01.294Z", - "v2.4.x/site/en/adminGuide/upgrade_milvus_cluster-docker.md": "2024-08-30T02:00:01.318Z", - "v2.4.x/site/en/adminGuide/upgrade_milvus_cluster-helm.md": "2024-08-30T02:00:01.376Z", - "v2.4.x/site/en/adminGuide/upgrade_milvus_cluster-operator.md": "2024-08-30T02:00:01.403Z", - "v2.4.x/site/en/adminGuide/upgrade_milvus_standalone-docker.md": "2024-08-30T02:00:01.422Z", - "v2.4.x/site/en/adminGuide/upgrade_milvus_standalone-helm.md": "2024-08-30T02:00:01.467Z", - "v2.4.x/site/en/adminGuide/upgrade_milvus_standalone-operator.md": "2024-08-30T02:00:01.492Z", + "v2.4.x/site/en/adminGuide/tls.md": "2024-10-22T09:11:52.146Z", + "v2.4.x/site/en/adminGuide/upgrade_milvus_cluster-docker.md": "2024-10-22T09:12:02.956Z", + "v2.4.x/site/en/adminGuide/upgrade_milvus_cluster-helm.md": "2024-10-22T09:12:16.455Z", + "v2.4.x/site/en/adminGuide/upgrade_milvus_cluster-operator.md": "2024-10-22T09:12:28.129Z", + "v2.4.x/site/en/adminGuide/upgrade_milvus_standalone-docker.md": "2024-10-22T09:12:39.262Z", + "v2.4.x/site/en/adminGuide/upgrade_milvus_standalone-helm.md": "2024-10-22T09:12:51.589Z", + "v2.4.x/site/en/adminGuide/upgrade_milvus_standalone-operator.md": "2024-10-22T09:13:02.729Z", "v2.4.x/site/en/embeddings/embed-with-bgm-m3.md": "2024-08-30T02:00:01.503Z", "v2.4.x/site/en/embeddings/embed-with-bm25.md": "2024-08-30T02:00:01.511Z", - "v2.4.x/site/en/embeddings/embed-with-cohere.md": "2024-08-30T02:00:01.527Z", - "v2.4.x/site/en/embeddings/embed-with-jina.md": "2024-08-30T02:00:01.539Z", + "v2.4.x/site/en/embeddings/embed-with-cohere.md": "2024-10-22T09:13:12.276Z", + "v2.4.x/site/en/embeddings/embed-with-jina.md": "2024-10-22T09:13:22.664Z", "v2.4.x/site/en/embeddings/embed-with-openai.md": "2024-08-30T02:00:01.553Z", "v2.4.x/site/en/embeddings/embed-with-sentence-transform.md": "2024-08-30T02:00:01.563Z", "v2.4.x/site/en/embeddings/embed-with-splade.md": "2024-08-30T02:00:01.570Z", - "v2.4.x/site/en/embeddings/embed-with-voyage.md": "2024-08-30T02:00:01.576Z", - "v2.4.x/site/en/embeddings/embeddings.md": "2024-08-30T02:00:01.605Z", - "v2.4.x/site/en/faq/operational_faq.md": "2024-08-30T02:00:01.611Z", - "v2.4.x/site/en/faq/performance_faq.md": "2024-08-30T02:00:01.618Z", - "v2.4.x/site/en/faq/product_faq.md": "2024-08-30T02:00:01.626Z", + "v2.4.x/site/en/embeddings/embed-with-voyage.md": "2024-10-22T09:13:31.889Z", + "v2.4.x/site/en/embeddings/embeddings.md": "2024-10-22T09:13:43.193Z", + "v2.4.x/site/en/faq/operational_faq.md": "2024-10-22T09:13:55.170Z", + "v2.4.x/site/en/faq/performance_faq.md": "2024-10-22T09:14:05.268Z", + "v2.4.x/site/en/faq/product_faq.md": "2024-10-22T09:14:18.805Z", "v2.4.x/site/en/faq/troubleshooting.md": "2024-08-30T02:00:01.645Z", "v2.4.x/site/en/getstarted/install-overview.md": "2024-08-30T02:00:01.678Z", "v2.4.x/site/en/getstarted/install_SDKs/install-go.md": "2024-08-30T02:00:01.691Z", - "v2.4.x/site/en/getstarted/install_SDKs/install-java.md": "2024-08-30T02:00:01.707Z", + "v2.4.x/site/en/getstarted/install_SDKs/install-java.md": "2024-10-22T09:14:27.141Z", "v2.4.x/site/en/getstarted/install_SDKs/install-node.md": "2024-08-30T02:00:01.739Z", - "v2.4.x/site/en/getstarted/install_SDKs/install-pymilvus.md": "2024-08-30T02:00:01.759Z", - "v2.4.x/site/en/getstarted/quickstart.md": "2024-08-30T02:00:01.998Z", + "v2.4.x/site/en/getstarted/install_SDKs/install-pymilvus.md": "2024-10-22T09:14:35.729Z", + "v2.4.x/site/en/getstarted/quickstart.md": "2024-10-22T09:14:52.132Z", 
"v2.4.x/site/en/home/home.md": "2024-08-30T02:00:02.013Z", "v2.4.x/site/en/reference/benchmark.md": "2024-08-30T02:00:02.064Z", - "v2.4.x/site/en/getstarted/run-milvus-docker/install_standalone-docker-compose.md": "2024-08-30T02:00:15.613Z", + "v2.4.x/site/en/getstarted/run-milvus-docker/install_standalone-docker-compose.md": "2024-10-22T09:15:02.324Z", "v2.4.x/site/en/getstarted/run-milvus-docker/install_standalone-docker.md": "2024-08-30T02:00:15.641Z", "v2.4.x/site/en/getstarted/run-milvus-docker/prerequisite-docker.md": "2024-08-30T02:00:15.692Z", "v2.4.x/site/en/getstarted/run-milvus-gpu/install_cluster-helm-gpu.md": "2024-08-30T02:00:15.783Z", - "v2.4.x/site/en/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md": "2024-08-30T02:00:15.820Z", + "v2.4.x/site/en/getstarted/run-milvus-gpu/install_standalone-docker-compose-gpu.md": "2024-10-22T09:15:14.572Z", "v2.4.x/site/en/getstarted/run-milvus-gpu/prerequisite-gpu.md": "2024-08-30T02:00:15.845Z", "v2.4.x/site/en/getstarted/run-milvus-k8s/install_cluster-helm.md": "2024-08-30T02:00:15.918Z", "v2.4.x/site/en/getstarted/run-milvus-k8s/install_cluster-milvusoperator.md": "2024-08-30T02:00:16.012Z", "v2.4.x/site/en/getstarted/run-milvus-k8s/prerequisite-helm.md": "2024-08-30T02:00:16.040Z", "v2.4.x/site/en/integrations/integrate_with_airbyte.md": "2024-08-30T02:00:16.102Z", - "v2.4.x/site/en/integrations/integrate_with_bentoml.md": "2024-08-30T02:00:16.180Z", - "v2.4.x/site/en/integrations/integrate_with_camel.md": "2024-08-30T02:00:16.246Z", + "v2.4.x/site/en/integrations/integrate_with_bentoml.md": "2024-10-22T09:16:03.872Z", + "v2.4.x/site/en/integrations/integrate_with_camel.md": "2024-10-22T09:16:18.516Z", "v2.4.x/site/en/integrations/integrate_with_cohere.md": "2024-08-30T02:00:16.305Z", - "v2.4.x/site/en/integrations/integrate_with_dspy.md": "2024-08-30T02:00:16.363Z", + "v2.4.x/site/en/integrations/integrate_with_dspy.md": "2024-10-22T09:16:31.427Z", "v2.4.x/site/en/integrations/integrate_with_fastgpt.md": "2024-08-30T02:00:16.418Z", - "v2.4.x/site/en/integrations/integrate_with_haystack.md": "2024-08-30T02:00:16.459Z", - "v2.4.x/site/en/integrations/integrate_with_hugging-face.md": "2024-08-30T02:00:16.493Z", - "v2.4.x/site/en/integrations/integrate_with_jina.md": "2024-08-30T02:00:16.555Z", + "v2.4.x/site/en/integrations/integrate_with_haystack.md": "2024-10-22T09:16:42.309Z", + "v2.4.x/site/en/integrations/integrate_with_hugging-face.md": "2024-10-22T09:16:53.623Z", + "v2.4.x/site/en/integrations/integrate_with_jina.md": "2024-10-22T09:17:07.461Z", "v2.4.x/site/en/integrations/integrate_with_langfuse.md": "2024-08-30T02:00:16.587Z", - "v2.4.x/site/en/integrations/integrate_with_llamaindex.md": "2024-08-30T02:00:16.617Z", + "v2.4.x/site/en/integrations/integrate_with_llamaindex.md": "2024-10-22T09:17:17.740Z", "v2.4.x/site/en/integrations/integrate_with_memgpt.md": "2024-08-30T02:00:16.643Z", - "v2.4.x/site/en/integrations/integrate_with_openai.md": "2024-08-30T02:00:16.662Z", + "v2.4.x/site/en/integrations/integrate_with_openai.md": "2024-10-22T09:17:26.724Z", "v2.4.x/site/en/integrations/integrate_with_pytorch.md": "2024-08-30T02:00:16.721Z", - "v2.4.x/site/en/integrations/integrate_with_ragas.md": "2024-08-30T02:00:16.792Z", + "v2.4.x/site/en/integrations/integrate_with_ragas.md": "2024-10-22T09:17:39.592Z", "v2.4.x/site/en/integrations/integrate_with_sentencetransformers.md": "2024-08-30T02:00:16.845Z", "v2.4.x/site/en/integrations/integrate_with_snowpark.md": "2024-08-30T02:00:16.899Z", 
"v2.4.x/site/en/integrations/integrate_with_spark.md": "2024-08-30T02:00:17.025Z", - "v2.4.x/site/en/integrations/integrate_with_vanna.md": "2024-08-30T02:00:17.066Z", + "v2.4.x/site/en/integrations/integrate_with_vanna.md": "2024-10-22T09:17:50.446Z", "v2.4.x/site/en/integrations/integrate_with_voxel51.md": "2024-08-30T02:00:17.137Z", - "v2.4.x/site/en/integrations/integrate_with_voyageai.md": "2024-08-30T02:00:17.156Z", + "v2.4.x/site/en/integrations/integrate_with_voyageai.md": "2024-10-22T09:17:59.676Z", "v2.4.x/site/en/integrations/integrate_with_whyhow.md": "2024-08-30T02:00:17.191Z", "v2.4.x/site/en/integrations/langchain/basic_usage_langchain.md": "2024-08-30T02:00:17.251Z", - "v2.4.x/site/en/integrations/langchain/integrate_with_langchain.md": "2024-08-30T02:00:17.297Z", + "v2.4.x/site/en/integrations/langchain/integrate_with_langchain.md": "2024-10-22T09:18:20.003Z", "v2.4.x/site/en/integrations/langchain/milvus_hybrid_search_retriever.md": "2024-08-30T02:00:17.356Z", "v2.4.x/site/en/migrate/es2m.md": "2024-08-30T02:00:17.416Z", "v2.4.x/site/en/migrate/f2m.md": "2024-08-30T02:00:17.450Z", @@ -111,7 +111,7 @@ "v2.4.x/site/en/migrate/m2m.md": "2024-08-30T02:00:17.534Z", "v2.4.x/site/en/migrate/migrate_overview.md": "2024-08-30T02:00:17.555Z", "v2.4.x/site/en/migrate/milvusdm_install.md": "2024-08-30T02:00:17.568Z", - "v2.4.x/site/en/reference/architecture/architecture_overview.md": "2024-08-30T02:00:28.461Z", + "v2.4.x/site/en/reference/architecture/architecture_overview.md": "2024-10-22T09:18:28.504Z", "v2.4.x/site/en/reference/architecture/data_processing.md": "2024-08-30T02:00:28.498Z", "v2.4.x/site/en/reference/architecture/four_layers.md": "2024-08-30T02:00:28.537Z", "v2.4.x/site/en/reference/architecture/main_components.md": "2024-08-30T02:00:28.558Z", @@ -119,7 +119,7 @@ "v2.4.x/site/en/reference/boolean.md": "2024-08-30T02:00:29.302Z", "v2.4.x/site/en/reference/consistency.md": "2024-08-30T02:00:29.326Z", "v2.4.x/site/en/reference/coordinator_ha.md": "2024-08-30T02:00:29.359Z", - "v2.4.x/site/en/reference/disk_index.md": "2024-08-30T02:00:29.388Z", + "v2.4.x/site/en/reference/disk_index.md": "2024-10-22T09:18:39.497Z", "v2.4.x/site/en/reference/glossary.md": "2024-08-30T02:00:29.823Z", "v2.4.x/site/en/reference/gpu_index.md": "2024-08-30T02:00:29.874Z", "v2.4.x/site/en/reference/index.md": "2024-08-30T02:00:29.959Z", @@ -127,7 +127,7 @@ "v2.4.x/site/en/reference/metric.md": "2024-08-30T02:00:30.026Z", "v2.4.x/site/en/reference/mmap.md": "2024-08-30T02:00:30.054Z", "v2.4.x/site/en/reference/multi_tenancy.md": "2024-08-30T02:00:30.076Z", - "v2.4.x/site/en/reference/replica.md": "2024-08-30T02:00:30.098Z", + "v2.4.x/site/en/reference/replica.md": "2024-10-22T09:18:50.777Z", "v2.4.x/site/en/reference/reranking.md": "2024-08-30T02:00:30.124Z", "v2.4.x/site/en/reference/scalar_index.md": "2024-08-30T02:00:30.153Z", "v2.4.x/site/en/reference/schema.md": "2024-08-30T02:00:30.223Z", @@ -168,12 +168,12 @@ "v2.4.x/site/en/rerankers/rerankers-overview.md": "2024-08-30T02:20:13.577Z", "v2.4.x/site/en/rerankers/rerankers-voyage.md": "2024-08-30T02:20:13.588Z", "v2.4.x/site/en/tutorials/audio_similarity_search.md": "2024-08-30T02:20:13.593Z", - "v2.4.x/site/en/tutorials/build-rag-with-milvus.md": "2024-08-30T02:20:13.640Z", + "v2.4.x/site/en/tutorials/build-rag-with-milvus.md": "2024-10-22T09:19:49.412Z", "v2.4.x/site/en/tutorials/dna_sequence_classification.md": "2024-08-30T02:20:13.645Z", - "v2.4.x/site/en/tutorials/hybrid_search_with_milvus.md": "2024-08-30T02:20:13.672Z", + 
"v2.4.x/site/en/tutorials/hybrid_search_with_milvus.md": "2024-10-22T09:20:19.023Z", "v2.4.x/site/en/tutorials/image_deduplication_system.md": "2024-08-30T02:20:13.704Z", - "v2.4.x/site/en/tutorials/image_similarity_search.md": "2024-08-30T02:20:13.775Z", - "v2.4.x/site/en/tutorials/multimodal_rag_with_milvus.md": "2024-08-30T02:20:13.824Z", + "v2.4.x/site/en/tutorials/image_similarity_search.md": "2024-10-22T09:20:29.180Z", + "v2.4.x/site/en/tutorials/multimodal_rag_with_milvus.md": "2024-10-22T09:20:41.612Z", "v2.4.x/site/en/tutorials/question_answering_system.md": "2024-08-30T02:20:13.829Z", "v2.4.x/site/en/tutorials/recommendation_system.md": "2024-08-30T02:20:13.841Z", "v2.4.x/site/en/tutorials/text_image_search.md": "2024-08-30T02:20:13.855Z", @@ -182,19 +182,19 @@ "v2.4.x/site/en/userGuide/data-import/import-data.md": "2024-08-30T02:20:57.446Z", "v2.4.x/site/en/userGuide/data-import/prepare-source-data.md": "2024-08-30T02:20:57.539Z", "v2.4.x/site/en/userGuide/enable-dynamic-field.md": "2024-08-30T02:20:57.639Z", - "v2.4.x/site/en/userGuide/insert-update-delete.md": "2024-08-30T02:20:57.910Z", + "v2.4.x/site/en/userGuide/insert-update-delete.md": "2024-10-22T09:21:19.825Z", "v2.4.x/site/en/userGuide/manage-indexes/index-scalar-fields.md": "2024-08-30T02:20:58.367Z", - "v2.4.x/site/en/userGuide/manage-indexes/index-vector-fields.md": "2024-08-30T02:20:58.449Z", + "v2.4.x/site/en/userGuide/manage-indexes/index-vector-fields.md": "2024-10-22T09:21:55.128Z", "v2.4.x/site/en/userGuide/manage-indexes/index-with-gpu.md": "2024-08-30T02:20:58.498Z", - "v2.4.x/site/en/userGuide/manage-partitions.md": "2024-08-30T02:20:58.651Z", + "v2.4.x/site/en/userGuide/manage-partitions.md": "2024-10-22T09:22:10.328Z", "v2.4.x/site/en/userGuide/manage_databases.md": "2024-08-30T02:20:58.747Z", "v2.4.x/site/en/userGuide/search-query-get/get-and-scalar-query.md": "2024-08-30T02:20:58.999Z", "v2.4.x/site/en/userGuide/search-query-get/multi-vector-search.md": "2024-08-30T02:20:59.055Z", - "v2.4.x/site/en/userGuide/search-query-get/with-iterators.md": "2024-08-30T02:20:59.502Z", + "v2.4.x/site/en/userGuide/search-query-get/with-iterators.md": "2024-10-22T09:22:44.567Z", "v2.4.x/site/en/userGuide/tools/birdwatcher_install_guides.md": "2024-08-30T02:20:59.518Z", "v2.4.x/site/en/userGuide/tools/birdwatcher_overview.md": "2024-08-30T02:20:59.530Z", "v2.4.x/site/en/userGuide/tools/birdwatcher_usage_guides.md": "2024-08-30T02:20:59.630Z", - "v2.4.x/site/en/userGuide/tools/cli_commands.md": "2024-08-30T02:21:00.628Z", + "v2.4.x/site/en/userGuide/tools/cli_commands.md": "2024-10-22T09:23:16.500Z", "v2.4.x/site/en/userGuide/tools/cli_overview.md": "2024-08-30T02:21:00.641Z", "v2.4.x/site/en/userGuide/tools/install_cli.md": "2024-08-30T02:21:00.658Z", "v2.4.x/site/en/userGuide/tools/milvus-cdc/cdc-monitoring.md": "2024-08-30T02:21:00.663Z", @@ -203,22 +203,22 @@ "v2.4.x/site/en/userGuide/tools/milvus-cdc/milvus-cdc-overview.md": "2024-08-30T02:21:00.772Z", "v2.4.x/site/en/userGuide/tools/milvus_backup_api.md": "2024-08-30T02:21:00.822Z", "v2.4.x/site/en/userGuide/tools/milvus_backup_cli.md": "2024-08-30T02:21:00.860Z", - "v2.4.x/site/en/userGuide/tools/milvus_backup_overview.md": "2024-08-30T02:21:00.870Z", - "v2.4.x/site/en/userGuide/use-partition-key.md": "2024-08-30T02:21:01.325Z", - "v2.4.x/site/en/integrations/integrations_overview.md": "2024-09-04T01:48:05.175Z", + "v2.4.x/site/en/userGuide/tools/milvus_backup_overview.md": "2024-10-22T09:23:28.035Z", + "v2.4.x/site/en/userGuide/use-partition-key.md": 
"2024-10-22T09:23:40.960Z", + "v2.4.x/site/en/integrations/integrations_overview.md": "2024-10-22T09:18:08.943Z", "v2.4.x/site/en/integrations/llama_agents_metadata.md": "2024-09-04T01:48:24.481Z", "v2.4.x/site/en/integrations/milvus_rag_with_vllm.md": "2024-09-04T01:48:38.820Z", "v2.4.x/site/en/integrations/video_search_with_twelvelabs_and_milvus.md": "2024-09-04T01:48:53.421Z", - "v2.4.x/site/en/tutorials/tutorials-overview.md": "2024-09-04T01:49:01.478Z", + "v2.4.x/site/en/tutorials/tutorials-overview.md": "2024-10-22T09:20:50.342Z", "v2.4.x/site/en/reference/array_data_type.md": "2024-09-04T05:52:37.189Z", - "v2.4.x/site/en/userGuide/search-query-get/single-vector-search.md": "2024-09-04T05:52:54.234Z", + "v2.4.x/site/en/userGuide/search-query-get/single-vector-search.md": "2024-10-22T09:22:31.152Z", "v2.4.x/site/en/userGuide/use-json-fields.md": "2024-09-04T05:53:07.135Z", - "v2.4.x/site/en/menuStructure/en.json": "2024-10-22T09:05:51.444Z", - "v2.4.x/site/en/userGuide/manage-collections.md": "2024-09-04T06:47:02.070Z", + "v2.4.x/site/en/menuStructure/en.json": "2024-10-22T09:38:04.597Z", + "v2.4.x/site/en/userGuide/manage-collections.md": "2024-10-22T09:21:42.714Z", "v2.4.x/site/en/getstarted/milvus_lite.md": "2024-09-06T02:54:42.351Z", - "v2.4.x/site/en/release_notes.md": "2024-09-13T08:16:46.312Z", + "v2.4.x/site/en/release_notes.md": "2024-10-22T09:19:37.947Z", "v2.4.x/site/en/integrations/kotaemon_with_milvus.md": "2024-09-18T08:19:26.009Z", - "v2.4.x/site/en/tutorials/graph_rag_with_milvus.md": "2024-09-18T08:19:34.657Z", + "v2.4.x/site/en/tutorials/graph_rag_with_milvus.md": "2024-10-22T09:20:06.440Z", "v2.4.x/site/en/integrations/kafka-connect-milvus.md": "2024-09-19T02:38:51.836Z", "v2.4.x/site/en/embeddings/embed-with-instructor.md": "2024-09-23T06:16:26.195Z", "v2.4.x/site/en/embeddings/embed-with-mgte.md": "2024-09-23T06:16:30.607Z", @@ -226,8 +226,8 @@ "v2.4.x/site/en/embeddings/embed-with-nomic.md": "2024-09-23T06:16:44.134Z", "v2.4.x/site/en/integrations/apify_milvus_rag.md": "2024-09-29T02:57:55.544Z", "v2.4.x/site/en/integrations/dify_with_milvus.md": "2024-09-29T02:58:00.320Z", - "v2.4.x/site/en/integrations/evaluation_with_deepeval.md": "2024-09-29T02:58:09.090Z", - "v2.4.x/site/en/integrations/evaluation_with_phoenix.md": "2024-09-29T02:58:18.531Z", + "v2.4.x/site/en/integrations/evaluation_with_deepeval.md": "2024-10-22T09:15:31.124Z", + "v2.4.x/site/en/integrations/evaluation_with_phoenix.md": "2024-10-22T09:15:49.768Z", "v2.4.x/site/en/integrations/build_rag_on_arm.md": "2024-10-15T03:26:00.948Z", "v2.4.x/site/en/integrations/rag_with_langflow.md": "2024-10-15T03:26:07.319Z", "v2.4.x/site/en/tutorials/contextual_retrieval_with_milvus.md": "2024-10-15T03:26:17.061Z", diff --git a/tools/translate.js b/tools/translate.js index c772bf25e..ae38cbe7b 100644 --- a/tools/translate.js +++ b/tools/translate.js @@ -70,7 +70,7 @@ async function bootstrap() { const cacheOutdated = !cache[version] || !cache[version][path] || - new Date(cache[version][path]) < modifiedTime; + new Date(cache[version][path]) < new Date(modifiedTime); if (!deprecated && cacheOutdated) { updatedFiles.push(path); }