diff --git a/src/current/_config_cockroachdb.yml b/src/current/_config_cockroachdb.yml
index e9af33ab40d..12a460bb0ff 100644
--- a/src/current/_config_cockroachdb.yml
+++ b/src/current/_config_cockroachdb.yml
@@ -4,4 +4,4 @@ destination: _site/docs
homepage_title: CockroachDB Docs
versions:
stable: v24.3
- dev: v24.3
+ dev: v25.1
diff --git a/src/current/_data/metrics/available-metrics-in-metrics-list.csv b/src/current/_data/metrics/available-metrics-in-metrics-list.csv
new file mode 100644
index 00000000000..ce8aa57543a
--- /dev/null
+++ b/src/current/_data/metrics/available-metrics-in-metrics-list.csv
@@ -0,0 +1,477 @@
+metric_id
+addsstable.applications
+addsstable.copies
+addsstable.proposals
+admission.io.overload
+capacity
+capacity.available
+capacity.reserved
+capacity.used
+exec.error
+exec.latency
+exec.success
+gcbytesage
+gossip.bytes.received
+gossip.bytes.sent
+gossip.connections.incoming
+gossip.connections.outgoing
+gossip.connections.refused
+gossip.infos.received
+gossip.infos.sent
+intentage
+intentbytes
+intentcount
+keybytes
+keycount
+leases.epoch
+leases.error
+leases.expiration
+leases.success
+leases.transfers.error
+leases.transfers.success
+livebytes
+livecount
+liveness.epochincrements
+liveness.heartbeatfailures
+liveness.heartbeatlatency
+liveness.heartbeatsuccesses
+liveness.livenodes
+node-id
+queue.consistency.pending
+queue.consistency.process.failure
+queue.consistency.process.success
+queue.consistency.processingnanos
+queue.gc.info.abortspanconsidered
+queue.gc.info.abortspangcnum
+queue.gc.info.abortspanscanned
+queue.gc.info.clearrangefailed
+queue.gc.info.clearrangesuccess
+queue.gc.info.intentsconsidered
+queue.gc.info.intenttxns
+queue.gc.info.numkeysaffected
+queue.gc.info.pushtxn
+queue.gc.info.resolvesuccess
+queue.gc.info.resolvetotal
+queue.gc.info.transactionspangcaborted
+queue.gc.info.transactionspangccommitted
+queue.gc.info.transactionspangcpending
+queue.gc.info.transactionspanscanned
+queue.gc.pending
+queue.gc.process.failure
+queue.gc.process.success
+queue.gc.processingnanos
+queue.raftlog.pending
+queue.raftlog.process.failure
+queue.raftlog.process.success
+queue.raftlog.processingnanos
+queue.raftsnapshot.pending
+queue.raftsnapshot.process.failure
+queue.raftsnapshot.process.success
+queue.raftsnapshot.processingnanos
+queue.replicagc.pending
+queue.replicagc.process.failure
+queue.replicagc.process.success
+queue.replicagc.processingnanos
+queue.replicagc.removereplica
+queue.replicate.addreplica
+queue.replicate.addreplica.error
+queue.replicate.addreplica.success
+queue.replicate.pending
+queue.replicate.process.failure
+queue.replicate.process.success
+queue.replicate.processingnanos
+queue.replicate.purgatory
+queue.replicate.rebalancereplica
+queue.replicate.removedeadreplica
+queue.replicate.removedeadreplica.error
+queue.replicate.removedeadreplica.success
+queue.replicate.removedecommissioningreplica.error
+queue.replicate.removedecommissioningreplica.success
+queue.replicate.removereplica
+queue.replicate.removereplica.error
+queue.replicate.removereplica.success
+queue.replicate.replacedeadreplica.error
+queue.replicate.replacedeadreplica.success
+queue.replicate.replacedecommissioningreplica.error
+queue.replicate.replacedecommissioningreplica.success
+queue.replicate.transferlease
+queue.split.pending
+queue.split.process.failure
+queue.split.process.success
+queue.split.processingnanos
+queue.tsmaintenance.pending
+queue.tsmaintenance.process.failure
+queue.tsmaintenance.process.success
+queue.tsmaintenance.processingnanos
+raft.commandsapplied
+raft.heartbeats.pending
+raft.process.commandcommit.latency
+raft.process.logcommit.latency
+raft.process.tickingnanos
+raft.process.workingnanos
+raft.rcvd.app
+raft.rcvd.appresp
+raft.rcvd.dropped
+raft.rcvd.heartbeat
+raft.rcvd.heartbeatresp
+raft.rcvd.prevote
+raft.rcvd.prevoteresp
+raft.rcvd.prop
+raft.rcvd.snap
+raft.rcvd.timeoutnow
+raft.rcvd.transferleader
+raft.rcvd.vote
+raft.rcvd.voteresp
+raft.ticks
+raftlog.behind
+raftlog.truncated
+range.adds
+range.merges
+range.raftleadertransfers
+range.removes
+range.snapshots.generated
+range.snapshots.rcvd-bytes
+range.snapshots.rebalancing.rcvd-bytes
+range.snapshots.rebalancing.sent-bytes
+range.snapshots.recovery.rcvd-bytes
+range.snapshots.recovery.sent-bytes
+range.snapshots.recv-in-progress
+range.snapshots.recv-queue
+range.snapshots.recv-total-in-progress
+range.snapshots.send-in-progress
+range.snapshots.send-queue
+range.snapshots.send-total-in-progress
+range.snapshots.sent-bytes
+range.snapshots.unknown.rcvd-bytes
+range.snapshots.unknown.sent-bytes
+range.splits
+rangekeybytes
+rangekeycount
+ranges
+ranges.overreplicated
+ranges.unavailable
+ranges.underreplicated
+rangevalbytes
+rangevalcount
+rebalancing.queriespersecond
+rebalancing.readbytespersecond
+rebalancing.readspersecond
+rebalancing.requestspersecond
+rebalancing.writebytespersecond
+rebalancing.writespersecond
+replicas
+replicas.leaders
+replicas.leaders_invalid_lease
+replicas.leaders_not_leaseholders
+replicas.leaseholders
+replicas.quiescent
+replicas.reserved
+requests.backpressure.split
+requests.slow.lease
+requests.slow.raft
+rocksdb.block.cache.hits
+rocksdb.block.cache.misses
+rocksdb.block.cache.usage
+rocksdb.bloom.filter.prefix.checked
+rocksdb.bloom.filter.prefix.useful
+rocksdb.compactions
+rocksdb.flushes
+rocksdb.memtable.total-size
+rocksdb.num-sstables
+rocksdb.read-amplification
+rocksdb.table-readers-mem-estimate
+storage.keys.range-key-set.count
+storage.l0-level-score
+storage.l0-level-size
+storage.l0-num-files
+storage.l0-sublevels
+storage.l1-level-score
+storage.l1-level-size
+storage.l2-level-score
+storage.l2-level-size
+storage.l3-level-score
+storage.l3-level-size
+storage.l4-level-score
+storage.l4-level-size
+storage.l5-level-score
+storage.l5-level-size
+storage.l6-level-score
+storage.l6-level-size
+storage.marked-for-compaction-files
+storage.write-stalls
+sysbytes
+syscount
+tenant.consumption.cross_region_network_ru
+tenant.consumption.external_io_egress_bytes
+tenant.consumption.pgwire_egress_bytes
+tenant.consumption.read_batches
+tenant.consumption.read_bytes
+tenant.consumption.read_requests
+tenant.consumption.request_units
+tenant.consumption.sql_pods_cpu_seconds
+tenant.consumption.write_batches
+tenant.consumption.write_bytes
+tenant.consumption.write_requests
+timeseries.write.bytes
+timeseries.write.errors
+timeseries.write.samples
+totalbytes
+txnwaitqueue.deadlocks_total
+valbytes
+valcount
+changefeed.aggregator_progress
+changefeed.backfill_count
+changefeed.backfill_pending_ranges
+changefeed.checkpoint_progress
+changefeed.commit_latency
+changefeed.emitted_bytes
+changefeed.emitted_messages
+changefeed.error_retries
+changefeed.failures
+changefeed.lagging_ranges
+changefeed.max_behind_nanos
+changefeed.message_size_hist
+changefeed.running
+clock-offset.meannanos
+clock-offset.stddevnanos
+cluster.preserve-downgrade-option.last-updated
+distsender.batches
+distsender.batches.partial
+distsender.errors.notleaseholder
+distsender.rpc.sent
+distsender.rpc.sent.local
+distsender.rpc.sent.nextreplicaerror
+jobs.auto_create_stats.currently_paused
+jobs.auto_create_stats.currently_running
+jobs.auto_create_stats.resume_failed
+jobs.backup.currently_paused
+jobs.backup.currently_running
+jobs.changefeed.currently_paused
+jobs.changefeed.expired_pts_records
+jobs.changefeed.protected_age_sec
+jobs.changefeed.resume_retry_error
+jobs.create_stats.currently_running
+jobs.row_level_ttl.currently_paused
+jobs.row_level_ttl.currently_running
+jobs.row_level_ttl.delete_duration
+jobs.row_level_ttl.num_active_spans
+jobs.row_level_ttl.resume_completed
+jobs.row_level_ttl.resume_failed
+jobs.row_level_ttl.rows_deleted
+jobs.row_level_ttl.rows_selected
+jobs.row_level_ttl.select_duration
+jobs.row_level_ttl.span_total_duration
+jobs.row_level_ttl.total_expired_rows
+jobs.row_level_ttl.total_rows
+physical_replication.logical_bytes
+physical_replication.replicated_time_seconds
+requests.slow.distsender
+round-trip-latency
+rpc.connection.avg_round_trip_latency
+rpc.connection.failures
+rpc.connection.healthy
+rpc.connection.healthy_nanos
+rpc.connection.heartbeats
+rpc.connection.unhealthy
+rpc.connection.unhealthy_nanos
+schedules.BACKUP.failed
+schedules.BACKUP.last-completed-time
+schedules.BACKUP.protected_age_sec
+schedules.BACKUP.protected_record_count
+schedules.BACKUP.started
+schedules.BACKUP.succeeded
+schedules.scheduled-row-level-ttl-executor.failed
+sql.bytesin
+sql.bytesout
+sql.conn.latency
+sql.conns
+sql.ddl.count
+sql.delete.count
+sql.distsql.contended_queries.count
+sql.distsql.exec.latency
+sql.distsql.flows.active
+sql.distsql.flows.total
+sql.distsql.queries.active
+sql.distsql.queries.total
+sql.distsql.select.count
+sql.distsql.service.latency
+sql.exec.latency
+sql.failure.count
+sql.full.scan.count
+sql.guardrails.max_row_size_err.count
+sql.guardrails.max_row_size_log.count
+sql.insert.count
+sql.mem.distsql.current
+sql.mem.distsql.max
+sql.mem.internal.session.current
+sql.mem.internal.session.max
+sql.mem.internal.txn.current
+sql.mem.internal.txn.max
+sql.mem.root.current
+sql.mem.root.max
+sql.misc.count
+sql.new_conns
+sql.pgwire_cancel.ignored
+sql.pgwire_cancel.successful
+sql.pgwire_cancel.total
+sql.query.count
+sql.select.count
+sql.service.latency
+sql.statements.active
+sql.txn.abort.count
+sql.txn.begin.count
+sql.txn.commit.count
+sql.txn.contended.count
+sql.txn.latency
+sql.txn.rollback.count
+sql.txns.open
+sql.update.count
+tenant.sql_usage.cross_region_network_ru
+tenant.sql_usage.estimated_cpu_seconds
+tenant.sql_usage.external_io_egress_bytes
+tenant.sql_usage.external_io_ingress_bytes
+tenant.sql_usage.kv_request_units
+tenant.sql_usage.pgwire_egress_bytes
+tenant.sql_usage.provisioned_vcpus
+tenant.sql_usage.read_batches
+tenant.sql_usage.read_bytes
+tenant.sql_usage.read_requests
+tenant.sql_usage.request_units
+tenant.sql_usage.sql_pods_cpu_seconds
+tenant.sql_usage.write_batches
+tenant.sql_usage.write_bytes
+tenant.sql_usage.write_requests
+txn.aborts
+txn.commits
+txn.commits1PC
+txn.durations
+txn.restarts
+txn.restarts.asyncwritefailure
+txn.restarts.readwithinuncertainty
+txn.restarts.serializable
+txn.restarts.txnaborted
+txn.restarts.txnpush
+txn.restarts.unknown
+txn.restarts.writetooold
+build.timestamp
+sys.cgo.allocbytes
+sys.cgo.totalbytes
+sys.cgocalls
+sys.cpu.combined.percent-normalized
+sys.cpu.host.combined.percent-normalized
+sys.cpu.sys.ns
+sys.cpu.sys.percent
+sys.cpu.user.ns
+sys.cpu.user.percent
+sys.fd.open
+sys.fd.softlimit
+sys.gc.count
+sys.gc.pause.ns
+sys.gc.pause.percent
+sys.go.allocbytes
+sys.go.totalbytes
+sys.goroutines
+sys.host.disk.iopsinprogress
+sys.host.disk.read.bytes
+sys.host.disk.read.count
+sys.host.disk.write.bytes
+sys.host.disk.write.count
+sys.host.net.recv.bytes
+sys.host.net.send.bytes
+sys.rss
+sys.runnable.goroutines.per.cpu
+sys.totalmem
+sys.uptime
+jobs.auto_config_env_runner.currently_paused
+jobs.auto_config_env_runner.protected_age_sec
+jobs.auto_config_env_runner.protected_record_count
+jobs.auto_config_runner.currently_paused
+jobs.auto_config_runner.protected_age_sec
+jobs.auto_config_runner.protected_record_count
+jobs.auto_config_task.currently_paused
+jobs.auto_config_task.protected_age_sec
+jobs.auto_config_task.protected_record_count
+jobs.auto_create_partial_stats.currently_paused
+jobs.auto_create_partial_stats.protected_age_sec
+jobs.auto_create_partial_stats.protected_record_count
+jobs.auto_create_stats.protected_age_sec
+jobs.auto_create_stats.protected_record_count
+jobs.auto_schema_telemetry.currently_paused
+jobs.auto_schema_telemetry.protected_age_sec
+jobs.auto_schema_telemetry.protected_record_count
+jobs.auto_span_config_reconciliation.currently_paused
+jobs.auto_span_config_reconciliation.protected_age_sec
+jobs.auto_span_config_reconciliation.protected_record_count
+jobs.auto_sql_stats_compaction.currently_paused
+jobs.auto_sql_stats_compaction.protected_age_sec
+jobs.auto_sql_stats_compaction.protected_record_count
+jobs.auto_update_sql_activity.currently_paused
+jobs.auto_update_sql_activity.protected_age_sec
+jobs.auto_update_sql_activity.protected_record_count
+jobs.backup.protected_age_sec
+jobs.backup.protected_record_count
+jobs.changefeed.protected_record_count
+jobs.create_stats.currently_paused
+jobs.create_stats.protected_age_sec
+jobs.create_stats.protected_record_count
+jobs.history_retention.currently_paused
+jobs.history_retention.protected_age_sec
+jobs.history_retention.protected_record_count
+jobs.import.currently_paused
+jobs.import.protected_age_sec
+jobs.import.protected_record_count
+jobs.import_rollback.currently_paused
+jobs.import_rollback.protected_age_sec
+jobs.import_rollback.protected_record_count
+jobs.key_visualizer.currently_paused
+jobs.key_visualizer.protected_age_sec
+jobs.key_visualizer.protected_record_count
+jobs.logical_replication.currently_paused
+jobs.logical_replication.protected_age_sec
+jobs.logical_replication.protected_record_count
+jobs.migration.currently_paused
+jobs.migration.protected_age_sec
+jobs.migration.protected_record_count
+jobs.mvcc_statistics_update.currently_paused
+jobs.mvcc_statistics_update.protected_age_sec
+jobs.mvcc_statistics_update.protected_record_count
+jobs.new_schema_change.currently_paused
+jobs.new_schema_change.protected_age_sec
+jobs.new_schema_change.protected_record_count
+jobs.poll_jobs_stats.currently_paused
+jobs.poll_jobs_stats.protected_age_sec
+jobs.poll_jobs_stats.protected_record_count
+jobs.replication_stream_ingestion.currently_paused
+jobs.replication_stream_ingestion.protected_age_sec
+jobs.replication_stream_ingestion.protected_record_count
+jobs.replication_stream_producer.currently_paused
+jobs.replication_stream_producer.protected_age_sec
+jobs.replication_stream_producer.protected_record_count
+jobs.restore.currently_paused
+jobs.restore.protected_age_sec
+jobs.restore.protected_record_count
+jobs.row_level_ttl.protected_age_sec
+jobs.row_level_ttl.protected_record_count
+jobs.schema_change.currently_paused
+jobs.schema_change.protected_age_sec
+jobs.schema_change.protected_record_count
+jobs.schema_change_gc.currently_paused
+jobs.schema_change_gc.protected_age_sec
+jobs.schema_change_gc.protected_record_count
+jobs.standby_read_ts_poller.currently_paused
+jobs.standby_read_ts_poller.protected_age_sec
+jobs.standby_read_ts_poller.protected_record_count
+jobs.typedesc_schema_change.currently_paused
+jobs.typedesc_schema_change.protected_age_sec
+jobs.typedesc_schema_change.protected_record_count
+jobs.update_table_metadata_cache.currently_paused
+jobs.update_table_metadata_cache.protected_age_sec
+jobs.update_table_metadata_cache.protected_record_count
+sql.crud_query.count
+sql.crud_query.started.count
+auth.cert.conn.latency
+auth.gss.conn.latency
+auth.jwt.conn.latency
+auth.ldap.conn.latency
+auth.password.conn.latency
+auth.scram.conn.latency
\ No newline at end of file
diff --git a/src/current/_data/metrics/available-metrics-not-in-metrics-list.csv b/src/current/_data/metrics/available-metrics-not-in-metrics-list.csv
new file mode 100644
index 00000000000..1cd86aace0a
--- /dev/null
+++ b/src/current/_data/metrics/available-metrics-not-in-metrics-list.csv
@@ -0,0 +1,19 @@
+metric_id,description,y-axis label,type,unit
+"security.certificate.expiration.ca","Expiration for the CA certificate. 0 means no certificate or error.","Certificate Expiration",GAUGE,TIMESTAMP_SEC
+"security.certificate.expiration.client-ca","Expiration for the client CA certificate. 0 means no certificate or error.","Certificate Expiration",GAUGE,TIMESTAMP_SEC
+"security.certificate.expiration.client","Minimum expiration for client certificates, labeled by SQL user. 0 means no certificate or error.","Certificate Expiration",GAUGE,TIMESTAMP_SEC
+"security.certificate.expiration.ui-ca","Expiration for the UI CA certificate. 0 means no certificate or error.","Certificate Expiration",GAUGE,TIMESTAMP_SEC
+"security.certificate.expiration.node","Expiration for the node certificate. 0 means no certificate or error.","Certificate Expiration",GAUGE,TIMESTAMP_SEC
+"security.certificate.expiration.node-client","Expiration for the node's client certificate. 0 means no certificate or error.","Certificate Expiration",GAUGE,TIMESTAMP_SEC
+"security.certificate.expiration.ui","Expiration for the UI certificate. 0 means no certificate or error.","Certificate Expiration",GAUGE,TIMESTAMP_SEC
+"security.certificate.expiration.ca-client-tenant","Expiration for the Tenant Client CA certificate. 0 means no certificate or error.","Certificate Expiration",GAUGE,TIMESTAMP_SEC
+"security.certificate.expiration.client-tenant","Expiration for the Tenant Client certificate. 0 means no certificate or error.","Certificate Expiration",GAUGE,TIMESTAMP_SEC
+"security.certificate.ttl.ca","Seconds till expiration for the CA certificate. 0 means expired, no certificate or error.","Certificate TTL",GAUGE,TIMESTAMP_SEC
+"security.certificate.ttl.client-ca","Seconds till expiration for the client CA certificate. 0 means expired, no certificate or error.","Certificate TTL",GAUGE,TIMESTAMP_SEC
+"security.certificate.ttl.client","Seconds till expiration for the client certificates, labeled by SQL user. 0 means expired, no certificate or error.","Certificate TTL",GAUGE,TIMESTAMP_SEC
+"security.certificate.ttl.ui-ca","Seconds till expiration for the UI CA certificate. 0 means expired, no certificate or error.","Certificate TTL",GAUGE,TIMESTAMP_SEC
+"security.certificate.ttl.node","Seconds till expiration for the node certificate. 0 means expired, no certificate or error.","Certificate TTL",GAUGE,TIMESTAMP_SEC
+"security.certificate.ttl.node-client","Seconds till expiration for the node's client certificate. 0 means expired, no certificate or error.","Certificate TTL",GAUGE,TIMESTAMP_SEC
+"security.certificate.ttl.ui","Seconds till expiration for the UI certificate. 0 means expired, no certificate or error.","Certificate TTL",GAUGE,TIMESTAMP_SEC
+"security.certificate.ttl.ca-client-tenant","Seconds till expiration for the Tenant Client CA certificate. 0 means expired, no certificate or error.","Certificate TTL",GAUGE,TIMESTAMP_SEC
+"security.certificate.ttl.client-tenant","Seconds till expiration for the Tenant Client certificate. 0 means expired, no certificate or error.","Certificate TTL",GAUGE,TIMESTAMP_SEC
\ No newline at end of file
diff --git a/src/current/_data/metrics/child-metrics.yml b/src/current/_data/metrics/child-metrics.yml
index e6393213331..afe92da0139 100644
--- a/src/current/_data/metrics/child-metrics.yml
+++ b/src/current/_data/metrics/child-metrics.yml
@@ -233,4 +233,20 @@
feature: all
- child_metric_id: rpc.connection.avg_round_trip_latency
- feature: all
\ No newline at end of file
+ feature: all
+
+- child_metric_id: logical_replication.catchup_ranges_by_label
+ feature: ldr
+
+- child_metric_id: logical_replication.events_dlqed_by_label
+ feature: ldr
+
+- child_metric_id: logical_replication.events_ingested_by_label
+ feature: ldr
+
+- child_metric_id: logical_replication.replicated_time_by_label
+ feature: ldr
+
+- child_metric_id: logical_replication.scanning_ranges_by_label
+ feature: ldr
+
diff --git a/src/current/_data/metrics/metrics-list.csv b/src/current/_data/metrics/metrics-list.csv
index 32a5cb72e41..0792493fc70 100644
--- a/src/current/_data/metrics/metrics-list.csv
+++ b/src/current/_data/metrics/metrics-list.csv
@@ -16,8 +16,8 @@ STORAGE,admission.admitted.elastic-cpu,Number of requests admitted,Requests,COUN
STORAGE,admission.admitted.elastic-cpu.bulk-normal-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,admission.admitted.elastic-cpu.normal-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,admission.admitted.elastic-stores,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.admitted.elastic-stores.bulk-low-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,admission.admitted.elastic-stores.bulk-normal-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
-STORAGE,admission.admitted.elastic-stores.ttl-low-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,admission.admitted.kv,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,admission.admitted.kv-stores,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,admission.admitted.kv-stores.high-pri,Number of requests admitted,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
@@ -51,8 +51,8 @@ STORAGE,admission.errored.elastic-cpu,Number of requests not admitted due to err
STORAGE,admission.errored.elastic-cpu.bulk-normal-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,admission.errored.elastic-cpu.normal-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,admission.errored.elastic-stores,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.errored.elastic-stores.bulk-low-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,admission.errored.elastic-stores.bulk-normal-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
-STORAGE,admission.errored.elastic-stores.ttl-low-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,admission.errored.kv,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,admission.errored.kv-stores,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,admission.errored.kv-stores.high-pri,Number of requests not admitted due to error,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
@@ -106,8 +106,8 @@ STORAGE,admission.requested.elastic-cpu,Number of requests,Requests,COUNTER,COUN
STORAGE,admission.requested.elastic-cpu.bulk-normal-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,admission.requested.elastic-cpu.normal-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,admission.requested.elastic-stores,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,admission.requested.elastic-stores.bulk-low-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,admission.requested.elastic-stores.bulk-normal-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
-STORAGE,admission.requested.elastic-stores.ttl-low-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,admission.requested.kv,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,admission.requested.kv-stores,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,admission.requested.kv-stores.high-pri,Number of requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
@@ -133,8 +133,8 @@ STORAGE,admission.wait_durations.elastic-cpu,Wait time durations for requests th
STORAGE,admission.wait_durations.elastic-cpu.bulk-normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
STORAGE,admission.wait_durations.elastic-cpu.normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
STORAGE,admission.wait_durations.elastic-stores,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.elastic-stores.bulk-low-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
STORAGE,admission.wait_durations.elastic-stores.bulk-normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
-STORAGE,admission.wait_durations.elastic-stores.ttl-low-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
STORAGE,admission.wait_durations.kv,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
STORAGE,admission.wait_durations.kv-stores,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
STORAGE,admission.wait_durations.kv-stores.high-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
@@ -143,6 +143,7 @@ STORAGE,admission.wait_durations.kv-stores.normal-pri,Wait time durations for re
STORAGE,admission.wait_durations.kv.high-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
STORAGE,admission.wait_durations.kv.locking-normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
STORAGE,admission.wait_durations.kv.normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,admission.wait_durations.snapshot_ingest,Wait time for snapshot ingest requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
STORAGE,admission.wait_durations.sql-kv-response,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
STORAGE,admission.wait_durations.sql-kv-response.locking-normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
STORAGE,admission.wait_durations.sql-kv-response.normal-pri,Wait time durations for requests that waited,Wait time Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
@@ -159,8 +160,8 @@ STORAGE,admission.wait_queue_length.elastic-cpu,Length of wait queue,Requests,GA
STORAGE,admission.wait_queue_length.elastic-cpu.bulk-normal-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
STORAGE,admission.wait_queue_length.elastic-cpu.normal-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
STORAGE,admission.wait_queue_length.elastic-stores,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,admission.wait_queue_length.elastic-stores.bulk-low-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
STORAGE,admission.wait_queue_length.elastic-stores.bulk-normal-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
-STORAGE,admission.wait_queue_length.elastic-stores.ttl-low-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
STORAGE,admission.wait_queue_length.kv,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
STORAGE,admission.wait_queue_length.kv-stores,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
STORAGE,admission.wait_queue_length.kv-stores.high-pri,Length of wait queue,Requests,GAUGE,COUNT,AVG,NONE
@@ -283,6 +284,8 @@ STORAGE,kv.prober.write.quarantine.oldest_duration,The duration that the oldest
STORAGE,kv.rangefeed.budget_allocation_blocked,Number of times RangeFeed waited for budget availability,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,kv.rangefeed.budget_allocation_failed,Number of times RangeFeed failed because memory budget was exceeded,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,kv.rangefeed.catchup_scan_nanos,Time spent in RangeFeed catchup scan,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kv.rangefeed.closed_timestamp.slow_ranges,Number of ranges that have a closed timestamp lagging by more than 5x target lag. Periodically re-calculated,Ranges,GAUGE,COUNT,AVG,NONE
+STORAGE,kv.rangefeed.closed_timestamp_max_behind_nanos,Largest latency between realtime and replica max closed timestamp for replicas that have active rangefeeds on them,Nanoseconds,GAUGE,NANOSECONDS,AVG,NONE
STORAGE,kv.rangefeed.mem_shared,Memory usage by rangefeeds,Memory,GAUGE,BYTES,AVG,NONE
STORAGE,kv.rangefeed.mem_system,Memory usage by rangefeeds on system ranges,Memory,GAUGE,BYTES,AVG,NONE
STORAGE,kv.rangefeed.processors_goroutine,Number of active RangeFeed processors using goroutines,Processors,GAUGE,COUNT,AVG,NONE
@@ -358,9 +361,57 @@ STORAGE,kvadmission.flow_token_dispatch.pending_nodes,Number of nodes pending fl
STORAGE,kvadmission.flow_token_dispatch.pending_regular,Number of pending regular flow token dispatches,Dispatches,GAUGE,COUNT,AVG,NONE
STORAGE,kvadmission.flow_token_dispatch.remote_elastic,Number of remote elastic flow token dispatches,Dispatches,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,kvadmission.flow_token_dispatch.remote_regular,Number of remote regular flow token dispatches,Dispatches,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.eval_wait.elastic.duration,Latency histogram for time elastic requests spent waiting for flow tokens to evaluate,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,kvflowcontrol.eval_wait.elastic.requests.admitted,Number of elastic requests admitted by the flow controller,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.eval_wait.elastic.requests.bypassed,Number of waiting elastic requests that bypassed the flow controller due to the evaluating replica not being the leader,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.eval_wait.elastic.requests.errored,Number of elastic requests that errored out while waiting for flow tokens,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.eval_wait.elastic.requests.waiting,Number of elastic requests waiting for flow tokens,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.eval_wait.regular.duration,Latency histogram for time regular requests spent waiting for flow tokens to evaluate,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,kvflowcontrol.eval_wait.regular.requests.admitted,Number of regular requests admitted by the flow controller,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.eval_wait.regular.requests.bypassed,Number of waiting regular requests that bypassed the flow controller due to the evaluating replica not being the leader,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.eval_wait.regular.requests.errored,Number of regular requests that errored out while waiting for flow tokens,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.eval_wait.regular.requests.waiting,Number of regular requests waiting for flow tokens,Requests,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.range_controller.count,"Gauge of range flow controllers currently open; this should align with the number of leaders",Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.send_queue.bytes,"Byte size of all raft entries queued for sending to followers, waiting on available elastic send tokens",Bytes,GAUGE,BYTES,AVG,NONE
+STORAGE,kvflowcontrol.send_queue.count,"Count of all raft entries queued for sending to followers, waiting on available elastic send tokens",Entries,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.send_queue.prevent.count,Counter of replication streams that were prevented from forming a send queue,Preventions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.send_queue.scheduled.deducted_bytes,Gauge of elastic send token bytes already deducted by replication streams waiting on the scheduler,Bytes,GAUGE,BYTES,AVG,NONE
+STORAGE,kvflowcontrol.send_queue.scheduled.force_flush,Gauge of replication streams scheduled to force flush their send queue,Scheduled force flushes,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.streams.eval.elastic.blocked_count,Number of eval replication streams with no flow tokens available for elastic requests,Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.streams.eval.elastic.total_count,Total number of eval replication streams for elastic requests,Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.streams.eval.regular.blocked_count,Number of eval replication streams with no flow tokens available for regular requests,Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.streams.eval.regular.total_count,Total number of eval replication streams for regular requests,Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.streams.send.elastic.blocked_count,Number of send replication streams with no flow tokens available for elastic requests,Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.streams.send.elastic.total_count,Total number of send replication streams for elastic requests,Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.streams.send.regular.blocked_count,Number of send replication streams with no flow tokens available for regular requests,Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.streams.send.regular.total_count,Total number of send replication streams for regular requests,Count,GAUGE,COUNT,AVG,NONE
+STORAGE,kvflowcontrol.tokens.eval.elastic.available,"Flow eval tokens available for elastic requests, across all replication streams",Bytes,GAUGE,BYTES,AVG,NONE
+STORAGE,kvflowcontrol.tokens.eval.elastic.deducted,"Flow eval tokens deducted by elastic requests, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.eval.elastic.returned,"Flow eval tokens returned by elastic requests, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.eval.elastic.returned.disconnect,"Flow eval tokens returned early by elastic requests due to disconnects, across all replication streams; this is a subset of returned tokens",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.eval.elastic.unaccounted,"Flow eval tokens returned by elastic requests that were unaccounted for, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.eval.regular.available,"Flow eval tokens available for regular requests, across all replication streams",Bytes,GAUGE,BYTES,AVG,NONE
+STORAGE,kvflowcontrol.tokens.eval.regular.deducted,"Flow eval tokens deducted by regular requests, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.eval.regular.returned,"Flow eval tokens returned by regular requests, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.eval.regular.returned.disconnect,"Flow eval tokens returned early by regular requests due to disconnects, across all replication streams; this is a subset of returned tokens",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.eval.regular.unaccounted,"Flow eval tokens returned by regular requests that were unaccounted for, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.elastic.available,"Flow send tokens available for elastic requests, across all replication streams",Bytes,GAUGE,BYTES,AVG,NONE
+STORAGE,kvflowcontrol.tokens.send.elastic.deducted,"Flow send tokens deducted by elastic requests, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.elastic.deducted.force_flush_send_queue,"Flow send tokens deducted by elastic requests, across all replication streams due to force flushing the stream's send queue",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.elastic.deducted.prevent_send_queue,"Flow send tokens deducted by elastic requests, across all replication streams to prevent forming a send queue",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.elastic.returned,"Flow send tokens returned by elastic requests, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.elastic.returned.disconnect,"Flow send tokens returned early by elastic requests due to disconnects, across all replication streams; this is a subset of returned tokens",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.elastic.unaccounted,"Flow send tokens returned by elastic requests that were unaccounted for, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.regular.available,"Flow send tokens available for regular requests, across all replication streams",Bytes,GAUGE,BYTES,AVG,NONE
+STORAGE,kvflowcontrol.tokens.send.regular.deducted,"Flow send tokens deducted by regular requests, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.regular.deducted.prevent_send_queue,"Flow send tokens deducted by regular requests, across all replication streams to prevent forming a send queue",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.regular.returned,"Flow send tokens returned by regular requests, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.regular.returned.disconnect,"Flow send tokens returned early by regular requests due to disconnects, across all replication streams; this is a subset of returned tokens",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,kvflowcontrol.tokens.send.regular.unaccounted,"Flow send tokens returned by regular requests that were unaccounted for, across all replication streams",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,leases.epoch,Number of replica leaseholders using epoch-based leases,Replicas,GAUGE,COUNT,AVG,NONE
STORAGE,leases.error,Number of failed lease requests,Lease Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,leases.expiration,Number of replica leaseholders using expiration-based leases,Replicas,GAUGE,COUNT,AVG,NONE
+STORAGE,leases.leader,Number of replica leaseholders using leader leases,Replicas,GAUGE,COUNT,AVG,NONE
STORAGE,leases.liveness,Number of replica leaseholders for the liveness range(s),Replicas,GAUGE,COUNT,AVG,NONE
STORAGE,leases.preferences.less-preferred,Number of replica leaseholders which satisfy a lease preference which is not the most preferred,Replicas,GAUGE,COUNT,AVG,NONE
STORAGE,leases.preferences.violating,Number of replica leaseholders which violate lease preferences,Replicas,GAUGE,COUNT,AVG,NONE
@@ -581,8 +632,11 @@ STORAGE,raft.rcvd.cross_zone.bytes,"Number of bytes received by this store for c
regions. To ensure accurate monitoring of transmitted data, it is important
to set up a consistent locality configuration across nodes. Note that this
does not include raft snapshot received.",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,raft.rcvd.defortifyleader,Number of MsgDeFortifyLeader messages received by this store,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,raft.rcvd.dropped,Number of incoming Raft messages dropped (due to queue length or size),Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,raft.rcvd.dropped_bytes,Bytes of dropped incoming Raft messages,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,raft.rcvd.fortifyleader,Number of MsgFortifyLeader messages received by this store,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,raft.rcvd.fortifyleaderresp,Number of MsgFortifyLeaderResp messages received by this store,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,raft.rcvd.heartbeat,"Number of (coalesced, if enabled) MsgHeartbeat messages received by this store",Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,raft.rcvd.heartbeatresp,"Number of (coalesced, if enabled) MsgHeartbeatResp messages received by this store",Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,raft.rcvd.prevote,Number of MsgPreVote messages received by this store,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
@@ -768,6 +822,7 @@ STORAGE,range.splits,Number of range splits,Range Ops,COUNTER,COUNT,AVG,NON_NEGA
STORAGE,rangekeybytes,Number of bytes taken up by range keys (e.g. MVCC range tombstones),Storage,GAUGE,BYTES,AVG,NONE
STORAGE,rangekeycount,Count of all range keys (e.g. MVCC range tombstones),Keys,GAUGE,COUNT,AVG,NONE
STORAGE,ranges,Number of ranges,Ranges,GAUGE,COUNT,AVG,NONE
+STORAGE,ranges.decommissioning,Number of ranges with at least one replica on a decommissioning node,Ranges,GAUGE,COUNT,AVG,NONE
STORAGE,ranges.overreplicated,Number of ranges with more live replicas than the replication target,Ranges,GAUGE,COUNT,AVG,NONE
STORAGE,ranges.unavailable,Number of ranges with fewer live replicas than needed for quorum,Ranges,GAUGE,COUNT,AVG,NONE
STORAGE,ranges.underreplicated,Number of ranges with fewer live replicas than the replication target,Ranges,GAUGE,COUNT,AVG,NONE
@@ -911,9 +966,6 @@ STORAGE,storage.batch-commit.wal-queue-wait.duration,"Cumulative time spent wait
STORAGE,storage.batch-commit.wal-rotation.duration,"Cumulative time spent waiting for WAL rotation, for batch commit. See storage.AggregatedBatchCommitStats for details.",Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,storage.block-load.active,The number of sstable block loads currently in progress,Block loads,GAUGE,COUNT,AVG,NONE
STORAGE,storage.block-load.queued,The cumulative number of SSTable block loads that were delayed because too many loads were active (see also: `storage.block_load.node_max_active`),Block loads,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
-STORAGE,storage.category-pebble-manifest.bytes-written,Bytes written to disk,Bytes,GAUGE,BYTES,AVG,NONE
-STORAGE,storage.category-pebble-wal.bytes-written,Bytes written to disk,Bytes,GAUGE,BYTES,AVG,NONE
-STORAGE,storage.category-unspecified.bytes-written,Bytes written to disk,Bytes,GAUGE,BYTES,AVG,NONE
STORAGE,storage.checkpoints,"The number of checkpoint directories found in storage.
This is the number of directories found in the auxiliary/checkpoints directory.
@@ -926,6 +978,8 @@ A likely cause of having a checkpoint is that one of the ranges in this store
had inconsistent data among its replicas. Such checkpoint directories are
located in auxiliary/checkpoints/rN_at_M, where N is the range ID, and M is the
Raft applied index at which this checkpoint was taken.",Directories,GAUGE,COUNT,AVG,NONE
+STORAGE,storage.compactions.cancelled.bytes,Cumulative volume of data written to sstables during compactions that were ultimately cancelled due to a conflicting operation.,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storage.compactions.cancelled.count,Cumulative count of compactions that were cancelled before they completed due to a conflicting operation.,Compactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,storage.compactions.duration,"Cumulative sum of all compaction durations.
The rate of this value provides the effective compaction concurrency of a store,
@@ -970,6 +1024,24 @@ STORAGE,storage.ingest.count,Number of successful ingestions performed,Events,GA
STORAGE,storage.iterator.block-load.bytes,Bytes loaded by storage engine iterators (possibly cached). See storage.AggregatedIteratorStats for details.,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,storage.iterator.block-load.cached-bytes,Bytes loaded by storage engine iterators from the block cache. See storage.AggregatedIteratorStats for details.,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,storage.iterator.block-load.read-duration,Cumulative time storage engine iterators spent loading blocks from durable storage. See storage.AggregatedIteratorStats for details.,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storage.iterator.category-batch-eval.block-load.bytes,Bytes loaded by storage sstable iterators (possibly cached).,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storage.iterator.category-batch-eval.block-load.cached-bytes,Bytes loaded by storage sstable iterators from the block cache,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storage.iterator.category-batch-eval.block-load.latency-sum,"Cumulative latency for loading bytes not in the block cache, by storage sstable iterators",Latency,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storage.iterator.category-crdb-unknown.block-load.bytes,Bytes loaded by storage sstable iterators (possibly cached).,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storage.iterator.category-crdb-unknown.block-load.cached-bytes,Bytes loaded by storage sstable iterators from the block cache,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storage.iterator.category-crdb-unknown.block-load.latency-sum,"Cumulative latency for loading bytes not in the block cache, by storage sstable iterators",Latency,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storage.iterator.category-mvcc-gc.block-load.bytes,Bytes loaded by storage sstable iterators (possibly cached).,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storage.iterator.category-mvcc-gc.block-load.cached-bytes,Bytes loaded by storage sstable iterators from the block cache,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storage.iterator.category-mvcc-gc.block-load.latency-sum,"Cumulative latency for loading bytes not in the block cache, by storage sstable iterators",Latency,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storage.iterator.category-rangefeed.block-load.bytes,Bytes loaded by storage sstable iterators (possibly cached).,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storage.iterator.category-rangefeed.block-load.cached-bytes,Bytes loaded by storage sstable iterators from the block cache,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storage.iterator.category-rangefeed.block-load.latency-sum,"Cumulative latency for loading bytes not in the block cache, by storage sstable iterators",Latency,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storage.iterator.category-replication.block-load.bytes,Bytes loaded by storage sstable iterators (possibly cached).,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storage.iterator.category-replication.block-load.cached-bytes,Bytes loaded by storage sstable iterators from the block cache,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storage.iterator.category-replication.block-load.latency-sum,"Cumulative latency for loading bytes not in the block cache, by storage sstable iterators",Latency,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storage.iterator.category-scan-regular.block-load.bytes,Bytes loaded by storage sstable iterators (possibly cached).,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storage.iterator.category-scan-regular.block-load.cached-bytes,Bytes loaded by storage sstable iterators from the block cache,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storage.iterator.category-scan-regular.block-load.latency-sum,"Cumulative latency for loading bytes not in the block cache, by storage sstable iterators",Latency,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,storage.iterator.external.seeks,Cumulative count of seeks performed on storage engine iterators. See storage.AggregatedIteratorStats for details.,Iterator Ops,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,storage.iterator.external.steps,Cumulative count of steps performed on storage engine iterators. See storage.AggregatedIteratorStats for details.,Iterator Ops,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,storage.iterator.internal.seeks,"Cumulative count of seeks performed internally within storage engine iterators.
@@ -1040,8 +1112,30 @@ STORAGE,storage.wal.failover.secondary.duration,Cumulative time spent writing to
STORAGE,storage.wal.failover.switch.count,Count of the number of times WAL writing has switched from primary to secondary and vice versa.,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,storage.wal.failover.write_and_sync.latency,The observed latency for writing and syncing to the write ahead log. Only populated when WAL failover is configured,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
STORAGE,storage.wal.fsync.latency,The write ahead log fsync latency,Fsync Latency,HISTOGRAM,NANOSECONDS,AVG,NONE
+STORAGE,storage.write-amplification,"Running measure of write-amplification.
+
+Write amplification is measured as the ratio of bytes written to disk relative to the logical
+bytes present in sstables, over the life of a store. This metric is a running average
+of the write amplification as tracked by Pebble.",Ratio of bytes written to logical bytes,GAUGE,COUNT,AVG,NONE
STORAGE,storage.write-stall-nanos,Total write stall duration in nanos,Nanoseconds,GAUGE,NANOSECONDS,AVG,NONE
STORAGE,storage.write-stalls,Number of instances of intentional write stalls to backpressure incoming writes,Events,GAUGE,COUNT,AVG,NONE
+STORAGE,storeliveness.heartbeat.failures,Number of Store Liveness heartbeats that failed to be sent out by the Store Liveness Support Manager,Heartbeats,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storeliveness.heartbeat.successes,Number of Store Liveness heartbeats sent out by the Store Liveness Support Manager,Heartbeats,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storeliveness.message_handle.failures,Number of incoming Store Liveness messages that failed to be handled by the Store Liveness Support Manager,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storeliveness.message_handle.successes,Number of incoming Store Liveness messages handled by the Store Liveness Support Manager,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storeliveness.support_for.stores,Number of stores that the Store Liveness Support Manager has ever provided support for,Stores,GAUGE,COUNT,AVG,NONE
+STORAGE,storeliveness.support_from.stores,Number of stores that the Store Liveness Support Manager is requesting support from by sending heartbeats,Stores,GAUGE,COUNT,AVG,NONE
+STORAGE,storeliveness.support_withdraw.failures,Number of times the Store Liveness Support Manager has encountered an error while withdrawing support for another store,Support Withdrawals,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storeliveness.support_withdraw.successes,Number of times the Store Liveness Support Manager has successfully withdrawn support for another store,Support Withdrawals,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storeliveness.transport.receive-queue-bytes,Total byte size of pending incoming messages from Store Liveness Transport,Bytes,GAUGE,BYTES,AVG,NONE
+STORAGE,storeliveness.transport.receive-queue-size,Number of pending incoming messages from the Store Liveness Transport,Messages,GAUGE,COUNT,AVG,NONE
+STORAGE,storeliveness.transport.receive_dropped,Number of Store Liveness messages dropped by the Store Liveness Transport on the receiver side,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storeliveness.transport.received,Number of Store Liveness messages received by the Store Liveness Transport,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storeliveness.transport.send-queue-bytes,Total byte size of pending outgoing messages in all Store Liveness Transport per-store send queues,Bytes,GAUGE,BYTES,AVG,NONE
+STORAGE,storeliveness.transport.send-queue-idle,Number of Store Liveness Transport per-store send queues that have become idle due to no recently-sent messages,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storeliveness.transport.send-queue-size,Number of pending outgoing messages in all Store Liveness Transport per-store send queues,Messages,GAUGE,COUNT,AVG,NONE
+STORAGE,storeliveness.transport.send_dropped,Number of Store Liveness messages dropped by the Store Liveness Transport on the sender side,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+STORAGE,storeliveness.transport.sent,Number of Store Liveness messages sent by the Store Liveness Transport,Messages,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
STORAGE,sysbytes,Number of bytes in system KV pairs,Storage,GAUGE,BYTES,AVG,NONE
STORAGE,syscount,Count of system KV pairs,Keys,GAUGE,COUNT,AVG,NONE
STORAGE,tenant.consumption.cross_region_network_ru,Total number of RUs charged for cross-region network traffic,Request Units,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
@@ -1087,6 +1181,12 @@ STORAGE,txnwaitqueue.query.wait_time,Histogram of durations spent in queue by qu
STORAGE,txnwaitqueue.query.waiting,Number of transaction status queries waiting for an updated transaction record,Waiting Queries,GAUGE,COUNT,AVG,NONE
STORAGE,valbytes,Number of bytes taken up by values,Storage,GAUGE,BYTES,AVG,NONE
STORAGE,valcount,Count of all values,MVCC Values,GAUGE,COUNT,AVG,NONE
+APPLICATION,auth.cert.conn.latency,Latency to establish and authenticate a SQL connection using a certificate,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
+APPLICATION,auth.gss.conn.latency,Latency to establish and authenticate a SQL connection using GSS,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
+APPLICATION,auth.jwt.conn.latency,Latency to establish and authenticate a SQL connection using a JWT token,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
+APPLICATION,auth.ldap.conn.latency,Latency to establish and authenticate a SQL connection using LDAP,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
+APPLICATION,auth.password.conn.latency,Latency to establish and authenticate a SQL connection using a password,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
+APPLICATION,auth.scram.conn.latency,Latency to establish and authenticate a SQL connection using SCRAM,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
APPLICATION,backup.last-failed-time.kms-inaccessible,The unix timestamp of the most recent failure of backup due to errKMSInaccessible by a backup specified as maintaining this metric,Jobs,GAUGE,TIMESTAMP_SEC,AVG,NONE
APPLICATION,changefeed.admit_latency,"Event admission latency: a difference between event MVCC timestamp and the time it was admitted into changefeed pipeline; Note: this metric includes the time spent waiting until event can be processed due to backpressure or time spent resolving schema descriptors. Also note, this metric excludes latency during backfill",Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
APPLICATION,changefeed.aggregator_progress,The earliest timestamp up to which any aggregator is guaranteed to have emitted all values for,Unix Timestamp Nanoseconds,GAUGE,TIMESTAMP_NS,AVG,NONE
@@ -1094,15 +1194,35 @@ APPLICATION,changefeed.backfill_count,Number of changefeeds currently executing
APPLICATION,changefeed.backfill_pending_ranges,Number of ranges in an ongoing backfill that are yet to be fully emitted,Count,GAUGE,COUNT,AVG,NONE
APPLICATION,changefeed.batch_reduction_count,Number of times a changefeed aggregator node attempted to reduce the size of message batches it emitted to the sink,Batch Size Reductions,GAUGE,COUNT,AVG,NONE
APPLICATION,changefeed.buffer_entries.allocated_mem,Current quota pool memory allocation,Bytes,GAUGE,BYTES,AVG,NONE
+APPLICATION,changefeed.buffer_entries.allocated_mem.aggregator,Current quota pool memory allocation - between the kvfeed and the sink,Bytes,GAUGE,BYTES,AVG,NONE
+APPLICATION,changefeed.buffer_entries.allocated_mem.rangefeed,Current quota pool memory allocation - between the rangefeed and the kvfeed,Bytes,GAUGE,BYTES,AVG,NONE
APPLICATION,changefeed.buffer_entries.flush,Number of flush elements added to the buffer,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,changefeed.buffer_entries.flush.aggregator,Number of flush elements added to the buffer - between the kvfeed and the sink,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,changefeed.buffer_entries.flush.rangefeed,Number of flush elements added to the buffer - between the rangefeed and the kvfeed,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,changefeed.buffer_entries.in,Total entries entering the buffer between raft and changefeed sinks,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,changefeed.buffer_entries.in.aggregator,Total entries entering the buffer between raft and changefeed sinks - between the kvfeed and the sink,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,changefeed.buffer_entries.in.rangefeed,Total entries entering the buffer between raft and changefeed sinks - between the rangefeed and the kvfeed,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,changefeed.buffer_entries.kv,Number of kv elements added to the buffer,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,changefeed.buffer_entries.kv.aggregator,Number of kv elements added to the buffer - between the kvfeed and the sink,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,changefeed.buffer_entries.kv.rangefeed,Number of kv elements added to the buffer - between the rangefeed and the kvfeed,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,changefeed.buffer_entries.out,Total entries leaving the buffer between raft and changefeed sinks,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,changefeed.buffer_entries.out.aggregator,Total entries leaving the buffer between raft and changefeed sinks - between the kvfeed and the sink,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,changefeed.buffer_entries.out.rangefeed,Total entries leaving the buffer between raft and changefeed sinks - between the rangefeed and the kvfeed,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,changefeed.buffer_entries.released,"Total entries processed, emitted and acknowledged by the sinks",Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,changefeed.buffer_entries.released.aggregator,"Total entries processed, emitted and acknowledged by the sinks - between the kvfeed and the sink",Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,changefeed.buffer_entries.released.rangefeed,"Total entries processed, emitted and acknowledged by the sinks - between the rangefeed and the kvfeed",Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,changefeed.buffer_entries.resolved,Number of resolved elements added to the buffer,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,changefeed.buffer_entries.resolved.aggregator,Number of resolved elements added to the buffer - between the kvfeed and the sink,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,changefeed.buffer_entries.resolved.rangefeed,Number of resolved elements added to the buffer - between the rangefeed and the kvfeed,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,changefeed.buffer_entries_mem.acquired,Total amount of memory acquired for entries as they enter the system,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,changefeed.buffer_entries_mem.acquired.aggregator,Total amount of memory acquired for entries as they enter the system - between the kvfeed and the sink,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,changefeed.buffer_entries_mem.acquired.rangefeed,Total amount of memory acquired for entries as they enter the system - between the rangefeed and the kvfeed,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,changefeed.buffer_entries_mem.released,Total amount of memory released by the entries after they have been emitted,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,changefeed.buffer_entries_mem.released.aggregator,Total amount of memory released by the entries after they have been emitted - between the kvfeed and the sink,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,changefeed.buffer_entries_mem.released.rangefeed,Total amount of memory released by the entries after they have been emitted - between the rangefeed and the kvfeed,Entries,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,changefeed.buffer_pushback_nanos,Total time spent waiting while the buffer was full,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,changefeed.buffer_pushback_nanos.aggregator,Total time spent waiting while the buffer was full - between the kvfeed and the sink,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,changefeed.buffer_pushback_nanos.rangefeed,Total time spent waiting while the buffer was full - between the rangefeed and the kvfeed,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,changefeed.bytes.messages_pushback_nanos,Total time spent throttled for bytes quota,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,changefeed.checkpoint_hist_nanos,Time spent checkpointing changefeed progress,Changefeeds,HISTOGRAM,NANOSECONDS,AVG,NONE
APPLICATION,changefeed.checkpoint_progress,The earliest timestamp of any changefeed's persisted checkpoint (values prior to this timestamp will never need to be re-emitted),Unix Timestamp Nanoseconds,GAUGE,TIMESTAMP_NS,AVG,NONE
@@ -1126,6 +1246,8 @@ APPLICATION,changefeed.lagging_ranges,The number of ranges considered to be lagg
APPLICATION,changefeed.max_behind_nanos,(Deprecated in favor of checkpoint_progress) The most any changefeed's persisted checkpoint is behind the present,Nanoseconds,GAUGE,NANOSECONDS,AVG,NONE
APPLICATION,changefeed.message_size_hist,Message size histogram,Bytes,HISTOGRAM,BYTES,AVG,NONE
APPLICATION,changefeed.messages.messages_pushback_nanos,Total time spent throttled for messages quota,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,changefeed.network.bytes_in,The number of bytes received from the network by changefeeds,Bytes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,changefeed.network.bytes_out,The number of bytes sent over the network by changefeeds,Bytes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,changefeed.nprocs_consume_event_nanos,Total time spent waiting to add an event to the parallel consumer,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
APPLICATION,changefeed.nprocs_flush_nanos,Total time spent idle waiting for the parallel consumer to flush,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
APPLICATION,changefeed.nprocs_in_flight_count,Number of buffered events in the parallel consumer,Count of Events,GAUGE,COUNT,AVG,NONE
@@ -1140,8 +1262,18 @@ APPLICATION,changefeed.schema_registry.retry_count,Number of retries encountered
APPLICATION,changefeed.schemafeed.table_history_scans,The number of table history scans during polling,Counts,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,changefeed.schemafeed.table_metadata_nanos,Time blocked while verifying table metadata histories,Nanoseconds,COUNTER,NANOSECONDS,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,changefeed.sink_batch_hist_nanos,Time spent batched in the sink buffer before being flushed and acknowledged,Changefeeds,HISTOGRAM,NANOSECONDS,AVG,NONE
+APPLICATION,changefeed.sink_errors,Number of changefeed errors caused by the sink,Count,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,changefeed.sink_io_inflight,The number of keys currently inflight as IO requests being sent to the sink,Messages,GAUGE,COUNT,AVG,NONE
APPLICATION,changefeed.size_based_flushes,Total size based flushes across all feeds,Flushes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,changefeed.stage.checkpoint_job_progress.latency,Latency of the changefeed stage: checkpointing job progress,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE
+APPLICATION,changefeed.stage.downstream_client_send.latency,Latency of the changefeed stage: flushing messages from the sink's client to its downstream. This includes sends that failed for most but not all sinks.,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE
+APPLICATION,changefeed.stage.emit_row.latency,Latency of the changefeed stage: emitting row to sink,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE
+APPLICATION,changefeed.stage.encode.latency,Latency of the changefeed stage: encoding data,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE
+APPLICATION,changefeed.stage.kv_feed_buffer.latency,Latency of the changefeed stage: waiting to buffer kv events,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE
+APPLICATION,changefeed.stage.kv_feed_wait_for_table_event.latency,Latency of the changefeed stage: waiting for a table schema event to join to the kv event,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE
+APPLICATION,changefeed.stage.rangefeed_buffer_checkpoint.latency,Latency of the changefeed stage: buffering rangefeed checkpoint events,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE
+APPLICATION,changefeed.stage.rangefeed_buffer_value.latency,Latency of the changefeed stage: buffering rangefeed value events,Latency,HISTOGRAM,NANOSECONDS,AVG,NONE
+APPLICATION,changefeed.total_ranges,The total number of ranges being watched by changefeed aggregators,Ranges,GAUGE,COUNT,AVG,NONE
APPLICATION,changefeed.usage.error_count,Count of errors encountered while generating usage metrics for changefeeds,Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,changefeed.usage.query_duration,Time taken by the queries used to generate usage metrics for changefeeds,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
APPLICATION,changefeed.usage.table_bytes,Aggregated number of bytes of data per table watched by changefeeds,Storage,GAUGE,BYTES,AVG,NONE
@@ -1178,6 +1310,7 @@ APPLICATION,distsender.batch_responses.cross_zone.bytes,"Total byte count of rep
monitor the data transmitted.",Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,distsender.batch_responses.replica_addressed.bytes,Total byte count of replica-addressed batch responses received,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,distsender.batches,Number of batches processed,Batches,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,distsender.batches.async.in_progress,Number of partial batches currently being executed asynchronously,Partial Batches,GAUGE,COUNT,AVG,NONE
APPLICATION,distsender.batches.async.sent,Number of partial batches sent asynchronously,Partial Batches,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,distsender.batches.async.throttled,Number of partial batches not sent asynchronously due to throttling,Partial Batches,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,distsender.batches.partial,Number of partial batches processed after being divided on range boundaries,Partial Batches,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
@@ -1803,6 +1936,18 @@ APPLICATION,jobs.auto_config_task.protected_record_count,Number of protected tim
APPLICATION,jobs.auto_config_task.resume_completed,Number of auto_config_task jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,jobs.auto_config_task.resume_failed,Number of auto_config_task jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,jobs.auto_config_task.resume_retry_error,Number of auto_config_task jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,jobs.auto_create_partial_stats.currently_idle,Number of auto_create_partial_stats jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE
+APPLICATION,jobs.auto_create_partial_stats.currently_paused,Number of auto_create_partial_stats jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE
+APPLICATION,jobs.auto_create_partial_stats.currently_running,Number of auto_create_partial_stats jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE
+APPLICATION,jobs.auto_create_partial_stats.expired_pts_records,Number of expired protected timestamp records owned by auto_create_partial_stats jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,jobs.auto_create_partial_stats.fail_or_cancel_completed,Number of auto_create_partial_stats jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,jobs.auto_create_partial_stats.fail_or_cancel_failed,Number of auto_create_partial_stats jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,jobs.auto_create_partial_stats.fail_or_cancel_retry_error,Number of auto_create_partial_stats jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,jobs.auto_create_partial_stats.protected_age_sec,The age of the oldest PTS record protected by auto_create_partial_stats jobs,seconds,GAUGE,SECONDS,AVG,NONE
+APPLICATION,jobs.auto_create_partial_stats.protected_record_count,Number of protected timestamp records held by auto_create_partial_stats jobs,records,GAUGE,COUNT,AVG,NONE
+APPLICATION,jobs.auto_create_partial_stats.resume_completed,Number of auto_create_partial_stats jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,jobs.auto_create_partial_stats.resume_failed,Number of auto_create_partial_stats jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,jobs.auto_create_partial_stats.resume_retry_error,Number of auto_create_partial_stats jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,jobs.auto_create_stats.currently_idle,Number of auto_create_stats jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE
APPLICATION,jobs.auto_create_stats.currently_paused,Number of auto_create_stats jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE
APPLICATION,jobs.auto_create_stats.currently_running,Number of auto_create_stats jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE
@@ -2091,6 +2236,18 @@ APPLICATION,jobs.schema_change_gc.protected_record_count,Number of protected tim
APPLICATION,jobs.schema_change_gc.resume_completed,Number of schema_change_gc jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,jobs.schema_change_gc.resume_failed,Number of schema_change_gc jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,jobs.schema_change_gc.resume_retry_error,Number of schema_change_gc jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,jobs.standby_read_ts_poller.currently_idle,Number of standby_read_ts_poller jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE
+APPLICATION,jobs.standby_read_ts_poller.currently_paused,Number of standby_read_ts_poller jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE
+APPLICATION,jobs.standby_read_ts_poller.currently_running,Number of standby_read_ts_poller jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE
+APPLICATION,jobs.standby_read_ts_poller.expired_pts_records,Number of expired protected timestamp records owned by standby_read_ts_poller jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,jobs.standby_read_ts_poller.fail_or_cancel_completed,Number of standby_read_ts_poller jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,jobs.standby_read_ts_poller.fail_or_cancel_failed,Number of standby_read_ts_poller jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,jobs.standby_read_ts_poller.fail_or_cancel_retry_error,Number of standby_read_ts_poller jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,jobs.standby_read_ts_poller.protected_age_sec,The age of the oldest PTS record protected by standby_read_ts_poller jobs,seconds,GAUGE,SECONDS,AVG,NONE
+APPLICATION,jobs.standby_read_ts_poller.protected_record_count,Number of protected timestamp records held by standby_read_ts_poller jobs,records,GAUGE,COUNT,AVG,NONE
+APPLICATION,jobs.standby_read_ts_poller.resume_completed,Number of standby_read_ts_poller jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,jobs.standby_read_ts_poller.resume_failed,Number of standby_read_ts_poller jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,jobs.standby_read_ts_poller.resume_retry_error,Number of standby_read_ts_poller jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,jobs.typedesc_schema_change.currently_idle,Number of typedesc_schema_change jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE
APPLICATION,jobs.typedesc_schema_change.currently_paused,Number of typedesc_schema_change jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE
APPLICATION,jobs.typedesc_schema_change.currently_running,Number of typedesc_schema_change jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE
@@ -2103,46 +2260,63 @@ APPLICATION,jobs.typedesc_schema_change.protected_record_count,Number of protect
APPLICATION,jobs.typedesc_schema_change.resume_completed,Number of typedesc_schema_change jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,jobs.typedesc_schema_change.resume_failed,Number of typedesc_schema_change jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,jobs.typedesc_schema_change.resume_retry_error,Number of typedesc_schema_change jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,jobs.update_table_metadata_cache.currently_idle,Number of update_table_metadata_cache jobs currently considered Idle and can be freely shut down,jobs,GAUGE,COUNT,AVG,NONE
+APPLICATION,jobs.update_table_metadata_cache.currently_paused,Number of update_table_metadata_cache jobs currently considered Paused,jobs,GAUGE,COUNT,AVG,NONE
+APPLICATION,jobs.update_table_metadata_cache.currently_running,Number of update_table_metadata_cache jobs currently running in Resume or OnFailOrCancel state,jobs,GAUGE,COUNT,AVG,NONE
+APPLICATION,jobs.update_table_metadata_cache.expired_pts_records,Number of expired protected timestamp records owned by update_table_metadata_cache jobs,records,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,jobs.update_table_metadata_cache.fail_or_cancel_completed,Number of update_table_metadata_cache jobs which successfully completed their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,jobs.update_table_metadata_cache.fail_or_cancel_failed,Number of update_table_metadata_cache jobs which failed with a non-retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,jobs.update_table_metadata_cache.fail_or_cancel_retry_error,Number of update_table_metadata_cache jobs which failed with a retriable error on their failure or cancelation process,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,jobs.update_table_metadata_cache.protected_age_sec,The age of the oldest PTS record protected by update_table_metadata_cache jobs,seconds,GAUGE,SECONDS,AVG,NONE
+APPLICATION,jobs.update_table_metadata_cache.protected_record_count,Number of protected timestamp records held by update_table_metadata_cache jobs,records,GAUGE,COUNT,AVG,NONE
+APPLICATION,jobs.update_table_metadata_cache.resume_completed,Number of update_table_metadata_cache jobs which successfully resumed to completion,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,jobs.update_table_metadata_cache.resume_failed,Number of update_table_metadata_cache jobs which failed with a non-retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,jobs.update_table_metadata_cache.resume_retry_error,Number of update_table_metadata_cache jobs which failed with a retriable error,jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,kv.protectedts.reconciliation.errors,number of errors encountered during reconciliation runs on this node,Count,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,kv.protectedts.reconciliation.num_runs,number of successful reconciliation runs on this node,Count,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,kv.protectedts.reconciliation.records_processed,number of records processed without error during reconciliation on this node,Count,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,kv.protectedts.reconciliation.records_removed,number of records removed during reconciliation runs on this node,Count,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,logical_replication.batch_hist_nanos,Time spent flushing a batch,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
+APPLICATION,logical_replication.catchup_ranges,Source side ranges undergoing catch up scans (inaccurate with multiple LDR jobs),Ranges,GAUGE,COUNT,AVG,NONE
+APPLICATION,logical_replication.catchup_ranges_by_label,Source side ranges undergoing catch up scans,Ranges,GAUGE,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,logical_replication.checkpoint_events_ingested,Checkpoint events ingested by all replication jobs,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,logical_replication.commit_latency,"Event commit latency: a difference between event MVCC timestamp and the time it was flushed into disk. If we batch events, then the difference between the oldest event in the batch and flush is recorded",Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
APPLICATION,logical_replication.events_dlqed,Row update events sent to DLQ,Failures,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,logical_replication.events_dlqed_age,Row update events sent to DLQ due to reaching the maximum time allowed in the retry queue,Failures,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,logical_replication.events_dlqed_by_label,Row update events sent to DLQ by label,Failures,GAUGE,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,logical_replication.events_dlqed_errtype,Row update events sent to DLQ due to an error not considered retryable,Failures,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,logical_replication.events_dlqed_space,Row update events sent to DLQ due to capacity of the retry queue,Failures,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,logical_replication.events_ingested,Events ingested by all replication jobs,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,logical_replication.events_ingested_by_label,Events ingested by all replication jobs by label,Events,GAUGE,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,logical_replication.events_initial_failure,Failed attempts to apply an incoming row update,Failures,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,logical_replication.events_initial_success,Successful applications of an incoming row update,Failures,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,logical_replication.events_retry_failure,Failed re-attempts to apply a row update,Failures,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,logical_replication.events_retry_success,Row update events applied after one or more retries,Failures,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
-APPLICATION,logical_replication.flush_bytes,Number of bytes in a given flush,Logical bytes,HISTOGRAM,BYTES,AVG,NONE
-APPLICATION,logical_replication.flush_hist_nanos,Time spent flushing messages across all replication streams,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
-APPLICATION,logical_replication.flush_row_count,Number of rows in a given flush,Rows,HISTOGRAM,COUNT,AVG,NONE
+APPLICATION,logical_replication.kv.update_too_old,Total number of updates that were not applied because they were too old,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,logical_replication.kv.value_refreshes,Total number of batches that refreshed the previous value,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,logical_replication.logical_bytes,Logical bytes (sum of keys + values) received by all replication jobs,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
-APPLICATION,logical_replication.optimistic_insert_conflict_count,Total number of times the optimistic insert encountered a conflict,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,logical_replication.replan_count,Total number of dist sql replanning events,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,logical_replication.replicated_time_by_label,Replicated time of the logical replication stream by label,Seconds,GAUGE,SECONDS,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,logical_replication.replicated_time_seconds,The replicated time of the logical replication stream in seconds since the unix epoch.,Seconds,GAUGE,SECONDS,AVG,NONE
APPLICATION,logical_replication.retry_queue_bytes,The replicated time of the logical replication stream in seconds since the unix epoch.,Bytes,GAUGE,BYTES,AVG,NONE
APPLICATION,logical_replication.retry_queue_events,The replicated time of the logical replication stream in seconds since the unix epoch.,Events,GAUGE,COUNT,AVG,NONE
+APPLICATION,logical_replication.scanning_ranges,Source side ranges undergoing an initial scan (inaccurate with multiple LDR jobs),Ranges,GAUGE,COUNT,AVG,NONE
+APPLICATION,logical_replication.scanning_ranges_by_label,Source side ranges undergoing an initial scan,Ranges,GAUGE,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,obs.tablemetadata.update_job.duration,Time spent running the update table metadata job.,Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
+APPLICATION,obs.tablemetadata.update_job.errors,The total number of errors that have been emitted from the update table metadata job.,Errors,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,obs.tablemetadata.update_job.runs,The total number of runs of the update table metadata job.,Executions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,obs.tablemetadata.update_job.table_updates,The total number of rows that have been updated in system.table_metadata,Rows Updated,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,physical_replication.admit_latency,Event admission latency: a difference between event MVCC timestamp and the time it was admitted into ingestion processor,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
APPLICATION,physical_replication.commit_latency,"Event commit latency: a difference between event MVCC timestamp and the time it was flushed into disk. If we batch events, then the difference between the oldest event in the batch and flush is recorded",Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
-APPLICATION,physical_replication.cutover_progress,The number of ranges left to revert in order to complete an inflight cutover,Ranges,GAUGE,COUNT,AVG,NONE
APPLICATION,physical_replication.distsql_replan_count,Total number of dist sql replanning events,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
-APPLICATION,physical_replication.earliest_data_checkpoint_span,The earliest timestamp of the last checkpoint forwarded by an ingestion data processor,Timestamp,GAUGE,TIMESTAMP_NS,AVG,NONE
APPLICATION,physical_replication.events_ingested,Events ingested by all replication jobs,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,physical_replication.failover_progress,The number of ranges left to revert in order to complete an inflight cutover,Ranges,GAUGE,COUNT,AVG,NONE
APPLICATION,physical_replication.flush_hist_nanos,Time spent flushing messages across all replication streams,Nanoseconds,HISTOGRAM,NANOSECONDS,AVG,NONE
APPLICATION,physical_replication.flushes,Total flushes across all replication jobs,Flushes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
-APPLICATION,physical_replication.job_progress_updates,Total number of updates to the ingestion job progress,Job Updates,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
-APPLICATION,physical_replication.latest_data_checkpoint_span,The latest timestamp of the last checkpoint forwarded by an ingestion data processor,Timestamp,GAUGE,TIMESTAMP_NS,AVG,NONE
APPLICATION,physical_replication.logical_bytes,Logical bytes (sum of keys + values) ingested by all replication jobs,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,physical_replication.replicated_time_seconds,The replicated time of the physical replication stream in seconds since the unix epoch.,Seconds,GAUGE,SECONDS,AVG,NONE
APPLICATION,physical_replication.resolved_events_ingested,Resolved events ingested by all replication jobs,Events,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,physical_replication.running,Number of currently running replication streams,Replication Streams,GAUGE,COUNT,AVG,NONE
-APPLICATION,physical_replication.sst_bytes,SST bytes (compressed) sent to KV by all replication jobs,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,requests.slow.distsender,"Number of range-bound RPCs currently stuck or retrying for a long time.
Note that this is not a good signal for KV health. The remote side of the
@@ -2159,6 +2333,8 @@ metrics such as packet loss, retransmits, etc, to conclusively diagnose network
issues. Heartbeats are not very frequent (~seconds), so they may not capture
rare or short-lived degradations.
",Round-trip time,HISTOGRAM,NANOSECONDS,AVG,NONE
+APPLICATION,rpc.client.bytes.egress,Counter of TCP bytes sent via gRPC on connections we initiated.,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,rpc.client.bytes.ingress,Counter of TCP bytes received via gRPC on connections we initiated.,Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,rpc.connection.avg_round_trip_latency,"Sum of exponentially weighted moving average of round-trip latencies, as measured through a gRPC RPC.
Dividing this Gauge by rpc.connection.healthy gives an approximation of average
@@ -2170,6 +2346,12 @@ these provide per-peer moving averages.
This metric does not track failed connection. A failed connection's contribution
is reset to zero.
",Latency,GAUGE,NANOSECONDS,AVG,NONE
+APPLICATION,rpc.connection.connected,"Counter of TCP level connected connections.
+
+This metric is the number of gRPC connections from the TCP level. Unlike rpc.connection.healthy
+this metric does not take into account whether the application has been able to heartbeat
+over this connection.
+",Connections,GAUGE,COUNT,AVG,NONE
APPLICATION,rpc.connection.failures,"Counter of failed connections.
This includes both the event in which a healthy connection terminates as well as
@@ -2215,6 +2397,7 @@ APPLICATION,schedules.scheduled-schema-telemetry-executor.succeeded,Number of sc
APPLICATION,schedules.scheduled-sql-stats-compaction-executor.failed,Number of scheduled-sql-stats-compaction-executor jobs failed,Jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,schedules.scheduled-sql-stats-compaction-executor.started,Number of scheduled-sql-stats-compaction-executor jobs started,Jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,schedules.scheduled-sql-stats-compaction-executor.succeeded,Number of scheduled-sql-stats-compaction-executor jobs succeeded,Jobs,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,server.http.request.duration.nanos,Duration of an HTTP request in nanoseconds.,Duration,HISTOGRAM,NANOSECONDS,AVG,NONE
APPLICATION,sql.bytesin,Number of SQL bytes received,SQL Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,sql.bytesout,Number of SQL bytes sent,SQL Bytes,COUNTER,BYTES,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,sql.conn.failures,Number of SQL connection failures,Connections,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
@@ -2234,6 +2417,10 @@ APPLICATION,sql.copy.nonatomic.started.count,Number of non-atomic COPY SQL state
APPLICATION,sql.copy.nonatomic.started.count.internal,Number of non-atomic COPY SQL statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,sql.copy.started.count,Number of COPY SQL statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,sql.copy.started.count.internal,Number of COPY SQL statements started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,sql.crud_query.count,"Number of SQL SELECT, INSERT, UPDATE, DELETE statements successfully executed",SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,sql.crud_query.count.internal,"Number of SQL SELECT, INSERT, UPDATE, DELETE statements successfully executed (internal queries)",SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,sql.crud_query.started.count,"Number of SQL SELECT, INSERT, UPDATE, DELETE statements started",SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,sql.crud_query.started.count.internal,"Number of SQL SELECT, INSERT, UPDATE, DELETE statements started (internal queries)",SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,sql.ddl.count,Number of SQL DDL statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,sql.ddl.count.internal,Number of SQL DDL statements successfully executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,sql.ddl.started.count,Number of SQL DDL statements started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
@@ -2349,10 +2536,10 @@ APPLICATION,sql.pre_serve.conn.failures,Number of SQL connection failures prior
APPLICATION,sql.pre_serve.mem.cur,Current memory usage for SQL connections prior to routing the connection to the target SQL server,Memory,GAUGE,BYTES,AVG,NONE
APPLICATION,sql.pre_serve.mem.max,Memory usage for SQL connections prior to routing the connection to the target SQL server,Memory,HISTOGRAM,BYTES,AVG,NONE
APPLICATION,sql.pre_serve.new_conns,Number of SQL connections created prior to routing the connection to the target SQL server,Connections,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
-APPLICATION,sql.query.count,Number of SQL queries executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
-APPLICATION,sql.query.count.internal,Number of SQL queries executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
-APPLICATION,sql.query.started.count,Number of SQL queries started,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
-APPLICATION,sql.query.started.count.internal,Number of SQL queries started (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,sql.query.count,"Number of SQL operations started including queries, and transaction control statements",SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,sql.query.count.internal,"Number of SQL operations started including queries, and transaction control statements (internal queries)",SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,sql.query.started.count,"Number of SQL operations started including queries, and transaction control statements",SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,sql.query.started.count.internal,"Number of SQL operations started including queries, and transaction control statements (internal queries)",SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,sql.restart_savepoint.count,Number of `SAVEPOINT cockroach_restart` statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,sql.restart_savepoint.count.internal,Number of `SAVEPOINT cockroach_restart` statements successfully executed (internal queries),SQL Internal Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,sql.restart_savepoint.release.count,Number of `RELEASE SAVEPOINT cockroach_restart` statements successfully executed,SQL Statements,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
@@ -2450,6 +2637,7 @@ APPLICATION,tenant.sql_usage.external_io_egress_bytes,Total number of bytes writ
APPLICATION,tenant.sql_usage.external_io_ingress_bytes,Total number of bytes read from external services such as cloud storage providers,Bytes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,tenant.sql_usage.kv_request_units,RU consumption attributable to KV,Request Units,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,tenant.sql_usage.pgwire_egress_bytes,Total number of bytes transferred from a SQL pod to the client,Bytes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
+APPLICATION,tenant.sql_usage.provisioned_vcpus,Number of vcpus available to the virtual cluster,Count,GAUGE,COUNT,AVG,NONE
APPLICATION,tenant.sql_usage.read_batches,Total number of KV read batches,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,tenant.sql_usage.read_bytes,Total number of bytes read from KV,Bytes,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,tenant.sql_usage.read_requests,Total number of KV read requests,Requests,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
@@ -2485,7 +2673,6 @@ APPLICATION,txn.restarts.txnaborted,Number of restarts due to an abort by a conc
APPLICATION,txn.restarts.txnpush,Number of restarts due to a transaction push failure,Restarted Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,txn.restarts.unknown,Number of restarts due to a unknown reasons,Restarted Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,txn.restarts.writetooold,Number of restarts due to a concurrent writer committing first,Restarted Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
-APPLICATION,txn.restarts.writetoooldmulti,Number of restarts due to multiple concurrent writers committing first,Restarted Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,txn.rollbacks.async.failed,Number of KV transaction that failed to send abort asynchronously which is not always retried,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
APPLICATION,txn.rollbacks.failed,Number of KV transaction that failed to send final abort,KV Transactions,COUNTER,COUNT,AVG,NON_NEGATIVE_DERIVATIVE
SERVER,build.timestamp,Build information,Build Time,GAUGE,TIMESTAMP_SEC,AVG,NONE
diff --git a/src/current/_data/releases.yml b/src/current/_data/releases.yml
index 4d4b4a42498..4a5163b430c 100644
--- a/src/current/_data/releases.yml
+++ b/src/current/_data/releases.yml
@@ -7301,7 +7301,7 @@
- release_name: v24.2.5
major_version: v24.2
- release_date: '2024-11-14'
+ release_date: '2024-11-18'
release_type: Production
go_version: go1.22.8
sha: beddec16a5a438d66bd3f5f4202085cf21c0973c
@@ -7329,7 +7329,7 @@
- release_name: v23.1.29
major_version: v23.1
- release_date: '2024-11-14'
+ release_date: '2024-11-18'
release_type: Production
go_version: go1.22.8
sha: cb0646dc26328963ca2c89d7a8d105d166836d6d
@@ -7356,7 +7356,7 @@
- release_name: v24.1.7
major_version: v24.1
- release_date: '2024-11-14'
+ release_date: '2024-11-18'
release_type: Production
go_version: go1.22.8
sha: 3613a619e327fb412ac62036ce2c7060c25e48db
@@ -7410,7 +7410,7 @@
- release_name: v23.2.16
major_version: v23.2
- release_date: '2024-11-16'
+ release_date: '2024-11-18'
release_type: Production
go_version: go1.22.8
sha: 720d706874e2424b32c7c9347f6d52140f83beee
@@ -7460,4 +7460,165 @@
docker_arm_experimental: false
docker_arm_limited_access: false
source: true
- previous_release: v24.3.0-beta.3
\ No newline at end of file
+ previous_release: v24.3.0-beta.3
+
+- release_name: v24.2.6
+ major_version: v24.2
+ release_date: '2024-12-12'
+ release_type: Production
+ go_version: go1.22.8
+ sha: 74f60821a82c570ab58b8d7716c1d74b85e87df0
+ has_sql_only: true
+ has_sha256sum: true
+ mac:
+ mac_arm: true
+ mac_arm_experimental: true
+ mac_arm_limited_access: false
+ windows: true
+ linux:
+ linux_arm: true
+ linux_arm_experimental: false
+ linux_arm_limited_access: false
+ linux_intel_fips: true
+ linux_arm_fips: false
+ docker:
+ docker_image: cockroachdb/cockroach
+ docker_arm: true
+ docker_arm_experimental: false
+ docker_arm_limited_access: false
+ source: true
+ previous_release: v24.2.5
+
+- release_name: v24.1.8
+ major_version: v24.1
+ release_date: '2024-12-12'
+ release_type: Production
+ go_version: go1.22.8
+ sha: 05e5df3c04a577643a59b78f574b0084832e67e6
+ has_sql_only: true
+ has_sha256sum: true
+ mac:
+ mac_arm: true
+ mac_arm_experimental: true
+ mac_arm_limited_access: false
+ windows: true
+ linux:
+ linux_arm: true
+ linux_arm_experimental: false
+ linux_arm_limited_access: false
+ linux_intel_fips: true
+ linux_arm_fips: false
+ docker:
+ docker_image: cockroachdb/cockroach
+ docker_arm: true
+ docker_arm_experimental: false
+ docker_arm_limited_access: false
+ source: true
+ previous_release: v24.1.7
+
+- release_name: v23.1.30
+ major_version: v23.1
+ release_date: '2024-12-12'
+ release_type: Production
+ go_version: go1.22.8
+ sha: 2ee70ad63870ea3b44cb8593d0ac84d379547fc6
+ has_sql_only: true
+ has_sha256sum: true
+ mac:
+ mac_arm: true
+ mac_arm_experimental: true
+ mac_arm_limited_access: true
+ windows: true
+ linux:
+ linux_arm: true
+ linux_arm_experimental: false
+ linux_arm_limited_access: false
+ linux_intel_fips: true
+ linux_arm_fips: false
+ docker:
+ docker_image: cockroachdb/cockroach
+ docker_arm: true
+ docker_arm_experimental: false
+ docker_arm_limited_access: false
+ source: true
+ previous_release: v23.1.29
+
+- release_name: v23.2.17
+ major_version: v23.2
+ release_date: '2024-12-12'
+ release_type: Production
+ go_version: go1.22.8
+ sha: 1a524cd3ced15426926fb4898162ab43c7484d92
+ has_sql_only: true
+ has_sha256sum: true
+ mac:
+ mac_arm: true
+ mac_arm_experimental: true
+ mac_arm_limited_access: false
+ windows: true
+ linux:
+ linux_arm: true
+ linux_arm_experimental: false
+ linux_arm_limited_access: false
+ linux_intel_fips: true
+ linux_arm_fips: false
+ docker:
+ docker_image: cockroachdb/cockroach
+ docker_arm: true
+ docker_arm_experimental: false
+ docker_arm_limited_access: false
+ source: true
+ previous_release: v23.2.16
+
+- release_name: v24.3.1
+ major_version: v24.3
+ release_date: '2024-12-12'
+ release_type: Production
+ go_version: go1.22.8
+ sha: f9b40bb0eb7ed20acd68f24afc4c402614a9274b
+ has_sql_only: true
+ has_sha256sum: true
+ mac:
+ mac_arm: true
+ mac_arm_experimental: true
+ mac_arm_limited_access: false
+ windows: true
+ linux:
+ linux_arm: true
+ linux_arm_experimental: false
+ linux_arm_limited_access: false
+ linux_intel_fips: true
+ linux_arm_fips: false
+ docker:
+ docker_image: cockroachdb/cockroach
+ docker_arm: true
+ docker_arm_experimental: false
+ docker_arm_limited_access: false
+ source: true
+ previous_release: v24.3.0
+
+- release_name: v25.1.0-alpha.1
+ major_version: v25.1
+ release_date: '2024-12-19'
+ release_type: Testing
+ go_version: go1.22.8
+ sha: d18eb683b2759fd8814dacf0baa913f596074a17
+ has_sql_only: true
+ has_sha256sum: true
+ mac:
+ mac_arm: true
+ mac_arm_experimental: true
+ mac_arm_limited_access: false
+ windows: true
+ linux:
+ linux_arm: true
+ linux_arm_experimental: false
+ linux_arm_limited_access: false
+ linux_intel_fips: true
+ linux_arm_fips: false
+ docker:
+ docker_image: cockroachdb/cockroach-unstable
+ docker_arm: true
+ docker_arm_experimental: false
+ docker_arm_limited_access: false
+ source: true
\ No newline at end of file
diff --git a/src/current/_data/versions.csv b/src/current/_data/versions.csv
index e0330551c68..fa9c41a5bf4 100644
--- a/src/current/_data/versions.csv
+++ b/src/current/_data/versions.csv
@@ -16,3 +16,4 @@ v23.2,2024-02-05,2025-02-05,2025-08-05,23.2.6,23.2.7,2024-07-08,2025-07-08,2026-
v24.1,2024-05-20,2025-05-20,2025-11-20,24.1.5,24.1.6,2024-10-21,2025-10-21,2026-10-21,v23.2,release-24.1
v24.2,2024-08-12,2025-02-12,N/A,N/A,N/A,N/A,N/A,N/A,v24.1,release-24.2
v24.3,2024-11-18,2025-11-18,2026-05-18,N/A,N/A,N/A,N/A,N/A,v24.2,release-24.3
+v25.1,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,v24.3,release-24.3
diff --git a/src/current/_includes/cockroachcloud/use-cockroachcloud-instead.md b/src/current/_includes/cockroachcloud/use-cockroachcloud-instead.md
index 778cab6d340..7f6d96404a3 100644
--- a/src/current/_includes/cockroachcloud/use-cockroachcloud-instead.md
+++ b/src/current/_includes/cockroachcloud/use-cockroachcloud-instead.md
@@ -1,3 +1,3 @@
{{site.data.alerts.callout_success}}
-To deploy a free CockroachDB {{ site.data.products.cloud }} cluster instead of running CockroachDB yourself, see the Quickstart.
+To try CockroachDB {{ site.data.products.cloud }} instead of running CockroachDB yourself, refer to the Cloud Quickstart.
{{site.data.alerts.end}}
diff --git a/src/current/_includes/common/cdc-cloud-costs-link.md b/src/current/_includes/common/cdc-cloud-costs-link.md
new file mode 100644
index 00000000000..5f0b169325d
--- /dev/null
+++ b/src/current/_includes/common/cdc-cloud-costs-link.md
@@ -0,0 +1 @@
+If you're using a CockroachDB {{ site.data.products.cloud }} cluster, refer to [Understand CockroachDB Cloud Costs]({% link cockroachcloud/costs.md %}) for details on how CDC is billed monthly based on usage.
\ No newline at end of file
diff --git a/src/current/_includes/common/define-watched-cdc.md b/src/current/_includes/common/define-watched-cdc.md
new file mode 100644
index 00000000000..20fb749c5e2
--- /dev/null
+++ b/src/current/_includes/common/define-watched-cdc.md
@@ -0,0 +1 @@
+The main feature of {% if page.name == "change-data-capture-overview.md" %} CockroachDB CDC {% else %} [CockroachDB CDC]({% link {{site.current_cloud_version}}/change-data-capture-overview.md %}) {% endif %} is the _changefeed_, which targets an allowlist of tables, known as _watched tables_.
diff --git a/src/current/_includes/common/telemetry-table.html b/src/current/_includes/common/telemetry-table.html
index 557d766c574..7fb0bad4d11 100644
--- a/src/current/_includes/common/telemetry-table.html
+++ b/src/current/_includes/common/telemetry-table.html
@@ -83,9 +83,7 @@
Altered cluster settings
-
- Cluster settings that have been altered from their default values.
- Note that any values of type 'string' are redacted, such as the cluster organization.
+Cluster settings that have been altered from their default values. Note that every value of type 'string' is redacted, such as the cluster organization, as well as sensitive settings, such as OIDC authentication.
diff --git a/src/current/_includes/common/upgrade/prepare-to-upgrade-self-hosted.md b/src/current/_includes/common/upgrade/prepare-to-upgrade-self-hosted.md
index 5875f314b32..afd1fb5bf02 100644
--- a/src/current/_includes/common/upgrade/prepare-to-upgrade-self-hosted.md
+++ b/src/current/_includes/common/upgrade/prepare-to-upgrade-self-hosted.md
@@ -1,11 +1,9 @@
Before beginning a major-version or patch upgrade:
1. Verify the overall health of your cluster using the [DB Console]({% link {{ page.version.version }}/ui-cluster-overview-page.md %}):
- - Under **Node Status**, make sure all nodes that should be live are listed as such. If any nodes are unexpectedly listed as `SUSPECT` or `DEAD`, identify why the nodes are offline and either restart them or [decommission]({% link {{ page.version.version }}/node-shutdown.md %}?filters=decommission#remove-nodes) them before beginning your upgrade. If there are `DEAD` and non-decommissioned nodes in your cluster, the upgrade cannot be finalized.
-
- If any node is not fully decommissioned, try the following:
- 1. First, reissue the [decommission command]({% link {{ page.version.version }}/node-shutdown.md %}?filters=decommission#decommission-the-node). The second command typically succeeds within a few minutes.
- 1. If the second decommission command does not succeed, [recommission]({% link {{ page.version.version }}/node-shutdown.md %}?filters=decommission#recommission-nodes) and then decommission it again. Before continuing the upgrade, the node must be marked as `decommissioned`.
+ - Under **Node Status**, make sure all nodes that should be live are listed as such. If any nodes are unexpectedly listed as `SUSPECT` or `DEAD`, identify why the nodes are offline and either restart them or [decommission]({% link {{ page.version.version }}/node-shutdown.md %}?filters=decommission#remove-nodes) them before beginning your upgrade. If there are `DEAD` and non-decommissioned nodes in your cluster, the upgrade cannot be finalized. If any node is not fully decommissioned, try the following:
+ 1. First, reissue the [decommission command]({% link {{ page.version.version }}/node-shutdown.md %}?filters=decommission#decommission-the-node). The second command typically succeeds within a few minutes.
+ 1. If the second decommission command does not succeed, [recommission]({% link {{ page.version.version }}/node-shutdown.md %}?filters=decommission#recommission-nodes) and then decommission it again. Before continuing the upgrade, the node must be marked as `decommissioned`.
- Under **Replication Status**, make sure there are `0` under-replicated and unavailable ranges. Otherwise, performing a rolling upgrade increases the risk that ranges will lose a majority of their replicas and cause cluster unavailability. Therefore, it's important to identify and resolve the cause of range under-replication and/or unavailability before beginning your upgrade.
- In the **Node List**, make sure all nodes are on the same version. Upgrade them to the cluster's current version before continuing. If any nodes are behind, this also indicates that the previous major-version upgrade may not be finalized.
- In the **Metrics** dashboards, make sure [CPU]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#cpu-usage), [memory]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#database-memory-usage), and [storage]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#storage-capacity) capacity are within acceptable values for each node. Nodes must be able to tolerate some increase in case the new version uses more resources for your workload. If any of these metrics is above healthy limits, consider [adding nodes]({% link {{ page.version.version }}/cockroach-start.md %}) to your cluster before beginning your upgrade.
diff --git a/src/current/_includes/releases/v23.1/v23.1.29.md b/src/current/_includes/releases/v23.1/v23.1.29.md
index 00a857cf34c..a69b6cd42d9 100644
--- a/src/current/_includes/releases/v23.1/v23.1.29.md
+++ b/src/current/_includes/releases/v23.1/v23.1.29.md
@@ -1,6 +1,6 @@
## v23.1.29
-Release Date: November 14, 2024
+Release Date: November 18, 2024
{% include releases/new-release-downloads-docker-image.md release=include.release %}
diff --git a/src/current/_includes/releases/v23.1/v23.1.30.md b/src/current/_includes/releases/v23.1/v23.1.30.md
new file mode 100644
index 00000000000..84ca49d3100
--- /dev/null
+++ b/src/current/_includes/releases/v23.1/v23.1.30.md
@@ -0,0 +1,35 @@
+## v23.1.30
+
+Release Date: December 12, 2024
+
+{% include releases/new-release-downloads-docker-image.md release=include.release %}
+
+<h3 id="v23-1-30-security-updates">Security updates</h3>
+
+- All cluster settings that accept strings are now fully redacted when transmitted as part of CockroachDB's diagnostics telemetry. The transmitted payload includes a record of modified cluster settings and their values when they are not strings. If you previously applied the mitigations in [Technical Advisory 133479]({% link advisories/a133479.md %}), you can safely turn on diagnostic reporting via the `diagnostics.reporting.enabled` cluster setting without leaking sensitive cluster settings values. [#134063][#134063]
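+
+    For example, re-enabling diagnostic reporting after removing the mitigations is a single cluster setting change (an illustrative statement, to be run by a user with the admin role):
+
+    ~~~ sql
+    -- Diagnostic reporting can be turned back on once this patch release is running.
+    SET CLUSTER SETTING diagnostics.reporting.enabled = true;
+    ~~~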
+
+<h3 id="v23-1-30-general-changes">General changes</h3>
+
+- `COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING` is no longer mentioned in the `cockroach demo` command. [#134083][#134083]
+
+<h3 id="v23-1-30-bug-fixes">Bug fixes</h3>
+
+- Fixed a bug where CockroachDB could encounter an internal error `interface conversion: coldata.Column is` in an edge case. The bug was present in v22.2.13+, v23.1.9+, and v23.2+. [#133758][#133758]
+- Fixed a bug that caused incorrect `NOT NULL` constraint violation errors on `UPSERT` and `INSERT ... ON CONFLICT ... DO UPDATE` statements when those statements updated an existing row and a subset of columns that did not include a `NOT NULL` column of the table. This bug had been present since at least v20.1.0. [#133824][#133824]
+- Fixed an unhandled error that could occur when using `REVOKE ... ON SEQUENCE ... FROM user` on an object that is not a sequence. [#133706][#133706]
+- Addressed a panic that could occur inside `CREATE TABLE AS` if sequence builtin expressions had invalid function overloads. [#133866][#133866]
+- Previously, when executing queries with index or lookup joins where ordering needed to be maintained, CockroachDB's behavior could lead to increased query latency, potentially by several orders of magnitude. This bug was introduced in v22.2 and is now fixed. [#134363][#134363]
+- Fixed a bug where `DROP CASCADE` would occasionally panic with an `un-dropped backref` message on partitioned tables. [#134524][#134524]
+- Reduced the duration of partitions in the gossip network when a node crashes. This eliminates false positives in the `ranges.unavailable` metric. [#134603][#134603]
+
+[#133706]: https://github.com/cockroachdb/cockroach/pull/133706
+[#133758]: https://github.com/cockroachdb/cockroach/pull/133758
+[#133824]: https://github.com/cockroachdb/cockroach/pull/133824
+[#133866]: https://github.com/cockroachdb/cockroach/pull/133866
+[#134063]: https://github.com/cockroachdb/cockroach/pull/134063
+[#134083]: https://github.com/cockroachdb/cockroach/pull/134083
+[#134363]: https://github.com/cockroachdb/cockroach/pull/134363
+[#134524]: https://github.com/cockroachdb/cockroach/pull/134524
+[#134603]: https://github.com/cockroachdb/cockroach/pull/134603
+[#134649]: https://github.com/cockroachdb/cockroach/pull/134649
+[154e9f0e0]: https://github.com/cockroachdb/cockroach/commit/154e9f0e0
diff --git a/src/current/_includes/releases/v23.2/v23.2.13.md b/src/current/_includes/releases/v23.2/v23.2.13.md
index 28ce188820c..26f4a561790 100644
--- a/src/current/_includes/releases/v23.2/v23.2.13.md
+++ b/src/current/_includes/releases/v23.2/v23.2.13.md
@@ -16,7 +16,7 @@ Release Date: October 17, 2024
[#130664][#130664]
-- The new [metric]({% link v23.2/metrics.md %}) `changefeed.total_ranges` allows observation of the number of changes that are watched by a changefeed aggregator. It uses the same polling interval as `changefeed.lagging_ranges`, which is controlled by the changefeed option `lagging_ranges_polling_interval`. [#130984][#130984]
+- The new [metric]({% link v23.2/metrics.md %}) `changefeed.total_ranges` allows observation of the number of ranges that are watched by a changefeed aggregator. It uses the same polling interval as `changefeed.lagging_ranges`, which is controlled by the changefeed option `lagging_ranges_polling_interval`. [#130984][#130984]
- The following groups of [metrics]({% link v23.2/metrics.md %}) and [logs]({% link v23.2/logging.md %}) have been renamed to include the buffer they are associated with. The previous metrics are still maintained for backward compatibility.
- `changefeed.buffer_entries.*`
- `changefeed.buffer_entries_mem.*`
@@ -81,6 +81,7 @@ Release Date: October 17, 2024
[#130664]: https://github.com/cockroachdb/cockroach/pull/130664
[#130790]: https://github.com/cockroachdb/cockroach/pull/130790
[#130919]: https://github.com/cockroachdb/cockroach/pull/130919
+[#130984]: https://github.com/cockroachdb/cockroach/pull/130984
[#130988]: https://github.com/cockroachdb/cockroach/pull/130988
[#131065]: https://github.com/cockroachdb/cockroach/pull/131065
[#131128]: https://github.com/cockroachdb/cockroach/pull/131128
diff --git a/src/current/_includes/releases/v23.2/v23.2.16.md b/src/current/_includes/releases/v23.2/v23.2.16.md
index 5793f589357..6e205c889b3 100644
--- a/src/current/_includes/releases/v23.2/v23.2.16.md
+++ b/src/current/_includes/releases/v23.2/v23.2.16.md
@@ -1,6 +1,6 @@
## v23.2.16
-Release Date: November 15, 2024
+Release Date: November 18, 2024
{% include releases/new-release-downloads-docker-image.md release=include.release %}
diff --git a/src/current/_includes/releases/v23.2/v23.2.17.md b/src/current/_includes/releases/v23.2/v23.2.17.md
new file mode 100644
index 00000000000..29855bc2638
--- /dev/null
+++ b/src/current/_includes/releases/v23.2/v23.2.17.md
@@ -0,0 +1,63 @@
+## v23.2.17
+
+Release Date: December 12, 2024
+
+{% include releases/new-release-downloads-docker-image.md release=include.release %}
+
+<h3 id="v23-2-17-security-updates">Security updates</h3>
+
+- All cluster settings that accept strings are now fully redacted when transmitted as part of Cockroach Labs' diagnostics telemetry. The payload includes a record of modified cluster settings and their values when they are not strings. If you previously applied the mitigations in Technical Advisory 133479, you can safely set the value of cluster setting `server.redact_sensitive_settings.enabled` to `false` and turn on diagnostic reporting via the `diagnostics.reporting.enabled` cluster setting without leaking sensitive cluster setting values. [#134015][#134015]
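+
+    For example, both settings mentioned above can be changed from a SQL shell (an illustrative sketch, to be run by a user with the admin role):
+
+    ~~~ sql
+    -- The mitigation setting can be relaxed once the cluster runs v23.2.17 or later.
+    SET CLUSTER SETTING server.redact_sensitive_settings.enabled = false;
+    -- Diagnostic reporting can then be turned back on.
+    SET CLUSTER SETTING diagnostics.reporting.enabled = true;
+    ~~~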
+
+<h3 id="v23-2-17-general-changes">General changes</h3>
+
+- `COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING` is no longer mentioned in the `cockroach demo` command. [#134085][#134085]
+
+<h3 id="v23-2-17-enterprise-edition-changes">Enterprise edition changes</h3>
+
+- Added `system.users` to the list of system tables that changefeeds protect with protected timestamps (PTS). This table is required for CDC queries. [#134233][#134233]
+
+<h3 id="v23-2-17-operational-changes">Operational changes</h3>
+
+- Added a new cluster setting `ui.database_locality_metadata.enabled`, which allows operators to disable loading extended database and table region information in the DB Console's Databases and Table Details pages. Loading this information can cause significant CPU load on large clusters with many ranges. Versions of these pages from v24.3 onward do not have this problem. If you require this data, you can use the `SHOW RANGES FROM {DATABASE | TABLE}` statement in SQL to compute it on demand. [#134093][#134093]
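+
+    For example, the following illustrative statements disable the extended metadata loading and compute the same region information on demand (`defaultdb` is a placeholder; substitute your own database):
+
+    ~~~ sql
+    -- Stop the DB Console pages from loading extended region metadata.
+    SET CLUSTER SETTING ui.database_locality_metadata.enabled = false;
+    -- Compute equivalent range information on demand for one database.
+    SHOW RANGES FROM DATABASE defaultdb;
+    ~~~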
+
+<h3 id="v23-2-17-bug-fixes">Bug fixes</h3>
+
+- Previously, CockroachDB could encounter an internal error of the form `interface conversion: coldata.Column is` in an edge case. This is now fixed. The bug was present in versions v22.2.13 and later, v23.1.9 and later, and v23.2 and later. [#133759][#133759]
+- Fixed a bug that caused incorrect `NOT NULL` constraint violation errors on `UPSERT` and `INSERT ... ON CONFLICT ... DO UPDATE` statements when those statements updated an existing row and a subset of columns that did not include a `NOT NULL` column of the table. This bug had been present since at least v20.1.0. [#133823][#133823]
+- Fixed an unhandled error that could occur when using `REVOKE ... ON SEQUENCE ... FROM user` on an object that was not a sequence. [#133707][#133707]
+- Addressed a panic that could occur inside `CREATE TABLE AS` if sequence builtin expressions had invalid function overloads. [#133867][#133867]
+- String constants can now be compared against collated strings. [#134114][#134114]
+- Previously, when executing queries with index or lookup joins when the ordering needed to be maintained, CockroachDB in some cases could get into a pathological state which would lead to increased query latency, possibly by several orders of magnitude. This bug was introduced in v22.2 and is now fixed. [#134364][#134364]
+- Addressed a bug with `DROP CASCADE` that would occasionally panic with an `un-dropped backref` message on partitioned tables. [#134523][#134523]
+- Reduced the duration of partitions in the gossip network when a node crashes. This eliminates false positives in the `ranges.unavailable` metric. [#134602][#134602]
+- An error message is no longer returned when a non-admin user runs `DROP ROLE IF EXISTS` on a user that does not exist. [#134967][#134967]
+- Fixed a bug that could cause incorrect query results when the optimizer planned a lookup join on an index containing a column of type `CHAR(N)`, `VARCHAR(N)`, `BIT(N)`, `VARBIT(N)`, or `DECIMAL(M, N)`, and the query held that column constant to a single value (e.g., with an equality filter). [#135113][#135113]
+- Fixed an unhandled error that would occur if `DROP SCHEMA` was executed on the `public` schema of the `system` database, or on an internal schema like `pg_catalog` or `information_schema`. [#135196][#135196]
+- Fixed a bug that caused incorrect evaluation of some binary expressions involving `CHAR(N)` values and untyped string literals with trailing whitespace characters. For example, the expression `'f'::CHAR = 'f '` now correctly evaluates to `true`. [#135691][#135691]
+
+
Performance improvements
+
+- CockroachDB now avoids loading unnecessary file blocks shortly after a rebalance in a rare case. [#134526][#134526] [#135303][#135303] [#135577][#135577]
+- Reduced the write-amplification impact of rebalances by splitting snapshot sstable files into smaller ones before ingesting them into Pebble. [#134526][#134526] [#135303][#135303] [#135577][#135577]
+
+[#133707]: https://github.com/cockroachdb/cockroach/pull/133707
+[#133759]: https://github.com/cockroachdb/cockroach/pull/133759
+[#133823]: https://github.com/cockroachdb/cockroach/pull/133823
+[#133867]: https://github.com/cockroachdb/cockroach/pull/133867
+[#134015]: https://github.com/cockroachdb/cockroach/pull/134015
+[#134085]: https://github.com/cockroachdb/cockroach/pull/134085
+[#134093]: https://github.com/cockroachdb/cockroach/pull/134093
+[#134114]: https://github.com/cockroachdb/cockroach/pull/134114
+[#134233]: https://github.com/cockroachdb/cockroach/pull/134233
+[#134364]: https://github.com/cockroachdb/cockroach/pull/134364
+[#134523]: https://github.com/cockroachdb/cockroach/pull/134523
+[#134526]: https://github.com/cockroachdb/cockroach/pull/134526
+[#134602]: https://github.com/cockroachdb/cockroach/pull/134602
+[#134647]: https://github.com/cockroachdb/cockroach/pull/134647
+[#134967]: https://github.com/cockroachdb/cockroach/pull/134967
+[#135113]: https://github.com/cockroachdb/cockroach/pull/135113
+[#135196]: https://github.com/cockroachdb/cockroach/pull/135196
+[#135303]: https://github.com/cockroachdb/cockroach/pull/135303
+[#135577]: https://github.com/cockroachdb/cockroach/pull/135577
+[#135691]: https://github.com/cockroachdb/cockroach/pull/135691
+[#136006]: https://github.com/cockroachdb/cockroach/pull/136006
diff --git a/src/current/_includes/releases/v24.1/v24.1.7.md b/src/current/_includes/releases/v24.1/v24.1.7.md
index 9517b2ec9bb..f0d339f9c81 100644
--- a/src/current/_includes/releases/v24.1/v24.1.7.md
+++ b/src/current/_includes/releases/v24.1/v24.1.7.md
@@ -1,6 +1,6 @@
## v24.1.7
-Release Date: November 14, 2024
+Release Date: November 18, 2024
{% include releases/new-release-downloads-docker-image.md release=include.release %}
diff --git a/src/current/_includes/releases/v24.1/v24.1.8.md b/src/current/_includes/releases/v24.1/v24.1.8.md
new file mode 100644
index 00000000000..a6b427cf04b
--- /dev/null
+++ b/src/current/_includes/releases/v24.1/v24.1.8.md
@@ -0,0 +1,73 @@
+## v24.1.8
+
+Release Date: December 12, 2024
+
+{% include releases/new-release-downloads-docker-image.md release=include.release %}
+
+
Security updates
+
+- All cluster settings that accept strings are now fully redacted when transmitted as part of our diagnostics telemetry. The transmitted payload includes a record of modified cluster settings and their values when they are not strings. If you previously applied the mitigations in [Technical Advisory 133479]({% link advisories/a133479.md %}), you can safely set the value of the cluster setting `server.redact_sensitive_settings.enabled` to `false` and turn on diagnostic reporting via the `diagnostics.reporting.enabled` cluster setting without leaking sensitive cluster setting values. [#134016][#134016]
+
+
General changes
+
+- `COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING` is no longer mentioned in the `cockroach demo` command. [#134087][#134087]
+- Added `system.users` to the list of system tables that changefeeds protect with protected timestamps (PTS). This table is required for change data capture queries. [#134836][#134836]
+- Added changefeed support for the `mvcc_timestamp` option with the `avro` format. If both options are specified, the `avro` schema includes an `mvcc_timestamp` metadata field and emits the row's MVCC timestamp with the row data. [#136482][#136482]
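+
+A minimal sketch of the `avro` plus `mvcc_timestamp` combination described above; the table name, Kafka sink, and schema registry URL are placeholders:
+
+~~~ sql
+-- Emit each row's MVCC timestamp as Avro metadata alongside the row data.
+CREATE CHANGEFEED FOR TABLE orders
+  INTO 'kafka://kafka.example.com:9092'
+  WITH format = 'avro',
+       confluent_schema_registry = 'http://registry.example.com:8081',
+       mvcc_timestamp;
+~~~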
+
+
Operational changes
+
+- To prevent unnecessary queuing in admission control CPU queues, the `goschedstats.always_use_short_sample_period.enabled` setting should be set to `true` for any production cluster (see the sketch after this list). [#133583][#133583]
+- A new cluster setting `ui.database_locality_metadata.enabled`, when set to `false`, disables loading database and table region information in the DB Console Database and Table pages. This information can cause significant CPU load on large clusters with many ranges. The Database and Table pages in v24.3 onwards do not have this problem. If you require this data, use the `SHOW RANGES FROM {DATABASE|TABLE}` SQL statement to compute this information on demand. [#134094][#134094]
+- The row-level TTL job will now periodically log progress by showing the number of table spans that have been processed so far. [#135179][#135179]
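+
+A minimal sketch of the recommendation in the first item of this list:
+
+~~~ sql
+SET CLUSTER SETTING goschedstats.always_use_short_sample_period.enabled = true;
+~~~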
+
+
Bug fixes
+
+- Fixed a bug where CockroachDB could encounter an internal error `interface conversion: coldata.Column is` in an edge case. The bug was present in v22.2.13+, v23.1.9+, and v23.2+. [#133760][#133760]
+- Fixed an unhandled error that could occur when using `REVOKE ... ON SEQUENCE ... FROM user` on an object that is not a sequence. [#133708][#133708]
+- Fixed a bug that caused incorrect `NOT NULL` constraint violation errors on `UPSERT` and `INSERT ... ON CONFLICT ... DO UPDATE` statements when those statements updated an existing row and a subset of columns that did not include a `NOT NULL` column of the table. This bug had been present since at least v20.1.0. [#133822][#133822]
+- Addressed a panic that could occur inside `CREATE TABLE AS` if sequence builtin expressions had invalid function overloads. [#133868][#133868]
+- String constants can now be compared against collated strings. [#134105][#134105]
+- Previously, when executing queries with index or lookup joins where the ordering needed to be maintained, CockroachDB's behavior could lead to increased query latency, potentially by several orders of magnitude. This bug was introduced in v22.2 and is now fixed. [#134365][#134365]
+- Fixed a bug where `DISCARD ALL` statements were counted under the `sql.ddl.count` metric. Now these statements will be counted under the `sql.misc.count` metric. [#134508][#134508]
+- Fixed a bug where `DROP CASCADE` would occasionally panic with an `un-dropped backref` message on partitioned tables. [#134522][#134522]
+- Reduced the duration of partitions in the gossip network when a node crashes. This eliminates false positives in the `ranges.unavailable` metric. [#134601][#134601]
+- A non-admin user who runs `DROP ROLE IF EXISTS` on a user that does not exist no longer receives an error message. [#134968][#134968]
+- Fixed a bug that caused quotes around the name of a routine to be dropped when the routine was called within another routine. This could prevent the correct routine name from being resolved if the nested routine name was case-sensitive. The bug has existed since v24.1, when nested routines were introduced. [#134001][#134001]
+- Fixed a bug that could cause incorrect query results when the optimizer planned a lookup join on an index containing a column of type `CHAR(N)`, `VARCHAR(N)`, `BIT(N)`, `VARBIT(N)`, or `DECIMAL(M, N)`, and the query held that column constant to a single value (for example, with an equality filter). [#135111][#135111]
+- Fixed an unhandled error that would occur if `DROP SCHEMA` was executed on the `public` schema of the `system` database, or on an internal schema, such as `pg_catalog` or `information_schema`. [#135195][#135195]
+- Fixed a bug that caused incorrect evaluation of some binary expressions involving `CHAR(N)` values and untyped string literals with trailing whitespace characters. For example, the expression `'f'::CHAR = 'f '` now correctly evaluates to `true`. [#135690][#135690]
+- `CREATE SCHEMA` now returns the correct error if the schema name is missing. [#135926][#135926]
+
+
Performance improvements
+
+- Unnecessary block loads of SSTable files are now avoided in some rare cases after a replica rebalance. [#134541][#134541]
+
+[#133583]: https://github.com/cockroachdb/cockroach/pull/133583
+[#133708]: https://github.com/cockroachdb/cockroach/pull/133708
+[#133760]: https://github.com/cockroachdb/cockroach/pull/133760
+[#133822]: https://github.com/cockroachdb/cockroach/pull/133822
+[#133868]: https://github.com/cockroachdb/cockroach/pull/133868
+[#134001]: https://github.com/cockroachdb/cockroach/pull/134001
+[#134016]: https://github.com/cockroachdb/cockroach/pull/134016
+[#134087]: https://github.com/cockroachdb/cockroach/pull/134087
+[#134094]: https://github.com/cockroachdb/cockroach/pull/134094
+[#134100]: https://github.com/cockroachdb/cockroach/pull/134100
+[#134105]: https://github.com/cockroachdb/cockroach/pull/134105
+[#134365]: https://github.com/cockroachdb/cockroach/pull/134365
+[#134446]: https://github.com/cockroachdb/cockroach/pull/134446
+[#134508]: https://github.com/cockroachdb/cockroach/pull/134508
+[#134522]: https://github.com/cockroachdb/cockroach/pull/134522
+[#134541]: https://github.com/cockroachdb/cockroach/pull/134541
+[#134601]: https://github.com/cockroachdb/cockroach/pull/134601
+[#134648]: https://github.com/cockroachdb/cockroach/pull/134648
+[#134731]: https://github.com/cockroachdb/cockroach/pull/134731
+[#134836]: https://github.com/cockroachdb/cockroach/pull/134836
+[#134968]: https://github.com/cockroachdb/cockroach/pull/134968
+[#135111]: https://github.com/cockroachdb/cockroach/pull/135111
+[#135179]: https://github.com/cockroachdb/cockroach/pull/135179
+[#135195]: https://github.com/cockroachdb/cockroach/pull/135195
+[#135614]: https://github.com/cockroachdb/cockroach/pull/135614
+[#135690]: https://github.com/cockroachdb/cockroach/pull/135690
+[#135926]: https://github.com/cockroachdb/cockroach/pull/135926
+[#136008]: https://github.com/cockroachdb/cockroach/pull/136008
+[#136482]: https://github.com/cockroachdb/cockroach/pull/136482
diff --git a/src/current/_includes/releases/v24.2/v24.2.5.md b/src/current/_includes/releases/v24.2/v24.2.5.md
index 643d7a89006..844401e23df 100644
--- a/src/current/_includes/releases/v24.2/v24.2.5.md
+++ b/src/current/_includes/releases/v24.2/v24.2.5.md
@@ -1,6 +1,6 @@
## v24.2.5
-Release Date: November 14, 2024
+Release Date: November 18, 2024
{% include releases/new-release-downloads-docker-image.md release=include.release %}
diff --git a/src/current/_includes/releases/v24.2/v24.2.6.md b/src/current/_includes/releases/v24.2/v24.2.6.md
new file mode 100644
index 00000000000..3cedad5d3a0
--- /dev/null
+++ b/src/current/_includes/releases/v24.2/v24.2.6.md
@@ -0,0 +1,78 @@
+## v24.2.6
+
+Release Date: December 12, 2024
+
+{% include releases/new-release-downloads-docker-image.md release=include.release %}
+
+
Security updates
+
+- All cluster settings that accept strings are now fully redacted when transmitted as part of diagnostics telemetry. This payload includes a record of modified cluster settings and their values when they are not strings. Customers who previously applied the mitigations in [Technical Advisory 133479]({% link advisories/a133479.md %}) can safely set the value of the cluster setting `server.redact_sensitive_settings.enabled` to `false` and turn on diagnostic reporting via the `diagnostics.reporting.enabled` cluster setting without leaking sensitive cluster setting values. [#134017][#134017]
+
+
General changes
+
+- `COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING` is no longer mentioned in the `cockroach demo` command. [#134088][#134088]
+- Added `system.users` to the list of system tables that changefeeds protect with protected timestamps. This table is required for change data capture queries. [#134837][#134837]
+
+
Operational changes
+
+- The `goschedstats.always_use_short_sample_period.enabled` setting should be set to true for any production cluster, to prevent unnecessary queuing in admission control CPU queues. [#133584][#133584]
+- Added a new cluster setting `ui.database_locality_metadata.enabled` that allows operators to disable loading extended database and table region information in the DB Console Database and Table pages. This information can cause significant CPU load on large clusters with many ranges. Versions of this page from v24.3 and later do not have this problem. If customers require this data, they can use the `SHOW RANGES FROM {DATABASE|TABLE}` query via SQL to compute it on demand. [#134095][#134095]
+- Row-level TTL jobs now periodically log progress by showing the number of table spans that have been processed so far. [#135170][#135170]
+
+
Bug fixes
+
+- Fixed a bug that caused non-reusable query plans, e.g., plans for DDL and `SHOW ...` statements, to be cached and reused in future executions, possibly causing stale results to be returned. This bug only occurred when `plan_cache_mode` was set to `auto` or `force_generic_plan`, both of which are not currently the default settings. [#133074][#133074]
+- Previously, CockroachDB could encounter an internal error of the form `interface conversion: coldata.Column is` in an edge case. This is now fixed. The bug was present in v22.2.13+, v23.1.9+, and v23.2+. [#133761][#133761]
+- Fixed a bug that caused incorrect `NOT NULL` constraint violation errors on `UPSERT` and `INSERT .. ON CONFLICT .. DO UPDATE` statements when those statements updated an existing row and a subset of columns that did not include a `NOT NULL` column of the table. This bug has been present since at least v20.1.0. [#133821][#133821]
+- Fixed an unhandled error that could occur when using `REVOKE ... ON SEQUENCE ... FROM user` on an object that is not a sequence. [#133709][#133709]
+- Addressed a panic inside `CREATE TABLE AS` if sequence builtin expressions had invalid function overloads. [#133869][#133869]
+- `STRING` constants can now be compared against collated strings. [#134084][#134084]
+- Previously, when executing queries with index or lookup joins where the ordering needed to be maintained, CockroachDB could experience increased query latency, possibly by several orders of magnitude. This bug was introduced in v22.2 and is now fixed. [#134366][#134366]
+- Fixed a minor bug where `DISCARD ALL` statements were counted under the `sql.ddl.count` metric. Now these will be counted under the `sql.misc.count` metric. [#134509][#134509]
+- Addressed a bug with `DROP CASCADE` that would occasionally panic with an `un-dropped backref` message on partitioned tables. [#134472][#134472]
+- Reduced the duration of partitions in the gossip network when a node crashes in order to eliminate false positives in the `ranges.unavailable` metric. [#134600][#134600]
+- Non-`admin` users that run `DROP ROLE IF EXISTS` on a user that does not exist will no longer receive an error message. [#134969][#134969]
+- Fixed a bug that caused quotes around the name of a routine to be dropped when it was called within another routine. This could prevent the correct routine from being resolved if the nested routine name was case sensitive. The bug has existed since v24.1, when nested routines were introduced. [#134000][#134000]
+- Fixed a bug that could cause incorrect query results when the optimizer planned a lookup join on an index containing a column of type `CHAR(N)`, `VARCHAR(N)`, `BIT(N)`, `VARBIT(N)`, or `DECIMAL(M, N)`, and the query held that column constant to a single value (e.g., with an equality filter). [#135076][#135076]
+- Fixed an unhandled error that would occur if `DROP SCHEMA` was executed on the `public` schema of the `system` database, or on an internal schema like `pg_catalog` or `information_schema`. [#135180][#135180]
+- Fixed a bug that caused incorrect evaluation of some binary expressions involving `CHAR(N)` values and untyped string literals with trailing whitespace characters. For example, the expression `'f'::CHAR = 'f '` now correctly evaluates to `true`. [#135689][#135689]
+- Fixed a bug where `ALTER DATABASE` operations that modify the zone config would hang if an invalid zone config already exists. [#135215][#135215]
+- `CREATE SCHEMA` now returns the correct error if the schema name is missing. [#135927][#135927]
+- Using more than one `DECLARE` statement in the definition of a user-defined function now correctly declares additional variables. [#135738][#135738]
+- Fixed a bug where CockroachDB would encounter an internal error when evaluating `FETCH ABSOLUTE 0` statements. The bug has been present since v22.1. [#134992][#134992]
+- Fixed an issue where corrupted table statistics could cause the `cockroach` process to crash. [#136041][#136041]
+- Fixed a bug that caused the optimizer to use stale table statistics after altering an `ENUM` type used in the table. [#136812][#136812]
+
+[#133074]: https://github.com/cockroachdb/cockroach/pull/133074
+[#133584]: https://github.com/cockroachdb/cockroach/pull/133584
+[#133709]: https://github.com/cockroachdb/cockroach/pull/133709
+[#133761]: https://github.com/cockroachdb/cockroach/pull/133761
+[#133821]: https://github.com/cockroachdb/cockroach/pull/133821
+[#133869]: https://github.com/cockroachdb/cockroach/pull/133869
+[#134000]: https://github.com/cockroachdb/cockroach/pull/134000
+[#134017]: https://github.com/cockroachdb/cockroach/pull/134017
+[#134084]: https://github.com/cockroachdb/cockroach/pull/134084
+[#134088]: https://github.com/cockroachdb/cockroach/pull/134088
+[#134095]: https://github.com/cockroachdb/cockroach/pull/134095
+[#134099]: https://github.com/cockroachdb/cockroach/pull/134099
+[#134366]: https://github.com/cockroachdb/cockroach/pull/134366
+[#134447]: https://github.com/cockroachdb/cockroach/pull/134447
+[#134472]: https://github.com/cockroachdb/cockroach/pull/134472
+[#134509]: https://github.com/cockroachdb/cockroach/pull/134509
+[#134600]: https://github.com/cockroachdb/cockroach/pull/134600
+[#134646]: https://github.com/cockroachdb/cockroach/pull/134646
+[#134730]: https://github.com/cockroachdb/cockroach/pull/134730
+[#134837]: https://github.com/cockroachdb/cockroach/pull/134837
+[#134969]: https://github.com/cockroachdb/cockroach/pull/134969
+[#134992]: https://github.com/cockroachdb/cockroach/pull/134992
+[#135076]: https://github.com/cockroachdb/cockroach/pull/135076
+[#135170]: https://github.com/cockroachdb/cockroach/pull/135170
+[#135180]: https://github.com/cockroachdb/cockroach/pull/135180
+[#135215]: https://github.com/cockroachdb/cockroach/pull/135215
+[#135611]: https://github.com/cockroachdb/cockroach/pull/135611
+[#135689]: https://github.com/cockroachdb/cockroach/pull/135689
+[#135738]: https://github.com/cockroachdb/cockroach/pull/135738
+[#135927]: https://github.com/cockroachdb/cockroach/pull/135927
+[#136010]: https://github.com/cockroachdb/cockroach/pull/136010
+[#136041]: https://github.com/cockroachdb/cockroach/pull/136041
+[#136812]: https://github.com/cockroachdb/cockroach/pull/136812
diff --git a/src/current/_includes/releases/v24.3/v24.3.1.md b/src/current/_includes/releases/v24.3/v24.3.1.md
new file mode 100644
index 00000000000..4db85eb32a8
--- /dev/null
+++ b/src/current/_includes/releases/v24.3/v24.3.1.md
@@ -0,0 +1,78 @@
+## v24.3.1
+
+Release Date: December 12, 2024
+
+{% include releases/new-release-downloads-docker-image.md release=include.release %}
+
+
SQL language changes
+
+- When triggers fire one another cyclically, the new `recursion_depth_limit` session variable now limits the depth of the recursion. By default, the limit is `1000` nested trigger executions. [#135046][#135046]
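+
+A minimal sketch of adjusting this limit for a session; the value `100` is illustrative:
+
+~~~ sql
+SET recursion_depth_limit = 100;
+~~~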
+
+
Operational changes
+
+- The metrics scrape HTTP endpoint at `/_status/vars` will now truncate HELP text at the first sentence, reducing the metadata for metrics with large descriptions. Customers can still access these descriptions via our docs. [#135021][#135021]
+- The row-level TTL job now periodically updates the progress meter in the jobs introspection interfaces, including `SHOW JOBS` and the Jobs page in the DB console. [#135171][#135171]
+- Telemetry delivery is now considered successful even in cases where CockroachDB experiences a network timeout. This will prevent throttling in cases outside an operator's control. [#136481][#136481]
+
+
DB Console changes
+
+- When activating statement diagnostics in the DB Console, users now have the option to produce a redacted bundle as output. This bundle will omit sensitive data. [#134993][#134993]
+
+
Other changes
+
+- Protected timestamp records for changefeeds now include the `system.users` table. This ensures that user information remains available when running CDC queries against historical data. [#134238][#134238]
+
+
Bug fixes
+
+- Fixed a bug that could cause `DELETE` triggers not to fire on cascading delete, and which could cause `INSERT` triggers to match incorrectly in the same scenario. [#134896][#134896]
+- Reduced the duration of partitions in the gossip network when a node crashes in order to eliminate false positives in the `ranges.unavailable` metric. [#134480][#134480]
+- When a non-admin user runs `DROP ROLE IF EXISTS` on a user that does not exist, an error is no longer returned. [#134970][#134970]
+- Fixed a bug that could cause incorrect query results when the optimizer planned a lookup join on an index containing a column of type `CHAR(N)`, `VARCHAR(N)`, `BIT(N)`, `VARBIT(N)`, or `DECIMAL(M, N)`, and the query held that column constant to a single value (e.g. with an equality filter). [#135037][#135037]
+- Fixed an unhandled error that would occur if `DROP SCHEMA` was executed on the `public` schema of the `system` database, or on an internal schema like `pg_catalog` or `information_schema`. [#135181][#135181]
+- Fixed a bug that caused incorrect evaluation of some binary expressions involving `CHAR(N)` values and untyped string literals with trailing whitespace characters. For example, the expression `'f'::CHAR = 'f '` now correctly evaluates to `true`. [#134710][#134710]
+- Prevent `ALTER DATABASE` operations that modify the zone config from hanging if an invalid zone config already exists. [#135216][#135216]
+- `CREATE SCHEMA` now returns the correct error if a schema name is missing. [#135928][#135928]
+- `percentile_cont` and `percentile_disc` aggregate functions now support `float4` data type inputs. Previously, these functions would return an error when used with `float4` values (see the sketch after this list). [#135764][#135764]
+- `security.certificate.*` metrics now update correctly when certificates are reloaded during node runtime. Previously, these metrics would not reflect changes to certificates after node startup. [#136227][#136227]
+- SQL roles created from LDAP groups that contain periods (.) or hyphens (-) in their Common Names (CN) no longer result in authorization failures. [#134942][#134942]
+- LDAP authorization now supports partial group mapping, allowing users to authenticate even when some LDAP groups do not have corresponding CockroachDB roles. Previously, authentication would fail if any LDAP group lacked a matching database role. [#135587][#135587]
+- Fixed a bug, introduced in v24.3.0, where uniqueness constraints on regional by row tables were not properly enforced under `READ COMMITTED` isolation when the region was not part of the uniqueness constraint and the table also contained non-unique indexes. [#137367][#137367]
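+
+A minimal sketch of the `FLOAT4` percentile aggregates mentioned above; the table and values are placeholders:
+
+~~~ sql
+CREATE TABLE IF NOT EXISTS samples (v FLOAT4);
+INSERT INTO samples VALUES (1.5), (2.5), (4.0);
+
+SELECT percentile_cont(0.5) WITHIN GROUP (ORDER BY v) AS median_cont,
+       percentile_disc(0.5) WITHIN GROUP (ORDER BY v) AS median_disc
+FROM samples;
+~~~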
+
+
Performance improvements
+
+- The `/_status/nodes_ui` API no longer returns unnecessary metrics in its response. This decreases the payload size of the API and improves the load time of various DB Console pages and components. [#135209][#135209]
+- PL/pgSQL loops now execute up to 3-4x faster through improved optimization, particularly when they contain subqueries. This enhancement improves performance for routines with many iterations or nested operations. [#135648][#135648]
+
+[#133230]: https://github.com/cockroachdb/cockroach/pull/133230
+[#134238]: https://github.com/cockroachdb/cockroach/pull/134238
+[#134480]: https://github.com/cockroachdb/cockroach/pull/134480
+[#134710]: https://github.com/cockroachdb/cockroach/pull/134710
+[#134729]: https://github.com/cockroachdb/cockroach/pull/134729
+[#134896]: https://github.com/cockroachdb/cockroach/pull/134896
+[#134942]: https://github.com/cockroachdb/cockroach/pull/134942
+[#134970]: https://github.com/cockroachdb/cockroach/pull/134970
+[#134993]: https://github.com/cockroachdb/cockroach/pull/134993
+[#135021]: https://github.com/cockroachdb/cockroach/pull/135021
+[#135037]: https://github.com/cockroachdb/cockroach/pull/135037
+[#135046]: https://github.com/cockroachdb/cockroach/pull/135046
+[#135094]: https://github.com/cockroachdb/cockroach/pull/135094
+[#135120]: https://github.com/cockroachdb/cockroach/pull/135120
+[#135171]: https://github.com/cockroachdb/cockroach/pull/135171
+[#135181]: https://github.com/cockroachdb/cockroach/pull/135181
+[#135209]: https://github.com/cockroachdb/cockroach/pull/135209
+[#135216]: https://github.com/cockroachdb/cockroach/pull/135216
+[#135587]: https://github.com/cockroachdb/cockroach/pull/135587
+[#135648]: https://github.com/cockroachdb/cockroach/pull/135648
+[#135764]: https://github.com/cockroachdb/cockroach/pull/135764
+[#135928]: https://github.com/cockroachdb/cockroach/pull/135928
+[#136011]: https://github.com/cockroachdb/cockroach/pull/136011
+[#136227]: https://github.com/cockroachdb/cockroach/pull/136227
+[#136481]: https://github.com/cockroachdb/cockroach/pull/136481
+[#137367]: https://github.com/cockroachdb/cockroach/pull/137367
+[0d7f6eed3]: https://github.com/cockroachdb/cockroach/commit/0d7f6eed3
+[1f2b1b084]: https://github.com/cockroachdb/cockroach/commit/1f2b1b084
+[3cbd07fbd]: https://github.com/cockroachdb/cockroach/commit/3cbd07fbd
+[3f5305a4c]: https://github.com/cockroachdb/cockroach/commit/3f5305a4c
+[965dded2a]: https://github.com/cockroachdb/cockroach/commit/965dded2a
+[989a49c3f]: https://github.com/cockroachdb/cockroach/commit/989a49c3f
+[9951e3e61]: https://github.com/cockroachdb/cockroach/commit/9951e3e61
diff --git a/src/current/_includes/releases/v25.1/backward-incompatible.md b/src/current/_includes/releases/v25.1/backward-incompatible.md
new file mode 100644
index 00000000000..d81c2f57213
--- /dev/null
+++ b/src/current/_includes/releases/v25.1/backward-incompatible.md
@@ -0,0 +1,10 @@
+Before [upgrading to CockroachDB v24.3]({% link v24.3/upgrade-cockroach-version.md %}), be sure to review the following backward-incompatible changes, as well as [key cluster setting changes](#v24-3-0-cluster-settings), and adjust your deployment as necessary.
+
+If you plan to upgrade to v24.3 directly from v24.1 and skip v24.2, be sure to also review the [v24.2 release notes]({% link releases/v24.2.md %}) for backward-incompatible changes from v24.1.
+
+- Upgrading to v24.3 is blocked if no [license]({% link v24.3/licensing-faqs.md %}) is installed, or if a trial/free license is installed with telemetry disabled. [#130576][#130576]
+
+[#130576]: https://github.com/cockroachdb/cockroach/pull/130576
+
+{% comment %}Remove this anchor when it is added to the v24.3.0 GA release notes{% endcomment %}
+
diff --git a/src/current/_includes/releases/v25.1/deprecations.md b/src/current/_includes/releases/v25.1/deprecations.md
new file mode 100644
index 00000000000..abf7fd16114
--- /dev/null
+++ b/src/current/_includes/releases/v25.1/deprecations.md
@@ -0,0 +1,5 @@
+The following deprecations are announced in v24.3. If you plan to upgrade to v24.3 directly from v24.1 and skip v24.2, be sure to also review the [v24.2 release notes]({% link releases/v24.2.md %}) for deprecations.
+
+- The session variable [`enforce_home_region_follower_reads_enabled`]({% link v24.3/session-variables.md %}#enforce-home-region-follower-reads-enabled) is now deprecated, and will be removed in a future release. The related session variable [`enforce_home_region`]({% link v24.3/session-variables.md %}#enforce-home-region) is **not** deprecated. [#129024][#129024]
+
+[#129024]: https://github.com/cockroachdb/cockroach/pull/129024
diff --git a/src/current/_includes/releases/v25.1/feature-detail-key.html b/src/current/_includes/releases/v25.1/feature-detail-key.html
new file mode 100644
index 00000000000..e11ead7e232
--- /dev/null
+++ b/src/current/_includes/releases/v25.1/feature-detail-key.html
@@ -0,0 +1,25 @@
+
+
+
+
Feature detail key
+
+
+
+
+
★
+
Features marked "All★" were recently made available in the CockroachDB Cloud platform. They are available for all supported versions of CockroachDB, under the deployment methods specified in their row under Availability.
+
+
+
★★
+
Features marked "All★★" were recently made available via tools maintained outside of the CockroachDB binary. They are available to use with all supported versions of CockroachDB, under the deployment methods specified in their row under Availability.
+
+
+
{% include icon-yes.html %}
+
Feature is available for this deployment method of CockroachDB as specified in the icon’s column: CockroachDB Self-hosted, CockroachDB Advanced, CockroachDB Standard, or CockroachDB Basic.
+
+
+
{% include icon-no.html %}
+
Feature is not available for this deployment method of CockroachDB as specified in the icon’s column: CockroachDB Self-hosted, CockroachDB Advanced, CockroachDB Standard, or CockroachDB Basic.
+
+
+
diff --git a/src/current/_includes/releases/v25.1/feature-highlights-change-data-capture.html b/src/current/_includes/releases/v25.1/feature-highlights-change-data-capture.html
new file mode 100644
index 00000000000..1ebb2379d08
--- /dev/null
+++ b/src/current/_includes/releases/v25.1/feature-highlights-change-data-capture.html
@@ -0,0 +1,30 @@
+
+
+
+
Feature
+
Availability
+
+
+
Ver.
+
Self-hosted
+
Advanced
+
Standard
+
Basic
+
+
+
+
+
+
IAM authentication support for Amazon MSK Serverless
+
+ Changefeeds support IAM Authentication with Amazon MSK Serverless clusters (Amazon Managed Streaming for Apache Kafka). This feature is generally available.
+
+
+
24.3
+
{% include icon-yes.html %}
+
{% include icon-yes.html %}
+
{% include icon-yes.html %}
+
{% include icon-yes.html %}
+
+
+
diff --git a/src/current/_includes/releases/v25.1/feature-highlights-cloud.html b/src/current/_includes/releases/v25.1/feature-highlights-cloud.html
new file mode 100644
index 00000000000..91bf1a065e6
--- /dev/null
+++ b/src/current/_includes/releases/v25.1/feature-highlights-cloud.html
@@ -0,0 +1,32 @@
+
+
+
+
Feature
+
Availability
+
+
+
Ver.
+
Self-hosted
+
Advanced
+
Standard
+
Basic
+
+
+
+
+
+
Free trial on CockroachDB Cloud
+
+ New CockroachDB Cloud organizations can benefit from a 30-day free trial that enables you to consume up to $400 worth of free credits. Get started by signing up for CockroachDB Cloud
+
+
+
All
+ ★
+
+
{% include icon-no.html %}
+
{% include icon-no.html %}
+
{% include icon-yes.html %}
+
{% include icon-yes.html %}
+
+
+
diff --git a/src/current/_includes/releases/v25.1/feature-highlights-disaster-recovery.html b/src/current/_includes/releases/v25.1/feature-highlights-disaster-recovery.html
new file mode 100644
index 00000000000..45576c202d2
--- /dev/null
+++ b/src/current/_includes/releases/v25.1/feature-highlights-disaster-recovery.html
@@ -0,0 +1,49 @@
+
+
+
+
Feature
+
Availability
+
+
+
Ver.
+
Self-hosted
+
Advanced
+
Standard
+
Basic
+
+
+
+
+
+
SELECT now supported on PCR standby clusters
+
+ Physical cluster replication (PCR) has been enhanced to support SELECT operations on standby clusters. This enables you to scale read performance by running, for example, non-critical workloads on standby clusters.
+
+
+
24.3
+
{% include icon-yes.html %}
+
{% include icon-no.html %}
+
{% include icon-no.html %}
+
{% include icon-no.html %}
+
+
+
+
Logical Data Replication in Preview
+
+ Logical data replication (LDR) continuously replicates tables from an active source CockroachDB cluster to an active destination CockroachDB cluster. Both source and destination can receive application reads and writes, and participate in bidirectional LDR replication for eventual consistency in the replicating tables.
+
+
+ The active-active setup between clusters can provide protection against cluster, datacenter, or region failure while still achieving single-region low latency reads and writes in the individual CockroachDB clusters. Each cluster in an LDR job still benefits individually from multi-active availability with CockroachDB's built-in Raft replication providing data consistency across nodes, zones, and regions.
+
+
+ This feature is in Preview.
+
+
+
24.3
+
{% include icon-yes.html %}
+
{% include icon-no.html %}
+
{% include icon-no.html %}
+
{% include icon-no.html %}
+
+
+
diff --git a/src/current/_includes/releases/v25.1/feature-highlights-licensing.html b/src/current/_includes/releases/v25.1/feature-highlights-licensing.html
new file mode 100644
index 00000000000..5d6e8a24f57
--- /dev/null
+++ b/src/current/_includes/releases/v25.1/feature-highlights-licensing.html
@@ -0,0 +1,55 @@
+
+
+
+
Feature
+
Availability
+
+
+
Ver.
+
Self-hosted
+
Advanced
+
Standard
+
Basic
+
+
+
+
+
+
Licensing changes
+
+ All versions of CockroachDB starting from the release date of 24.3.0 onward, including patch fixes for versions 23.1-24.2, are made available under the CockroachDB Software License.
+
+
+ See below for a summary of license options for self-hosted deployments. All Cloud deployments automatically have a valid Enterprise license.
+
+
+
+
+ Enterprise: This paid license allows usage of all CockroachDB features in accordance with the terms specified in the CockroachDB Software License.
+
+
+
+
+ Enterprise Free: Same functionality as Enterprise, but free of charge for businesses with less than $10M in annual revenue. Clusters will be throttled after 7 days without sending telemetry. License must be renewed annually.
+
+
+
+
+ Enterprise Trial: A 30 day self-service trial license. Telemetry is required during the trial. Clusters will be throttled after 7 days without sending telemetry. Telemetry can be disabled once the cluster is upgraded to a paid Enterprise license.
+
+
+
+
+ See the Licensing FAQs page for more details on the CockroachDB Software License and license options.
+
+ You may acquire CockroachDB licenses through the CockroachDB Cloud console.
+
+
+
24.3
+
{% include icon-yes.html %}
+
{% include icon-yes.html %}
+
{% include icon-yes.html %}
+
{% include icon-yes.html %}
+
+
+
diff --git a/src/current/_includes/releases/v25.1/feature-highlights-observability.html b/src/current/_includes/releases/v25.1/feature-highlights-observability.html
new file mode 100644
index 00000000000..2696c34ec5b
--- /dev/null
+++ b/src/current/_includes/releases/v25.1/feature-highlights-observability.html
@@ -0,0 +1,94 @@
+
+
+
+
Feature
+
Availability
+
+
+
Ver.
+
Self-hosted
+
Advanced
+
Standard
+
Basic
+
+
+
+
+
+
Improved usability for the DB Console Metrics page
+
+ Introduced several enhancements to the DB Console Metrics page to support large scale clusters, including the following:
+
+
+
+ Added on-hover cursor support that will display the closest time-series value and highlight the node in the legend to allow users to quickly pinpoint outliers.
+
+
+
+
+ Improved legend visibility and made legends scrollable to improve usability and reduce vertical scrolling.
+
+
+
+
+
+
24.3
+
{% include icon-yes.html %}
+
{% include icon-yes.html %}
+
{% include icon-no.html %}
+
{% include icon-no.html %}
+
+
+
+
Improved performance and scalability for the DB Console Databases pages
+
+ CockroachDB now caches the data that is surfaced in the Databases page. This enhances the performance and scalability of the Databases page for large-scale clusters.
+
+
+
24.3
+
{% include icon-yes.html %}
+
{% include icon-yes.html %}
+
{% include icon-no.html %}
+
{% include icon-no.html %}
+
+
+
+
Improved admission control observability
+
+ The DB Console Overload page now provides additional metrics to help identify overload in the system. Graphs and metrics on this page provide quick signals on which resource is exhausted and whether it is due to background activity or foreground.
+
+
+ There are now 4 graphs for admission queue delay:
+
+
+
+
+ Foreground (regular) CPU work
+
+
+
+
+ Store (IO) work
+
+
+
+
+ Background (elastic) CPU work
+
+
+
+
+ Replication admission control (store overload on replicas)
+
+
+
+
+
+
24.3
+
{% include icon-yes.html %}
+
{% include icon-yes.html %}
+
{% include icon-no.html %}
+
{% include icon-no.html %}
+
+
+
diff --git a/src/current/_includes/releases/v25.1/feature-highlights-security.html b/src/current/_includes/releases/v25.1/feature-highlights-security.html
new file mode 100644
index 00000000000..ad472d3031f
--- /dev/null
+++ b/src/current/_includes/releases/v25.1/feature-highlights-security.html
@@ -0,0 +1,30 @@
+
+
+
+
Feature
+
Availability
+
+
+
Ver.
+
Self-hosted
+
Advanced
+
Standard
+
Basic
+
+
+
+
+
+
LDAP support in Preview
+
+ CockroachDB supports [authentication and authorization using LDAP-compatible directory services]({% link v24.3/ldap-authentication.md %}), such as Active Directory and Microsoft Entra ID. This allows you to integrate CockroachDB clusters with your organization's existing identity infrastructure for centralized user management and access control. This feature is available in Preview.
+
+
+
24.3
+
{% include icon-yes.html %}
+
{% include icon-no.html %}
+
{% include icon-no.html %}
+
{% include icon-no.html %}
+
+
+
diff --git a/src/current/_includes/releases/v25.1/feature-highlights-sql.html b/src/current/_includes/releases/v25.1/feature-highlights-sql.html
new file mode 100644
index 00000000000..3a42a20d59e
--- /dev/null
+++ b/src/current/_includes/releases/v25.1/feature-highlights-sql.html
@@ -0,0 +1,43 @@
+
+
+
+
Feature
+
Availability
+
+
+
Ver.
+
Self-hosted
+
Advanced
+
Standard
+
Basic
+
+
+
+
+
+
User-defined functions and stored procedures support SECURITY DEFINER
+
+ You can create or alter a user-defined function (UDF) or stored procedure with [EXTERNAL] SECURITY DEFINER instead of the default [EXTERNAL] SECURITY INVOKER. With SECURITY DEFINER, the privileges of the owner are checked when the UDF or stored procedure is executed, rather than the privileges of the executor. The EXTERNAL keyword is optional and exists for SQL language conformity.
+
+
+
24.3
+
{% include icon-yes.html %}
+
{% include icon-yes.html %}
+
{% include icon-yes.html %}
+
{% include icon-yes.html %}
+
+
+
+
CockroachDB now supports triggers in Preview
+
+ CockroachDB now supports triggers. Triggers allow automatic execution of specified functions in response to specified events on a particular table or view. They can be used for automating tasks, enforcing business rules, and maintaining data integrity. This feature is in Preview.
+
+
+
24.3
+
{% include icon-yes.html %}
+
{% include icon-yes.html %}
+
{% include icon-yes.html %}
+
{% include icon-yes.html %}
+
+
+
diff --git a/src/current/_includes/releases/v25.1/upgrade-finalization.md b/src/current/_includes/releases/v25.1/upgrade-finalization.md
new file mode 100644
index 00000000000..4cd49187ca0
--- /dev/null
+++ b/src/current/_includes/releases/v25.1/upgrade-finalization.md
@@ -0,0 +1,4 @@
+During a major-version upgrade, certain features and performance improvements may not be available until the upgrade is finalized.
+
+- A cluster must have an [Enterprise license]({% link v24.3/licensing-faqs.md %}#set-a-license) or a [trial license]({% link v24.3/licensing-faqs.md %}#obtain-a-license) set before an upgrade to v24.3 can be finalized.
+- New clusters that are initialized for the first time on v24.3, and clusters that are upgraded to v24.3 will now have a [zone config]({% link v24.3/configure-replication-zones.md %}) defined for the `timeseries` range if it does not already exist, which specifies the value for `gc.ttlseconds`, but inherits all other attributes from the zone config for the `default` range.
diff --git a/src/current/_includes/releases/v25.1/v25.1.0-alpha.1.md b/src/current/_includes/releases/v25.1/v25.1.0-alpha.1.md
new file mode 100644
index 00000000000..4d6c951e07b
--- /dev/null
+++ b/src/current/_includes/releases/v25.1/v25.1.0-alpha.1.md
@@ -0,0 +1,190 @@
+## v25.1.0-alpha.1
+
+Release Date: December 19, 2024
+
+{% include releases/new-release-downloads-docker-image.md release=include.release %}
+
+
Backward-incompatible changes
+
+- The old `BACKUP TO`, `RESTORE FROM `, and `SHOW BACKUP IN ` syntaxes are now fully deprecated and no longer usable. [#133610][#133610]
+
+
Security updates
+
+- Added support for mapping a partial set of roles from an LDAP-synced group to CockroachDB roles, with appropriate errors returned for undesired behavior. [#135552][#135552]
+
+
General changes
+
+- To improve the granularity of changefeed pipeline metrics, the changefeed metrics `changefeed.admit_latency` and `changefeed.commit_latency` now have histogram buckets from `5ms` to `60m` (previously `500ms` to `5m`). The following changefeed metrics now have histogram buckets from `5ms` to `10m` (previously `500ms` to `5m`):
+ - `changefeed.parallel_io_queue_nanos`
+ - `changefeed.parallel_io_result_queue_nanos`
+ - `changefeed.sink_batch_hist_nanos`
+ - `changefeed.flush_hist_nanos`
+ - `changefeed.kafka_throttling_hist_nanos`
+[#136265][#136265]
+- Added support for multiple seed brokers in the new Kafka sink. [#136632][#136632]
+- Added the new metric `distsender.rangefeed.catchup_ranges_waiting_client_side` that counts how many rangefeeds are waiting on the client-side limiter to start performing catchup scans. [#136741][#136741]
+- Added `system.users` to the list of system tables that changefeeds protect with protected timestamps. This table is required for change data capture queries. [#133568][#133568]
+- Added the `AWS_USE_PATH_STYLE` parameter to S3 URI parsing. [#136715][#136715]
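+
+A minimal sketch of passing the new parameter in an S3 URI; the bucket, path, `true` value format, and use of `AUTH=implicit` are placeholders, not part of the change above:
+
+~~~ sql
+-- Force path-style S3 addressing, e.g. for S3-compatible storage.
+BACKUP INTO 's3://my-bucket/backups?AWS_USE_PATH_STYLE=true&AUTH=implicit';
+~~~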
+
+
SQL language changes
+
+- Two new builtin functions, `crdb_internal.{lease_holder, range_stats}_with_errors`, include errors encountered while fetching leaseholder and range stats. These new builtins are used by the `crdb_internal.ranges` table, which includes a new column, `errors`, that combines the errors from the builtins. [#131232][#131232]
+- The cluster setting `sql.stats.automatic_partial_collection.enabled` is now enabled by default, which enables automatic collection of partial table stats. Partial table stats (i.e., those created with `CREATE STATISTICS ... USING EXTREMES`) scan the lower and upper ends of indexes to collect statistics outside the range covered by the previous full statistics collection (see the first sketch after this list). [#133988][#133988]
+- When triggers fire one another cyclically, the new `recursion_depth_limit` session variable now limits the depth of the recursion. By default, the limit is `1000` nested trigger executions. [#134498][#134498]
+- The names of `BEFORE` triggers fired by a mutation now show up in the `EXPLAIN` output. The trigger-function invocations are visible in the output of verbose `EXPLAIN`. [#135556][#135556]
+- `AFTER` triggers will now show up in the output of `EXPLAIN`, as well as `EXPLAIN ANALYZE`. [#135556][#135556]
+- Added support for `SHOW TRIGGERS`, which displays the names of all triggers on a table, as well as whether each trigger is enabled. The user must have any privilege on the table, or be its owner. [#135778][#135778]
+- Added support for `SHOW CREATE TRIGGER`, which displays the `CREATE` statement for a trigger. The user must have any privilege on the table, or be its owner. [#135778][#135778]
+- Added an informational notice to the result of `CREATE TABLE ... AS` statements that describes that indexes and constraints are not copied to the new table. [#135845][#135845]
+- Altering a column’s type no longer requires enabling the `enable_experimental_alter_column_type_general` session variable. This change makes the feature generally available. [#135936][#135936]
+- Added support for `COLLATE` expressions on arrays of strings to match PostgreSQL more closely. [#133751][#133751]
+- Added the column `readable_high_water_timestamp` to the output of `SHOW CHANGEFEED JOBS`. This human-readable form will be easier to consume. `high_water_timestamp` still exists and is in epoch nanoseconds. [#135623][#135623]
+- The `sql_safe_updates` session variable must be disabled to perform `ALTER COLUMN TYPE` operations that require a column rewrite (see the second sketch after this list). [#136110][#136110]
+- Added the `CREATE LOGICALLY REPLICATED` syntax that will direct logical data replication jobs to create the destination table(s) using a copy of the source table(s). [#136841][#136841]
+- It is now possible to execute queries with correlated joins with sub-queries or common table expressions in both the `INNER` and `OUTER` context. Errors with the following message: `unimplemented: apply joins with subqueries in the "inner" and "outer" contexts are not supported` will no longer occur. [#136506][#136506]
+- It is now possible to include a common table expression within the body of a user-defined function or stored procedure. [#136506][#136506]
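+
+A first sketch of the partial statistics statement referenced above; the statistics, table, and column names are placeholders:
+
+~~~ sql
+-- Refresh only the index extremes beyond the last full collection.
+CREATE STATISTICS recent_orders ON order_date FROM orders USING EXTREMES;
+~~~
+
+A second sketch of an `ALTER COLUMN TYPE` that rewrites the column, which now works without the experimental session variable but still requires `sql_safe_updates` to be off; the table, column, target type, and `USING` cast are placeholders:
+
+~~~ sql
+SET sql_safe_updates = false;
+ALTER TABLE orders ALTER COLUMN price SET DATA TYPE DECIMAL(10,2) USING price::DECIMAL(10,2);
+~~~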
+
+
Operational changes
+
+- Retired the cluster setting `kv.rangefeed.scheduler.enabled`. The rangefeed scheduler is now unconditionally enabled. [#132825][#132825]
+- Added the cluster setting `ui.database_locality_metadata.enabled` that allows operators to disable loading extended database and table region information in the DB Console Database and Table pages. This information can cause significant CPU load on large clusters with many ranges. Versions of this page from v24.3 and later do not have this problem. If customers require this data, they can use the `SHOW RANGES FROM {DATABASE|TABLE}` query via SQL to compute it on demand. [#133075][#133075]
+- The metrics scrape HTTP endpoint at `/_status/vars` will now truncate `HELP` text at the first sentence, reducing the metadata for metrics with large descriptions. Descriptions are still accessible in the documentation. [#134724][#134724]
+- The row-level TTL job will now periodically update the progress meter in the jobs introspection interfaces, including `SHOW JOBS` and the Jobs page in the DB console. [#135142][#135142]
+- The `kv.bulk_io_write.min_capacity_remaining_fraction` cluster setting can be set between `0.04` and `0.3`. [#135779][#135779]
+- Added two new metrics, `sql.distsql.select.distributed_exec.count` and `sql.distsql.select.distributed_exec.count.internal`. These metrics count the number of `SELECT` statements that actually execute with full or partial distribution. These metrics differ from `sql.distsql.select.count` and `sql.distsql.select.count.internal` in that the latter count the number of `SELECT` statements that are **planned** with full or partial distribution, but might not necessarily execute with full or partial distribution, depending on the location of data. [#135236][#135236]
+- Added the new metric `sql.distsql.distributed_exec.count` that counts the number of invocations of the execution engine with full or partial distribution. (This is in contrast to `sql.distsql.queries.total`, which counts the total number of invocations of the execution engine.) [#135236][#135236]
+- Added some clarification that the following metrics count invocations of the execution engine and not SQL queries (which could each result in multiple invocations of the execution engine):
+ - `sql.distsql.queries.active`
+ - `sql.distsql.queries.total`
+ - `sql.distsql.distributed_exec.count` [#135236][#135236]
+- The default value for the cluster setting `trace.span_registry.enabled` has been changed from `true` to `false`. [#135682][#135682]
+- Removed the `sql.auth.resolve_membership_single_scan.enabled` cluster setting. This was added in case it was necessary to revert back to the previous behavior for looking up role memberships, but this cluster setting has not been needed in practice since this was added in v23.1. [#135852][#135852]
+- Telemetry delivery is now considered successful even in cases where CockroachDB experiences a network timeout. This will prevent throttling in cases outside an operator's control. [#136219][#136219]
+- When a schema change job is completed, rolls back, or encounters a failure, the time taken since the job began is now logged in a structured log in the `SQL_SCHEMA` log channel. [#136899][#136899]
+
+
DB Console changes
+
+- As of v25.1 the legacy Database page, which was previously available through the Advanced Debug page, is no longer available. [#134005][#134005]
+- When activating statement diagnostics in the DB Console, users now have the option to produce a redacted bundle as output. This bundle will omit sensitive data. [#134723][#134723]
+- Fixed a list of UI bugs on the DB Console Overview and Node Overview pages. [#135293][#135293]
+- Removed the link for the legacy table page on the Plan Details page. [#136311][#136311]
+- Changed the table and index contents of the Hot Ranges page in DB console. [#134106][#134106]
+
+
Bug fixes
+
+- Fixed a bug that prevented the `CREATE` statement for a routine from being shown in a statement bundle. This happened when the routine was created on a schema other than `public`. The bug has existed since v23.1. [#132147][#132147]
+- Reduced the duration of partitions in the gossip network when a node crashes in order to eliminate false positives in the `ranges.unavailable` metric. [#133300][#133300]
+- Fixed a bug that caused incorrect evaluation of some binary expressions involving `CHAR(N)` values and untyped string literals with trailing whitespace characters. For example, the expression `'f'::CHAR = 'f '` now correctly evaluates to `true`. [#133037][#133037]
+- Fixed a race condition in Sarama when Kafka throttling is enabled. [#133563][#133563]
+- Fixed a metrics bug in rangefeed restarts introduced in v23.2. [#133947][#133947]
+- Fixed a bug that could result in incorrect metrics related to retryable rangefeed errors. [#133991][#133991]
+- Fixed a bug that could cause `DELETE` triggers not to fire on cascading `DELETE`, and which could cause `INSERT` triggers to match incorrectly in the same scenario. [#134759][#134759]
+- Non-`admin` users that run `DROP ROLE IF EXISTS` on a user that does not exist will no longer receive an error message. [#134850][#134850]
+- Fixed a bug where CockroachDB would encounter an internal error when evaluating `FETCH ABSOLUTE 0` statements. The bug had been present since v22.1. [#134919][#134919]
+- Fixed a bug that could cause incorrect query results when the optimizer planned a lookup join on an index containing a column of type `CHAR(N)`, `VARCHAR(N)`, `BIT(N)`, `VARBIT(N)`, or `DECIMAL(M, N)`, and the query held that column constant to a single value (e.g., with an equality filter). [#134891][#134891]
+- Fixed a bug that prevented restoring cluster backups taken in a multi-region cluster that had configured the `system` database with a region configuration into a non-multi-region cluster. [#134604][#134604]
+- Fixed an unhandled error that would occur if `DROP SCHEMA` was executed on the `public` schema of the `system` database, or on an internal schema like `pg_catalog` or `information_schema`. [#135149][#135149]
+- `ALTER DATABASE` operations no longer hang when the operation modifies the zone config if an invalid zone config already exists. [#135168][#135168]
+- CockroachDB now correctly evaluates `percentile_cont` and `percentile_disc` aggregates over `FLOAT4` values. [#135130][#135130]
+- The schema changer's backfill process now includes a retry mechanism that reduces the batch size when memory issues occur. This improves the likelihood of operation success without requiring manual adjustment of the `bulkio.index_backfill.batch_size` cluster setting. [#135563][#135563]
+- `CREATE SCHEMA` now returns the correct error if the schema name is missing. [#135910][#135910]
+- Fixed an issue where corrupted table statistics could cause the `cockroach` process to crash. [#135944][#135944]
+- Table statistics collection in CockroachDB could previously run into `no bytes in account to release` errors in some edge cases (when the SQL memory budget, configured via `--max-sql-memory` flag, was close to being exhausted). The bug has been present since v21.2 and is now fixed. [#135970][#135970]
+- `security.certificate.*` metrics will now be updated if a node loads new certificates while running. [#135596][#135596]
+- A table that is participating in logical data replication can no longer be dropped. Previously, this was allowed, which would cause all the replicated rows to end up in the dead-letter queue. [#136172][#136172]
+- Fixed a bug where `ALTER COLUMN ... SET NOT NULL` was not enforced consistently when the table was created in the same transaction. [#136298][#136298]
+- Fixed a bug where creating a relation or type could leave dangling namespace entries if the schema was concurrently being dropped. [#136325][#136325]
+- The `idle_in_session_timeout` session variable now excludes the time spent waiting for schema changer jobs to complete, preventing unintended session termination during schema change operations. [#136463][#136463]
+- Fixed a bug that causes the optimizer to use stale table statistics after altering an `ENUM` type used in the table. [#136538][#136538]
+- CockroachDB now better respects the `statement_timeout` limit on queries involving the top K sort and merge join operations. [#136631][#136631]
+- Fixed a bug that would cause the `make_timestamp` and `make_timestamptz` builtin functions to incorrectly extract the `seconds` argument if the value was less than `1`. [#136804][#136804]
+- Fixed possible index corruption caused by triggers that could occur when the following conditions were satisfied:
+ 1. A query calls a user-defined function or stored procedure, and also performs a mutation on a table.
+  2. The user-defined function or stored procedure contains a statement that either fires an `AFTER` trigger, or fires a `CASCADE` that itself fires a trigger.
+ 3. The trigger modifies the same row as the outer statement.
+ 4. Either the outer or inner mutation is something other than an `INSERT` without an `ON CONFLICT` clause. [#136076][#136076]
+- Fixed an issue where license enforcement was not consistently disabled for single-node clusters started with `cockroach start-single-node`, ensuring proper behavior on cluster restarts. [#136926][#136926]
+- Fixed a bug that caused an incorrect filesystem to be logged as part of the store information. [#137024][#137024]
+
+
Performance improvements
+
+- The `/_status/nodes_ui` API no longer returns unnecessary metrics in its response. This decreases the payload size of the API and improves the load time of various DB Console pages and components. [#135186][#135186]
+- Performance for some PL/pgSQL loops is now significantly improved, by as much as 3–4 times. This is due to applying tail-call optimization in more cases to the recursive sub-routines that implement loops. [#135145][#135145]
+- Improved the internal caching logic for role membership information. This reduces the latency impact of commands such as `DROP ROLE`, `CREATE ROLE`, and `GRANT role TO user`, which cause the role membership cache to be invalidated. [#135852][#135852]
+- The session variable `plan_cache_mode` now defaults to `auto`, enabling generic query plans for some queries (see the example after this list). [#135668][#135668]
+- gRPC streams are now pooled across unary intra-cluster RPCs, allowing gRPC resources to be reused and reducing the cost of remote key-value layer access. This pooling can be disabled using the `rpc.batch_stream_pool.enabled` cluster setting (see the example after this list). [#136648][#136648]
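For reference, a minimal sketch of inspecting and overriding the new `plan_cache_mode` default at the session level; `force_custom_plan` is shown only as one possible override:

```sql
-- Check the session default, which is now 'auto'.
SHOW plan_cache_mode;

-- Opt a session back out of generic query plans if needed.
SET plan_cache_mode = 'force_custom_plan';
```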
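And a sketch of toggling the stream-pooling cluster setting mentioned above, assuming you want to disable it temporarily:

```sql
-- Disable gRPC batch stream pooling cluster-wide.
SET CLUSTER SETTING rpc.batch_stream_pool.enabled = false;

-- Restore the default behavior (pooling enabled).
RESET CLUSTER SETTING rpc.batch_stream_pool.enabled;
```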
+
+
Multi-tenancy
+
+- The `nodes` endpoint now works for `shared` secondary tenants. Since nodes are common to all tenants, this API endpoint behaves similarly to the system tenant's endpoint. [#131644][#131644]
+
+[#131232]: https://github.com/cockroachdb/cockroach/pull/131232
+[#131644]: https://github.com/cockroachdb/cockroach/pull/131644
+[#132147]: https://github.com/cockroachdb/cockroach/pull/132147
+[#132825]: https://github.com/cockroachdb/cockroach/pull/132825
+[#133037]: https://github.com/cockroachdb/cockroach/pull/133037
+[#133075]: https://github.com/cockroachdb/cockroach/pull/133075
+[#133080]: https://github.com/cockroachdb/cockroach/pull/133080
+[#133215]: https://github.com/cockroachdb/cockroach/pull/133215
+[#133300]: https://github.com/cockroachdb/cockroach/pull/133300
+[#133563]: https://github.com/cockroachdb/cockroach/pull/133563
+[#133568]: https://github.com/cockroachdb/cockroach/pull/133568
+[#133610]: https://github.com/cockroachdb/cockroach/pull/133610
+[#133751]: https://github.com/cockroachdb/cockroach/pull/133751
+[#133947]: https://github.com/cockroachdb/cockroach/pull/133947
+[#133988]: https://github.com/cockroachdb/cockroach/pull/133988
+[#133991]: https://github.com/cockroachdb/cockroach/pull/133991
+[#134005]: https://github.com/cockroachdb/cockroach/pull/134005
+[#134106]: https://github.com/cockroachdb/cockroach/pull/134106
+[#134498]: https://github.com/cockroachdb/cockroach/pull/134498
+[#134604]: https://github.com/cockroachdb/cockroach/pull/134604
+[#134723]: https://github.com/cockroachdb/cockroach/pull/134723
+[#134724]: https://github.com/cockroachdb/cockroach/pull/134724
+[#134759]: https://github.com/cockroachdb/cockroach/pull/134759
+[#134850]: https://github.com/cockroachdb/cockroach/pull/134850
+[#134891]: https://github.com/cockroachdb/cockroach/pull/134891
+[#134919]: https://github.com/cockroachdb/cockroach/pull/134919
+[#135130]: https://github.com/cockroachdb/cockroach/pull/135130
+[#135142]: https://github.com/cockroachdb/cockroach/pull/135142
+[#135145]: https://github.com/cockroachdb/cockroach/pull/135145
+[#135149]: https://github.com/cockroachdb/cockroach/pull/135149
+[#135168]: https://github.com/cockroachdb/cockroach/pull/135168
+[#135186]: https://github.com/cockroachdb/cockroach/pull/135186
+[#135236]: https://github.com/cockroachdb/cockroach/pull/135236
+[#135293]: https://github.com/cockroachdb/cockroach/pull/135293
+[#135552]: https://github.com/cockroachdb/cockroach/pull/135552
+[#135556]: https://github.com/cockroachdb/cockroach/pull/135556
+[#135563]: https://github.com/cockroachdb/cockroach/pull/135563
+[#135596]: https://github.com/cockroachdb/cockroach/pull/135596
+[#135623]: https://github.com/cockroachdb/cockroach/pull/135623
+[#135668]: https://github.com/cockroachdb/cockroach/pull/135668
+[#135682]: https://github.com/cockroachdb/cockroach/pull/135682
+[#135778]: https://github.com/cockroachdb/cockroach/pull/135778
+[#135779]: https://github.com/cockroachdb/cockroach/pull/135779
+[#135845]: https://github.com/cockroachdb/cockroach/pull/135845
+[#135852]: https://github.com/cockroachdb/cockroach/pull/135852
+[#135910]: https://github.com/cockroachdb/cockroach/pull/135910
+[#135936]: https://github.com/cockroachdb/cockroach/pull/135936
+[#135944]: https://github.com/cockroachdb/cockroach/pull/135944
+[#135970]: https://github.com/cockroachdb/cockroach/pull/135970
+[#136076]: https://github.com/cockroachdb/cockroach/pull/136076
+[#136110]: https://github.com/cockroachdb/cockroach/pull/136110
+[#136172]: https://github.com/cockroachdb/cockroach/pull/136172
+[#136219]: https://github.com/cockroachdb/cockroach/pull/136219
+[#136265]: https://github.com/cockroachdb/cockroach/pull/136265
+[#136298]: https://github.com/cockroachdb/cockroach/pull/136298
+[#136311]: https://github.com/cockroachdb/cockroach/pull/136311
+[#136325]: https://github.com/cockroachdb/cockroach/pull/136325
+[#136463]: https://github.com/cockroachdb/cockroach/pull/136463
+[#136506]: https://github.com/cockroachdb/cockroach/pull/136506
+[#136538]: https://github.com/cockroachdb/cockroach/pull/136538
+[#136631]: https://github.com/cockroachdb/cockroach/pull/136631
+[#136632]: https://github.com/cockroachdb/cockroach/pull/136632
+[#136648]: https://github.com/cockroachdb/cockroach/pull/136648
+[#136715]: https://github.com/cockroachdb/cockroach/pull/136715
+[#136741]: https://github.com/cockroachdb/cockroach/pull/136741
+[#136804]: https://github.com/cockroachdb/cockroach/pull/136804
+[#136841]: https://github.com/cockroachdb/cockroach/pull/136841
+[#136899]: https://github.com/cockroachdb/cockroach/pull/136899
+[#136926]: https://github.com/cockroachdb/cockroach/pull/136926
+[#137024]: https://github.com/cockroachdb/cockroach/pull/137024
\ No newline at end of file
diff --git a/src/current/_includes/sidebar-data-v25.1.json b/src/current/_includes/sidebar-data-v25.1.json
new file mode 100644
index 00000000000..4fe4209a469
--- /dev/null
+++ b/src/current/_includes/sidebar-data-v25.1.json
@@ -0,0 +1,28 @@
+[
+ {
+ "title": "Docs Home",
+ "is_top_level": true,
+ "urls": [
+ "/"
+ ]
+ },
+ {% include_cached v25.1/sidebar-data/get-started.json %},
+ {% include_cached v25.1/sidebar-data/releases.json %},
+ {% include_cached v25.1/sidebar-data/feature-overview.json %},
+ {% include_cached v25.1/sidebar-data/resilience.json %},
+ {% include_cached v25.1/sidebar-data/connect-to-cockroachdb.json %},
+ {% include_cached v25.1/sidebar-data/migrate.json %},
+ {% include_cached v25.1/sidebar-data/cloud-deployments.json %},
+ {% include_cached v25.1/sidebar-data/self-hosted-deployments.json %},
+ {% include_cached v25.1/sidebar-data/schema-design.json %},
+ {% include_cached v25.1/sidebar-data/reads-and-writes.json %},
+ {% include_cached v25.1/sidebar-data/stream-data.json %},
+ {% include_cached v25.1/sidebar-data/cross-cluster-replication.json %},
+ {% include_cached v25.1/sidebar-data/multi-region-capabilities.json %},
+ {% include_cached v25.1/sidebar-data/optimize-performance.json %},
+ {% include_cached v25.1/sidebar-data/troubleshooting.json %},
+ {% include_cached v25.1/sidebar-data/sql.json %},
+ {% include_cached v25.1/sidebar-data/reference.json %},
+ {% include_cached v25.1/sidebar-data/faqs.json %},
+ {% include_cached sidebar-data-cockroach-university.json %}
+]
diff --git a/src/current/_includes/v23.1/cdc/lagging-ranges.md b/src/current/_includes/v23.1/cdc/lagging-ranges.md
index 339a5571443..e0061df7d4b 100644
--- a/src/current/_includes/v23.1/cdc/lagging-ranges.md
+++ b/src/current/_includes/v23.1/cdc/lagging-ranges.md
@@ -1,10 +1,12 @@
-{% include_cached new-in.html version="v23.1.12" %} Use the `changefeed.lagging_ranges` metric to track the number of ranges that are behind in a changefeed. This is calculated based on the [cluster settings]({% link {{ page.version.version }}/cluster-settings.md %}):
+{% include_cached new-in.html version="v23.1.12" %} Use the `changefeed.lagging_ranges` metric to track the number of [ranges]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-range) that are behind in a changefeed. This is calculated based on the [cluster settings]({% link {{ page.version.version }}/cluster-settings.md %}):
- `changefeed.lagging_ranges_threshold` sets a duration from the present that determines the length of time a range is considered to be lagging behind, which will then track in the [`lagging_ranges`]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#using-changefeed-metrics-labels) metric. Note that ranges undergoing an [initial scan]({% link {{ page.version.version }}/create-changefeed.md %}#initial-scan) for longer than the threshold duration are considered to be lagging. Starting a changefeed with an initial scan on a large table will likely increment the metric for each range in the table. As ranges complete the initial scan, the number of ranges lagging behind will decrease.
- **Default:** `3m`
- `changefeed.lagging_ranges_polling_interval` sets the interval rate for when lagging ranges are checked and the `lagging_ranges` metric is updated. Polling adds latency to the `lagging_ranges` metric being updated. For example, if a range falls behind by 3 minutes, the metric may not update until an additional minute afterward.
- **Default:** `1m`
+{% include_cached new-in.html version="v23.1.29" %} Use the `changefeed.total_ranges` metric to monitor the number of ranges that are watched by [aggregator processors]({% link {{ page.version.version }}/how-does-an-enterprise-changefeed-work.md %}) participating in the changefeed job. If you're experiencing lagging ranges, `changefeed.total_ranges` may indicate that the number of ranges watched by aggregator processors in the job is unbalanced. You may want to try [pausing]({% link {{ page.version.version }}/pause-job.md %}) the changefeed and then [resuming]({% link {{ page.version.version }}/resume-job.md %}) it, so that the changefeed replans the work in the cluster. `changefeed.total_ranges` shares the same polling interval as the `changefeed.lagging_ranges` metric, which is controlled by the `changefeed.lagging_ranges_polling_interval` cluster setting.
+
{{site.data.alerts.callout_success}}
-You can use the [`metrics_label`]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#using-changefeed-metrics-labels) option to track the `lagging_ranges` metric per changefeed.
+You can use the [`metrics_label`]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#using-changefeed-metrics-labels) option to track the `lagging_ranges` and `total_ranges` metric per changefeed.
{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v23.1/storage/free-up-disk-space.md b/src/current/_includes/v23.1/storage/free-up-disk-space.md
index c63b70b766e..e4a6b08a57a 100644
--- a/src/current/_includes/v23.1/storage/free-up-disk-space.md
+++ b/src/current/_includes/v23.1/storage/free-up-disk-space.md
@@ -1 +1 @@
-For instructions on how to free up disk space as quickly as possible after deleting data, see [How can I free up disk space quickly?]({% link {{ page.version.version }}/operational-faqs.md %}#how-can-i-free-up-disk-space-quickly)
+For instructions on how to free up disk space as quickly as possible after dropping a table, see [How can I free up disk space that was used by a dropped table?]({% link {{ page.version.version }}/operational-faqs.md %}#how-can-i-free-up-disk-space-when-dropping-a-table)
diff --git a/src/current/_includes/v23.2/cdc/lagging-ranges.md b/src/current/_includes/v23.2/cdc/lagging-ranges.md
index b784a93cbfb..35d269bc706 100644
--- a/src/current/_includes/v23.2/cdc/lagging-ranges.md
+++ b/src/current/_includes/v23.2/cdc/lagging-ranges.md
@@ -1,10 +1,12 @@
-{% include_cached new-in.html version="v23.2" %} Use the `changefeed.lagging_ranges` metric to track the number of ranges that are behind in a changefeed. This is calculated based on the [changefeed options]({% link {{ page.version.version }}/create-changefeed.md %}#options):
+{% include_cached new-in.html version="v23.2" %} Use the `changefeed.lagging_ranges` metric to track the number of [ranges]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-range) that are behind in a changefeed. This is calculated based on the [changefeed options]({% link {{ page.version.version }}/create-changefeed.md %}#options):
- `lagging_ranges_threshold` sets a duration from the present that determines the length of time a range is considered to be lagging behind, which will then track in the [`lagging_ranges`]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#lagging-ranges-metric) metric. Note that ranges undergoing an [initial scan]({% link {{ page.version.version }}/create-changefeed.md %}#initial-scan) for longer than the threshold duration are considered to be lagging. Starting a changefeed with an initial scan on a large table will likely increment the metric for each range in the table. As ranges complete the initial scan, the number of ranges lagging behind will decrease.
- **Default:** `3m`
- `lagging_ranges_polling_interval` sets the interval rate for when lagging ranges are checked and the `lagging_ranges` metric is updated. Polling adds latency to the `lagging_ranges` metric being updated. For example, if a range falls behind by 3 minutes, the metric may not update until an additional minute afterward.
- **Default:** `1m`
+{% include_cached new-in.html version="v23.2.13" %} Use the `changefeed.total_ranges` metric to monitor the number of ranges that are watched by [aggregator processors]({% link {{ page.version.version }}/how-does-an-enterprise-changefeed-work.md %}) participating in the changefeed job. If you're experiencing lagging ranges, `changefeed.total_ranges` may indicate that the number of ranges watched by aggregator processors in the job is unbalanced. You may want to try [pausing]({% link {{ page.version.version }}/pause-job.md %}) the changefeed and then [resuming]({% link {{ page.version.version }}/resume-job.md %}) it, so that the changefeed replans the work in the cluster. `changefeed.total_ranges` shares the same polling interval as the `changefeed.lagging_ranges` metric, which is controlled by the `lagging_ranges_polling_interval` option.
+
{{site.data.alerts.callout_success}}
-You can use the [`metrics_label`]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#using-changefeed-metrics-labels) option to track the `lagging_ranges` metric per changefeed.
+You can use the [`metrics_label`]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#using-changefeed-metrics-labels) option to track the `lagging_ranges` and `total_ranges` metric per changefeed.
{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v23.2/known-limitations/physical-cluster-replication.md b/src/current/_includes/v23.2/known-limitations/physical-cluster-replication.md
index 0a9fc624cf2..7d0d79a79b5 100644
--- a/src/current/_includes/v23.2/known-limitations/physical-cluster-replication.md
+++ b/src/current/_includes/v23.2/known-limitations/physical-cluster-replication.md
@@ -1,6 +1,6 @@
- Physical cluster replication is supported only on CockroachDB {{ site.data.products.core }} in new v23.2 clusters. Physical Cluster Replication cannot be enabled on clusters that have been upgraded from a previous version of CockroachDB.
- Read queries are not supported on the standby cluster before [cutover]({% link {{ page.version.version }}/cutover-replication.md %}).
-- The primary and standby cluster **cannot have different [region topology]({% link {{ page.version.version }}/topology-patterns.md %})**. For example, replicating a multi-region primary cluster to a single-region standby cluster is not supported. Mismatching regions between a multi-region primary and standby cluster is also not supported.
+- The primary and standby clusters must have the same [zone configurations]({% link {{ page.version.version }}/configure-replication-zones.md %}).
- Cutting back to the primary cluster after a cutover is a manual process. Refer to [Cut back to the primary cluster]({% link {{ page.version.version }}/cutover-replication.md %}#cut-back-to-the-primary-cluster). In addition, after cutover, to continue using physical cluster replication, you must configure it again.
- Before cutover to the standby, the standby cluster does not support running [backups]({% link {{ page.version.version }}/backup-and-restore-overview.md %}) or [changefeeds]({% link {{ page.version.version }}/change-data-capture-overview.md %}).
- After a cutover, there is no mechanism to stop applications from connecting to the original primary cluster. It is necessary to redirect application traffic manually, such as by using a network load balancer or adjusting DNS records.
diff --git a/src/current/_includes/v23.2/storage/free-up-disk-space.md b/src/current/_includes/v23.2/storage/free-up-disk-space.md
index c63b70b766e..e4a6b08a57a 100644
--- a/src/current/_includes/v23.2/storage/free-up-disk-space.md
+++ b/src/current/_includes/v23.2/storage/free-up-disk-space.md
@@ -1 +1 @@
-For instructions on how to free up disk space as quickly as possible after deleting data, see [How can I free up disk space quickly?]({% link {{ page.version.version }}/operational-faqs.md %}#how-can-i-free-up-disk-space-quickly)
+For instructions on how to free up disk space as quickly as possible after dropping a table, see [How can I free up disk space that was used by a dropped table?]({% link {{ page.version.version }}/operational-faqs.md %}#how-can-i-free-up-disk-space-when-dropping-a-table)
diff --git a/src/current/_includes/v23.2/ui/databases.md b/src/current/_includes/v23.2/ui/databases.md
index 94ae7fc0585..1b8ec510206 100644
--- a/src/current/_includes/v23.2/ui/databases.md
+++ b/src/current/_includes/v23.2/ui/databases.md
@@ -15,7 +15,7 @@ The following information is displayed for each database:
{% endif -%}
| Tables | The number of tables in the database. |
{% if page.cloud != true -%}
-| Regions/Nodes | The regions and nodes on which the tables in the database are located. This is not displayed on a single-node cluster. |
+| Regions/Nodes | The regions and nodes on which the tables in the database are located. This is not displayed on a single-node cluster. On a multi-node cluster, the display of this information is controlled by the cluster setting [`ui.database_locality_metadata.enabled`](#ui-database_locality_metadata-enabled-cluster-setting) (default `true`). |
| Index Recommendations | The number of index recommendations for the database. |
{%- else -%}
| Regions | The regions where the tables in the database are located. |
@@ -26,6 +26,11 @@ Click a **database name** to open the **Tables** page.
- Select **View: Tables** in the pulldown menu to display the [Tables view](#tables-view).
- Select **View: Grants** in the pulldown menu to display the [Grants view](#grants-view).
+{% if page.cloud != true -%}
+### `ui.database_locality_metadata.enabled` cluster setting
+{% include_cached new-in.html version="v23.2.17" %} Retrieving extended database and table region information can cause significant CPU load on large multi-node clusters with many ranges. You can prevent the retrieval of this data and the associated CPU load by disabling the [`ui.database_locality_metadata.enabled` cluster setting]({{ link_prefix }}cluster-settings.html#setting-ui-database-locality-metadata-enabled). When set to `false`, “No data” will be displayed for region data and replica counts. If you require this data, use the SQL statement [`SHOW RANGES FROM {DATABASE|TABLE}`]({{ link_prefix }}show-ranges.html) to compute this information.
+{% endif -%}
+
## Search and filter
By default, the Databases page shows all databases running on the cluster. By default, the [**Tables** view](#tables-view) and the [**Grants** view](#grants-view) show all tables in a selected database.
@@ -63,7 +68,7 @@ The following information is displayed for each table:
| Columns | The number of columns in the table. |
| Indexes | The number of indexes in the table. |
{% if page.cloud != true -%}
-| Regions | The regions and nodes on which the table data is stored. This is not displayed on a single-node cluster. |
+| Regions | The regions and nodes on which the table data is stored. This is not displayed on a single-node cluster. On a multi-node cluster, the display of this information is controlled by the cluster setting [`ui.database_locality_metadata.enabled`](#ui-database_locality_metadata-enabled-cluster-setting) (default `true`). |
{% else -%}
| Regions | The regions where the table data is stored.
{% endif -%}
@@ -84,14 +89,14 @@ The table details include:
{% if page.cloud != true %}
- **Size**: The approximate disk size of all replicas of this table on the cluster.
-- **Replicas**: The number of [replicas]({{ link_prefix }}architecture/replication-layer.html) of this table on the cluster.
+- **Replicas**: The number of [replicas]({{ link_prefix }}architecture/replication-layer.html) of this table on the cluster. On a multi-node cluster, the display of this information is controlled by the cluster setting [`ui.database_locality_metadata.enabled`](#ui-database_locality_metadata-enabled-cluster-setting) (default `true`).
- **Ranges**: The number of [ranges]({{ link_prefix }}architecture/glossary.html#architecture-range) in this table.
- **% of Live Data**: Percentage of total uncompressed logical data that has not been modified (updated or deleted).
- **Table Stats Last Updated**: The last time table statistics were created or updated.
{% endif %}
- **Auto Stats Collection**: Whether [automatic statistics collection]({{ link_prefix }}cost-based-optimizer.html#table-statistics) is enabled.
{% if page.cloud != true %}
-- **Regions/Nodes**: The regions and nodes on which the table data is stored. This is not displayed on a single-node cluster.
+- **Regions/Nodes**: The regions and nodes on which the table data is stored. This is not displayed on a single-node cluster. On a multi-node cluster, the display of this information is controlled by the cluster setting [`ui.database_locality_metadata.enabled`](#ui-database_locality_metadata-enabled-cluster-setting) (default `true`).
{% else %}
- **Regions**: The regions where the table data is stored.
{% endif %}
diff --git a/src/current/_includes/v24.1/cdc/lagging-ranges.md b/src/current/_includes/v24.1/cdc/lagging-ranges.md
index 8d0b5eb6c23..17a7e035916 100644
--- a/src/current/_includes/v24.1/cdc/lagging-ranges.md
+++ b/src/current/_includes/v24.1/cdc/lagging-ranges.md
@@ -1,10 +1,12 @@
-Use the `changefeed.lagging_ranges` metric to track the number of ranges that are behind in a changefeed. This is calculated based on the [changefeed options]({% link {{ page.version.version }}/create-changefeed.md %}#options):
+Use the `changefeed.lagging_ranges` metric to track the number of [ranges]({% link {{ page.version.version }}/architecture/overview.md %}#range) that are behind in a changefeed. This is calculated based on the [changefeed options]({% link {{ page.version.version }}/create-changefeed.md %}#options):
- `lagging_ranges_threshold` sets a duration from the present that determines the length of time a range is considered to be lagging behind, which will then track in the [`lagging_ranges`]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#lagging-ranges-metric) metric. Note that ranges undergoing an [initial scan]({% link {{ page.version.version }}/create-changefeed.md %}#initial-scan) for longer than the threshold duration are considered to be lagging. Starting a changefeed with an initial scan on a large table will likely increment the metric for each range in the table. As ranges complete the initial scan, the number of ranges lagging behind will decrease.
- **Default:** `3m`
- `lagging_ranges_polling_interval` sets the interval rate for when lagging ranges are checked and the `lagging_ranges` metric is updated. Polling adds latency to the `lagging_ranges` metric being updated. For example, if a range falls behind by 3 minutes, the metric may not update until an additional minute afterward.
- **Default:** `1m`
+{% include_cached new-in.html version="v24.1.6" %} Use the `changefeed.total_ranges` metric to monitor the number of ranges that are watched by [aggregator processors]({% link {{ page.version.version }}/how-does-an-enterprise-changefeed-work.md %}) participating in the changefeed job. If you're experiencing lagging ranges, `changefeed.total_ranges` may indicate that the number of ranges watched by aggregator processors in the job is unbalanced. You may want to try [pausing]({% link {{ page.version.version }}/pause-job.md %}) the changefeed and then [resuming]({% link {{ page.version.version }}/resume-job.md %}) it, so that the changefeed replans the work in the cluster. `changefeed.total_ranges` shares the same polling interval as the `changefeed.lagging_ranges` metric, which is controlled by the `lagging_ranges_polling_interval` option.
+
{{site.data.alerts.callout_success}}
-You can use the [`metrics_label`]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#using-changefeed-metrics-labels) option to track the `lagging_ranges` metric per changefeed.
+You can use the [`metrics_label`]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#using-changefeed-metrics-labels) option to track the `lagging_ranges` and `total_ranges` metric per changefeed.
{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v24.1/known-limitations/physical-cluster-replication.md b/src/current/_includes/v24.1/known-limitations/physical-cluster-replication.md
index f914c7eced2..a6c7edf2f32 100644
--- a/src/current/_includes/v24.1/known-limitations/physical-cluster-replication.md
+++ b/src/current/_includes/v24.1/known-limitations/physical-cluster-replication.md
@@ -1,6 +1,6 @@
- Physical cluster replication is supported only on CockroachDB {{ site.data.products.core }} in new clusters on v23.2 or above. Physical Cluster Replication cannot be enabled on clusters that have been upgraded from a previous version of CockroachDB.
- Read queries are not supported on the standby cluster before [cutover]({% link {{ page.version.version }}/cutover-replication.md %}).
-- The primary and standby cluster **cannot have different [region topology]({% link {{ page.version.version }}/topology-patterns.md %})**. For example, replicating a multi-region primary cluster to a single-region standby cluster is not supported. Mismatching regions between a multi-region primary and standby cluster is also not supported.
+- The primary and standby clusters must have the same [zone configurations]({% link {{ page.version.version }}/configure-replication-zones.md %}).
- Cutting back to the primary cluster after a cutover is a manual process. Refer to [Cut back to the primary cluster]({% link {{ page.version.version }}/cutover-replication.md %}#cut-back-to-the-primary-cluster). In addition, after cutover, to continue using physical cluster replication, you must configure it again.
- Before cutover to the standby, the standby cluster does not support running [backups]({% link {{ page.version.version }}/backup-and-restore-overview.md %}) or [changefeeds]({% link {{ page.version.version }}/change-data-capture-overview.md %}).
- Large data imports, such as those produced by [`RESTORE`]({% link {{ page.version.version }}/restore.md %}) or [`IMPORT INTO`]({% link {{ page.version.version }}/import-into.md %}), may dramatically increase [replication lag]({% link {{ page.version.version }}/physical-cluster-replication-technical-overview.md %}#cutover-and-promotion-process).
diff --git a/src/current/_includes/v24.1/storage/free-up-disk-space.md b/src/current/_includes/v24.1/storage/free-up-disk-space.md
index c63b70b766e..e4a6b08a57a 100644
--- a/src/current/_includes/v24.1/storage/free-up-disk-space.md
+++ b/src/current/_includes/v24.1/storage/free-up-disk-space.md
@@ -1 +1 @@
-For instructions on how to free up disk space as quickly as possible after deleting data, see [How can I free up disk space quickly?]({% link {{ page.version.version }}/operational-faqs.md %}#how-can-i-free-up-disk-space-quickly)
+For instructions on how to free up disk space as quickly as possible after dropping a table, see [How can I free up disk space that was used by a dropped table?]({% link {{ page.version.version }}/operational-faqs.md %}#how-can-i-free-up-disk-space-when-dropping-a-table)
diff --git a/src/current/_includes/v24.1/ui/databases.md b/src/current/_includes/v24.1/ui/databases.md
index 94ae7fc0585..529b2b11efb 100644
--- a/src/current/_includes/v24.1/ui/databases.md
+++ b/src/current/_includes/v24.1/ui/databases.md
@@ -15,7 +15,7 @@ The following information is displayed for each database:
{% endif -%}
| Tables | The number of tables in the database. |
{% if page.cloud != true -%}
-| Regions/Nodes | The regions and nodes on which the tables in the database are located. This is not displayed on a single-node cluster. |
+| Regions/Nodes | The regions and nodes on which the tables in the database are located. This is not displayed on a single-node cluster. On a multi-node cluster, the display of this information is controlled by the cluster setting [`ui.database_locality_metadata.enabled`](#ui-database_locality_metadata-enabled-cluster-setting) (default `true`). |
| Index Recommendations | The number of index recommendations for the database. |
{%- else -%}
| Regions | The regions where the tables in the database are located. |
@@ -26,6 +26,11 @@ Click a **database name** to open the **Tables** page.
- Select **View: Tables** in the pulldown menu to display the [Tables view](#tables-view).
- Select **View: Grants** in the pulldown menu to display the [Grants view](#grants-view).
+{% if page.cloud != true -%}
+### `ui.database_locality_metadata.enabled` cluster setting
+{% include_cached new-in.html version="v24.1.8" %} Retrieving extended database and table region information can cause significant CPU load on large multi-node clusters with many ranges. You can prevent the retrieval of this data and the associated CPU load by disabling the [`ui.database_locality_metadata.enabled` cluster setting]({{ link_prefix }}cluster-settings.html#setting-ui-database-locality-metadata-enabled). When set to `false`, “No data” will be displayed for region data and replica counts. If you require this data, use the SQL statement [`SHOW RANGES FROM {DATABASE|TABLE}`]({{ link_prefix }}show-ranges.html) to compute this information.
+{% endif -%}
+
## Search and filter
By default, the Databases page shows all databases running on the cluster. By default, the [**Tables** view](#tables-view) and the [**Grants** view](#grants-view) show all tables in a selected database.
@@ -63,7 +68,7 @@ The following information is displayed for each table:
| Columns | The number of columns in the table. |
| Indexes | The number of indexes in the table. |
{% if page.cloud != true -%}
-| Regions | The regions and nodes on which the table data is stored. This is not displayed on a single-node cluster. |
+| Regions | The regions and nodes on which the table data is stored. This is not displayed on a single-node cluster. On a multi-node cluster, the display of this information is controlled by the cluster setting [`ui.database_locality_metadata.enabled`](#ui-database_locality_metadata-enabled-cluster-setting) (default `true`). |
{% else -%}
| Regions | The regions where the table data is stored.
{% endif -%}
@@ -84,14 +89,14 @@ The table details include:
{% if page.cloud != true %}
- **Size**: The approximate disk size of all replicas of this table on the cluster.
-- **Replicas**: The number of [replicas]({{ link_prefix }}architecture/replication-layer.html) of this table on the cluster.
+- **Replicas**: The number of [replicas]({{ link_prefix }}architecture/replication-layer.html) of this table on the cluster. On a multi-node cluster, the display of this information is controlled by the cluster setting [`ui.database_locality_metadata.enabled`](#ui-database_locality_metadata-enabled-cluster-setting) (default `true`).
- **Ranges**: The number of [ranges]({{ link_prefix }}architecture/glossary.html#architecture-range) in this table.
- **% of Live Data**: Percentage of total uncompressed logical data that has not been modified (updated or deleted).
- **Table Stats Last Updated**: The last time table statistics were created or updated.
{% endif %}
- **Auto Stats Collection**: Whether [automatic statistics collection]({{ link_prefix }}cost-based-optimizer.html#table-statistics) is enabled.
{% if page.cloud != true %}
-- **Regions/Nodes**: The regions and nodes on which the table data is stored. This is not displayed on a single-node cluster.
+- **Regions/Nodes**: The regions and nodes on which the table data is stored. This is not displayed on a single-node cluster. On a multi-node cluster, the display of this information is controlled by the cluster setting [`ui.database_locality_metadata.enabled`](#ui-database_locality_metadata-enabled-cluster-setting) (default `true`).
{% else %}
- **Regions**: The regions where the table data is stored.
{% endif %}
diff --git a/src/current/_includes/v24.2/cdc/lagging-ranges.md b/src/current/_includes/v24.2/cdc/lagging-ranges.md
index 8d0b5eb6c23..eb22275849a 100644
--- a/src/current/_includes/v24.2/cdc/lagging-ranges.md
+++ b/src/current/_includes/v24.2/cdc/lagging-ranges.md
@@ -1,10 +1,12 @@
-Use the `changefeed.lagging_ranges` metric to track the number of ranges that are behind in a changefeed. This is calculated based on the [changefeed options]({% link {{ page.version.version }}/create-changefeed.md %}#options):
+Use the `changefeed.lagging_ranges` metric to track the number of [ranges]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-range) that are behind in a changefeed. This is calculated based on the [changefeed options]({% link {{ page.version.version }}/create-changefeed.md %}#options):
- `lagging_ranges_threshold` sets a duration from the present that determines the length of time a range is considered to be lagging behind, which will then track in the [`lagging_ranges`]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#lagging-ranges-metric) metric. Note that ranges undergoing an [initial scan]({% link {{ page.version.version }}/create-changefeed.md %}#initial-scan) for longer than the threshold duration are considered to be lagging. Starting a changefeed with an initial scan on a large table will likely increment the metric for each range in the table. As ranges complete the initial scan, the number of ranges lagging behind will decrease.
- **Default:** `3m`
- `lagging_ranges_polling_interval` sets the interval rate for when lagging ranges are checked and the `lagging_ranges` metric is updated. Polling adds latency to the `lagging_ranges` metric being updated. For example, if a range falls behind by 3 minutes, the metric may not update until an additional minute afterward.
- **Default:** `1m`
+{% include_cached new-in.html version="v24.2.4" %} Use the `changefeed.total_ranges` metric to monitor the number of ranges that are watched by [aggregator processors]({% link {{ page.version.version }}/how-does-an-enterprise-changefeed-work.md %}) participating in the changefeed job. If you're experiencing lagging ranges, `changefeed.total_ranges` may indicate that the number of ranges watched by aggregator processors in the job is unbalanced. You may want to try [pausing]({% link {{ page.version.version }}/pause-job.md %}) the changefeed and then [resuming]({% link {{ page.version.version }}/resume-job.md %}) it, so that the changefeed replans the work in the cluster. `changefeed.total_ranges` shares the same polling interval as the `changefeed.lagging_ranges` metric, which is controlled by the `lagging_ranges_polling_interval` option.
+
{{site.data.alerts.callout_success}}
-You can use the [`metrics_label`]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#using-changefeed-metrics-labels) option to track the `lagging_ranges` metric per changefeed.
+You can use the [`metrics_label`]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#using-changefeed-metrics-labels) option to track the `lagging_ranges` and `total_ranges` metric per changefeed.
{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v24.2/known-limitations/pcr-scheduled-changefeeds.md b/src/current/_includes/v24.2/known-limitations/pcr-scheduled-changefeeds.md
deleted file mode 100644
index 31fbf83187c..00000000000
--- a/src/current/_includes/v24.2/known-limitations/pcr-scheduled-changefeeds.md
+++ /dev/null
@@ -1 +0,0 @@
-After the [cutover process]({% link {{ page.version.version }}/cutover-replication.md %}) for [physical cluster replication]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}), [scheduled changefeeds]({% link {{ page.version.version }}/create-schedule-for-changefeed.md %}) will continue on the promoted cluster. You will need to manage [pausing]({% link {{ page.version.version }}/pause-schedules.md %}) or [canceling]({% link {{ page.version.version }}/drop-schedules.md %}) the schedule on the promoted standby cluster to avoid two clusters running the same changefeed to one sink. [#123776](https://github.com/cockroachdb/cockroach/issues/123776)
\ No newline at end of file
diff --git a/src/current/_includes/v24.2/known-limitations/physical-cluster-replication.md b/src/current/_includes/v24.2/known-limitations/physical-cluster-replication.md
index f914c7eced2..a6c7edf2f32 100644
--- a/src/current/_includes/v24.2/known-limitations/physical-cluster-replication.md
+++ b/src/current/_includes/v24.2/known-limitations/physical-cluster-replication.md
@@ -1,6 +1,6 @@
- Physical cluster replication is supported only on CockroachDB {{ site.data.products.core }} in new clusters on v23.2 or above. Physical Cluster Replication cannot be enabled on clusters that have been upgraded from a previous version of CockroachDB.
- Read queries are not supported on the standby cluster before [cutover]({% link {{ page.version.version }}/cutover-replication.md %}).
-- The primary and standby cluster **cannot have different [region topology]({% link {{ page.version.version }}/topology-patterns.md %})**. For example, replicating a multi-region primary cluster to a single-region standby cluster is not supported. Mismatching regions between a multi-region primary and standby cluster is also not supported.
+- The primary and standby clusters must have the same [zone configurations]({% link {{ page.version.version }}/configure-replication-zones.md %}).
- Cutting back to the primary cluster after a cutover is a manual process. Refer to [Cut back to the primary cluster]({% link {{ page.version.version }}/cutover-replication.md %}#cut-back-to-the-primary-cluster). In addition, after cutover, to continue using physical cluster replication, you must configure it again.
- Before cutover to the standby, the standby cluster does not support running [backups]({% link {{ page.version.version }}/backup-and-restore-overview.md %}) or [changefeeds]({% link {{ page.version.version }}/change-data-capture-overview.md %}).
- Large data imports, such as those produced by [`RESTORE`]({% link {{ page.version.version }}/restore.md %}) or [`IMPORT INTO`]({% link {{ page.version.version }}/import-into.md %}), may dramatically increase [replication lag]({% link {{ page.version.version }}/physical-cluster-replication-technical-overview.md %}#cutover-and-promotion-process).
diff --git a/src/current/_includes/v24.2/storage/free-up-disk-space.md b/src/current/_includes/v24.2/storage/free-up-disk-space.md
index c63b70b766e..e4a6b08a57a 100644
--- a/src/current/_includes/v24.2/storage/free-up-disk-space.md
+++ b/src/current/_includes/v24.2/storage/free-up-disk-space.md
@@ -1 +1 @@
-For instructions on how to free up disk space as quickly as possible after deleting data, see [How can I free up disk space quickly?]({% link {{ page.version.version }}/operational-faqs.md %}#how-can-i-free-up-disk-space-quickly)
+For instructions on how to free up disk space as quickly as possible after dropping a table, see [How can I free up disk space that was used by a dropped table?]({% link {{ page.version.version }}/operational-faqs.md %}#how-can-i-free-up-disk-space-when-dropping-a-table)
diff --git a/src/current/_includes/v24.2/ui/databases.md b/src/current/_includes/v24.2/ui/databases.md
index 94ae7fc0585..0465cb16860 100644
--- a/src/current/_includes/v24.2/ui/databases.md
+++ b/src/current/_includes/v24.2/ui/databases.md
@@ -15,7 +15,7 @@ The following information is displayed for each database:
{% endif -%}
| Tables | The number of tables in the database. |
{% if page.cloud != true -%}
-| Regions/Nodes | The regions and nodes on which the tables in the database are located. This is not displayed on a single-node cluster. |
+| Regions/Nodes | The regions and nodes on which the tables in the database are located. This is not displayed on a single-node cluster. On a multi-node cluster, the display of this information is controlled by the cluster setting [`ui.database_locality_metadata.enabled`](#ui-database_locality_metadata-enabled-cluster-setting) (default `true`). |
| Index Recommendations | The number of index recommendations for the database. |
{%- else -%}
| Regions | The regions where the tables in the database are located. |
@@ -26,6 +26,11 @@ Click a **database name** to open the **Tables** page.
- Select **View: Tables** in the pulldown menu to display the [Tables view](#tables-view).
- Select **View: Grants** in the pulldown menu to display the [Grants view](#grants-view).
+{% if page.cloud != true -%}
+### `ui.database_locality_metadata.enabled` cluster setting
+{% include_cached new-in.html version="v24.2.6" %} Retrieving extended database and table region information can cause significant CPU load on large multi-node clusters with many ranges. You can prevent the retrieval of this data and the associated CPU load by disabling the [`ui.database_locality_metadata.enabled` cluster setting]({{ link_prefix }}cluster-settings.html#setting-ui-database-locality-metadata-enabled). When set to `false`, “No data” will be displayed for region data and replica counts. If you require this data, use the SQL statement [`SHOW RANGES FROM {DATABASE|TABLE}`]({{ link_prefix }}show-ranges.html) to compute this information.
+{% endif -%}
+
## Search and filter
By default, the Databases page shows all databases running on the cluster. By default, the [**Tables** view](#tables-view) and the [**Grants** view](#grants-view) show all tables in a selected database.
@@ -63,7 +68,7 @@ The following information is displayed for each table:
| Columns | The number of columns in the table. |
| Indexes | The number of indexes in the table. |
{% if page.cloud != true -%}
-| Regions | The regions and nodes on which the table data is stored. This is not displayed on a single-node cluster. |
+| Regions | The regions and nodes on which the table data is stored. This is not displayed on a single-node cluster. On a multi-node cluster, the display of this information is controlled by the cluster setting [`ui.database_locality_metadata.enabled`](#ui-database_locality_metadata-enabled-cluster-setting) (default `true`). |
{% else -%}
| Regions | The regions where the table data is stored.
{% endif -%}
@@ -84,14 +89,14 @@ The table details include:
{% if page.cloud != true %}
- **Size**: The approximate disk size of all replicas of this table on the cluster.
-- **Replicas**: The number of [replicas]({{ link_prefix }}architecture/replication-layer.html) of this table on the cluster.
+- **Replicas**: The number of [replicas]({{ link_prefix }}architecture/replication-layer.html) of this table on the cluster. On a multi-node cluster, the display of this information is controlled by the cluster setting [`ui.database_locality_metadata.enabled`](#ui-database_locality_metadata-enabled-cluster-setting) (default `true`).
- **Ranges**: The number of [ranges]({{ link_prefix }}architecture/glossary.html#architecture-range) in this table.
- **% of Live Data**: Percentage of total uncompressed logical data that has not been modified (updated or deleted).
- **Table Stats Last Updated**: The last time table statistics were created or updated.
{% endif %}
- **Auto Stats Collection**: Whether [automatic statistics collection]({{ link_prefix }}cost-based-optimizer.html#table-statistics) is enabled.
{% if page.cloud != true %}
-- **Regions/Nodes**: The regions and nodes on which the table data is stored. This is not displayed on a single-node cluster.
+- **Regions/Nodes**: The regions and nodes on which the table data is stored. This is not displayed on a single-node cluster. On a multi-node cluster, the display of this information is controlled by the cluster setting [`ui.database_locality_metadata.enabled`](#ui-database_locality_metadata-enabled-cluster-setting) (default `true`).
{% else %}
- **Regions**: The regions where the table data is stored.
{% endif %}
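As a companion to the `ui.database_locality_metadata.enabled` guidance added in the `ui/databases.md` hunks above, a sketch of disabling the setting and recomputing the data with SQL; the `movr` database and `movr.rides` table are placeholders:

```sql
-- Skip locality metadata collection to reduce CPU load on large multi-node clusters.
SET CLUSTER SETTING ui.database_locality_metadata.enabled = false;

-- Recompute region and replica details on demand instead.
SHOW RANGES FROM DATABASE movr;
SHOW RANGES FROM TABLE movr.rides;
```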
diff --git a/src/current/_includes/v24.3/backups/backup-to-deprec.md b/src/current/_includes/v24.3/backups/backup-to-deprec.md
deleted file mode 100644
index 77a2b6cae18..00000000000
--- a/src/current/_includes/v24.3/backups/backup-to-deprec.md
+++ /dev/null
@@ -1,7 +0,0 @@
-{{site.data.alerts.callout_danger}}
-The `BACKUP ... TO` and `RESTORE ... FROM` syntax is **deprecated** as of v22.1 and will be removed in a future release.
-
-We recommend using the `BACKUP ... INTO {collectionURI}` syntax, which creates or adds to a [backup collection]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}#backup-collections) in your storage location. For restoring backups, we recommend using `RESTORE FROM {backup} IN {collectionURI}` with `{backup}` being [`LATEST`]({% link {{ page.version.version }}/restore.md %}#restore-the-most-recent-full-or-incremental-backup) or a specific [subdirectory]({% link {{ page.version.version }}/restore.md %}#subdir-param).
-
-For guidance on the syntax for backups and restores, see the [`BACKUP`]({% link {{ page.version.version }}/backup.md %}#examples) and [`RESTORE`]({% link {{ page.version.version }}/restore.md %}#examples) examples.
-{{site.data.alerts.end}}
diff --git a/src/current/_includes/v24.3/backups/old-syntax-removed.md b/src/current/_includes/v24.3/backups/old-syntax-removed.md
new file mode 100644
index 00000000000..7052fe0d3af
--- /dev/null
+++ b/src/current/_includes/v24.3/backups/old-syntax-removed.md
@@ -0,0 +1,5 @@
+{{site.data.alerts.callout_danger}}
+The `BACKUP ... TO` and `RESTORE ... FROM {storage_uri}` syntax has been removed from CockroachDB v24.3 and later.
+
+For details on the syntax to run `BACKUP` and `RESTORE`, refer to the {% if page.name == "backup.md" %} [backup](#examples) {% else %} [backup]({% link {{ page.version.version }}/backup.md %}#examples) {% endif %} and {% if page.name == "restore.md" %} [restore](#examples) {% else %} [restore]({% link {{ page.version.version }}/restore.md %}#examples) {% endif %} examples.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v24.3/backups/recommend-backups-for-upgrade.md b/src/current/_includes/v24.3/backups/recommend-backups-for-upgrade.md
index 375f4489914..2ef075abf9c 100644
--- a/src/current/_includes/v24.3/backups/recommend-backups-for-upgrade.md
+++ b/src/current/_includes/v24.3/backups/recommend-backups-for-upgrade.md
@@ -3,7 +3,4 @@
When upgrading to a major release, you can optionally [take a self-managed backup]({% link cockroachcloud/take-and-restore-self-managed-backups.md %}) of your cluster to your own cloud storage, as an extra layer of protection in case the upgrade leads to issues.
{% else %}
-CockroachDB is designed with high fault tolerance. However, taking regular backups of your data is an operational best practice for [disaster recovery]({% link {{ page.version.version }}/disaster-recovery-planning.md %}) planning.
-
-We recommend that you enable [managed backups]({% link cockroachcloud/managed-backups.md %}#managed-backup-settings) and confirm that the cluster is backed up before beginning a major-version upgrade. This provides an extra layer of protection in case the upgrade leads to issues.
-{% endif %}
+CockroachDB is designed with high fault tolerance. However, taking regular backups of your data is an operational best practice for [disaster recovery]({% link {{ page.version.version }}/disaster-recovery-planning.md %}) planning.{% endif %}
diff --git a/src/current/_includes/v24.3/cdc/lagging-ranges.md b/src/current/_includes/v24.3/cdc/lagging-ranges.md
index 8d0b5eb6c23..45180baa57f 100644
--- a/src/current/_includes/v24.3/cdc/lagging-ranges.md
+++ b/src/current/_includes/v24.3/cdc/lagging-ranges.md
@@ -1,10 +1,12 @@
-Use the `changefeed.lagging_ranges` metric to track the number of ranges that are behind in a changefeed. This is calculated based on the [changefeed options]({% link {{ page.version.version }}/create-changefeed.md %}#options):
+Use the `changefeed.lagging_ranges` metric to track the number of [ranges]({% link {{ page.version.version }}/architecture/overview.md %}#range) that are behind in a changefeed. This is calculated based on the [changefeed options]({% link {{ page.version.version }}/create-changefeed.md %}#options):
- `lagging_ranges_threshold` sets a duration from the present that determines the length of time a range is considered to be lagging behind, which will then track in the [`lagging_ranges`]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#lagging-ranges-metric) metric. Note that ranges undergoing an [initial scan]({% link {{ page.version.version }}/create-changefeed.md %}#initial-scan) for longer than the threshold duration are considered to be lagging. Starting a changefeed with an initial scan on a large table will likely increment the metric for each range in the table. As ranges complete the initial scan, the number of ranges lagging behind will decrease.
- **Default:** `3m`
- `lagging_ranges_polling_interval` sets the interval rate for when lagging ranges are checked and the `lagging_ranges` metric is updated. Polling adds latency to the `lagging_ranges` metric being updated. For example, if a range falls behind by 3 minutes, the metric may not update until an additional minute afterward.
- **Default:** `1m`
+{% include_cached new-in.html version="v24.3" %} Use the `changefeed.total_ranges` metric to monitor the number of ranges that are watched by [aggregator processors]({% link {{ page.version.version }}/how-does-an-enterprise-changefeed-work.md %}) participating in the changefeed job. If you're experiencing lagging ranges, `changefeed.total_ranges` may indicate that the number of ranges watched by aggregator processors in the job is unbalanced. You may want to try [pausing]({% link {{ page.version.version }}/pause-job.md %}) the changefeed and then [resuming]({% link {{ page.version.version }}/resume-job.md %}) it, so that the changefeed replans the work in the cluster. `changefeed.total_ranges` shares the same polling interval as the `changefeed.lagging_ranges` metric, which is controlled by the `lagging_ranges_polling_interval` option.
+
{{site.data.alerts.callout_success}}
-You can use the [`metrics_label`]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#using-changefeed-metrics-labels) option to track the `lagging_ranges` metric per changefeed.
+You can use the [`metrics_label`]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#using-changefeed-metrics-labels) option to track the `lagging_ranges` and `total_ranges` metric per changefeed.
{{site.data.alerts.end}}
\ No newline at end of file
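A sketch of creating a changefeed with the lagging-ranges options described in the `cdc/lagging-ranges.md` hunks above; the target table, Kafka URI, and durations are placeholders:

```sql
-- Durations below are illustrative; the documented defaults are 3m and 1m.
CREATE CHANGEFEED FOR TABLE movr.rides
  INTO 'kafka://localhost:9092'
  WITH lagging_ranges_threshold = '5m',
       lagging_ranges_polling_interval = '30s';
```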
diff --git a/src/current/_includes/v24.3/child-metrics-table.md b/src/current/_includes/v24.3/child-metrics-table.md
index 05d57c55453..2b1a16f092a 100644
--- a/src/current/_includes/v24.3/child-metrics-table.md
+++ b/src/current/_includes/v24.3/child-metrics-table.md
@@ -7,7 +7,7 @@ Following is a list of the metrics that have child metrics:
CockroachDB Metric Name
-Description When Aggregated
+{% if feature == "ldr" %}Description{% else %}Description When Aggregated{% endif %}
Type
Unit
diff --git a/src/current/_includes/v24.3/known-limitations/compression-level-kafka-config.md b/src/current/_includes/v24.3/known-limitations/compression-level-kafka-config.md
new file mode 100644
index 00000000000..0635319c5af
--- /dev/null
+++ b/src/current/_includes/v24.3/known-limitations/compression-level-kafka-config.md
@@ -0,0 +1 @@
+Changefeeds created in v24.3 of CockroachDB that emit to [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka), or changefeeds created in earlier versions with the `changefeed.new_kafka_sink.enabled` cluster setting enabled, do not support negative compression level values for `GZIP` compression in the [`kafka_sink_config = {... "CompressionLevel" = ...}`]({% link {{ page.version.version }}/changefeed-sinks.md %}#compressionlevel) option field. [#136492](https://github.com/cockroachdb/cockroach/issues/136492)
\ No newline at end of file
diff --git a/src/current/_includes/v24.3/known-limitations/pcr-scheduled-changefeeds.md b/src/current/_includes/v24.3/known-limitations/pcr-scheduled-changefeeds.md
deleted file mode 100644
index 3d6b8aa8628..00000000000
--- a/src/current/_includes/v24.3/known-limitations/pcr-scheduled-changefeeds.md
+++ /dev/null
@@ -1 +0,0 @@
-After the [failover process]({% link {{ page.version.version }}/failover-replication.md %}) for [physical cluster replication]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}), [scheduled changefeeds]({% link {{ page.version.version }}/create-schedule-for-changefeed.md %}) will continue on the promoted cluster. You will need to manage [pausing]({% link {{ page.version.version }}/pause-schedules.md %}) or [canceling]({% link {{ page.version.version }}/drop-schedules.md %}) the schedule on the promoted standby cluster to avoid two clusters running the same changefeed to one sink. [#123776](https://github.com/cockroachdb/cockroach/issues/123776)
\ No newline at end of file
diff --git a/src/current/_includes/v24.3/known-limitations/physical-cluster-replication.md b/src/current/_includes/v24.3/known-limitations/physical-cluster-replication.md
index 1371e178a25..abd0fc2b10b 100644
--- a/src/current/_includes/v24.3/known-limitations/physical-cluster-replication.md
+++ b/src/current/_includes/v24.3/known-limitations/physical-cluster-replication.md
@@ -1,6 +1,5 @@
- Physical cluster replication is supported only on CockroachDB {{ site.data.products.core }} in new clusters on v23.2 or above. Physical Cluster Replication cannot be enabled on clusters that have been upgraded from a previous version of CockroachDB.
-- Read queries are not supported on the standby cluster before [failover]({% link {{ page.version.version }}/failover-replication.md %}).
-- The primary and standby cluster **cannot have different [region topology]({% link {{ page.version.version }}/topology-patterns.md %})**. For example, replicating a multi-region primary cluster to a single-region standby cluster is not supported. Mismatching regions between a multi-region primary and standby cluster is also not supported.
+- The primary and standby clusters must have the same [zone configurations]({% link {{ page.version.version }}/configure-replication-zones.md %}).
- Failing back to the primary cluster after a failover is a manual process. Refer to [Fail back to the primary cluster]({% link {{ page.version.version }}/failover-replication.md %}#fail-back-to-the-primary-cluster). In addition, after failover, to continue using physical cluster replication, you must configure it again.
- Before failover to the standby, the standby cluster does not support running [backups]({% link {{ page.version.version }}/backup-and-restore-overview.md %}) or [changefeeds]({% link {{ page.version.version }}/change-data-capture-overview.md %}).
- Large data imports, such as those produced by [`RESTORE`]({% link {{ page.version.version }}/restore.md %}) or [`IMPORT INTO`]({% link {{ page.version.version }}/import-into.md %}), may dramatically increase [replication lag]({% link {{ page.version.version }}/physical-cluster-replication-technical-overview.md %}#failover-and-promotion-process).
diff --git a/src/current/_includes/v24.3/ldr/immediate-description.md b/src/current/_includes/v24.3/ldr/immediate-description.md
new file mode 100644
index 00000000000..eb87361a009
--- /dev/null
+++ b/src/current/_includes/v24.3/ldr/immediate-description.md
@@ -0,0 +1 @@
+Attempts to replicate the changed row directly into the destination table, without re-running constraint validations. It does not support writing into tables with [foreign key]({% link {{ page.version.version }}/foreign-key.md %}) constraints.
\ No newline at end of file
diff --git a/src/current/_includes/v24.3/ldr/validated-description.md b/src/current/_includes/v24.3/ldr/validated-description.md
new file mode 100644
index 00000000000..4b7bb9a8b18
--- /dev/null
+++ b/src/current/_includes/v24.3/ldr/validated-description.md
@@ -0,0 +1 @@
+Attempts to apply the write in a similar way to a user-run query, which would re-run all constraint validations relevant to the destination table(s). If the change violates foreign key dependencies, unique constraints, or other constraints, the row will be put in the [dead letter queue (DLQ)]({% link {{ page.version.version }}/manage-logical-data-replication.md %}#dead-letter-queue-dlq) instead. Like the [SQL layer]({% link {{ page.version.version }}/architecture/sql-layer.md %}), `validated` mode does not recognize deletion tombstones. As a result, an update to the same key from cluster A will successfully apply on cluster B, even if that key was deleted on cluster B before the LDR job streamed the cluster A update to the key.
\ No newline at end of file
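As a rough illustration of where these modes are chosen, a sketch assuming the v24.3 `CREATE LOGICAL REPLICATION STREAM ... WITH mode = ...` syntax; the table and external connection names are placeholders:

~~~ sql
-- Hypothetical names; mode can be set to immediate or validated.
CREATE LOGICAL REPLICATION STREAM
  FROM TABLE movr.public.rides ON 'external://source'
  INTO TABLE movr.public.rides
  WITH mode = validated;
~~~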
diff --git a/src/current/_includes/v24.3/metric-names.md b/src/current/_includes/v24.3/metric-names.md
index 9bbe10b49d5..c72864fd149 100644
--- a/src/current/_includes/v24.3/metric-names.md
+++ b/src/current/_includes/v24.3/metric-names.md
@@ -1,331 +1,29 @@
-Name | Description
------|------------
-`addsstable.applications` | Number of SSTable ingestions applied (i.e., applied by Replicas)
-`addsstable.copies` | Number of SSTable ingestions that required copying files during application
-`addsstable.proposals` | Number of SSTable ingestions proposed (i.e., sent to Raft by lease holders)
-`build.timestamp` | Build information
-`capacity.available` | Available storage capacity
-`capacity.reserved` | Capacity reserved for snapshots
-`capacity.used` | Used storage capacity
-`capacity` | Total storage capacity
-`changefeed.aggregator_progress` | The earliest timestamp up to which any [aggregator]({% link {{ page.version.version }}/how-does-an-enterprise-changefeed-work.md %}) is guaranteed to have emitted all values for which it is responsible. **Note:** This metric may regress when a changefeed restarts due to a transient error. Consider tracking the `changefeed.checkpoint_progress` metric, which will not regress.
-`changefeed.checkpoint_progress` | The earliest timestamp of any changefeed's persisted checkpoint (values prior to this timestamp will never need to be re-emitted).
-`changefeed.failures` | Total number of [changefeed jobs]({% link {{ page.version.version }}/show-jobs.md %}#show-changefeed-jobs) which have failed.
-`changefeed.lagging_ranges` | Number of ranges which are behind in a changefeed. This is calculated based on the changefeed options: [`lagging_ranges_threshold`]({% link {{ page.version.version }}/create-changefeed.md %}#lagging-ranges-threshold), which is the amount of time that a range checkpoint needs to be in the past to be considered lagging. [`lagging_ranges_polling_interval`]({% link {{ page.version.version }}/create-changefeed.md %}#lagging-ranges-polling-interval), which is the frequency at which lagging ranges are polled and the metric is updated.
-`changefeed.running` | Number of currently running changefeeds, including sinkless.
-`clock-offset.meannanos` | Mean clock offset with other nodes in nanoseconds
-`clock-offset.stddevnanos` | Std dev clock offset with other nodes in nanoseconds
-`cluster.preserve-downgrade-option.last-updated` | Unix timestamp of last updated time for cluster.preserve_downgrade_option
-`compactor.compactingnanos` | Number of nanoseconds spent compacting ranges
-`compactor.compactions.failure` | Number of failed compaction requests sent to the storage engine
-`compactor.compactions.success` | Number of successful compaction requests sent to the storage engine
-`compactor.suggestionbytes.compacted` | Number of logical bytes compacted from suggested compactions
-`compactor.suggestionbytes.queued` | Number of logical bytes in suggested compactions in the queue
-`compactor.suggestionbytes.skipped` | Number of logical bytes in suggested compactions which were not compacted
-`distsender.batches.partial` | Number of partial batches processed
-`distsender.batches` | Number of batches processed
-`distsender.errors.notleaseholder` | Number of NotLeaseHolderErrors encountered
-`distsender.rpc.sent.local` | Number of local RPCs sent
-`distsender.rpc.sent.nextreplicaerror` | Number of RPCs sent due to per-replica errors
-`distsender.rpc.sent` | Number of RPCs sent
-`exec.error` | Number of batch KV requests that failed to execute on this node
-`exec.latency` | Latency in nanoseconds of batch KV requests executed on this node
-`exec.success` | Number of batch KV requests executed successfully on this node
-`gcbytesage` | Cumulative age of non-live data in seconds
-`gossip.bytes.received` | Number of received gossip bytes
-`gossip.bytes.sent` | Number of sent gossip bytes
-`gossip.connections.incoming` | Number of active incoming gossip connections
-`gossip.connections.outgoing` | Number of active outgoing gossip connections
-`gossip.connections.refused` | Number of refused incoming gossip connections
-`gossip.infos.received` | Number of received gossip Info objects
-`gossip.infos.sent` | Number of sent gossip Info objects
-`intentage` | Cumulative age of intents in seconds
-`intentbytes` | Number of bytes in intent KV pairs
-`intentcount` | Count of intent keys
-`jobs.changefeed.expired_pts_records` | Number of expired [protected timestamp]({% link {{ page.version.version }}/architecture/storage-layer.md %}#protected-timestamps) records owned by [changefeed jobs]({% link {{ page.version.version }}/show-jobs.md %}#show-changefeed-jobs).
-`jobs.{job_type}.currently_paused` | Number of `{job_type}` [jobs]({% link {{ page.version.version }}/show-jobs.md %}) currently considered paused. See the [`/_status/vars`]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#prometheus-endpoint) endpoint for all job types.
-`jobs.{job_type}.protected_age_sec` | The age of the oldest [protected timestamp]({% link {{ page.version.version }}/architecture/storage-layer.md %}#protected-timestamps) record protecting `{job_type}` [jobs]({% link {{ page.version.version }}/show-jobs.md %}). See the [`/_status/vars`]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#prometheus-endpoint) endpoint for all job types.
-`jobs.{job_type}.protected_record_count` | Number of [protected timestamp]({% link {{ page.version.version }}/architecture/storage-layer.md %}#protected-timestamps) records held by `{job_type}` [jobs]({% link {{ page.version.version }}/show-jobs.md %}). See the [`/_status/vars`]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#prometheus-endpoint) endpoint for all job types.
-`jobs.row_level_ttl.num_active_spans` | Number of active spans the TTL job is deleting from
-`jobs.row_level_ttl.span_total_duration` | Duration for processing a span during row level TTL
-`keybytes` | Number of bytes taken up by keys
-`keycount` | Count of all keys
-`lastupdatenanos` | Time in nanoseconds since Unix epoch at which bytes/keys/intents metrics were last updated
-`leases.epoch` | Number of replica leaseholders using epoch-based leases
-`leases.error` | Number of failed lease requests
-`leases.expiration` | Number of replica leaseholders using expiration-based leases
-`leases.success` | Number of successful lease requests
-`leases.transfers.error` | Number of failed lease transfers
-`leases.transfers.success` | Number of successful lease transfers
-`livebytes` | Number of bytes of live data (keys plus values), including unreplicated data
-`livecount` | Count of live keys
-`liveness.epochincrements` | Number of times this node has incremented its liveness epoch
-`liveness.heartbeatfailures` | Number of failed node liveness heartbeats from this node
-`liveness.heartbeatlatency` | Node liveness heartbeat latency in nanoseconds
-`liveness.heartbeatsuccesses` | Number of successful node liveness heartbeats from this node
-`liveness.livenodes` | Number of live nodes in the cluster (will be 0 if this node is not itself live)
-`node-id` | node ID with labels for advertised RPC and HTTP addresses
-`queue.consistency.pending` | Number of pending replicas in the consistency checker queue
-`queue.consistency.process.failure` | Number of replicas which failed processing in the consistency checker queue
-`queue.consistency.process.success` | Number of replicas successfully processed by the consistency checker queue
-`queue.consistency.processingnanos` | Nanoseconds spent processing replicas in the consistency checker queue
-`queue.gc.info.abortspanconsidered` | Number of AbortSpan entries eligible for removal based on their ages
-`queue.gc.info.abortspangcnum` | Number of AbortSpan entries fit for removal
-`queue.gc.info.abortspanscanned` | Number of transactions present in the AbortSpan scanned from the engine
-`queue.gc.info.clearrangefailed` | Number of failed ClearRange operations during GC
-`queue.gc.info.clearrangesuccess` | Number of successful ClearRange operations during GC
-`queue.gc.info.intentsconsidered` | Number of intents eligible to be considered because they are at least two hours old
-`queue.gc.info.intenttxns` | Number of associated distinct transactions
-`queue.gc.info.numkeysaffected` | Number of keys with GC'able data
-`queue.gc.info.pushtxn` | Number of attempted pushes
-`queue.gc.info.resolvesuccess` | Number of successful intent resolutions
-`queue.gc.info.resolvetotal` | Number of attempted intent resolutions
-`queue.gc.info.transactionspangcaborted` | Number of GC'able entries corresponding to aborted txns
-`queue.gc.info.transactionspangccommitted` | Number of GC'able entries corresponding to committed txns
-`queue.gc.info.transactionspangcpending` | Number of GC'able entries corresponding to pending txns
-`queue.gc.info.transactionspanscanned` | Number of entries in transaction spans scanned from the engine
-`queue.gc.pending` | Number of pending replicas in the GC queue
-`queue.gc.process.failure` | Number of replicas which failed processing in the GC queue
-`queue.gc.process.success` | Number of replicas successfully processed by the GC queue
-`queue.gc.processingnanos` | Nanoseconds spent processing replicas in the GC queue
-`queue.raftlog.pending` | Number of pending replicas in the Raft log queue
-`queue.raftlog.process.failure` | Number of replicas which failed processing in the Raft log queue
-`queue.raftlog.process.success` | Number of replicas successfully processed by the Raft log queue
-`queue.raftlog.processingnanos` | Nanoseconds spent processing replicas in the Raft log queue
-`queue.raftsnapshot.pending` | Number of pending replicas in the Raft repair queue
-`queue.raftsnapshot.process.failure` | Number of replicas which failed processing in the Raft repair queue
-`queue.raftsnapshot.process.success` | Number of replicas successfully processed by the Raft repair queue
-`queue.raftsnapshot.processingnanos` | Nanoseconds spent processing replicas in the Raft repair queue
-`queue.replicagc.pending` | Number of pending replicas in the replica GC queue
-`queue.replicagc.process.failure` | Number of replicas which failed processing in the replica GC queue
-`queue.replicagc.process.success` | Number of replicas successfully processed by the replica GC queue
-`queue.replicagc.processingnanos` | Nanoseconds spent processing replicas in the replica GC queue
-`queue.replicagc.removereplica` | Number of replica removals attempted by the replica gc queue
-`queue.replicate.addreplica` | Number of replica additions attempted by the replicate queue
-`queue.replicate.addreplica.error` | Number of failed replica additions processed by the replicate queue
-`queue.replicate.addreplica.success` | Number of successful replica additions processed by the replicate queue
-`queue.replicate.pending` | Number of pending replicas in the replicate queue
-`queue.replicate.process.failure` | Number of replicas which failed processing in the replicate queue
-`queue.replicate.process.success` | Number of replicas successfully processed by the replicate queue
-`queue.replicate.processingnanos` | Nanoseconds spent processing replicas in the replicate queue
-`queue.replicate.purgatory` | Number of replicas in the replicate queue's purgatory, awaiting allocation options
-`queue.replicate.rebalancereplica` | Number of replica rebalancer-initiated additions attempted by the replicate queue
-`queue.replicate.removedeadreplica` | Number of dead replica removals attempted by the replicate queue (typically in response to a node outage)
-`queue.replicate.removedeadreplica.error` | Number of failed dead replica removals processed by the replicate queue
-`queue.replicate.removedeadreplica.success` | Number of successful dead replica removals processed by the replicate queue
-`queue.replicate.removedecommissioningreplica.error` | Number of failed decommissioning replica removals processed by the replicate queue
-`queue.replicate.removedecommissioningreplica.success` | Number of successful decommissioning replica removals processed by the replicate queue
-`queue.replicate.removereplica` | Number of replica removals attempted by the replicate queue (typically in response to a rebalancer-initiated addition)
-`queue.replicate.removereplica.error` | Number of failed replica removals processed by the replicate queue
-`queue.replicate.removereplica.success` | Number of successful replica removals processed by the replicate queue
-`queue.replicate.replacedeadreplica.error` | Number of failed dead replica replacements processed by the replicate queue
-`queue.replicate.replacedeadreplica.success` | Number of successful dead replica replacements processed by the replicate queue
-`queue.replicate.replacedecommissioningreplica.error` | Number of failed decommissioning replica replacements processed by the replicate queue
-`queue.replicate.replacedecommissioningreplica.success` | Number of successful decommissioning replica replacements processed by the replicate queue
-`queue.replicate.transferlease` | Number of range lease transfers attempted by the replicate queue
-`queue.split.pending` | Number of pending replicas in the split queue
-`queue.split.process.failure` | Number of replicas which failed processing in the split queue
-`queue.split.process.success` | Number of replicas successfully processed by the split queue
-`queue.split.processingnanos` | Nanoseconds spent processing replicas in the split queue
-`queue.tsmaintenance.pending` | Number of pending replicas in the time series maintenance queue
-`queue.tsmaintenance.process.failure` | Number of replicas which failed processing in the time series maintenance queue
-`queue.tsmaintenance.process.success` | Number of replicas successfully processed by the time series maintenance queue
-`queue.tsmaintenance.processingnanos` | Nanoseconds spent processing replicas in the time series maintenance queue
-`raft.commandsapplied` | Count of Raft commands applied
-`raft.enqueued.pending` | Number of pending outgoing messages in the Raft Transport queue
-`raft.heartbeats.pending` | Number of pending heartbeats and responses waiting to be coalesced
-`raft.process.commandcommit.latency` | Latency histogram in nanoseconds for committing Raft commands
-`raft.process.logcommit.latency` | Latency histogram in nanoseconds for committing Raft log entries
-`raft.process.tickingnanos` | Nanoseconds spent in store.processRaft() processing replica.Tick()
-`raft.process.workingnanos` | Nanoseconds spent in store.processRaft() working
-`raft.rcvd.app` | Number of MsgApp messages received by this store
-`raft.rcvd.appresp` | Number of MsgAppResp messages received by this store
-`raft.rcvd.dropped` | Number of dropped incoming Raft messages
-`raft.rcvd.heartbeat` | Number of (coalesced, if enabled) MsgHeartbeat messages received by this store
-`raft.rcvd.heartbeatresp` | Number of (coalesced, if enabled) MsgHeartbeatResp messages received by this store
-`raft.rcvd.prevote` | Number of MsgPreVote messages received by this store
-`raft.rcvd.prevoteresp` | Number of MsgPreVoteResp messages received by this store
-`raft.rcvd.prop` | Number of MsgProp messages received by this store
-`raft.rcvd.snap` | Number of MsgSnap messages received by this store
-`raft.rcvd.timeoutnow` | Number of MsgTimeoutNow messages received by this store
-`raft.rcvd.transferleader` | Number of MsgTransferLeader messages received by this store
-`raft.rcvd.vote` | Number of MsgVote messages received by this store
-`raft.rcvd.voteresp` | Number of MsgVoteResp messages received by this store
-`raft.ticks` | Number of Raft ticks queued
-`raftlog.behind` | Number of Raft log entries followers on other stores are behind
-`raftlog.truncated` | Number of Raft log entries truncated
-`range.adds` | Number of range additions
-`range.raftleadertransfers` | Number of Raft leader transfers
-`range.removes` | Number of range removals
-`range.snapshots.recv-in-progress` | Number of non-empty snapshots in progress on a receiver store
-`range.snapshots.recv-queue` | Number of queued non-empty snapshots on a receiver store
-`range.snapshots.recv-total-in-progress` | Number of empty and non-empty snapshots in progress on a receiver store
-`range.snapshots.send-in-progress` | Number of non-empty snapshots in progress on a sender store
-`range.snapshots.send-queue` | Number of queued non-empty snapshots on a sender store
-`range.snapshots.send-total-in-progress` | Number of empty and non-empty in-progress snapshots on a sender store
-`range.snapshots.generated` | Number of generated snapshots
-`range.snapshots.normal-applied` | Number of applied snapshots
-`range.snapshots.preemptive-applied` | Number of applied preemptive snapshots
-`range.snapshots.rcvd-bytes` | Number of snapshot bytes received
-`range.snapshots.rebalancing.rcvd-bytes` | Number of rebalancing snapshot bytes received
-`range.snapshots.rebalancing.sent-bytes` | Number of rebalancing snapshot bytes sent
-`range.snapshots.recovery.rcvd-bytes` | Number of recovery snapshot bytes received
-`range.snapshots.recovery.sent-bytes` | Number of recovery snapshot bytes sent
-`range.snapshots.recv-in-progress` | Number of non-empty snapshots being received
-`range.snapshots.recv-queue` | Number of snapshots queued to receive
-`range.snapshots.recv-total-in-progress` | Number of total snapshots being received
-`range.snapshots.send-in-progress` | Number of non-empty snapshots being sent
-`range.snapshots.send-queue` | Number of snapshots queued to send
-`range.snapshots.send-total-in-progress` | Number of total snapshots being sent
-`range.snapshots.sent-bytes` | Number of snapshot bytes sent
-`range.snapshots.unknown.rcvd-bytes` | Number of unknown snapshot bytes received
-`range.snapshots.unknown.sent-bytes` | Number of unknown snapshot bytes sent
-`range.splits` | Number of range splits
-`rangekeybytes` | Number of bytes taken up by range keys (e.g., MVCC range tombstones)
-`rangekeycount` | Count of all range keys (e.g., MVCC range tombstones)
-`ranges.unavailable` | Number of ranges with fewer live replicas than needed for quorum
-`ranges.underreplicated` | Number of ranges with fewer live replicas than the replication target
-`ranges` | Number of ranges
-`rangevalbytes` | Number of bytes taken up by range key values (e.g., MVCC range tombstones)
-`rangevalcount` | Count of all range key values (e.g., MVCC range tombstones)
-`rebalancing.queriespersecond` | Number of kv-level requests received per second by the store, considering the last 30 minutes, as used in rebalancing decisions.
-`rebalancing.readbytespersecond` | Number of bytes written per second, considering the last 30 minutes.
-`rebalancing.readspersecond` | Number of keys read recently per second, considering the last 30 minutes.
-`rebalancing.requestspersecond` | Number of requests received recently per second, considering the last 30 minutes.
-`rebalancing.writebytespersecond` | Number of bytes read recently per second, considering the last 30 minutes.
-`rebalancing.writespersecond` | Number of keys written (i.e. applied by Raft) per second to the store, considering the last 30 minutes.
-`replicas.commandqueue.combinedqueuesize` | Number of commands in all CommandQueues combined
-`replicas.commandqueue.combinedreadcount` | Number of read-only commands in all CommandQueues combined
-`replicas.commandqueue.combinedwritecount` | Number of read-write commands in all CommandQueues combined
-`replicas.commandqueue.maxoverlaps` | Largest number of overlapping commands seen when adding to any CommandQueue
-`replicas.commandqueue.maxreadcount` | Largest number of read-only commands in any CommandQueue
-`replicas.commandqueue.maxsize` | Largest number of commands in any CommandQueue
-`replicas.commandqueue.maxtreesize` | Largest number of intervals in any CommandQueue's interval tree
-`replicas.commandqueue.maxwritecount` | Largest number of read-write commands in any CommandQueue
-`replicas.leaders_invalid_lease` | Number of replicas that are Raft leaders whose lease is invalid
-`replicas.leaders_not_leaseholders` | Number of replicas that are Raft leaders whose range lease is held by another store
-`replicas.leaders` | Number of Raft leaders
-`replicas.leaseholders` | Number of lease holders
-`replicas.quiescent` | Number of quiesced replicas
-`replicas.reserved` | Number of replicas reserved for snapshots
-`replicas` | Number of replicas
-`requests.backpressure.split` | Number of backpressured writes waiting on a Range split
-`requests.slow.commandqueue` | Number of requests that have been stuck for a long time in the command queue
-`requests.slow.distsender` | Number of requests that have been stuck for a long time in the dist sender
-`requests.slow.lease` | Number of requests that have been stuck for a long time acquiring a lease
-`requests.slow.raft` | Number of requests that have been stuck for a long time in Raft
-`rocksdb.block.cache.hits` | Count of block cache hits
-`rocksdb.block.cache.misses` | Count of block cache misses
-`rocksdb.block.cache.pinned-usage` | Bytes pinned by the block cache
-`rocksdb.block.cache.usage` | Bytes used by the block cache
-`rocksdb.bloom.filter.prefix.checked` | Number of times the bloom filter was checked
-`rocksdb.bloom.filter.prefix.useful` | Number of times the bloom filter helped avoid iterator creation
-`rocksdb.compactions` | Number of table compactions
-`rocksdb.flushes` | Number of table flushes
-`rocksdb.memtable.total-size` | Current size of memtable in bytes
-`rocksdb.num-sstables` | Number of storage engine SSTables
-`rocksdb.read-amplification` | Number of disk reads per query
-`rocksdb.table-readers-mem-estimate` | Memory used by index and filter blocks
-`round-trip-latency` | Distribution of round-trip latencies with other nodes in nanoseconds
-`security.certificate.expiration.ca` | Expiration timestamp in seconds since Unix epoch for the CA certificate. 0 means no certificate or error.
-`security.certificate.expiration.node` | Expiration timestamp in seconds since Unix epoch for the node certificate. 0 means no certificate or error.
-`schedules.BACKUP.protected_age_sec` | The age of the oldest [protected timestamp]({% link {{ page.version.version }}/architecture/storage-layer.md %}#protected-timestamps) record protected by `BACKUP` schedules.
-`schedules.BACKUP.protected_record_count` | Number of [protected timestamp]({% link {{ page.version.version }}/architecture/storage-layer.md %}#protected-timestamps) records held by `BACKUP` schedules.
-`sql.bytesin` | Number of SQL bytes received
-`sql.bytesout` | Number of SQL bytes sent
-`sql.conns` | Number of active SQL connections. For new recent connections, refer to `sql.new_conns`.
-`sql.ddl.count` | Number of SQL DDL statements
-`sql.delete.count` | Number of SQL DELETE statements
-`sql.distsql.exec.latency` | Latency in nanoseconds of SQL statement executions running on the distributed execution engine. This metric does not include the time to parse and plan the statement.
-`sql.distsql.flows.active` | Number of distributed SQL flows currently active
-`sql.distsql.flows.total` | Number of distributed SQL flows executed
-`sql.distsql.queries.active` | Number of distributed SQL queries currently active
-`sql.distsql.queries.total` | Number of distributed SQL queries executed
-`sql.distsql.select.count` | Number of DistSQL SELECT statements
-`sql.distsql.service.latency` | Latency in nanoseconds of SQL statement executions running on the distributed execution engine, including the time to parse and plan the statement.
-`sql.exec.latency` | Latency in nanoseconds of all SQL statement executions. This metric does not include the time to parse and plan the statement.
-`sql.guardrails.max_row_size_err.count` | Number of times a large row violates the corresponding `sql.guardrails.max_row_size_err` limit.
-`sql.guardrails.max_row_size_log.count` | Number of times a large row violates the corresponding `sql.guardrails.max_row_size_log` limit.
-`sql.insert.count` | Number of SQL INSERT statements
-`sql.mem.current` | Current sql statement memory usage
-`sql.mem.distsql.current` | Current sql statement memory usage for distsql
-`sql.mem.distsql.max` | Memory usage per sql statement for distsql
-`sql.mem.max` | Memory usage per sql statement
-`sql.mem.root.current` | Current sql statement memory usage for root
-`sql.mem.root.max` | Memory usage per sql statement for root
-`sql.mem.session.current` | Current sql session memory usage
-`sql.mem.session.max` | Memory usage per sql session
-`sql.mem.txn.current` | Current sql transaction memory usage
-`sql.mem.txn.max` | Memory usage per sql transaction
-`sql.misc.count` | Number of other SQL statements
-`sql.new_conns` | Number of new SQL connections in the previous second. For all connections, refer to `sql.conns`.
-`sql.pgwire_cancel.total` | Counter of the number of pgwire query cancel requests
-`sql.pgwire_cancel.ignored` | Counter of the number of pgwire query cancel requests that were ignored due to rate limiting
-`sql.pgwire_cancel.successful` | Counter of the number of pgwire query cancel requests that were successful
-`sql.query.count` | Number of SQL queries
-`sql.select.count` | Number of SQL SELECT statements
-`sql.service.latency` | Latency in nanoseconds of SQL request execution, including the time to parse and plan the statement.
-`sql.txn.abort.count` | Number of SQL transaction ABORT statements
-`sql.txn.begin.count` | Number of SQL transaction BEGIN statements
-`sql.txn.commit.count` | Number of SQL transaction COMMIT statements
-`sql.txn.contended.count` | Number of SQL transactions that experienced contention
-`sql.txn.isolation.executed_at.read_committed` | Number of times a [`READ COMMITTED`]({% link {{ page.version.version }}/read-committed.md %}) transaction was executed.
-`sql.txn.isolation.upgraded_from.read_committed` | Number of times a [`READ COMMITTED`]({% link {{ page.version.version }}/read-committed.md %}) transaction was automatically upgraded to a stronger isolation level.
-`sql.txn.rollback.count` | Number of SQL transaction ROLLBACK statements
-`sql.update.count` | Number of SQL UPDATE statements
-`storage.l0-level-score` | Compaction score of level 0
-`storage.l1-level-score` | Compaction score of level 1
-`storage.l2-level-score` | Compaction score of level 2
-`storage.l3-level-score` | Compaction score of level 3
-`storage.l4-level-score` | Compaction score of level 4
-`storage.l5-level-score` | Compaction score of level 5
-`storage.l6-level-score` | Compaction score of level 6
-`storage.l0-level-size` | Size of the SSTables in level 0
-`storage.l1-level-size` | Size of the SSTables in level 1
-`storage.l2-level-size` | Size of the SSTables in level 2
-`storage.l3-level-size` | Size of the SSTables in level 3
-`storage.l4-level-size` | Size of the SSTables in level 4
-`storage.l5-level-size` | Size of the SSTables in level 5
-`storage.l6-level-size` | Size of the SSTables in level 6
-`storage.keys.range-key-set.count` | Approximate count of RangeKeySet internal keys across the storage engine.
-`storage.marked-for-compaction-files` | Count of SSTables marked for compaction
-`sys.cgo.allocbytes` | Current bytes of memory allocated by cgo
-`sys.cgo.totalbytes` | Total bytes of memory allocated by cgo, but not released
-`sys.cgocalls` | Total number of cgo call
-`sys.cpu.sys.ns` | Total system cpu time in nanoseconds
-`sys.cpu.sys.percent` | Current system cpu percentage
-`sys.cpu.user.ns` | Total user cpu time in nanoseconds
-`sys.cpu.user.percent` | Current user cpu percentage
-`sys.fd.open` | Process open file descriptors
-`sys.fd.softlimit` | Process open FD soft limit
-`sys.gc.count` | Total number of GC runs
-`sys.gc.pause.ns` | Total GC pause in nanoseconds
-`sys.gc.pause.percent` | Current GC pause percentage
-`sys.go.allocbytes` | Current bytes of memory allocated by go
-`sys.go.totalbytes` | Total bytes of memory allocated by go, but not released
-`sys.goroutines` | Current number of goroutines
-`sys.rss` | Current process RSS
-`sys.uptime` | Process uptime in seconds
-`sysbytes` | Number of bytes in system KV pairs
-`syscount` | Count of system KV pairs
-`timeseries.write.bytes` | Total size in bytes of metric samples written to disk
-`timeseries.write.errors` | Total errors encountered while attempting to write metrics to disk
-`timeseries.write.samples` | Total number of metric samples written to disk
-`totalbytes` | Total number of bytes taken up by keys and values including non-live data
-`tscache.skl.read.pages` | Number of pages in the read timestamp cache
-`tscache.skl.read.rotations` | Number of page rotations in the read timestamp cache
-`tscache.skl.write.pages` | Number of pages in the write timestamp cache
-`tscache.skl.write.rotations` | Number of page rotations in the write timestamp cache
-`txn.abandons` | Number of abandoned KV transactions
-`txn.aborts` | Number of aborted KV transactions
-`txn.autoretries` | Number of automatic retries to avoid serializable restarts
-`txn.commits1PC` | Number of committed one-phase KV transactions
-`txn.commits` | Number of committed KV transactions (including 1PC)
-`txn.durations` | KV transaction durations in nanoseconds
-`txn.restarts.deleterange` | Number of restarts due to a forwarded commit timestamp and a DeleteRange command
-`txn.restarts.possiblereplay` | Number of restarts due to possible replays of command batches at the storage layer
-`txn.restarts.serializable` | Number of restarts due to a forwarded commit timestamp and isolation=SERIALIZABLE
-`txn.restarts.writetooold` | Number of restarts due to a concurrent writer committing first
-`txn.restarts` | Number of restarted KV transactions
-`valbytes` | Number of bytes taken up by values
-`valcount` | Count of all values
+{% assign list1 = site.data.metrics.available-metrics-in-metrics-list %}
+{% assign list2 = site.data.metrics.available-metrics-not-in-metrics-list %}
+
+{% assign available_metrics_combined = list1 | concat: list2 %}
+{% assign available_metrics_sorted = available_metrics_combined | sort: "metric_id" %}
+
+<table>
+    <thead>
+        <tr>
+            <th>CockroachDB Metric Name</th>
+            <th>Description</th>
+            <th>Type</th>
+            <th>Unit</th>
+        </tr>
+    </thead>
+    <tbody>
+    {% for m in available_metrics_sorted %} {% comment %} Iterate through the available_metrics. {% endcomment %}
+        {% assign metrics-list = site.data.metrics.metrics-list | where: "metric", m.metric_id %}
+        {% comment %} Get the row from the metrics-list with the given metric_id. {% endcomment %}
+        <tr>
+            <td><code>{{ m.metric_id }}</code></td>
+            {% comment %} Use the value from the metrics-list, if any, followed by the value in the available-metrics-not-in-metrics-list, if any. {% endcomment %}
+
diff --git a/src/current/_includes/v24.3/sidebar-data/self-hosted-deployments.json b/src/current/_includes/v24.3/sidebar-data/self-hosted-deployments.json
index 1e48de5aeff..76608d8939a 100644
--- a/src/current/_includes/v24.3/sidebar-data/self-hosted-deployments.json
+++ b/src/current/_includes/v24.3/sidebar-data/self-hosted-deployments.json
@@ -406,89 +406,95 @@
"title": "Metrics Dashboards",
"items": [
{
- "title": "Overview Dashboard",
+ "title": "Overview",
"urls": [
"/${VERSION}/ui-overview-dashboard.html"
]
},
{
- "title": "Hardware Dashboard",
+ "title": "Hardware",
"urls": [
"/${VERSION}/ui-hardware-dashboard.html"
]
},
{
- "title": "Runtime Dashboard",
+ "title": "Runtime",
"urls": [
"/${VERSION}/ui-runtime-dashboard.html"
]
},
{
- "title": "Networking Dashboard",
+ "title": "Networking",
"urls": [
"/${VERSION}/ui-networking-dashboard.html"
]
},
{
- "title": "SQL Dashboard",
+ "title": "SQL",
"urls": [
"/${VERSION}/ui-sql-dashboard.html"
]
},
{
- "title": "Storage Dashboard",
+ "title": "Storage",
"urls": [
"/${VERSION}/ui-storage-dashboard.html"
]
},
{
- "title": "Replication Dashboard",
+ "title": "Replication",
"urls": [
"/${VERSION}/ui-replication-dashboard.html"
]
},
{
- "title": "Distributed Dashboard",
+ "title": "Distributed",
"urls": [
"/${VERSION}/ui-distributed-dashboard.html"
]
},
{
- "title": "Queues Dashboard",
+ "title": "Queues",
"urls": [
"/${VERSION}/ui-queues-dashboard.html"
]
},
{
- "title": "Slow Requests Dashboard",
+ "title": "Slow Requests",
"urls": [
"/${VERSION}/ui-slow-requests-dashboard.html"
]
},
{
- "title": "Changefeeds Dashboard",
+ "title": "Changefeeds",
"urls": [
"/${VERSION}/ui-cdc-dashboard.html"
]
},
{
- "title": "Overload Dashboard",
+ "title": "Overload",
"urls": [
"/${VERSION}/ui-overload-dashboard.html"
]
},
{
- "title": "TTL Dashboard",
+ "title": "TTL",
"urls": [
"/${VERSION}/ui-ttl-dashboard.html"
]
},
{
- "title": "Physical Cluster Replication Dashboard",
+ "title": "Physical Cluster Replication",
"urls": [
"/${VERSION}/ui-physical-cluster-replication-dashboard.html"
]
},
+ {
+ "title": "Logical Data Replication",
+ "urls": [
+ "/${VERSION}/ui-logical-data-replication-dashboard.html"
+ ]
+ },
{
"title": "Custom Chart",
"urls": [
diff --git a/src/current/_includes/v24.3/storage/free-up-disk-space.md b/src/current/_includes/v24.3/storage/free-up-disk-space.md
index c63b70b766e..e4a6b08a57a 100644
--- a/src/current/_includes/v24.3/storage/free-up-disk-space.md
+++ b/src/current/_includes/v24.3/storage/free-up-disk-space.md
@@ -1 +1 @@
-For instructions on how to free up disk space as quickly as possible after deleting data, see [How can I free up disk space quickly?]({% link {{ page.version.version }}/operational-faqs.md %}#how-can-i-free-up-disk-space-quickly)
+For instructions on how to free up disk space as quickly as possible after dropping a table, see [How can I free up disk space that was used by a dropped table?]({% link {{ page.version.version }}/operational-faqs.md %}#how-can-i-free-up-disk-space-when-dropping-a-table).
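One commonly documented lever is the zone's `gc.ttlseconds`; a minimal sketch of lowering it before dropping a table (table name and TTL value are hypothetical; refer to the linked FAQ for the full procedure):

~~~ sql
-- Lower the GC TTL so old MVCC versions are reclaimed sooner, then drop the table.
ALTER TABLE bank.accounts CONFIGURE ZONE USING gc.ttlseconds = 600;
DROP TABLE bank.accounts;
~~~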
diff --git a/src/current/_includes/v24.3/upgrade-requirements.md b/src/current/_includes/v24.3/upgrade-requirements.md
index f1111c9393d..d729ee9c6ee 100644
--- a/src/current/_includes/v24.3/upgrade-requirements.md
+++ b/src/current/_includes/v24.3/upgrade-requirements.md
@@ -1,4 +1,5 @@
CockroachDB v24.3 is a Regular release. To upgrade to it, you must be running either:
+
- [v24.2]({% link v24.2/upgrade-cockroach-version.md %}), the previous Innovation release.
- [v24.1]({% link v24.1/upgrade-cockroach-version.md %}), the previous Regular release.
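A quick way to confirm which of these versions a cluster is actually running before starting the upgrade (shown here only as a convenience):

~~~ sql
-- Returns the active cluster version, e.g., 24.2 (value is illustrative).
SHOW CLUSTER SETTING version;
~~~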
diff --git a/src/current/_includes/v25.1/app/before-you-begin.md b/src/current/_includes/v25.1/app/before-you-begin.md
new file mode 100644
index 00000000000..8daf2f91005
--- /dev/null
+++ b/src/current/_includes/v25.1/app/before-you-begin.md
@@ -0,0 +1,12 @@
+1. [Install CockroachDB]({% link {{ page.version.version }}/install-cockroachdb.md %}).
+1. Start up a [secure]({% link {{ page.version.version }}/secure-a-cluster.md %}) or [insecure]({% link {{ page.version.version }}/start-a-local-cluster.md %}) local cluster.
+1. Choose the instructions that correspond to whether your cluster is secure or insecure:
+
+
+
+
+
+
+
+{% include {{ page.version.version }}/prod-deployment/insecure-flag.md %}
+
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/app/cc-free-tier-params.md b/src/current/_includes/v25.1/app/cc-free-tier-params.md
new file mode 100644
index 00000000000..f8a196cdd8e
--- /dev/null
+++ b/src/current/_includes/v25.1/app/cc-free-tier-params.md
@@ -0,0 +1,10 @@
+Where:
+
+- `{username}` and `{password}` specify the SQL username and password that you created earlier.
+- `{globalhost}` is the name of the CockroachDB {{ site.data.products.cloud }} free tier host (e.g., `free-tier.gcp-us-central1.cockroachlabs.cloud`).
+- `{path to the CA certificate}` is the path to the `cc-ca.crt` file that you downloaded from the CockroachDB {{ site.data.products.cloud }} Console.
+- `{cluster_name}` is the name of your cluster.
+
+{{site.data.alerts.callout_info}}
+If you are using the connection string that you [copied from the **Connection info** modal](#set-up-your-cluster-connection), your username, password, hostname, and cluster name will be pre-populated.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/app/create-a-database.md b/src/current/_includes/v25.1/app/create-a-database.md
new file mode 100644
index 00000000000..b2fb4af6f79
--- /dev/null
+++ b/src/current/_includes/v25.1/app/create-a-database.md
@@ -0,0 +1,54 @@
+
+
+1. In the SQL shell, create the `bank` database that your application will use:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > CREATE DATABASE bank;
+ ~~~
+
+1. Create a SQL user for your app:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+    > CREATE USER <username> WITH PASSWORD '<password>';
+ ~~~
+
+    Take note of the username and password. You will use them in your application code later.
+
+1. Give the user the necessary permissions:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+    > GRANT ALL ON DATABASE bank TO <username>;
+ ~~~
+
+
+
+
+
+1. If you haven't already, [download the CockroachDB binary]({% link {{ page.version.version }}/install-cockroachdb.md %}).
+1. Start the [built-in SQL shell]({% link {{ page.version.version }}/cockroach-sql.md %}) using the connection string you got from the CockroachDB {{ site.data.products.cloud }} Console:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql \
+    --url='<connection-string>'
+ ~~~
+
+1. In the SQL shell, create the `bank` database that your application will use:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > CREATE DATABASE bank;
+ ~~~
+
+1. Exit the SQL shell:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > \q
+ ~~~
+
+
+
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/app/create-maxroach-user-and-bank-database.md b/src/current/_includes/v25.1/app/create-maxroach-user-and-bank-database.md
new file mode 100644
index 00000000000..4e81a23b6bc
--- /dev/null
+++ b/src/current/_includes/v25.1/app/create-maxroach-user-and-bank-database.md
@@ -0,0 +1,32 @@
+Start the [built-in SQL shell]({% link {{ page.version.version }}/cockroach-sql.md %}):
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ cockroach sql --certs-dir=certs
+~~~
+
+In the SQL shell, issue the following statements to create the `maxroach` user and `bank` database:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> CREATE USER IF NOT EXISTS maxroach;
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> CREATE DATABASE bank;
+~~~
+
+Give the `maxroach` user the necessary permissions:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> GRANT ALL ON DATABASE bank TO maxroach;
+~~~
+
+Exit the SQL shell:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> \q
+~~~
diff --git a/src/current/_includes/v25.1/app/for-a-complete-example-go.md b/src/current/_includes/v25.1/app/for-a-complete-example-go.md
new file mode 100644
index 00000000000..5149489f6a6
--- /dev/null
+++ b/src/current/_includes/v25.1/app/for-a-complete-example-go.md
@@ -0,0 +1,4 @@
+For complete examples, see:
+
+- [Build a Go App with CockroachDB]({% link {{ page.version.version }}/build-a-go-app-with-cockroachdb.md %}) (pgx)
+- [Build a Go App with CockroachDB and GORM]({% link {{ page.version.version }}/build-a-go-app-with-cockroachdb-gorm.md %})
diff --git a/src/current/_includes/v25.1/app/for-a-complete-example-java.md b/src/current/_includes/v25.1/app/for-a-complete-example-java.md
new file mode 100644
index 00000000000..392ec2014d7
--- /dev/null
+++ b/src/current/_includes/v25.1/app/for-a-complete-example-java.md
@@ -0,0 +1,4 @@
+For complete examples, see:
+
+- [Build a Java App with CockroachDB]({% link {{ page.version.version }}/build-a-java-app-with-cockroachdb.md %}) (JDBC)
+- [Build a Java App with CockroachDB and Hibernate]({% link {{ page.version.version }}/build-a-java-app-with-cockroachdb-hibernate.md %})
diff --git a/src/current/_includes/v25.1/app/for-a-complete-example-python.md b/src/current/_includes/v25.1/app/for-a-complete-example-python.md
new file mode 100644
index 00000000000..29d0352eab3
--- /dev/null
+++ b/src/current/_includes/v25.1/app/for-a-complete-example-python.md
@@ -0,0 +1,6 @@
+For complete examples, see:
+
+- [Build a Python App with CockroachDB]({% link {{ page.version.version }}/build-a-python-app-with-cockroachdb-psycopg3.md %}) (psycopg3)
+- [Build a Python App with CockroachDB and SQLAlchemy]({% link {{ page.version.version }}/build-a-python-app-with-cockroachdb-sqlalchemy.md %})
+- [Build a Python App with CockroachDB and Django]({% link {{ page.version.version }}/build-a-python-app-with-cockroachdb-django.md %})
+- [Build a Python App with CockroachDB and asyncpg]({% link {{ page.version.version }}/build-a-python-app-with-cockroachdb-asyncpg.md %})
diff --git a/src/current/_includes/v25.1/app/hibernate-dialects-note.md b/src/current/_includes/v25.1/app/hibernate-dialects-note.md
new file mode 100644
index 00000000000..287f314d393
--- /dev/null
+++ b/src/current/_includes/v25.1/app/hibernate-dialects-note.md
@@ -0,0 +1,5 @@
+Versions of the Hibernate CockroachDB dialect correspond to the version of CockroachDB installed on your machine. For example, `org.hibernate.dialect.CockroachDB201Dialect` corresponds to CockroachDB v20.1 and later, and `org.hibernate.dialect.CockroachDB192Dialect` corresponds to CockroachDB v19.2 and later.
+
+All dialect versions are forward-compatible (e.g., CockroachDB v20.1 is compatible with `CockroachDB192Dialect`), as long as your application is not affected by any backward-incompatible changes listed in your CockroachDB version's [release notes]({% link releases/index.md %}). In the event of a CockroachDB version upgrade, using a previous version of the CockroachDB dialect will not break an application, but, to enable all features available in your version of CockroachDB, we recommend keeping the dialect version in sync with the installed version of CockroachDB.
+
+Not all versions of CockroachDB have a corresponding dialect yet. Use the dialect number that is closest to your installed version of CockroachDB. For example, use `CockroachDB201Dialect` when using CockroachDB v21.1 and later.
diff --git a/src/current/_includes/v25.1/app/insecure/create-maxroach-user-and-bank-database.md b/src/current/_includes/v25.1/app/insecure/create-maxroach-user-and-bank-database.md
new file mode 100644
index 00000000000..36c4814c12e
--- /dev/null
+++ b/src/current/_includes/v25.1/app/insecure/create-maxroach-user-and-bank-database.md
@@ -0,0 +1,32 @@
+Start the [built-in SQL shell]({% link {{ page.version.version }}/cockroach-sql.md %}):
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ cockroach sql --insecure
+~~~
+
+In the SQL shell, issue the following statements to create the `maxroach` user and `bank` database:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> CREATE USER IF NOT EXISTS maxroach;
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> CREATE DATABASE bank;
+~~~
+
+Give the `maxroach` user the necessary permissions:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> GRANT ALL ON DATABASE bank TO maxroach;
+~~~
+
+Exit the SQL shell:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> \q
+~~~
diff --git a/src/current/_includes/v25.1/app/insecure/jooq-basic-sample/Sample.java b/src/current/_includes/v25.1/app/insecure/jooq-basic-sample/Sample.java
new file mode 100644
index 00000000000..d1a54a8ddd2
--- /dev/null
+++ b/src/current/_includes/v25.1/app/insecure/jooq-basic-sample/Sample.java
@@ -0,0 +1,215 @@
+package com.cockroachlabs;
+
+import com.cockroachlabs.example.jooq.db.Tables;
+import com.cockroachlabs.example.jooq.db.tables.records.AccountsRecord;
+import org.jooq.DSLContext;
+import org.jooq.SQLDialect;
+import org.jooq.Source;
+import org.jooq.conf.RenderQuotedNames;
+import org.jooq.conf.Settings;
+import org.jooq.exception.DataAccessException;
+import org.jooq.impl.DSL;
+
+import java.io.InputStream;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.*;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Function;
+
+import static com.cockroachlabs.example.jooq.db.Tables.ACCOUNTS;
+
+public class Sample {
+
+ private static final Random RAND = new Random();
+ private static final boolean FORCE_RETRY = false;
+ private static final String RETRY_SQL_STATE = "40001";
+ private static final int MAX_ATTEMPT_COUNT = 6;
+
+    private static Function<DSLContext, Long> addAccounts() {
+ return ctx -> {
+ long rv = 0;
+
+ ctx.delete(ACCOUNTS).execute();
+ ctx.batchInsert(
+ new AccountsRecord(1L, 1000L),
+ new AccountsRecord(2L, 250L),
+ new AccountsRecord(3L, 314159L)
+ ).execute();
+
+ rv = 1;
+ System.out.printf("APP: addAccounts() --> %d\n", rv);
+ return rv;
+ };
+ }
+
+    private static Function<DSLContext, Long> transferFunds(long fromId, long toId, long amount) {
+ return ctx -> {
+ long rv = 0;
+
+ AccountsRecord fromAccount = ctx.fetchSingle(ACCOUNTS, ACCOUNTS.ID.eq(fromId));
+ AccountsRecord toAccount = ctx.fetchSingle(ACCOUNTS, ACCOUNTS.ID.eq(toId));
+
+ if (!(amount > fromAccount.getBalance())) {
+ fromAccount.setBalance(fromAccount.getBalance() - amount);
+ toAccount.setBalance(toAccount.getBalance() + amount);
+
+ ctx.batchUpdate(fromAccount, toAccount).execute();
+ rv = amount;
+ System.out.printf("APP: transferFunds(%d, %d, %d) --> %d\n", fromId, toId, amount, rv);
+ }
+
+ return rv;
+ };
+ }
+
+ // Test our retry handling logic if FORCE_RETRY is true. This
+ // method is only used to test the retry logic. It is not
+ // intended for production code.
+    private static Function<DSLContext, Long> forceRetryLogic() {
+ return ctx -> {
+ long rv = -1;
+ try {
+ System.out.printf("APP: testRetryLogic: BEFORE EXCEPTION\n");
+ ctx.execute("SELECT crdb_internal.force_retry('1s')");
+ } catch (DataAccessException e) {
+ System.out.printf("APP: testRetryLogic: AFTER EXCEPTION\n");
+ throw e;
+ }
+ return rv;
+ };
+ }
+
+    private static Function<DSLContext, Long> getAccountBalance(long id) {
+ return ctx -> {
+ AccountsRecord account = ctx.fetchSingle(ACCOUNTS, ACCOUNTS.ID.eq(id));
+ long balance = account.getBalance();
+ System.out.printf("APP: getAccountBalance(%d) --> %d\n", id, balance);
+ return balance;
+ };
+ }
+
+ // Run SQL code in a way that automatically handles the
+ // transaction retry logic so we do not have to duplicate it in
+ // various places.
+    private static long runTransaction(DSLContext session, Function<DSLContext, Long> fn) {
+ AtomicLong rv = new AtomicLong(0L);
+ AtomicInteger attemptCount = new AtomicInteger(0);
+
+ while (attemptCount.get() < MAX_ATTEMPT_COUNT) {
+ attemptCount.incrementAndGet();
+
+ if (attemptCount.get() > 1) {
+ System.out.printf("APP: Entering retry loop again, iteration %d\n", attemptCount.get());
+ }
+
+ if (session.connectionResult(connection -> {
+ connection.setAutoCommit(false);
+ System.out.printf("APP: BEGIN;\n");
+
+ if (attemptCount.get() == MAX_ATTEMPT_COUNT) {
+ String err = String.format("hit max of %s attempts, aborting", MAX_ATTEMPT_COUNT);
+ throw new RuntimeException(err);
+ }
+
+ // This block is only used to test the retry logic.
+ // It is not necessary in production code. See also
+ // the method 'testRetryLogic()'.
+ if (FORCE_RETRY) {
+ session.fetch("SELECT now()");
+ }
+
+ try {
+ rv.set(fn.apply(session));
+ if (rv.get() != -1) {
+ connection.commit();
+ System.out.printf("APP: COMMIT;\n");
+ return true;
+ }
+ } catch (DataAccessException | SQLException e) {
+ String sqlState = e instanceof SQLException ? ((SQLException) e).getSQLState() : ((DataAccessException) e).sqlState();
+
+ if (RETRY_SQL_STATE.equals(sqlState)) {
+ // Since this is a transaction retry error, we
+ // roll back the transaction and sleep a little
+ // before trying again. Each time through the
+ // loop we sleep for a little longer than the last
+ // time (A.K.A. exponential backoff).
+ System.out.printf("APP: retryable exception occurred:\n sql state = [%s]\n message = [%s]\n retry counter = %s\n", sqlState, e.getMessage(), attemptCount.get());
+ System.out.printf("APP: ROLLBACK;\n");
+ connection.rollback();
+ int sleepMillis = (int)(Math.pow(2, attemptCount.get()) * 100) + RAND.nextInt(100);
+ System.out.printf("APP: Hit 40001 transaction retry error, sleeping %s milliseconds\n", sleepMillis);
+ try {
+ Thread.sleep(sleepMillis);
+ } catch (InterruptedException ignored) {
+ // no-op
+ }
+ rv.set(-1L);
+ } else {
+ throw e;
+ }
+ }
+
+ return false;
+ })) {
+ break;
+ }
+ }
+
+ return rv.get();
+ }
+
+ public static void main(String[] args) throws Exception {
+ try (Connection connection = DriverManager.getConnection(
+ "jdbc:postgresql://localhost:26257/bank?sslmode=disable",
+ "maxroach",
+ ""
+ )) {
+ DSLContext ctx = DSL.using(connection, SQLDialect.COCKROACHDB, new Settings()
+ .withExecuteLogging(true)
+ .withRenderQuotedNames(RenderQuotedNames.NEVER));
+
+ // Initialise database with db.sql script
+ try (InputStream in = Sample.class.getResourceAsStream("/db.sql")) {
+ ctx.parser().parse(Source.of(in).readString()).executeBatch();
+ }
+
+ long fromAccountId = 1;
+ long toAccountId = 2;
+ long transferAmount = 100;
+
+ if (FORCE_RETRY) {
+ System.out.printf("APP: About to test retry logic in 'runTransaction'\n");
+ runTransaction(ctx, forceRetryLogic());
+ } else {
+
+ runTransaction(ctx, addAccounts());
+ long fromBalance = runTransaction(ctx, getAccountBalance(fromAccountId));
+ long toBalance = runTransaction(ctx, getAccountBalance(toAccountId));
+ if (fromBalance != -1 && toBalance != -1) {
+ // Success!
+ System.out.printf("APP: getAccountBalance(%d) --> %d\n", fromAccountId, fromBalance);
+ System.out.printf("APP: getAccountBalance(%d) --> %d\n", toAccountId, toBalance);
+ }
+
+ // Transfer $100 from account 1 to account 2
+ long transferResult = runTransaction(ctx, transferFunds(fromAccountId, toAccountId, transferAmount));
+ if (transferResult != -1) {
+ // Success!
+ System.out.printf("APP: transferFunds(%d, %d, %d) --> %d \n", fromAccountId, toAccountId, transferAmount, transferResult);
+
+ long fromBalanceAfter = runTransaction(ctx, getAccountBalance(fromAccountId));
+ long toBalanceAfter = runTransaction(ctx, getAccountBalance(toAccountId));
+ if (fromBalanceAfter != -1 && toBalanceAfter != -1) {
+ // Success!
+ System.out.printf("APP: getAccountBalance(%d) --> %d\n", fromAccountId, fromBalanceAfter);
+ System.out.printf("APP: getAccountBalance(%d) --> %d\n", toAccountId, toBalanceAfter);
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/current/_includes/v25.1/app/insecure/jooq-basic-sample/jooq-basic-sample.zip b/src/current/_includes/v25.1/app/insecure/jooq-basic-sample/jooq-basic-sample.zip
new file mode 100644
index 00000000000..f11f86b8f43
Binary files /dev/null and b/src/current/_includes/v25.1/app/insecure/jooq-basic-sample/jooq-basic-sample.zip differ
diff --git a/src/current/_includes/v25.1/app/insecure/upperdb-basic-sample/main.go b/src/current/_includes/v25.1/app/insecure/upperdb-basic-sample/main.go
new file mode 100644
index 00000000000..5c855356d7e
--- /dev/null
+++ b/src/current/_includes/v25.1/app/insecure/upperdb-basic-sample/main.go
@@ -0,0 +1,185 @@
+package main
+
+import (
+ "fmt"
+ "log"
+ "time"
+
+ "github.com/upper/db/v4"
+ "github.com/upper/db/v4/adapter/cockroachdb"
+)
+
+// The settings variable stores connection details.
+var settings = cockroachdb.ConnectionURL{
+ Host: "localhost",
+ Database: "bank",
+ User: "maxroach",
+ Options: map[string]string{
+ // Insecure node.
+ "sslmode": "disable",
+ },
+}
+
+// Accounts is a handy way to represent a collection.
+func Accounts(sess db.Session) db.Store {
+ return sess.Collection("accounts")
+}
+
+// Account is used to represent a single record in the "accounts" table.
+type Account struct {
+ ID uint64 `db:"id,omitempty"`
+ Balance int64 `db:"balance"`
+}
+
+// Store is required in order to create a relation between the Account
+// struct and the "accounts" table.
+func (a *Account) Store(sess db.Session) db.Store {
+ return Accounts(sess)
+}
+
+// createTables creates all the tables that are necessary to run this example.
+func createTables(sess db.Session) error {
+ _, err := sess.SQL().Exec(`
+ CREATE TABLE IF NOT EXISTS accounts (
+ ID SERIAL PRIMARY KEY,
+ balance INT
+ )
+ `)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// crdbForceRetry can be used to simulate a transaction error and
+// demonstrate upper/db's ability to retry the transaction automatically.
+//
+// By default, upper/db will retry the transaction five times, if you want
+// to modify this number use: sess.SetMaxTransactionRetries(n).
+//
+// This is only used for demonstration purposes and not intended
+// for production code.
+func crdbForceRetry(sess db.Session) error {
+ var err error
+
+ // The first statement in a transaction can be retried transparently on the
+ // server, so we need to add a placeholder statement so that our
+ // force_retry() statement isn't the first one.
+ _, err = sess.SQL().Exec(`SELECT 1`)
+ if err != nil {
+ return err
+ }
+
+ // If force_retry is called during the specified interval from the beginning
+ // of the transaction it returns a retryable error. If not, 0 is returned
+ // instead of an error.
+ _, err = sess.SQL().Exec(`SELECT crdb_internal.force_retry('1s'::INTERVAL)`)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func main() {
+ // Connect to the local CockroachDB node.
+ sess, err := cockroachdb.Open(settings)
+ if err != nil {
+ log.Fatal("cockroachdb.Open: ", err)
+ }
+ defer sess.Close()
+
+ // Adjust this number to fit your specific needs (set to 5, by default)
+ // sess.SetMaxTransactionRetries(10)
+
+ // Create the "accounts" table
+ createTables(sess)
+
+ // Delete all the previous items in the "accounts" table.
+ err = Accounts(sess).Truncate()
+ if err != nil {
+ log.Fatal("Truncate: ", err)
+ }
+
+ // Create a new account with a balance of 1000.
+ account1 := Account{Balance: 1000}
+ err = Accounts(sess).InsertReturning(&account1)
+ if err != nil {
+ log.Fatal("sess.Save: ", err)
+ }
+
+ // Create a new account with a balance of 250.
+ account2 := Account{Balance: 250}
+ err = Accounts(sess).InsertReturning(&account2)
+ if err != nil {
+ log.Fatal("sess.Save: ", err)
+ }
+
+ // Printing records
+ printRecords(sess)
+
+ // Change the balance of the first account.
+ account1.Balance = 500
+ err = sess.Save(&account1)
+ if err != nil {
+ log.Fatal("sess.Save: ", err)
+ }
+
+ // Change the balance of the second account.
+ account2.Balance = 999
+ err = sess.Save(&account2)
+ if err != nil {
+ log.Fatal("sess.Save: ", err)
+ }
+
+ // Printing records
+ printRecords(sess)
+
+ // Delete the first record.
+ err = sess.Delete(&account1)
+ if err != nil {
+ log.Fatal("Delete: ", err)
+ }
+
+ startTime := time.Now()
+
+ // Add a couple of new records within a transaction.
+ err = sess.Tx(func(tx db.Session) error {
+ var err error
+
+ if err = tx.Save(&Account{Balance: 887}); err != nil {
+ return err
+ }
+
+ if time.Now().Sub(startTime) < time.Second*1 {
+ // Will fail continuously for 2 seconds.
+ if err = crdbForceRetry(tx); err != nil {
+ return err
+ }
+ }
+
+ if err = tx.Save(&Account{Balance: 342}); err != nil {
+ return err
+ }
+
+ return nil
+ })
+ if err != nil {
+ log.Fatal("Could not commit transaction: ", err)
+ }
+
+ // Printing records
+ printRecords(sess)
+}
+
+func printRecords(sess db.Session) {
+ accounts := []Account{}
+ err := Accounts(sess).Find().All(&accounts)
+ if err != nil {
+ log.Fatal("Find: ", err)
+ }
+ log.Printf("Balances:")
+ for i := range accounts {
+ fmt.Printf("\taccounts[%d]: %d\n", accounts[i].ID, accounts[i].Balance)
+ }
+}
diff --git a/src/current/_includes/v25.1/app/java-tls-note.md b/src/current/_includes/v25.1/app/java-tls-note.md
new file mode 100644
index 00000000000..fd490d2b0a8
--- /dev/null
+++ b/src/current/_includes/v25.1/app/java-tls-note.md
@@ -0,0 +1,13 @@
+CockroachDB supports TLS 1.2 and 1.3, and uses 1.3 by default.
+
+{% include common/tls-bad-cipher-warning.md %}
+
+[A bug in the TLS 1.3 implementation](https://bugs.openjdk.java.net/browse/JDK-8236039) in Java 11 versions lower than 11.0.7 and Java 13 versions lower than 13.0.3 makes the versions incompatible with CockroachDB.
+
+If an incompatible version is used, the client may throw the following exception:
+
+`javax.net.ssl.SSLHandshakeException: extension (5) should not be presented in certificate_request`
+
+For applications running Java 11 or 13, make sure that you have version 11.0.7 or higher, or 13.0.3 or higher.
+
+If you cannot upgrade to version 11.0.7 / 13.0.3 or higher, you must configure the application to use TLS 1.2. For example, when starting your app, use: `$ java -Djdk.tls.client.protocols=TLSv1.2 appName`
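
A minimal sketch of the same workaround applied in application code rather than on the command line (assuming the PostgreSQL JDBC driver; the connection URL, user, and certificate path are placeholders):

~~~ java
import java.sql.Connection;
import java.sql.DriverManager;

public class TlsProtocolExample {
    public static void main(String[] args) throws Exception {
        // Restrict the JVM to TLS 1.2 before the first TLS connection is made.
        // Equivalent to passing -Djdk.tls.client.protocols=TLSv1.2 at startup.
        System.setProperty("jdk.tls.client.protocols", "TLSv1.2");

        // Placeholder URL; adjust the host, database, and certificate paths for your cluster.
        String url = "jdbc:postgresql://localhost:26257/bank?sslmode=verify-full&sslrootcert=certs/ca.crt";
        try (Connection conn = DriverManager.getConnection(url, "maxroach", "")) {
            System.out.println("Connected using TLS 1.2");
        }
    }
}
~~~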
diff --git a/src/current/_includes/v25.1/app/java-version-note.md b/src/current/_includes/v25.1/app/java-version-note.md
new file mode 100644
index 00000000000..3d559314262
--- /dev/null
+++ b/src/current/_includes/v25.1/app/java-version-note.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+We recommend using Java versions 8+ with CockroachDB.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/app/jooq-basic-sample/Sample.java b/src/current/_includes/v25.1/app/jooq-basic-sample/Sample.java
new file mode 100644
index 00000000000..fd71726603e
--- /dev/null
+++ b/src/current/_includes/v25.1/app/jooq-basic-sample/Sample.java
@@ -0,0 +1,215 @@
+package com.cockroachlabs;
+
+import com.cockroachlabs.example.jooq.db.Tables;
+import com.cockroachlabs.example.jooq.db.tables.records.AccountsRecord;
+import org.jooq.DSLContext;
+import org.jooq.SQLDialect;
+import org.jooq.Source;
+import org.jooq.conf.RenderQuotedNames;
+import org.jooq.conf.Settings;
+import org.jooq.exception.DataAccessException;
+import org.jooq.impl.DSL;
+
+import java.io.InputStream;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.*;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Function;
+
+import static com.cockroachlabs.example.jooq.db.Tables.ACCOUNTS;
+
+public class Sample {
+
+ private static final Random RAND = new Random();
+ private static final boolean FORCE_RETRY = false;
+ private static final String RETRY_SQL_STATE = "40001";
+ private static final int MAX_ATTEMPT_COUNT = 6;
+
+ private static Function<DSLContext, Long> addAccounts() {
+ return ctx -> {
+ long rv = 0;
+
+ ctx.delete(ACCOUNTS).execute();
+ ctx.batchInsert(
+ new AccountsRecord(1L, 1000L),
+ new AccountsRecord(2L, 250L),
+ new AccountsRecord(3L, 314159L)
+ ).execute();
+
+ rv = 1;
+ System.out.printf("APP: addAccounts() --> %d\n", rv);
+ return rv;
+ };
+ }
+
+ private static Function<DSLContext, Long> transferFunds(long fromId, long toId, long amount) {
+ return ctx -> {
+ long rv = 0;
+
+ AccountsRecord fromAccount = ctx.fetchSingle(ACCOUNTS, ACCOUNTS.ID.eq(fromId));
+ AccountsRecord toAccount = ctx.fetchSingle(ACCOUNTS, ACCOUNTS.ID.eq(toId));
+
+ if (!(amount > fromAccount.getBalance())) {
+ fromAccount.setBalance(fromAccount.getBalance() - amount);
+ toAccount.setBalance(toAccount.getBalance() + amount);
+
+ ctx.batchUpdate(fromAccount, toAccount).execute();
+ rv = amount;
+ System.out.printf("APP: transferFunds(%d, %d, %d) --> %d\n", fromId, toId, amount, rv);
+ }
+
+ return rv;
+ };
+ }
+
+ // Test our retry handling logic if FORCE_RETRY is true. This
+ // method is only used to test the retry logic. It is not
+ // intended for production code.
+ private static Function<DSLContext, Long> forceRetryLogic() {
+ return ctx -> {
+ long rv = -1;
+ try {
+ System.out.printf("APP: testRetryLogic: BEFORE EXCEPTION\n");
+ ctx.execute("SELECT crdb_internal.force_retry('1s')");
+ } catch (DataAccessException e) {
+ System.out.printf("APP: testRetryLogic: AFTER EXCEPTION\n");
+ throw e;
+ }
+ return rv;
+ };
+ }
+
+ private static Function<DSLContext, Long> getAccountBalance(long id) {
+ return ctx -> {
+ AccountsRecord account = ctx.fetchSingle(ACCOUNTS, ACCOUNTS.ID.eq(id));
+ long balance = account.getBalance();
+ System.out.printf("APP: getAccountBalance(%d) --> %d\n", id, balance);
+ return balance;
+ };
+ }
+
+ // Run SQL code in a way that automatically handles the
+ // transaction retry logic so we do not have to duplicate it in
+ // various places.
+ private static long runTransaction(DSLContext session, Function<DSLContext, Long> fn) {
+ AtomicLong rv = new AtomicLong(0L);
+ AtomicInteger attemptCount = new AtomicInteger(0);
+
+ while (attemptCount.get() < MAX_ATTEMPT_COUNT) {
+ attemptCount.incrementAndGet();
+
+ if (attemptCount.get() > 1) {
+ System.out.printf("APP: Entering retry loop again, iteration %d\n", attemptCount.get());
+ }
+
+ if (session.connectionResult(connection -> {
+ connection.setAutoCommit(false);
+ System.out.printf("APP: BEGIN;\n");
+
+ if (attemptCount.get() == MAX_ATTEMPT_COUNT) {
+ String err = String.format("hit max of %s attempts, aborting", MAX_ATTEMPT_COUNT);
+ throw new RuntimeException(err);
+ }
+
+ // This block is only used to test the retry logic.
+ // It is not necessary in production code. See also
+ // the method 'forceRetryLogic()'.
+ if (FORCE_RETRY) {
+ session.fetch("SELECT now()");
+ }
+
+ try {
+ rv.set(fn.apply(session));
+ if (rv.get() != -1) {
+ connection.commit();
+ System.out.printf("APP: COMMIT;\n");
+ return true;
+ }
+ } catch (DataAccessException | SQLException e) {
+ String sqlState = e instanceof SQLException ? ((SQLException) e).getSQLState() : ((DataAccessException) e).sqlState();
+
+ if (RETRY_SQL_STATE.equals(sqlState)) {
+ // Since this is a transaction retry error, we
+ // roll back the transaction and sleep a little
+ // before trying again. Each time through the
+ // loop we sleep for a little longer than the last
+ // time (A.K.A. exponential backoff).
+ System.out.printf("APP: retryable exception occurred:\n sql state = [%s]\n message = [%s]\n retry counter = %s\n", sqlState, e.getMessage(), attemptCount.get());
+ System.out.printf("APP: ROLLBACK;\n");
+ connection.rollback();
+ int sleepMillis = (int)(Math.pow(2, attemptCount.get()) * 100) + RAND.nextInt(100);
+ System.out.printf("APP: Hit 40001 transaction retry error, sleeping %s milliseconds\n", sleepMillis);
+ try {
+ Thread.sleep(sleepMillis);
+ } catch (InterruptedException ignored) {
+ // no-op
+ }
+ rv.set(-1L);
+ } else {
+ throw e;
+ }
+ }
+
+ return false;
+ })) {
+ break;
+ }
+ }
+
+ return rv.get();
+ }
+
+ public static void main(String[] args) throws Exception {
+ try (Connection connection = DriverManager.getConnection(
+ "jdbc:postgresql://localhost:26257/bank?ssl=true&sslmode=require&sslrootcert=certs/ca.crt&sslkey=certs/client.maxroach.key.pk8&sslcert=certs/client.maxroach.crt",
+ "maxroach",
+ ""
+ )) {
+ DSLContext ctx = DSL.using(connection, SQLDialect.COCKROACHDB, new Settings()
+ .withExecuteLogging(true)
+ .withRenderQuotedNames(RenderQuotedNames.NEVER));
+
+ // Initialise database with db.sql script
+ try (InputStream in = Sample.class.getResourceAsStream("/db.sql")) {
+ ctx.parser().parse(Source.of(in).readString()).executeBatch();
+ }
+
+ long fromAccountId = 1;
+ long toAccountId = 2;
+ long transferAmount = 100;
+
+ if (FORCE_RETRY) {
+ System.out.printf("APP: About to test retry logic in 'runTransaction'\n");
+ runTransaction(ctx, forceRetryLogic());
+ } else {
+
+ runTransaction(ctx, addAccounts());
+ long fromBalance = runTransaction(ctx, getAccountBalance(fromAccountId));
+ long toBalance = runTransaction(ctx, getAccountBalance(toAccountId));
+ if (fromBalance != -1 && toBalance != -1) {
+ // Success!
+ System.out.printf("APP: getAccountBalance(%d) --> %d\n", fromAccountId, fromBalance);
+ System.out.printf("APP: getAccountBalance(%d) --> %d\n", toAccountId, toBalance);
+ }
+
+ // Transfer $100 from account 1 to account 2
+ long transferResult = runTransaction(ctx, transferFunds(fromAccountId, toAccountId, transferAmount));
+ if (transferResult != -1) {
+ // Success!
+ System.out.printf("APP: transferFunds(%d, %d, %d) --> %d \n", fromAccountId, toAccountId, transferAmount, transferResult);
+
+ long fromBalanceAfter = runTransaction(ctx, getAccountBalance(fromAccountId));
+ long toBalanceAfter = runTransaction(ctx, getAccountBalance(toAccountId));
+ if (fromBalanceAfter != -1 && toBalanceAfter != -1) {
+ // Success!
+ System.out.printf("APP: getAccountBalance(%d) --> %d\n", fromAccountId, fromBalanceAfter);
+ System.out.printf("APP: getAccountBalance(%d) --> %d\n", toAccountId, toBalanceAfter);
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/current/_includes/v25.1/app/jooq-basic-sample/jooq-basic-sample.zip b/src/current/_includes/v25.1/app/jooq-basic-sample/jooq-basic-sample.zip
new file mode 100644
index 00000000000..859305478c0
Binary files /dev/null and b/src/current/_includes/v25.1/app/jooq-basic-sample/jooq-basic-sample.zip differ
diff --git a/src/current/_includes/v25.1/app/pkcs8-gen.md b/src/current/_includes/v25.1/app/pkcs8-gen.md
new file mode 100644
index 00000000000..3a750a5eea9
--- /dev/null
+++ b/src/current/_includes/v25.1/app/pkcs8-gen.md
@@ -0,0 +1,8 @@
+You can pass the [`--also-generate-pkcs8-key` flag]({% link {{ page.version.version }}/cockroach-cert.md %}#flag-pkcs8) to [`cockroach cert`]({% link {{ page.version.version }}/cockroach-cert.md %}) to generate a key in [PKCS#8 format](https://tools.ietf.org/html/rfc5208), which is the standard key encoding format in Java. For example, if you have the user `max`:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ cockroach cert create-client max --certs-dir=certs --ca-key=my-safe-directory/ca.key --also-generate-pkcs8-key
+~~~
+
+The generated PKCS8 key will be named `client.max.key.pk8`.
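
As a sketch of how the generated key is typically consumed, a JDBC connection URL can point `sslkey` at the PKCS#8 file directly (the host, database, and certificate paths below are placeholders based on the example above):

~~~ java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class Pkcs8Example {
    public static void main(String[] args) throws Exception {
        // The PostgreSQL JDBC driver expects the client key in PKCS#8 (DER) format,
        // which is what --also-generate-pkcs8-key produces.
        String url = "jdbc:postgresql://localhost:26257/bank"
            + "?sslmode=verify-full"
            + "&sslrootcert=certs/ca.crt"
            + "&sslcert=certs/client.max.crt"
            + "&sslkey=certs/client.max.key.pk8";
        try (Connection conn = DriverManager.getConnection(url, "max", "");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT version()")) {
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
    }
}
~~~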
diff --git a/src/current/_includes/v25.1/app/python/sqlalchemy/sqlalchemy-large-txns.py b/src/current/_includes/v25.1/app/python/sqlalchemy/sqlalchemy-large-txns.py
new file mode 100644
index 00000000000..7a6ef82c2e3
--- /dev/null
+++ b/src/current/_includes/v25.1/app/python/sqlalchemy/sqlalchemy-large-txns.py
@@ -0,0 +1,57 @@
+from sqlalchemy import create_engine, Column, Float, Integer
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import sessionmaker
+from cockroachdb.sqlalchemy import run_transaction
+from random import random
+
+Base = declarative_base()
+
+# The code below assumes you have run the following SQL statements.
+
+# CREATE DATABASE pointstore;
+
+# USE pointstore;
+
+# CREATE TABLE points (
+# id INT PRIMARY KEY DEFAULT unique_rowid(),
+# x FLOAT NOT NULL,
+# y FLOAT NOT NULL,
+# z FLOAT NOT NULL
+# );
+
+engine = create_engine(
+ # For cockroach demo:
+ 'cockroachdb://<username>:<password>@<hostname>:<port>/bank?sslmode=require',
+ echo=True # Log SQL queries to stdout
+)
+
+
+class Point(Base):
+ __tablename__ = 'points'
+ id = Column(Integer, primary_key=True)
+ x = Column(Float)
+ y = Column(Float)
+ z = Column(Float)
+
+
+def add_points(num_points):
+ chunk_size = 1000 # Tune this based on object sizes.
+
+ def add_points_helper(sess, chunk, num_points):
+ points = []
+ for i in range(chunk, min(chunk + chunk_size, num_points)):
+ points.append(
+ Point(x=random()*1024, y=random()*1024, z=random()*1024)
+ )
+ sess.bulk_save_objects(points)
+
+ for chunk in range(0, num_points, chunk_size):
+ run_transaction(
+ sessionmaker(bind=engine),
+ lambda s: add_points_helper(
+ s, chunk, min(chunk + chunk_size, num_points)
+ )
+ )
+
+
+add_points(10000)
diff --git a/src/current/_includes/v25.1/app/retry-errors.md b/src/current/_includes/v25.1/app/retry-errors.md
new file mode 100644
index 00000000000..7333d53f6bc
--- /dev/null
+++ b/src/current/_includes/v25.1/app/retry-errors.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+When running under the default [`SERIALIZABLE`]({% link {{ page.version.version }}/demo-serializable.md %}) isolation level, your application should [use a retry loop to handle transaction retry errors]({% link {{ page.version.version }}/query-behavior-troubleshooting.md %}#transaction-retry-errors) that can occur under [contention]({{ link_prefix }}performance-best-practices-overview.html#understanding-and-avoiding-transaction-contention).
+{{site.data.alerts.end}}
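
A minimal client-side retry loop might look like the following sketch (JDBC with plain `java.sql`; the retry limit, backoff, connection URL, and statements are illustrative placeholders):

~~~ java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class RetryLoop {
    static final int MAX_RETRIES = 5;

    interface TxnBody {
        void run(Connection conn) throws SQLException;
    }

    // Runs txnBody in a transaction, retrying on SQLSTATE 40001 (transaction retry error).
    static void runTransaction(Connection conn, TxnBody txnBody) throws SQLException {
        conn.setAutoCommit(false);
        for (int attempt = 1; ; attempt++) {
            try {
                txnBody.run(conn);
                conn.commit();
                return;
            } catch (SQLException e) {
                conn.rollback();
                if (!"40001".equals(e.getSQLState()) || attempt >= MAX_RETRIES) {
                    throw e;
                }
                try {
                    // Simple backoff before retrying; tune as needed.
                    Thread.sleep(100L * attempt);
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    throw e;
                }
            }
        }
    }

    public static void main(String[] args) throws Exception {
        // Placeholder URL, user, and table.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:postgresql://localhost:26257/bank?sslmode=require", "maxroach", "")) {
            runTransaction(conn, c -> {
                try (Statement stmt = c.createStatement()) {
                    stmt.execute("UPDATE accounts SET balance = balance - 100 WHERE id = 1");
                    stmt.execute("UPDATE accounts SET balance = balance + 100 WHERE id = 2");
                }
            });
        }
    }
}
~~~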
diff --git a/src/current/_includes/v25.1/app/see-also-links.md b/src/current/_includes/v25.1/app/see-also-links.md
new file mode 100644
index 00000000000..805672736af
--- /dev/null
+++ b/src/current/_includes/v25.1/app/see-also-links.md
@@ -0,0 +1,9 @@
+You might also be interested in the following pages:
+
+- [Client Connection Parameters]({% link {{ page.version.version }}/connection-parameters.md %})
+- [Connection Pooling]({% link {{ page.version.version }}/connection-pooling.md %})
+- [Data Replication]({% link {{ page.version.version }}/demo-replication-and-rebalancing.md %})
+- [CockroachDB Resilience]({% link {{ page.version.version }}/demo-cockroachdb-resilience.md %})
+- [Replication & Rebalancing]({% link {{ page.version.version }}/demo-replication-and-rebalancing.md %})
+- [Cross-Cloud Migration]({% link {{ page.version.version }}/demo-automatic-cloud-migration.md %})
+- [Automated Operations]({% link {{ page.version.version }}/orchestrate-a-local-cluster-with-kubernetes-insecure.md %})
diff --git a/src/current/_includes/v25.1/app/start-cockroachdb.md b/src/current/_includes/v25.1/app/start-cockroachdb.md
new file mode 100644
index 00000000000..5aeab710338
--- /dev/null
+++ b/src/current/_includes/v25.1/app/start-cockroachdb.md
@@ -0,0 +1,58 @@
+Choose whether to run a temporary local cluster or a free CockroachDB cluster on CockroachDB {{ site.data.products.serverless }}. The instructions below will adjust accordingly.
+
+
+
+
+
+
+
+
+### Create a free cluster
+
+{% include cockroachcloud/quickstart/create-a-free-cluster.md %}
+
+### Set up your cluster connection
+
+The **Connection info** dialog shows information about how to connect to your cluster.
+
+1. Click the **Choose your OS** dropdown, and select the operating system of your local machine.
+
+1. Click the **Connection string** tab in the **Connection info** dialog.
+
+1. Open a new terminal on your local machine, and run the command provided in step **1** to download the CA certificate. This certificate is required by some clients connecting to CockroachDB {{ site.data.products.cloud }}.
+
+1. Copy the connection string provided in step **2** to a secure location.
+
+ {{site.data.alerts.callout_info}}
+ The connection string is pre-populated with your username, password, cluster name, and other details. Your password, in particular, will be provided *only once*. Save it in a secure place (Cockroach Labs recommends a password manager) to connect to your cluster in the future. If you forget your password, you can reset it by going to the **SQL Users** page for the cluster, found at `https://cockroachlabs.cloud/cluster/<cluster-id>/users`.
+ {{site.data.alerts.end}}
+
+
+
+
+
+1. If you haven't already, [download the CockroachDB binary]({% link {{ page.version.version }}/install-cockroachdb.md %}).
+1. Run the [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) command:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach demo \
+ --no-example-database
+ ~~~
+
+ This starts a temporary, in-memory cluster and opens an interactive SQL shell to the cluster. Any changes to the database will not persist after the cluster is stopped.
+
+ {{site.data.alerts.callout_info}}
+ If `cockroach demo` fails due to SSL authentication, make sure you have cleared any previously downloaded CA certificates from the directory `~/.postgresql`.
+ {{site.data.alerts.end}}
+
+1. Take note of the `(sql)` connection string in the SQL shell welcome text:
+
+ ~~~
+ # Connection parameters:
+ # (webui) http://127.0.0.1:8080/demologin?password=demo76950&username=demo
+ # (sql) postgres://demo:demo76950@127.0.0.1:26257?sslmode=require
+ # (sql/unix) postgres://demo:demo76950@?host=%2Fvar%2Ffolders%2Fc8%2Fb_q93vjj0ybfz0fz0z8vy9zc0000gp%2FT%2Fdemo070856957&port=26257
+ ~~~
+
+
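
As an illustration, the `(sql)` connection parameters above map onto a JDBC URL roughly as follows (a sketch; the port and the generated demo password will differ on your machine):

~~~ java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class DemoConnect {
    public static void main(String[] args) throws Exception {
        // Values taken from the (sql) line in the demo shell welcome text;
        // the password is regenerated each time `cockroach demo` starts.
        String url = "jdbc:postgresql://127.0.0.1:26257/defaultdb?sslmode=require";
        try (Connection conn = DriverManager.getConnection(url, "demo", "demo76950");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT now()")) {
            while (rs.next()) {
                System.out.println("Connected at " + rs.getString(1));
            }
        }
    }
}
~~~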
diff --git a/src/current/_includes/v25.1/app/upperdb-basic-sample/main.go b/src/current/_includes/v25.1/app/upperdb-basic-sample/main.go
new file mode 100644
index 00000000000..3e838fe43e2
--- /dev/null
+++ b/src/current/_includes/v25.1/app/upperdb-basic-sample/main.go
@@ -0,0 +1,187 @@
+package main
+
+import (
+ "fmt"
+ "log"
+ "time"
+
+ "github.com/upper/db/v4"
+ "github.com/upper/db/v4/adapter/cockroachdb"
+)
+
+// The settings variable stores connection details.
+var settings = cockroachdb.ConnectionURL{
+ Host: "localhost",
+ Database: "bank",
+ User: "maxroach",
+ Options: map[string]string{
+ // Secure node.
+ "sslrootcert": "certs/ca.crt",
+ "sslkey": "certs/client.maxroach.key",
+ "sslcert": "certs/client.maxroach.crt",
+ },
+}
+
+// Accounts is a handy way to represent a collection.
+func Accounts(sess db.Session) db.Store {
+ return sess.Collection("accounts")
+}
+
+// Account is used to represent a single record in the "accounts" table.
+type Account struct {
+ ID uint64 `db:"id,omitempty"`
+ Balance int64 `db:"balance"`
+}
+
+// Store is required in order to create a relation between the Account
+// struct and the "accounts" table.
+func (a *Account) Store(sess db.Session) db.Store {
+ return Accounts(sess)
+}
+
+// createTables creates all the tables that are necessary to run this example.
+func createTables(sess db.Session) error {
+ _, err := sess.SQL().Exec(`
+ CREATE TABLE IF NOT EXISTS accounts (
+ ID SERIAL PRIMARY KEY,
+ balance INT
+ )
+ `)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// crdbForceRetry can be used to simulate a transaction error and
+// demonstrate upper/db's ability to retry the transaction automatically.
+//
+// By default, upper/db will retry the transaction five times, if you want
+// to modify this number use: sess.SetMaxTransactionRetries(n).
+//
+// This is only used for demonstration purposes and not intended
+// for production code.
+func crdbForceRetry(sess db.Session) error {
+ var err error
+
+ // The first statement in a transaction can be retried transparently on the
+ // server, so we need to add a placeholder statement so that our
+ // force_retry() statement isn't the first one.
+ _, err = sess.SQL().Exec(`SELECT 1`)
+ if err != nil {
+ return err
+ }
+
+ // If force_retry is called during the specified interval from the beginning
+ // of the transaction it returns a retryable error. If not, 0 is returned
+ // instead of an error.
+ _, err = sess.SQL().Exec(`SELECT crdb_internal.force_retry('1s'::INTERVAL)`)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func main() {
+ // Connect to the local CockroachDB node.
+ sess, err := cockroachdb.Open(settings)
+ if err != nil {
+ log.Fatal("cockroachdb.Open: ", err)
+ }
+ defer sess.Close()
+
+ // Adjust this number to fit your specific needs (set to 5, by default)
+ // sess.SetMaxTransactionRetries(10)
+
+ // Create the "accounts" table
+ if err := createTables(sess); err != nil {
+ log.Fatal("createTables: ", err)
+ }
+
+ // Delete all the previous items in the "accounts" table.
+ err = Accounts(sess).Truncate()
+ if err != nil {
+ log.Fatal("Truncate: ", err)
+ }
+
+ // Create a new account with a balance of 1000.
+ account1 := Account{Balance: 1000}
+ err = Accounts(sess).InsertReturning(&account1)
+ if err != nil {
+ log.Fatal("sess.Save: ", err)
+ }
+
+ // Create a new account with a balance of 250.
+ account2 := Account{Balance: 250}
+ err = Accounts(sess).InsertReturning(&account2)
+ if err != nil {
+ log.Fatal("sess.Save: ", err)
+ }
+
+ // Printing records
+ printRecords(sess)
+
+ // Change the balance of the first account.
+ account1.Balance = 500
+ err = sess.Save(&account1)
+ if err != nil {
+ log.Fatal("sess.Save: ", err)
+ }
+
+ // Change the balance of the second account.
+ account2.Balance = 999
+ err = sess.Save(&account2)
+ if err != nil {
+ log.Fatal("sess.Save: ", err)
+ }
+
+ // Printing records
+ printRecords(sess)
+
+ // Delete the first record.
+ err = sess.Delete(&account1)
+ if err != nil {
+ log.Fatal("Delete: ", err)
+ }
+
+ startTime := time.Now()
+
+ // Add a couple of new records within a transaction.
+ err = sess.Tx(func(tx db.Session) error {
+ var err error
+
+ if err = tx.Save(&Account{Balance: 887}); err != nil {
+ return err
+ }
+
+ if time.Since(startTime) < time.Second {
+ // Force a retry error while less than 1 second has elapsed since startTime.
+ if err = crdbForceRetry(tx); err != nil {
+ return err
+ }
+ }
+
+ if err = tx.Save(&Account{Balance: 342}); err != nil {
+ return err
+ }
+
+ return nil
+ })
+ if err != nil {
+ log.Fatal("Could not commit transaction: ", err)
+ }
+
+ // Printing records
+ printRecords(sess)
+}
+
+func printRecords(sess db.Session) {
+ accounts := []Account{}
+ err := Accounts(sess).Find().All(&accounts)
+ if err != nil {
+ log.Fatal("Find: ", err)
+ }
+ log.Printf("Balances:")
+ for i := range accounts {
+ fmt.Printf("\taccounts[%d]: %d\n", accounts[i].ID, accounts[i].Balance)
+ }
+}
diff --git a/src/current/_includes/v25.1/backups/advanced-examples-list.md b/src/current/_includes/v25.1/backups/advanced-examples-list.md
new file mode 100644
index 00000000000..2d6c9a5956d
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/advanced-examples-list.md
@@ -0,0 +1,11 @@
+For examples of advanced `BACKUP` and `RESTORE` use cases, see:
+
+- [Incremental backups with a specified destination]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}#incremental-backups-with-explicitly-specified-destinations)
+- [Backup with revision history and point-in-time restore]({% link {{ page.version.version }}/take-backups-with-revision-history-and-restore-from-a-point-in-time.md %})
+- [Locality-aware backup and restore]({% link {{ page.version.version }}/take-and-restore-locality-aware-backups.md %})
+- [Encrypted backup and restore]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %})
+- [Restore into a different database]({% link {{ page.version.version }}/restore.md %}#restore-tables-into-a-different-database)
+- [Remove the foreign key before restore]({% link {{ page.version.version }}/restore.md %}#remove-the-foreign-key-before-restore)
+- [Restoring users from `system.users` backup]({% link {{ page.version.version }}/restore.md %}#restoring-users-from-system-users-backup)
+- [Show an incremental backup at a different location]({% link {{ page.version.version }}/show-backup.md %}#show-a-backup-taken-with-the-incremental-location-option)
+- [Exclude a table's data from backups]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}#exclude-a-tables-data-from-backups)
diff --git a/src/current/_includes/v25.1/backups/aws-auth-note.md b/src/current/_includes/v25.1/backups/aws-auth-note.md
new file mode 100644
index 00000000000..b32ddde5c69
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/aws-auth-note.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+The examples in this section use the **default** `AUTH=specified` parameter. For more detail on how to use `implicit` authentication with Amazon S3 buckets, read [Use Cloud Storage for Bulk Operations — Authentication]({% link {{ page.version.version }}/cloud-storage-authentication.md %}).
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/backups/azure-storage-tier-support.md b/src/current/_includes/v25.1/backups/azure-storage-tier-support.md
new file mode 100644
index 00000000000..993b39ed3e7
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/azure-storage-tier-support.md
@@ -0,0 +1 @@
+Cockroach Labs supports Azure's General Purpose v2 Standard storage account type. For more details, refer to the Azure [Storage Account documentation](https://learn.microsoft.com/azure/storage/blobs/storage-blobs-introduction#storage-accounts).
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/azure-url-encode.md b/src/current/_includes/v25.1/backups/azure-url-encode.md
new file mode 100644
index 00000000000..7a16e20d2df
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/azure-url-encode.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+Azure storage containers **require** a [URL-encoded](https://wikipedia.org/wiki/Percent-encoding) `ACCOUNT_KEY`, since it is base64-encoded and may contain `+`, `/`, and `=` characters. For more detail on how to pass your Azure Storage credentials with this parameter, read [Use Cloud Storage for Bulk Operations — Authentication]({% link {{ page.version.version }}/cloud-storage-authentication.md %}).
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/backup-options-for-schedules.md b/src/current/_includes/v25.1/backups/backup-options-for-schedules.md
new file mode 100644
index 00000000000..391c4dcf968
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/backup-options-for-schedules.md
@@ -0,0 +1,8 @@
+ Option | Value | Description
+-----------------------------------------------------------------+-------------------------+------------------------------
+`revision_history` | [`BOOL`]({% link {{ page.version.version }}/bool.md %}) / None | Create a backup with full [revision history]({% link {{ page.version.version }}/take-backups-with-revision-history-and-restore-from-a-point-in-time.md %}), which records every change made to the cluster within the garbage collection period leading up to and including the given timestamp. You can specify a backup with revision history without any value e.g., `WITH revision_history`. Or, you can explicitly define `WITH revision_history = 'true' / 'false'`. The `revision_history` option defaults to `true` when used with [`BACKUP`]({% link {{ page.version.version }}/backup.md %}) or `CREATE SCHEDULE FOR BACKUP`. A value is **required** when using `ALTER BACKUP SCHEDULE` to {% if page.name == "alter-backup-schedule.md" %} [apply different options to scheduled backups](#apply-different-options-to-scheduled-backups). {% else %} [alter a backup schedule](alter-backup-schedule.html). {% endif %}
+`encryption_passphrase` | [`STRING`]({% link {{ page.version.version }}/string.md %}) | The passphrase used to [encrypt the files]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}) (`BACKUP` manifest and data files) that the `BACKUP` statement generates. This same passphrase is needed to decrypt the file when it is used to [restore]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}) and to list the contents of the backup when using [`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}). There is no practical limit on the length of the passphrase.
+`detached` | [`BOOL`]({% link {{ page.version.version }}/bool.md %}) / None | **Note:** Backups running on a schedule have the `detached` option applied implicitly. Therefore, you cannot modify this option for scheduled backups. When a backup runs in `detached` mode, it will execute asynchronously. The job ID will be returned after the backup [job creation]({% link {{ page.version.version }}/backup-architecture.md %}#job-creation-phase) completes. Note that with `detached` specified, further job information and the job completion status will not be returned. For more on the differences between the returned job data, see the [example]({% link {{ page.version.version }}/backup.md %}#run-a-backup-asynchronously). To check on the job status, use the [`SHOW JOBS`](show-jobs.html) statement.
+`EXECUTION LOCALITY` | Key-value pairs | Restricts the execution of the backup to nodes that match the defined locality filter requirements. For example, `WITH EXECUTION LOCALITY = 'region=us-west-1a,cloud=aws'`. Refer to [Take Locality-restricted backups]({% link {{ page.version.version }}/take-locality-restricted-backups.md %}) for usage and reference detail.
+`kms` | [`STRING`]({% link {{ page.version.version }}/string.md %}) | The URI of the cryptographic key stored in a key management service (KMS), or a comma-separated list of key URIs, used to [take and restore encrypted backups]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}#examples). Refer to [URI Formats]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}#uri-formats). The key or keys are used to encrypt the manifest and data files that the `BACKUP` statement generates and to decrypt them during a [restore]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}#examples) operation, and to list the contents of the backup when using [`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}). AWS KMS, Google Cloud KMS, and Azure Key Vault are supported.
+`incremental_location` | [`STRING`]({% link {{ page.version.version }}/string.md %}) | Create an incremental backup in a different location than the default incremental backup location. See [Incremental backups with explicitly specified destinations]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}#incremental-backups-with-explicitly-specified-destinations) for usage.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/backup-options.md b/src/current/_includes/v25.1/backups/backup-options.md
new file mode 100644
index 00000000000..a4c3eeae595
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/backup-options.md
@@ -0,0 +1,8 @@
+ Option | Value | Description
+-----------------------------------------------------------------+-------------------------+------------------------------
+`revision_history` | [`BOOL`]({% link {{ page.version.version }}/bool.md %}) / None | Create a backup with full [revision history]({% link {{ page.version.version }}/take-backups-with-revision-history-and-restore-from-a-point-in-time.md %}), which records every change made to the cluster within the garbage collection period leading up to and including the given timestamp. You can specify a backup with revision history without any value e.g., `WITH revision_history`. Or, you can explicitly define `WITH revision_history = 'true' / 'false'`. `revision_history` defaults to `true` when used with `BACKUP` or `CREATE SCHEDULE FOR BACKUP`. A value is **required** when using [`ALTER BACKUP SCHEDULE`]({% link {{ page.version.version }}/alter-backup-schedule.md %}).
+`encryption_passphrase` | [`STRING`]({% link {{ page.version.version }}/string.md %}) | The passphrase used to [encrypt the files]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}) (`BACKUP` manifest and data files) that the `BACKUP` statement generates. This same passphrase is needed to decrypt the file when it is used to [restore]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}) and to list the contents of the backup when using [`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}). There is no practical limit on the length of the passphrase.
+`detached` | [`BOOL`]({% link {{ page.version.version }}/bool.md %}) / None | When a backup runs in `detached` mode, it will execute asynchronously. The job ID will be returned after the backup [job creation]({% link {{ page.version.version }}/backup-architecture.md %}#job-creation-phase) completes. Note that with `detached` specified, further job information and the job completion status will not be returned. For more on the differences between the returned job data, see the [example]({% link {{ page.version.version }}/backup.md %}#run-a-backup-asynchronously). To check on the job status, use the [`SHOW JOBS`](show-jobs.html) statement. Backups running on a [schedule](create-schedule-for-backup.html) have the `detached` option applied implicitly. To run a backup within a [transaction](transactions.html), use the `detached` option.
+`EXECUTION LOCALITY` | Key-value pairs | Restricts the execution of the backup to nodes that match the defined locality filter requirements. For example, `WITH EXECUTION LOCALITY = 'region=us-west-1a,cloud=aws'`. Refer to [Take Locality-restricted backups]({% link {{ page.version.version }}/take-locality-restricted-backups.md %}) for usage and reference detail.
+`kms` | [`STRING`]({% link {{ page.version.version }}/string.md %}) | The URI of the cryptographic key stored in a key management service (KMS), or a comma-separated list of key URIs, used to [take and restore encrypted backups]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}#examples). Refer to [URI Formats]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}#uri-formats). The key or keys are used to encrypt the manifest and data files that the `BACKUP` statement generates and to decrypt them during a [restore]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}#examples) operation, and to list the contents of the backup when using [`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}). AWS KMS, Google Cloud KMS, and Azure Key Vault are supported.
+`incremental_location` | [`STRING`]({% link {{ page.version.version }}/string.md %}) | Create an incremental backup in a different location than the default incremental backup location. See [Incremental backups with explicitly specified destinations]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}#incremental-backups-with-explicitly-specified-destinations) for usage.
\ No newline at end of file
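
As a rough sketch of how a few of these options combine in practice, the following issues a detached backup with revision history from a JDBC client; the connection URL and the `external://backup_bucket` external connection name are placeholders:

~~~ java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class BackupWithOptions {
    public static void main(String[] args) throws Exception {
        // Placeholder connection URL and credentials.
        String url = "jdbc:postgresql://localhost:26257/defaultdb?sslmode=require";
        try (Connection conn = DriverManager.getConnection(url, "root", "");
             Statement stmt = conn.createStatement();
             // With `detached`, the statement returns the job ID immediately
             // instead of waiting for the backup to complete.
             ResultSet rs = stmt.executeQuery(
                 "BACKUP DATABASE bank INTO 'external://backup_bucket' " +
                 "WITH revision_history, detached")) {
            while (rs.next()) {
                System.out.println("backup job ID: " + rs.getLong(1));
            }
        }
    }
}
~~~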
diff --git a/src/current/_includes/v25.1/backups/backup-storage-collision.md b/src/current/_includes/v25.1/backups/backup-storage-collision.md
new file mode 100644
index 00000000000..c52cc1524e5
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/backup-storage-collision.md
@@ -0,0 +1 @@
+You will encounter an error if you run multiple [backup collections]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}#backup-collections) to the same storage URI. Each collection's URI must be unique.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/bulk-auth-options.md b/src/current/_includes/v25.1/backups/bulk-auth-options.md
new file mode 100644
index 00000000000..57bc67ea190
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/bulk-auth-options.md
@@ -0,0 +1,6 @@
+The examples in this section use one of the following storage URIs:
+
+- External connections, which allow you to represent an external storage or sink URI. You can then specify the external connection's name in statements rather than the provider-specific URI. For detail on using external connections, see the [`CREATE EXTERNAL CONNECTION`]({% link {{ page.version.version }}/create-external-connection.md %}) page.
+- Amazon S3 connection strings with the **default** `AUTH=specified` parameter. For guidance on using `AUTH=implicit` authentication with Amazon S3 buckets instead, read [Cloud Storage Authentication]({% link {{ page.version.version }}/cloud-storage-authentication.md %}).
+
+For guidance on connecting to other storage options or using other authentication parameters instead, read [Use Cloud Storage]({% link {{ page.version.version }}/use-cloud-storage.md %}#example-file-urls).
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/cap-parameter-ext-connection.md b/src/current/_includes/v25.1/backups/cap-parameter-ext-connection.md
new file mode 100644
index 00000000000..2628b8527a1
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/cap-parameter-ext-connection.md
@@ -0,0 +1 @@
+If you are creating an {% if page.name == "create-external-connection.md" %}external connection{% else %}[external connection]({% link {{ page.version.version }}/create-external-connection.md %}){% endif %} with [`BACKUP` query parameters]({% link {{ page.version.version }}/backup.md %}#query-parameters) or [authentication]({% link {{ page.version.version }}/cloud-storage-authentication.md %}) parameters, you must pass them in uppercase; otherwise, you will receive an `unknown query parameters` error.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/check-files-validate.md b/src/current/_includes/v25.1/backups/check-files-validate.md
new file mode 100644
index 00000000000..b54cf5ce9a6
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/check-files-validate.md
@@ -0,0 +1,32 @@
+1. Use `SHOW BACKUP ... check_files` with a backup for validation:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~sql
+ SHOW BACKUP "2022/09/19-134123.64" IN "s3://bucket?AWS_ACCESS_KEY_ID={Access Key ID}&AWS_SECRET_ACCESS_KEY={Secret Access Key}" WITH check_files;
+ ~~~
+
+ This will return the following output after validating that the backup files are correct and present:
+
+ ~~~
+ database_name | parent_schema_name | object_name | object_type | backup_type | start_time | end_time | size_bytes | rows | is_full_cluster | file_bytes
+ ----------------+--------------------+----------------------------+-------------+-------------+------------+----------------------------+------------+-------+-----------------+-------------
+ NULL | NULL | movr | database | full | NULL | 2022-09-19 13:41:23.645189 | NULL | NULL | f | NULL
+ movr | NULL | public | schema | full | NULL | 2022-09-19 13:41:23.645189 | NULL | NULL | f | NULL
+ movr | public | users | table | full | NULL | 2022-09-19 13:41:23.645189 | 31155 | 340 | f | 16598
+ movr | public | vehicles | table | full | NULL | 2022-09-19 13:41:23.645189 | 22282 | 113 | f | 12459
+ movr | public | rides | table | full | NULL | 2022-09-19 13:41:23.645189 | 261950 | 902 | f | 135831
+ movr | public | vehicle_location_histories | table | full | NULL | 2022-09-19 13:41:23.645189 | 742557 | 10850 | f | 318583
+ movr | public | promo_codes | table | full | NULL | 2022-09-19 13:41:23.645189 | 228320 | 1034 | f | 118376
+ movr | public | user_promo_codes | table | full | NULL | 2022-09-19 13:41:23.645189 | 9320 | 111 | f | 4832
+ ~~~
+
+ The output will return `file_bytes` along with the columns you receive from `SHOW BACKUP` without `check_files`. The `file_bytes` column indicates the estimated bytes in external storage for a particular table object. For more detail on the output columns, see the `SHOW BACKUP` [Response]({% link {{ page.version.version }}/show-backup.md %}#response) table.
+
+1. If `SHOW BACKUP ... check_files` cannot read from a file, it will return an error message similar to the following:
+
+ ~~~
+ ERROR: The following files are missing from the backup:
+ s3:/bucket-name/2022/09/19-134123.64/data/797981063156727810.sst
+ ~~~
+
+ `SHOW BACKUP ... check_files` will return up to ten file paths for incorrect or missing files.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/control-schedule-privileges.md b/src/current/_includes/v25.1/backups/control-schedule-privileges.md
new file mode 100644
index 00000000000..13f0012ea7f
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/control-schedule-privileges.md
@@ -0,0 +1,3 @@
+- Members of the [`admin` role]({% link {{ page.version.version }}/security-reference/authorization.md %}#default-roles). By default, the `root` user belongs to the `admin` role.
+- Owners of a backup schedule, i.e., the user that [created the backup schedule]({% link {{ page.version.version }}/create-schedule-for-backup.md %}).
+- Owners of a changefeed schedule, i.e., the user that [created the changefeed schedule]({% link {{ page.version.version }}/create-schedule-for-changefeed.md %}).
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/destination-privileges.md b/src/current/_includes/v25.1/backups/destination-privileges.md
new file mode 100644
index 00000000000..fd5d019e97d
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/destination-privileges.md
@@ -0,0 +1,14 @@
+You can grant a user the `EXTERNALIOIMPLICITACCESS` [system-level privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges).
+
+Either the `EXTERNALIOIMPLICITACCESS` system-level privilege or the [`admin`]({% link {{ page.version.version }}/security-reference/authorization.md %}#admin-role) role is required for the following scenarios:
+
+- Interacting with a cloud storage resource using [`IMPLICIT` authentication]({% link {{ page.version.version }}/cloud-storage-authentication.md %}).
+- Using a [custom endpoint](https://docs.aws.amazon.com/sdk-for-go/api/aws/endpoints/) on S3.
+- Using the [`cockroach nodelocal upload`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) command.
+
+No special privilege is required for:
+
+- Interacting with an Amazon S3 or Google Cloud Storage resource using `SPECIFIED` credentials. Azure Storage is always `SPECIFIED` by default.
+- Using [Userfile]({% link {{ page.version.version }}/use-userfile-storage.md %}) storage.
+
+{% include {{ page.version.version }}/misc/bulk-permission-note.md %}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/encrypted-backup-description.md b/src/current/_includes/v25.1/backups/encrypted-backup-description.md
new file mode 100644
index 00000000000..a81f545aaf6
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/encrypted-backup-description.md
@@ -0,0 +1,11 @@
+You can encrypt full or incremental backups with a passphrase by using the [`encryption_passphrase` option]({% link {{ page.version.version }}/backup.md %}#with-encryption-passphrase). Files written by the backup (including `BACKUP` manifests and data files) are encrypted using the specified passphrase to derive a key. To restore the encrypted backup, the same `encryption_passphrase` option (with the same passphrase) must be included in the [`RESTORE`]({% link {{ page.version.version }}/restore.md %}) statement.
+
+When used with [incremental backups]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}#incremental-backups), the `encryption_passphrase` option is applied to all the [backup file URLs]({% link {{ page.version.version }}/backup.md %}#backup-file-urls), which means the same passphrase must be used when appending another incremental backup to an existing backup. Similarly, when used with [locality-aware backups]({% link {{ page.version.version }}/take-and-restore-locality-aware-backups.md %}), the passphrase provided is applied to files in all localities.
+
+Encryption is done using [AES-256-GCM](https://wikipedia.org/wiki/Galois/Counter_Mode), and GCM is used to both encrypt and authenticate the files. A random [salt](https://wikipedia.org/wiki/Salt_(cryptography)) is used to derive a once-per-backup [AES](https://wikipedia.org/wiki/Advanced_Encryption_Standard) key from the specified passphrase, and then a random [initialization vector](https://wikipedia.org/wiki/Initialization_vector) is used per-file. CockroachDB uses [PBKDF2](https://wikipedia.org/wiki/PBKDF2) with 64,000 iterations for the key derivation.
+
+{{site.data.alerts.callout_info}}
+`BACKUP` and `RESTORE` will use more memory when using encryption, as both the plain-text and cipher-text of a given file are held in memory during encryption and decryption.
+{{site.data.alerts.end}}
+
+For an example of an encrypted backup, see [Create an encrypted backup]({% link {{ page.version.version }}/take-and-restore-encrypted-backups.md %}#take-an-encrypted-backup-using-a-passphrase).
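
A sketch of taking and then restoring a passphrase-encrypted backup from a JDBC client (the connection URL, external connection name, passphrase, and `new_db_name` value are placeholders):

~~~ java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class EncryptedBackupExample {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:postgresql://localhost:26257/defaultdb?sslmode=require";
        try (Connection conn = DriverManager.getConnection(url, "root", "");
             Statement stmt = conn.createStatement()) {
            // Take an encrypted backup; 'external://backup_bucket' is a placeholder.
            stmt.execute(
                "BACKUP DATABASE bank INTO 'external://backup_bucket' " +
                "WITH encryption_passphrase = 'correct horse battery staple'");

            // The same passphrase is required to restore or inspect the backup.
            stmt.execute(
                "RESTORE DATABASE bank FROM LATEST IN 'external://backup_bucket' " +
                "WITH encryption_passphrase = 'correct horse battery staple', " +
                "new_db_name = 'bank_restored'");
        }
    }
}
~~~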
diff --git a/src/current/_includes/v25.1/backups/existing-operation-service-account.md b/src/current/_includes/v25.1/backups/existing-operation-service-account.md
new file mode 100644
index 00000000000..dc0f0996320
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/existing-operation-service-account.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+If you already have the service account that contains permissions for the operation, ensure that you give the identity service account access to this service account. Click on your service account and navigate to the **Permissions** tab. Then, use the process in [step 3](#step-3-give-the-identity-service-account-the-token-creator-role) to complete this.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/external-io-implicit-flag.md b/src/current/_includes/v25.1/backups/external-io-implicit-flag.md
new file mode 100644
index 00000000000..4abb21f78f6
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/external-io-implicit-flag.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+If the use of implicit credentials is disabled with the [`--external-io-disable-implicit-credentials` flag]({% link {{ page.version.version }}/cockroach-start.md %}#security), you will receive an error when you access external cloud storage services with `AUTH=implicit`.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/file-size-setting.md b/src/current/_includes/v25.1/backups/file-size-setting.md
new file mode 100644
index 00000000000..2ddef08efac
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/file-size-setting.md
@@ -0,0 +1,5 @@
+{{site.data.alerts.callout_info}}
+To set a target for the amount of backup data written to each backup file, use the `bulkio.backup.file_size` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}).
+
+See the [`SET CLUSTER SETTING`]({% link {{ page.version.version }}/set-cluster-setting.md %}) page for more details on using cluster settings.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/backups/full-cluster-restore-validation.md b/src/current/_includes/v25.1/backups/full-cluster-restore-validation.md
new file mode 100644
index 00000000000..c4196e6e198
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/full-cluster-restore-validation.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+Validation of full-cluster restores with `schema_only` must be run on an empty cluster in the same way as a complete [full-cluster restore]({% link {{ page.version.version }}/restore.md %}#full-cluster). Once you have successfully validated the restore, you can destroy the test cluster.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/gcs-auth-note.md b/src/current/_includes/v25.1/backups/gcs-auth-note.md
new file mode 100644
index 00000000000..4c52b8625b7
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/gcs-auth-note.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+The examples in this section use the `AUTH=specified` parameter, which is the default behavior for connecting to Google Cloud Storage in v21.2 and later. For more detail on how to pass your Google Cloud Storage credentials with this parameter, or how to use `implicit` authentication, read [Use Cloud Storage for Bulk Operations — Authentication]({% link {{ page.version.version }}/cloud-storage-authentication.md %}).
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/backups/gcs-default-deprec.md b/src/current/_includes/v25.1/backups/gcs-default-deprec.md
new file mode 100644
index 00000000000..008ad61f4f9
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/gcs-default-deprec.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+**Deprecation notice:** In versions prior to v21.2, GCS connections defaulted to the `cloudstorage.gs.default.key` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}). This default behavior is no longer supported. If you are relying on it, adjust your queries and scripts to specify the `AUTH` parameter you want to use. Similarly, if you are using the `cloudstorage.gs.default.key` cluster setting to authorize your GCS connection, switch to `AUTH=specified` or `AUTH=implicit`. `AUTH=specified` is the default behavior in v21.2 and later.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/backups/locality-aware-access.md b/src/current/_includes/v25.1/backups/locality-aware-access.md
new file mode 100644
index 00000000000..d0a57842341
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/locality-aware-access.md
@@ -0,0 +1 @@
+A successful locality-aware backup job requires that each node in the cluster has access to each storage location. This is because any node in the cluster can claim the job and become the [_coordinator_]({% link {{ page.version.version }}/backup-architecture.md %}#job-creation-phase) node.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/locality-aware-backups.md b/src/current/_includes/v25.1/backups/locality-aware-backups.md
new file mode 100644
index 00000000000..8ce87c53654
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/locality-aware-backups.md
@@ -0,0 +1,39 @@
+{{site.data.alerts.callout_info}}
+`SHOW BACKUP` is able to display metadata using `check_files` for locality-aware backups taken with the [`incremental_location`]({% link {{ page.version.version }}/show-backup.md %}#show-a-backup-taken-with-the-incremental-location-option) option.
+{{site.data.alerts.end}}
+
+To view a list of [locality-aware backups]({% link {{ page.version.version }}/take-and-restore-locality-aware-backups.md %}), pass the endpoint [collection URI]({% link {{ page.version.version }}/backup.md %}#backup-file-urls) that is set as the `default` location with `COCKROACH_LOCALITY=default`:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SHOW BACKUPS IN 's3://{default collection URI}/{path}?AWS_ACCESS_KEY_ID={placeholder}&AWS_SECRET_ACCESS_KEY={placeholder}';
+~~~
+
+~~~
+ path
+-------------------------
+/2023/02/23-150925.62
+/2023/03/08-192859.44
+(2 rows)
+~~~
+
+To view a [locality-aware backup]({% link {{ page.version.version }}/take-and-restore-locality-aware-backups.md %}), pass locality-aware backup URIs to `SHOW BACKUP`:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SHOW BACKUP FROM LATEST IN ('s3://{bucket name}/locality?AWS_ACCESS_KEY_ID={placeholder}&AWS_SECRET_ACCESS_KEY={placeholder}&COCKROACH_LOCALITY=default', 's3://{bucket name}/locality?AWS_ACCESS_KEY_ID={placeholder}&AWS_SECRET_ACCESS_KEY={placeholder}&COCKROACH_LOCALITY=region%3Dus-west');
+~~~
+
+~~~
+ database_name | parent_schema_name | object_name | object_type | backup_type | start_time | end_time | size_bytes | rows | is_full_cluster
+----------------+--------------------+----------------------------+-------------+-------------+------------+----------------------------+------------+------+------------------
+ NULL | NULL | movr | database | full | NULL | 2023-02-23 15:09:25.625777 | NULL | NULL | f
+ movr | NULL | public | schema | full | NULL | 2023-02-23 15:09:25.625777 | NULL | NULL | f
+ movr | public | users | table | full | NULL | 2023-02-23 15:09:25.625777 | 5633 | 58 | f
+ movr | public | vehicles | table | full | NULL | 2023-02-23 15:09:25.625777 | 3617 | 17 | f
+ movr | public | rides | table | full | NULL | 2023-02-23 15:09:25.625777 | 159269 | 511 | f
+ movr | public | vehicle_location_histories | table | full | NULL | 2023-02-23 15:09:25.625777 | 79963 | 1092 | f
+ movr | public | promo_codes | table | full | NULL | 2023-02-23 15:09:25.625777 | 221763 | 1003 | f
+ movr | public | user_promo_codes | table | full | NULL | 2023-02-23 15:09:25.625777 | 927 | 11 | f
+(8 rows)
+~~~
diff --git a/src/current/_includes/v25.1/backups/locality-aware-multi-tenant.md b/src/current/_includes/v25.1/backups/locality-aware-multi-tenant.md
new file mode 100644
index 00000000000..896d29db2d6
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/locality-aware-multi-tenant.md
@@ -0,0 +1 @@
+CockroachDB {{ site.data.products.standard }} and CockroachDB {{ site.data.products.basic }} clusters operate with a different architecture compared to CockroachDB {{ site.data.products.core }}, which has implications for how locality-aware backups run. {{ site.data.products.standard }} and {{ site.data.products.basic }} clusters scale resources depending on whether they are actively in use, so it is less likely that a SQL pod is available in every locality. As a result, your cluster may not have a SQL pod in the locality where the data resides, which can lead to the cluster uploading that data to a storage bucket in a locality where you do have active SQL pods. Consider this as you plan a backup strategy that must comply with [data domiciling]({% link {{ page.version.version }}/data-domiciling.md %}) requirements.
diff --git a/src/current/_includes/v25.1/backups/metrics-per-node.md b/src/current/_includes/v25.1/backups/metrics-per-node.md
new file mode 100644
index 00000000000..a1ec6e0b350
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/metrics-per-node.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+Metrics are reported per node. Therefore, it is necessary to retrieve metrics from every node in the cluster. For example, if you are monitoring whether a backup fails, it is necessary to track `scheduled_backup_failed` on each node.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/no-incremental-restore.md b/src/current/_includes/v25.1/backups/no-incremental-restore.md
new file mode 100644
index 00000000000..6415ec7e034
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/no-incremental-restore.md
@@ -0,0 +1 @@
+When you restore from an incremental backup, you're restoring the **entire** table, database, or cluster. CockroachDB uses both the latest (or a [specific]({% link {{ page.version.version }}/restore.md %}#restore-a-specific-full-or-incremental-backup)) incremental backup and the full backup during this process. You cannot restore an incremental backup without a full backup. Furthermore, it is not possible to restore over a [table]({% link {{ page.version.version }}/restore.md %}#tables), [database]({% link {{ page.version.version }}/restore.md %}#databases), or [cluster](restore.html#full-cluster) with existing data. Refer to [Restore types](restore.html#restore-types) for detail on the types of backups you can restore.
diff --git a/src/current/_includes/v25.1/backups/object-dependency.md b/src/current/_includes/v25.1/backups/object-dependency.md
new file mode 100644
index 00000000000..07bcbc698f3
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/object-dependency.md
@@ -0,0 +1,13 @@
+Dependent objects must be {% if page.name == "restore.md" %} restored {% else %} backed up {% endif %} at the same time as the objects they depend on. When you back up a table, it will not include any dependent tables, [views]({% link {{ page.version.version }}/views.md %}), or [sequences]({% link {{ page.version.version }}/create-sequence.md %}).
+
+For example, if you back up [view]({% link {{ page.version.version }}/views.md %}) `v` that depends on table `t`, it will only back up `v`, not `t`. When you try to restore `v`, the restore will fail because the referenced table is not present in the backup.
+
+Alternatively, you can pass a `skip` option with {% if page.name == "restore.md" %} `RESTORE` {% else %} [`RESTORE`]({% link {{ page.version.version }}/restore.md %}) {% endif %} to skip the dependency instead:
+
+Dependent object | Depends on | Skip option
+-------|------------+-------------
+Table with [foreign key]({% link {{ page.version.version }}/foreign-key.md %}) constraints | The table it `REFERENCES`. | [`skip_missing_foreign_keys`]({% link {{ page.version.version }}/restore.md %}#skip_missing_foreign_keys)
+Table with a [sequence]({% link {{ page.version.version }}/create-sequence.md %}) | The sequence. | [`skip_missing_sequences`]({% link {{ page.version.version }}/restore.md %}#skip-missing-sequences)
+[Views]({% link {{ page.version.version }}/views.md %}) | The tables used in the view's `SELECT` statement. | [`skip_missing_views`]({% link {{ page.version.version }}/restore.md %}#skip-missing-views)
+
+We recommend treating tables with [foreign keys]({% link {{ page.version.version }}/foreign-key.md %}), which contribute to [views]({% link {{ page.version.version }}/views.md %}), or that use sequences or user-defined types as a single unit with their dependencies. While you can restore individual tables, you may find that backing up and restoring at the database level is more convenient.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/old-syntax-removed.md b/src/current/_includes/v25.1/backups/old-syntax-removed.md
new file mode 100644
index 00000000000..7052fe0d3af
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/old-syntax-removed.md
@@ -0,0 +1,5 @@
+{{site.data.alerts.callout_danger}}
+The `BACKUP ... TO` and `RESTORE ... FROM {storage_uri}` syntax has been removed from CockroachDB v24.3 and later.
+
+For details on the syntax to run `BACKUP` and `RESTORE`, refer to the {% if page.name == "backup.md" %} [backup](#examples) {% else %} [backup]({% link {{ page.version.version }}/backup.md %}#examples) {% endif %} and {% if page.name == "restore.md" %} [restore](#examples) {% else %} [restore]({% link {{ page.version.version }}/restore.md %}#examples) {% endif %} examples.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/protected-timestamps.md b/src/current/_includes/v25.1/backups/protected-timestamps.md
new file mode 100644
index 00000000000..31b931f4cc4
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/protected-timestamps.md
@@ -0,0 +1,5 @@
+Scheduled backups ensure that the data to be backed up is protected from garbage collection until it has been successfully backed up. This active management of [protected timestamps]({% link {{ page.version.version }}/architecture/storage-layer.md %}#protected-timestamps) means that you can run scheduled backups at a cadence independent from the [GC TTL]({% link {{ page.version.version }}/configure-replication-zones.md %}#gc-ttlseconds) of the data. This is unlike non-scheduled backups that are tightly coupled to the GC TTL. See [Garbage collection and backups]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}#garbage-collection-and-backups) for more detail.
+
+The data being backed up will not be eligible for garbage collection until a successful backup completes. At that point, the schedule will release the existing protected timestamp record and write a new one to protect data for the next scheduled backup. Note that if a scheduled backup fails, data will accumulate until the next successful backup. Resolving the backup failure or [dropping the backup schedule]({% link {{ page.version.version }}/drop-schedules.md %}) will make the data eligible for garbage collection once again.
+
+You can also use the `exclude_data_from_backup` option with a scheduled backup as a way to prevent protected timestamps from prolonging garbage collection on a table. See the example [Exclude a table's data from backups]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}#exclude-a-tables-data-from-backups) for usage information.
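+
+As a minimal sketch of setting this option (the table name `telemetry` is an assumption):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+-- Exclude this table's data from backups so protected timestamps do not hold up its garbage collection.
+ALTER TABLE telemetry SET (exclude_data_from_backup = true);
+~~~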
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/pts-schedules-incremental.md b/src/current/_includes/v25.1/backups/pts-schedules-incremental.md
new file mode 100644
index 00000000000..b8cb26aff0b
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/pts-schedules-incremental.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+If you are creating incremental backups as part of a [backup schedule]({% link {{ page.version.version }}/create-schedule-for-backup.md %}), [protected timestamps]({% link {{ page.version.version }}/architecture/storage-layer.md %}#protected-timestamps) will ensure the backup revision data is not garbage collected, which allows you to lower the GC TTL. See [Protected timestamps and scheduled backups]({% link {{ page.version.version }}/create-schedule-for-backup.md %}#protected-timestamps-and-scheduled-backups) for more detail.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/recommend-backups-for-upgrade.md b/src/current/_includes/v25.1/backups/recommend-backups-for-upgrade.md
new file mode 100644
index 00000000000..2ef075abf9c
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/recommend-backups-for-upgrade.md
@@ -0,0 +1,6 @@
+{% if page.path contains "cockroachcloud" %}
+[Managed backups]({% link cockroachcloud/managed-backups.md %}) are automated backups of CockroachDB {{ site.data.products.cloud }} clusters that are stored by Cockroach Labs in cloud storage. By default, Cockroach Labs takes and retains managed backups in all Cloud clusters.
+
+When upgrading to a major release, you can optionally [take a self-managed backup]({% link cockroachcloud/take-and-restore-self-managed-backups.md %}) of your cluster to your own cloud storage, as an extra layer of protection in case the upgrade leads to issues.
+{% else %}
+CockroachDB is designed with high fault tolerance. However, taking regular backups of your data is an operational best practice for [disaster recovery]({% link {{ page.version.version }}/disaster-recovery-planning.md %}) planning.{% endif %}
diff --git a/src/current/_includes/v25.1/backups/retry-failure.md b/src/current/_includes/v25.1/backups/retry-failure.md
new file mode 100644
index 00000000000..e29f6a8d3a6
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/retry-failure.md
@@ -0,0 +1 @@
+If a backup job encounters too many retryable errors, it will enter a [`failed` state]({% link {{ page.version.version }}/show-jobs.md %}#job-status) with the most recent error, which allows subsequent backups the chance to succeed. Refer to the [Backup and Restore Monitoring]({% link {{ page.version.version }}/backup-and-restore-monitoring.md %}) page for metrics to track backup failures.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/schedule-options.md b/src/current/_includes/v25.1/backups/schedule-options.md
new file mode 100644
index 00000000000..2743f788b85
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/schedule-options.md
@@ -0,0 +1,7 @@
+ Option | Value | Description
+----------------------------+-----------------------------------------+------------------------------
+`first_run` | [`TIMESTAMPTZ`]({% link {{ page.version.version }}/timestamp.md %}) / `now` | Execute the schedule at the specified time in the future. If not specified, the default behavior is to execute the schedule based on its next `RECURRING` time.
+`on_execution_failure` | `retry` / `reschedule` / `pause` | If an error occurs during the backup execution, do the following: <br><br>`retry`: Retry the backup right away. <br>`reschedule`: Retry the backup by rescheduling it based on the `RECURRING` expression. <br>`pause`: Pause the schedule. This requires manual intervention to [resume the schedule]({% link {{ page.version.version }}/resume-schedules.md %}). <br><br>**Default**: `reschedule`
+`on_previous_running` | `start` / `skip` / `wait` | If the previous backup started by the schedule is still running, do the following: <br><br>`start`: Start the new backup anyway, even if the previous one is still running. <br>`skip`: Skip the new backup and run the next backup based on the `RECURRING` expression. <br>`wait`: Wait for the previous backup to complete. <br><br>**Default**: `wait`. The option affects backups started by the full backup schedule only. Incremental backups are always set to `wait`.
+`ignore_existing_backups` | N/A | If backups were already created in the [destination]({% link {{ page.version.version }}/use-cloud-storage.md %}) that the new schedule references, this option must be passed to acknowledge that the new schedule may be backing up different objects.
+`updates_cluster_last_backup_time_metric` | N/A | ([`admin` privileges]({% link {{ page.version.version }}/security-reference/authorization.md %}#admin-role) required) When set during backup schedule creation, this option updates the [`schedules_backup_last_completed_time`]({% link {{ page.version.version }}/backup-and-restore-monitoring.md %}#available-metrics) metric for the scheduled backup.
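+
+As a sketch of attaching these options when creating a schedule (the label `daily_backup` and collection URI `external://backup_storage` are assumptions):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE SCHEDULE daily_backup
+  FOR BACKUP INTO 'external://backup_storage'
+  RECURRING '@daily'
+  FULL BACKUP '@weekly'
+  WITH SCHEDULE OPTIONS first_run = 'now', on_execution_failure = 'pause', on_previous_running = 'skip';
+~~~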
diff --git a/src/current/_includes/v25.1/backups/scheduled-backups-tip.md b/src/current/_includes/v25.1/backups/scheduled-backups-tip.md
new file mode 100644
index 00000000000..62ced81e069
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/scheduled-backups-tip.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_success}}
+We recommend using scheduled backups to automate daily backups of your cluster.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/show-backup-replace-diagram.html b/src/current/_includes/v25.1/backups/show-backup-replace-diagram.html
new file mode 100644
index 00000000000..539b72b45da
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/show-backup-replace-diagram.html
@@ -0,0 +1,50 @@
+
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/storage-collision-examples.md b/src/current/_includes/v25.1/backups/storage-collision-examples.md
new file mode 100644
index 00000000000..b6c3b85de05
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/storage-collision-examples.md
@@ -0,0 +1 @@
+For example, if you have a backup schedule running backups for the database `users`, the full and incremental backups should use the same storage URI. (`CREATE SCHEDULE FOR BACKUP` automatically creates two schedules, one for full and one for incremental backups, that write to the same storage URI.) If there is another backup schedule for the database `accounts`, the full and incremental backups for `accounts` should also share a storage URI. However, the storage URI for the `accounts` backup collection should be different from the storage URI for the `users` backup collection.
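+
+As a sketch, the two collections described above might be created against distinct URIs (the schedule labels and `external://` URIs are assumptions):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE SCHEDULE users_backup FOR BACKUP DATABASE users INTO 'external://users_backups' RECURRING '@daily';
+CREATE SCHEDULE accounts_backup FOR BACKUP DATABASE accounts INTO 'external://accounts_backups' RECURRING '@daily';
+~~~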
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/support-products.md b/src/current/_includes/v25.1/backups/support-products.md
new file mode 100644
index 00000000000..d4687c816d2
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/support-products.md
@@ -0,0 +1,3 @@
+## Supported products
+
+The feature described on this page is available in **CockroachDB {{ site.data.products.basic }}**, **CockroachDB {{ site.data.products.standard }}**, **CockroachDB {{ site.data.products.advanced }}**, and **CockroachDB {{ site.data.products.core }}** clusters when you are running [self-managed backups]({% link cockroachcloud/take-and-restore-self-managed-backups.md %}). For a full list of features, refer to [Backup and restore product support]({% link {{ page.version.version }}/backup-and-restore-overview.md %}#backup-and-restore-support).
diff --git a/src/current/_includes/v25.1/backups/updated-backup-privileges.md b/src/current/_includes/v25.1/backups/updated-backup-privileges.md
new file mode 100644
index 00000000000..013fa0f4d8d
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/updated-backup-privileges.md
@@ -0,0 +1,36 @@
+{{site.data.alerts.callout_info}}
+Starting in v22.2, CockroachDB introduces a new [system-level privilege model]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges) that provides finer control over a user's privilege to work with the database, including taking backups.
+
+There is continued support for the [legacy privilege model](#required-privileges-using-the-legacy-privilege-model) for backups in v22.2; however, it **will be removed** in a future release of CockroachDB. We recommend implementing the new privilege model described in this section for all new and existing backups.
+{{site.data.alerts.end}}
+
+You can [grant]({% link {{ page.version.version }}/grant.md %}#grant-privileges-on-specific-tables-in-a-database) the `BACKUP` privilege to a user or role depending on the type of backup:
+
+Backup | Privilege
+-------+-----------
+Cluster | Grant a user the `BACKUP` [system-level privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges). For example, `GRANT SYSTEM BACKUP TO user;`.
+Database | Grant a user the `BACKUP` privilege on the target database. For example, `GRANT BACKUP ON DATABASE test_db TO user;`.
+Table | Grant a user the `BACKUP` privilege at the table level. This gives the user the privilege to back up the schema and all user-defined types that are associated with the table. For example, `GRANT BACKUP ON TABLE test_db.table TO user;`.
+
+The listed privileges do not cascade to objects lower in the schema tree. For example, if you are granted database-level `BACKUP` privileges, this does not give you the privilege to back up a table. If you need the `BACKUP` privilege on a database to apply to all newly created tables in that database, use [`DEFAULT PRIVILEGES`]({% link {{ page.version.version }}/security-reference/authorization.md %}#default-privileges). You can add `BACKUP` to the user or role's default privileges with [`ALTER DEFAULT PRIVILEGES`]({% link {{ page.version.version }}/alter-default-privileges.md %}#grant-default-privileges-to-a-specific-role).
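+
+As a sketch of adding `BACKUP` to the default privileges for tables created by the current role in the current database (the user name `backup_user` is an assumption):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+ALTER DEFAULT PRIVILEGES GRANT BACKUP ON TABLES TO backup_user;
+~~~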
+
+{{site.data.alerts.callout_info}}
+You can grant the `BACKUP` privilege to a user or role **without** the `SELECT` privilege on a table. As a result, these users will be able to take backups, but they will not be able to run a `SELECT` query on that data directly. However, these users could still read this data indirectly, by restoring it from any backups they produce.
+{{site.data.alerts.end}}
+
+Members of the [`admin` role]({% link {{ page.version.version }}/security-reference/authorization.md %}#admin-role) can run all three types of backups (cluster, database, and table) without the need to grant a specific `BACKUP` privilege. However, we recommend using the `BACKUP` privilege model to create users or roles and grant them `BACKUP` privileges as necessary for stronger access control.
+
+### Privileges for managing a backup job
+
+To manage a backup job with [`PAUSE JOB`]({% link {{ page.version.version }}/pause-job.md %}), [`RESUME JOB`]({% link {{ page.version.version }}/resume-job.md %}), or [`CANCEL JOB`]({% link {{ page.version.version }}/cancel-job.md %}), users must have at least one of the following:
+
+- Be a member of the [`admin` role]({% link {{ page.version.version }}/security-reference/authorization.md %}#admin-role).
+- The [`CONTROLJOB` role option]({% link {{ page.version.version }}/security-reference/authorization.md %}#role-options).
+
+To view a backup job with [`SHOW JOB`]({% link {{ page.version.version }}/show-jobs.md %}), users must have at least one of the following:
+
+- The [`VIEWJOB` privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges), which allows you to view all jobs (including `admin`-owned jobs).
+- Be a member of the [`admin` role]({% link {{ page.version.version }}/security-reference/authorization.md %}#admin-role).
+- The [`CONTROLJOB` role option]({% link {{ page.version.version }}/security-reference/authorization.md %}#role-options).
+
+See [`GRANT`]({% link {{ page.version.version }}/grant.md %}) for detail on granting privileges to a role or user.
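+
+As a sketch of granting these (the user name `ops_user` is an assumption):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+-- CONTROLJOB is a role option.
+ALTER ROLE ops_user WITH CONTROLJOB;
+-- VIEWJOB is a system-level privilege.
+GRANT SYSTEM VIEWJOB TO ops_user;
+~~~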
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backups/view-scheduled-backups.md b/src/current/_includes/v25.1/backups/view-scheduled-backups.md
new file mode 100644
index 00000000000..5f3fee97e56
--- /dev/null
+++ b/src/current/_includes/v25.1/backups/view-scheduled-backups.md
@@ -0,0 +1,7 @@
+ When a [backup is created by a schedule]({% link {{ page.version.version }}/create-schedule-for-backup.md %}), it is stored within a collection of backups in the given location. To view details for a backup created by a schedule, you can use the following:
+
+- Use the `SHOW BACKUPS IN collectionURI` statement to [view a list of the full backup's subdirectories]({% link {{ page.version.version }}/show-backup.md %}#view-a-list-of-the-available-full-backup-subdirectories).
+- Use the `SHOW BACKUP FROM subdirectory IN collectionURI` statement to [view a list of the full and incremental backups that are stored in a specific full backup's subdirectory]({% link {{ page.version.version }}/show-backup.md %}#view-a-list-of-the-full-and-incremental-backups-in-a-specific-full-backup-subdirectory).
+- Use the [Schedules page]({% link {{ page.version.version }}/ui-schedules-page.md %}) in the [DB Console]({% link {{ page.version.version }}/ui-overview.md %}) to view a list of created backup schedules and their individual details.
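+
+As a sketch of the two `SHOW` statements above (the collection URI and subdirectory are assumptions):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SHOW BACKUPS IN 'external://backup_storage';
+SHOW BACKUP FROM '2025/02/14-190000.00' IN 'external://backup_storage';
+~~~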
+
+For more details, see [`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}).
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/backward-incompatible/alpha.1.md b/src/current/_includes/v25.1/backward-incompatible/alpha.1.md
new file mode 100644
index 00000000000..d5aaef86c6b
--- /dev/null
+++ b/src/current/_includes/v25.1/backward-incompatible/alpha.1.md
@@ -0,0 +1,15 @@
+- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). [#81298][#81298]
+- In the Cockroach CLI, [`BOOL` values](../v23.1/bool.html) are now formatted as `t` or `f` instead of `True` or `False`. [#81943][#81943]
+- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node](../v23.1/node-shutdown.html) gracefully, send a `SIGTERM` signal to it. [#82988][#82988]
+- Added a cluster version to allow the [Pebble storage engine](../v23.1/architecture/storage-layer.html#pebble) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree](../v23.1/architecture/storage-layer.html#log-structured-merge-trees)). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. [#84887][#84887]
+- Using a single TCP port listener for both RPC (node-node) and SQL client connections is now deprecated. This capability **will be removed** in the next version of CockroachDB. Instead, make one of the following configuration changes to your CockroachDB deployment:
+ - Preferred: keep port `26257` for SQL, and allocate a new port, e.g., `26357`, for node-node RPC connections. For example, you might configure a node with the flags `--listen-addr=:26357 --sql-addr=:26257`, where subsequent nodes seeking to join would then use the flag `--join=othernode:26357,othernode:26257`. This will become the default configuration in the next version of CockroachDB. When using this mode of operation, care should be taken to use a `--join` flag that includes both the previous and new port numbers for other nodes, so that no network partition occurs during the upgrade.
+ - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. [#85671][#85671]
+- If no `nullif` option is specified while using [`IMPORT CSV`](../v23.1/import.html), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. [#84487][#84487]
+- [`COPY FROM`](../v23.1/copy-from.html) operations are now atomic by default instead of being segmented into 100 row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. [#85986][#85986]
+- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link v22.2/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. [#81310][#81310]
+- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. [#81266][#81266]
+- Removed the deprecated `GRANT` privilege. [#81310][#81310]
+- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. [#83134][#83134]
+- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. [#82560][#82560]
+- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. This means that a [batch of statements]({% link v22.2/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. [#76834][#76834]
diff --git a/src/current/_includes/v25.1/cdc/apache-pulsar-unsupported.md b/src/current/_includes/v25.1/cdc/apache-pulsar-unsupported.md
new file mode 100644
index 00000000000..fecb7931784
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/apache-pulsar-unsupported.md
@@ -0,0 +1,8 @@
+Changefeeds emitting to an Apache Pulsar sink do **not** support:
+
+- [`format=avro`]({% link {{ page.version.version }}/create-changefeed.md %}#format)
+- [`confluent_schema_registry`]({% link {{ page.version.version }}/create-changefeed.md %}#confluent-schema-registry)
+- [`topic_prefix`]({% link {{ page.version.version }}/create-changefeed.md %}#topic-prefix)
+- Any batching configuration
+- [Authentication query parameters]({% link {{ page.version.version }}/create-changefeed.md %}#query-parameters)
+- [External connections]({% link {{ page.version.version }}/create-external-connection.md %})
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/apache-pulsar-uri.md b/src/current/_includes/v25.1/cdc/apache-pulsar-uri.md
new file mode 100644
index 00000000000..28e8f6438d8
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/apache-pulsar-uri.md
@@ -0,0 +1,6 @@
+{% include_cached copy-clipboard.html %}
+~~~
+pulsar://{host IP}:6650
+~~~
+
+By default, Apache Pulsar listens for client connections on port `:6650`. For more detail on configuration, refer to the [Apache Pulsar documentation](https://pulsar.apache.org/docs/2.10.x/reference-configuration).
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/avro-limitations.md b/src/current/_includes/v25.1/cdc/avro-limitations.md
new file mode 100644
index 00000000000..8580ac64cf7
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/avro-limitations.md
@@ -0,0 +1,30 @@
+- [Decimals]({% link {{ page.version.version }}/decimal.md %}) must have precision specified.
+- [`BYTES`]({% link {{ page.version.version }}/bytes.md %}) (or its aliases `BYTEA` and `BLOB`) are often used to store machine-readable data. When you stream these types through a changefeed with [`format=avro`]({% link {{ page.version.version }}/create-changefeed.md %}#format), CockroachDB does not encode or change the data. However, Avro clients can often include escape sequences to present the data in a printable format, which can interfere with deserialization. A potential solution is to hex-encode `BYTES` values when initially inserting them into CockroachDB. This will ensure that Avro clients can consistently decode the hexadecimal. Note that hex-encoding values at insertion will increase record size.
+- [`BIT`]({% link {{ page.version.version }}/bit.md %}) and [`VARBIT`]({% link {{ page.version.version }}/bit.md %}) types are encoded as arrays of 64-bit integers.
+
+ For efficiency, CockroachDB encodes `BIT` and `VARBIT` bitfield types as arrays of 64-bit integers. That is, [base-2 (binary format)](https://wikipedia.org/wiki/Binary_number#Conversion_to_and_from_other_numeral_systems) `BIT` and `VARBIT` data types are converted to base 10 and stored in arrays. Encoding in CockroachDB is [big-endian](https://wikipedia.org/wiki/Endianness), therefore the last value may have many trailing zeroes. For this reason, the first value of each array is the number of bits that are used in the last value of the array.
+
+    For instance, if the bitfield is 129 bits long, there will be 4 integers in the array. The first integer will be `1`, representing the number of bits used in the last value; the second integer will be the first 64 bits; the third integer will be bits 65–128; and the last integer will either be `0` or `9223372036854775808` (i.e., the integer with only the first bit set, or `1000000000000000000000000000000000000000000000000000000000000000` in base 2).
+
+ This example is base-10 encoded into an array as follows:
+
+ ~~~
+ {"array": [1, , , 0 or 9223372036854775808]}
+ ~~~
+
+ For downstream processing, it is necessary to base-2 encode every element in the array (except for the first element). The first number in the array gives you the number of bits to take from the last base-2 number — that is, the most significant bits. So, in the example above this would be `1`. Finally, all the base-2 numbers can be appended together, which will result in the original number of bits, 129.
+
+ In a different example of this process where the bitfield is 136 bits long, the array would be similar to the following when base-10 encoded:
+
+ ~~~
+ {"array": [8, 18293058736425533439, 18446744073709551615, 13690942867206307840]}
+ ~~~
+
+ To then work with this data, you would convert each of the elements in the array to base-2 numbers, besides the first element. For the above array, this would convert to:
+
+ ~~~
+ [8, 1111110111011011111111111111111111111111111111111111111111111111, 1111111111111111111111111111111111111111111111111111111111111111, 1011111000000000000000000000000000000000000000000000000000000000]
+ ~~~
+
+ Next, you use the first element in the array to take the number of bits from the last base-2 element, `10111110`. Finally, you append each of the base-2 numbers together — in the above array, the second, third, and truncated last element. This results in 136 bits, the original number of bits.
+- {% include {{page.version.version}}/cdc/avro-udt-composite.md %}
diff --git a/src/current/_includes/v25.1/cdc/avro-udt-composite.md b/src/current/_includes/v25.1/cdc/avro-udt-composite.md
new file mode 100644
index 00000000000..7a34fbd3253
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/avro-udt-composite.md
@@ -0,0 +1 @@
+A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). [#102903](https://github.com/cockroachdb/cockroach/issues/102903)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/azure-event-hubs-uri.md b/src/current/_includes/v25.1/cdc/azure-event-hubs-uri.md
new file mode 100644
index 00000000000..bb356aca3ec
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/azure-event-hubs-uri.md
@@ -0,0 +1,9 @@
+~~~
+'azure-event-hub://{event-hubs-namespace}.servicebus.windows.net:9093?shared_access_key_name={policy-name}&shared_access_key={url-encoded key}'
+~~~
+
+You can also use a `kafka://` scheme in the URI:
+
+~~~
+'kafka://{event-hubs-namespace}.servicebus.windows.net:9093?shared_access_key_name={policy-name}&shared_access_key={url-encoded key}'
+~~~
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/bare-envelope-cdc-queries.md b/src/current/_includes/v25.1/cdc/bare-envelope-cdc-queries.md
new file mode 100644
index 00000000000..6813c9b7f3d
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/bare-envelope-cdc-queries.md
@@ -0,0 +1 @@
+{% if page.name == "cdc-queries.md" %}CDC queries{% else %}[CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}){% endif %} use [`envelope=bare`]({% link {{ page.version.version }}/create-changefeed.md %}#envelope) message format by default. The `bare` message envelope places the output of the `SELECT` clause at the top level of the message instead of under an `"after"` key. When there is additional information that the changefeed is sending, such as [`updated`]({% link {{ page.version.version }}/create-changefeed.md %}#updated) or [`resolved`](create-changefeed.html#resolved) timestamps, the messages will include a `crdb` field containing this information.
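+
+As a sketch of a CDC query that emits `bare` envelope messages by default (the table `vehicles`, its columns, and the sink `external://sink` are assumptions):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE CHANGEFEED INTO 'external://sink'
+  WITH resolved
+  AS SELECT id, status FROM vehicles;
+~~~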
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/cap-parameter-ext-connection.md b/src/current/_includes/v25.1/cdc/cap-parameter-ext-connection.md
new file mode 100644
index 00000000000..8970ddaf2b0
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/cap-parameter-ext-connection.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+If you are creating an {% if page.name == "create-external-connection.md" %}external connection{% else %}[external connection]({% link {{ page.version.version }}/create-external-connection.md %}){% endif %} with [`CREATE CHANGEFEED` query parameters]({% link {{ page.version.version }}/create-changefeed.md %}#query-parameters), you must pass them in lowercase otherwise you will receive an `unknown query parameters` error.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/cdc-cloud-rangefeed.md b/src/current/_includes/v25.1/cdc/cdc-cloud-rangefeed.md
new file mode 100644
index 00000000000..9def4728ec2
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/cdc-cloud-rangefeed.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+If you are working on a CockroachDB {{ site.data.products.standard }} or {{ site.data.products.basic }} cluster, the `kv.rangefeed.enabled` cluster setting is enabled by default.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/cdc-schema-locked-example.md b/src/current/_includes/v25.1/cdc/cdc-schema-locked-example.md
new file mode 100644
index 00000000000..0908749d4de
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/cdc-schema-locked-example.md
@@ -0,0 +1,15 @@
+Use the `schema_locked` [storage parameter]({% link {{ page.version.version }}/with-storage-parameter.md %}) to disallow [schema changes]({% link {{ page.version.version }}/online-schema-changes.md %}) on a watched table, which allows the changefeed to take a fast path that avoids checking whether there are schema changes that could require synchronization between [changefeed aggregators]({% link {{ page.version.version }}/how-does-an-enterprise-changefeed-work.md %}). This helps to decrease the latency between a write committing to a table and that change emitting to the [changefeed's sink]({% link {{ page.version.version }}/changefeed-sinks.md %}).
+
+Enable `schema_locked` on the watched table with the [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}) statement:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+ALTER TABLE watched_table SET (schema_locked = true);
+~~~
+
+While `schema_locked` is enabled on a table, attempted schema changes on the table will be rejected and an error returned. If you need to run a schema change on the locked table, unlock the table with `schema_locked = false`, complete the schema change, and then lock the table again with `schema_locked = true`. The changefeed will run as normal while `schema_locked = false`, but it will not benefit from the performance optimization.
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+ALTER TABLE watched_table SET (schema_locked = false);
+~~~
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/changefeed-number-limit.md b/src/current/_includes/v25.1/cdc/changefeed-number-limit.md
new file mode 100644
index 00000000000..85ad543bd82
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/changefeed-number-limit.md
@@ -0,0 +1 @@
+We recommend limiting the number of changefeeds per cluster to 80.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/client-key-encryption.md b/src/current/_includes/v25.1/cdc/client-key-encryption.md
new file mode 100644
index 00000000000..c7c7be4c38c
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/client-key-encryption.md
@@ -0,0 +1 @@
+**Note:** Client keys are often encrypted. You will receive an error if you pass an encrypted client key in your changefeed statement. To decrypt the client key, run: `openssl rsa -in key.pem -out key.decrypt.pem -passin pass:{PASSWORD}`. Once decrypted, be sure to update your changefeed statement to use the new `key.decrypt.pem` file instead.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/cluster-iam-role-step.md b/src/current/_includes/v25.1/cdc/cluster-iam-role-step.md
new file mode 100644
index 00000000000..64980c725bc
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/cluster-iam-role-step.md
@@ -0,0 +1,41 @@
+1. Navigate to the [IAM console](https://console.aws.amazon.com/iam/), select **Roles** from the navigation, and then **Create role**.
+1. Select **AWS service** for the **Trusted entity type**. For **Use case**, select **EC2** from the dropdown. Click **Next**.
+1. On the **Add permissions** page, click **Next**.
+1. Name the role (for example, `ec2-role`) and click **Create role**.
+1. Once the role has finished creating, copy the ARN in the **Summary** section. Click on the **Trust relationships** tab. You'll find a **Trusted entities** policy:
+
+ ~~~json
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "ec2.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+ }
+ ~~~
+
+1. Navigate to the [IAM console](https://console.aws.amazon.com/iam/) and search for the role (`msk-role`) you created in Step 2 that contains the MSK policy. Select the role, which will take you to its summary page.
+1. Click on the **Trust relationships** tab, and click **Edit trust policy**. Add the ARN of the EC2 IAM role (`ec2-role`) to the JSON policy:
+
+ ~~~json
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "ec2.amazonaws.com",
+ "AWS": "arn:aws:iam::{account ID}:role/{ec2-role}"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+ }
+ ~~~
+
+ Once you've updated the policy, click **Update policy**.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/composite-key-delete-insert.md b/src/current/_includes/v25.1/cdc/composite-key-delete-insert.md
new file mode 100644
index 00000000000..38c44f28d64
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/composite-key-delete-insert.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+When changes happen to a column that is part of a composite [key]({% link {{ page.version.version }}/primary-key.md %}), the changefeed will produce a {% if page.name == "cdc-queries.md" %}[delete message](#filter-delete-messages) {% else %}[delete message]({% link {{ page.version.version }}/changefeed-messages.md %}#delete-messages) {% endif %} and then an insert message.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/configure-all-changefeed.md b/src/current/_includes/v25.1/cdc/configure-all-changefeed.md
new file mode 100644
index 00000000000..6ac9e6b9b4c
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/configure-all-changefeed.md
@@ -0,0 +1,19 @@
+It is useful to be able to pause all running changefeeds during troubleshooting, testing, or when a decrease in CPU load is needed.
+
+To pause all running changefeeds:
+
+{% include_cached copy-clipboard.html %}
+~~~sql
+PAUSE JOBS (WITH x AS (SHOW CHANGEFEED JOBS) SELECT job_id FROM x WHERE status = ('running'));
+~~~
+
+This will change the status for each of the running changefeeds to `paused`, which can be verified with [`SHOW CHANGEFEED JOBS`]({% link {{ page.version.version }}/show-jobs.md %}#show-changefeed-jobs).
+
+To resume all paused changefeeds:
+
+{% include_cached copy-clipboard.html %}
+~~~sql
+RESUME JOBS (WITH x AS (SHOW CHANGEFEED JOBS) SELECT job_id FROM x WHERE status = ('paused'));
+~~~
+
+This will resume the changefeeds and update the status for each of the changefeeds to `running`.
diff --git a/src/current/_includes/v25.1/cdc/confluent-cloud-sr-url.md b/src/current/_includes/v25.1/cdc/confluent-cloud-sr-url.md
new file mode 100644
index 00000000000..4796c1abf75
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/confluent-cloud-sr-url.md
@@ -0,0 +1 @@
+To connect to Confluent Cloud, use the following URL structure: `'https://{API_KEY_ID}:{API_SECRET_URL_ENCODED}@{CONFLUENT_REGISTRY_URL}:443'`. See the [Stream a Changefeed to a Confluent Cloud Kafka Cluster]({% link {{ page.version.version }}/stream-a-changefeed-to-a-confluent-cloud-kafka-cluster.md %}#step-8-create-a-changefeed) tutorial for further detail.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/core-csv.md b/src/current/_includes/v25.1/cdc/core-csv.md
new file mode 100644
index 00000000000..0901eed2def
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/core-csv.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+To determine how wide the columns need to be, the default `table` display format in `cockroach sql` buffers the results it receives from the server before printing them to the console. When consuming basic changefeed data using `cockroach sql`, it's important to use a display format like `csv` that does not buffer its results. To set the display format, use the [`--format=csv` flag]({% link {{ page.version.version }}/cockroach-sql.md %}#sql-flag-format) when starting the [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}), or set the [`\set display_format=csv` option]({% link {{ page.version.version }}/cockroach-sql.md %}#client-side-options) once the SQL client is open.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/cdc/core-url.md b/src/current/_includes/v25.1/cdc/core-url.md
new file mode 100644
index 00000000000..029e0ac40b7
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/core-url.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+Because basic changefeeds return results differently than other SQL statements, they require a dedicated database connection with specific settings around result buffering. In normal operation, CockroachDB improves performance by buffering results server-side before returning them to a client; however, result buffering is automatically turned off for basic changefeeds. Basic changefeeds also have different cancellation behavior than other queries: they can only be canceled by closing the underlying connection or issuing a [`CANCEL QUERY`]({% link {{ page.version.version }}/cancel-query.md %}) statement on a separate connection. Combined, these attributes of changefeeds mean that applications should explicitly create dedicated connections to consume changefeed data, instead of using a connection pool as most client drivers do by default.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/cdc/create-core-changefeed-avro.md b/src/current/_includes/v25.1/cdc/create-core-changefeed-avro.md
new file mode 100644
index 00000000000..53dab65cff2
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/create-core-changefeed-avro.md
@@ -0,0 +1,122 @@
+In this example, you'll set up a basic changefeed for a single-node cluster that emits Avro records. CockroachDB's Avro binary encoding convention uses the [Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/docs/serializer-formatter.html) to store Avro schemas.
+
+1. Use the [`cockroach start-single-node`]({% link {{ page.version.version }}/cockroach-start-single-node.md %}) command to start a single-node cluster:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach start-single-node \
+ --insecure \
+ --listen-addr=localhost \
+ --background
+ ~~~
+
+1. Download and extract the [Confluent Open Source platform](https://www.confluent.io/download/).
+
+1. Move into the extracted `confluent-` directory and start Confluent:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ ./bin/confluent local services start
+ ~~~
+
+ Only `zookeeper`, `kafka`, and `schema-registry` are needed. To troubleshoot Confluent, see [their docs](https://docs.confluent.io/current/installation/installing_cp.html#zip-and-tar-archives) and the [Quick Start Guide](https://docs.confluent.io/platform/current/quickstart/ce-quickstart.html#ce-quickstart).
+
+1. As the `root` user, open the [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --url="postgresql://root@127.0.0.1:26257?sslmode=disable" --format=csv
+ ~~~
+
+ {% include {{ page.version.version }}/cdc/core-url.md %}
+
+ {% include {{ page.version.version }}/cdc/core-csv.md %}
+
+1. Enable the `kv.rangefeed.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > SET CLUSTER SETTING kv.rangefeed.enabled = true;
+ ~~~
+
+1. Create table `bar`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > CREATE TABLE bar (a INT PRIMARY KEY);
+ ~~~
+
+1. Insert a row into the table:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > INSERT INTO bar VALUES (0);
+ ~~~
+
+1. Start the basic changefeed:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > EXPERIMENTAL CHANGEFEED FOR bar WITH format = avro, confluent_schema_registry = 'http://localhost:8081';
+ ~~~
+
+ ~~~
+ table,key,value
+ bar,\000\000\000\000\001\002\000,\000\000\000\000\002\002\002\000
+ ~~~
+
+1. In a new terminal, add another row:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --insecure -e "INSERT INTO bar VALUES (1)"
+ ~~~
+
+1. Back in the terminal where the basic changefeed is streaming, the output will appear:
+
+ ~~~
+ bar,\000\000\000\000\001\002\002,\000\000\000\000\002\002\002\002
+ ~~~
+
+ Note that records may take a couple of seconds to display in the basic changefeed.
+
+1. To stop streaming the changefeed, enter **CTRL+C** into the terminal where the changefeed is running.
+
+1. To stop `cockroach`:
+
+ Get the process ID of the node:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ ps -ef | grep cockroach | grep -v grep
+ ~~~
+
+ ~~~
+ 501 21766 1 0 6:21PM ttys001 0:00.89 cockroach start-single-node --insecure --listen-addr=localhost
+ ~~~
+
+ Gracefully shut down the node, specifying its process ID:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ kill -TERM 21766
+ ~~~
+
+ ~~~
+ initiating graceful shutdown of server
+ server drained and shutdown completed
+ ~~~
+
+1. To stop Confluent, move into the extracted `confluent-` directory and stop Confluent:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ ./bin/confluent local services stop
+ ~~~
+
+ To terminate all Confluent processes, use:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ ./bin/confluent local destroy
+ ~~~
diff --git a/src/current/_includes/v25.1/cdc/create-core-changefeed.md b/src/current/_includes/v25.1/cdc/create-core-changefeed.md
new file mode 100644
index 00000000000..df2264501a0
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/create-core-changefeed.md
@@ -0,0 +1,98 @@
+In this example, you'll set up a basic changefeed for a single-node cluster.
+
+1. In a terminal window, start `cockroach`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach start-single-node \
+ --insecure \
+ --listen-addr=localhost \
+ --background
+ ~~~
+
+1. As the `root` user, open the [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql \
+ --url="postgresql://root@127.0.0.1:26257?sslmode=disable" \
+ --format=csv
+ ~~~
+
+ {% include {{ page.version.version }}/cdc/core-url.md %}
+
+ {% include {{ page.version.version }}/cdc/core-csv.md %}
+
+1. Enable the `kv.rangefeed.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > SET CLUSTER SETTING kv.rangefeed.enabled = true;
+ ~~~
+
+1. Create table `foo`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > CREATE TABLE foo (a INT PRIMARY KEY);
+ ~~~
+
+1. Insert a row into the table:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > INSERT INTO foo VALUES (0);
+ ~~~
+
+1. Start the basic changefeed:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > EXPERIMENTAL CHANGEFEED FOR foo;
+ ~~~
+ ~~~
+ table,key,value
+ foo,[0],"{""after"": {""a"": 0}}"
+ ~~~
+
+1. In a new terminal, add another row:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --insecure -e "INSERT INTO foo VALUES (1)"
+ ~~~
+
+1. Back in the terminal where the basic changefeed is streaming, the following output will appear:
+
+ ~~~
+ foo,[1],"{""after"": {""a"": 1}}"
+ ~~~
+
+ Note that records may take a couple of seconds to display in the basic changefeed.
+
+1. To stop streaming the changefeed, enter **CTRL+C** into the terminal where the changefeed is running.
+
+1. To stop `cockroach`:
+
+ Get the process ID of the node:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ ps -ef | grep cockroach | grep -v grep
+ ~~~
+
+ ~~~
+ 501 21766 1 0 6:21PM ttys001 0:00.89 cockroach start-single-node --insecure --listen-addr=localhost
+ ~~~
+
+ Gracefully shut down the node, specifying its process ID:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ kill -TERM 21766
+ ~~~
+
+ ~~~
+ initiating graceful shutdown of server
+ server drained and shutdown completed
+ ~~~
diff --git a/src/current/_includes/v25.1/cdc/create-example-db-cdc.md b/src/current/_includes/v25.1/cdc/create-example-db-cdc.md
new file mode 100644
index 00000000000..17902b10eac
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/create-example-db-cdc.md
@@ -0,0 +1,50 @@
+1. Create a database called `cdc_demo`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > CREATE DATABASE cdc_demo;
+ ~~~
+
+1. Set the database as the default:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > SET DATABASE = cdc_demo;
+ ~~~
+
+1. Create a table and add data:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > CREATE TABLE office_dogs (
+ id INT PRIMARY KEY,
+ name STRING);
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > INSERT INTO office_dogs VALUES
+ (1, 'Petee'),
+ (2, 'Carl');
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > UPDATE office_dogs SET name = 'Petee H' WHERE id = 1;
+ ~~~
+
+1. Create another table and add data:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > CREATE TABLE employees (
+ dog_id INT REFERENCES office_dogs (id),
+ employee_name STRING);
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > INSERT INTO employees VALUES
+ (1, 'Lauren'),
+ (2, 'Spencer');
+ ~~~
diff --git a/src/current/_includes/v25.1/cdc/csv-changefeed-format.md b/src/current/_includes/v25.1/cdc/csv-changefeed-format.md
new file mode 100644
index 00000000000..eb04b0f97c4
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/csv-changefeed-format.md
@@ -0,0 +1 @@
+Changefeeds emit the same CSV format as [`EXPORT`]({% link {{ page.version.version }}/export.md %}). In v22.1, changefeeds emitted CSV data that wrapped some values in single quotes, which were not wrapped when exporting data with the `EXPORT` statement.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/csv-udt-composite.md b/src/current/_includes/v25.1/cdc/csv-udt-composite.md
new file mode 100644
index 00000000000..834bddd8366
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/csv-udt-composite.md
@@ -0,0 +1 @@
+A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). [#102905](https://github.com/cockroachdb/cockroach/issues/102905)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/disable-replication-ttl.md b/src/current/_includes/v25.1/cdc/disable-replication-ttl.md
new file mode 100644
index 00000000000..c846d1a7a7f
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/disable-replication-ttl.md
@@ -0,0 +1,26 @@
+Use the `ttl_disable_changefeed_replication` table storage parameter to prevent changefeeds from sending `DELETE` messages issued by row-level TTL jobs for a table. Include the storage parameter when you create or alter the table. For example:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE TABLE tbl (
+ id UUID PRIMARY KEY default gen_random_uuid(),
+ value TEXT
+) WITH (ttl_expire_after = '3 weeks', ttl_job_cron = '@daily', ttl_disable_changefeed_replication = 'true');
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+ALTER TABLE events SET (ttl_expire_after = '1 year', ttl_disable_changefeed_replication = 'true');
+~~~
+
+You can also widen the scope to the cluster by setting the `sql.ttl.changefeed_replication.disabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to `true`. This will prevent changefeeds from emitting deletes issued by all TTL jobs on a cluster.
+
+If you want to have a changefeed ignore the storage parameter or cluster setting that disables changefeed replication, you can set the changefeed option `ignore_disable_changefeed_replication` to `true`:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE CHANGEFEED FOR TABLE table_name INTO 'external://changefeed-sink'
+ WITH resolved, ignore_disable_changefeed_replication = true;
+~~~
+
+This is useful when you have multiple use cases for different changefeeds on the same table. For example, you might have one changefeed streaming changes to another database for analytics workflows, in which you do not want to reflect row-level TTL deletes, and a second changefeed on the same table for audit-logging purposes, for which you need to persist every change.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/elastic-cpu-performance.md b/src/current/_includes/v25.1/cdc/elastic-cpu-performance.md
new file mode 100644
index 00000000000..6eb9336cd89
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/elastic-cpu-performance.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+By default, changefeeds are integrated with elastic CPU, which helps to prevent changefeeds from affecting foreground traffic. This may affect changefeed latency. For more detail on monitoring, refer to the [Changefeed performance]({% link {{ page.version.version }}/advanced-changefeed-configurations.md %}#changefeed-performance) section.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/examples-license-workload.md b/src/current/_includes/v25.1/cdc/examples-license-workload.md
new file mode 100644
index 00000000000..32d395aaed8
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/examples-license-workload.md
@@ -0,0 +1,24 @@
+1. If you do not already have one, [request a trial {{ site.data.products.enterprise }} license]({% link {{ page.version.version }}/licensing-faqs.md %}#obtain-a-license).
+
+1. Use the [`cockroach start-single-node`]({% link {{ page.version.version }}/cockroach-start-single-node.md %}) command to start a single-node cluster:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ cockroach start-single-node --insecure --listen-addr=localhost
+ ~~~
+
+1. In this example, you'll run CockroachDB's [Movr]({% link {{ page.version.version }}/movr.md %}) application workload to set up some data for your changefeed.
+
+ In a new terminal, first create the schema for the workload:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~shell
+ cockroach workload init movr "postgresql://root@127.0.0.1:26257?sslmode=disable"
+ ~~~
+
+ Then run the workload:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~shell
+ cockroach workload run movr --duration=1m "postgresql://root@127.0.0.1:26257?sslmode=disable"
+ ~~~
diff --git a/src/current/_includes/v25.1/cdc/ext-conn-cluster-setting.md b/src/current/_includes/v25.1/cdc/ext-conn-cluster-setting.md
new file mode 100644
index 00000000000..82d266ce59d
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/ext-conn-cluster-setting.md
@@ -0,0 +1 @@
+To restrict a user's access to changefeed data and sink credentials, enable the `changefeed.permissions.require_external_connection_sink.enabled` cluster setting. When you enable this setting, users with the [`CHANGEFEED` privilege]({% link {{ page.version.version }}/create-changefeed.md %}#required-privileges) on a set of tables can only create changefeeds into [external connections]({% link {{ page.version.version }}/create-external-connection.md %}).
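+
+A minimal sketch of enabling the setting:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SET CLUSTER SETTING changefeed.permissions.require_external_connection_sink.enabled = true;
+~~~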
diff --git a/src/current/_includes/v25.1/cdc/external-urls.md b/src/current/_includes/v25.1/cdc/external-urls.md
new file mode 100644
index 00000000000..d87cb7538d1
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/external-urls.md
@@ -0,0 +1,48 @@
+~~~
+[scheme]://[host]/[path]?[parameters]
+~~~
+
+Location | Scheme | Host | Parameters |
+|-------------------------------------------------------------+-------------+--------------------------------------------------+----------------------------------------------------------------------------
+Amazon | `s3` | Bucket name | `AUTH` [1](#considerations) (optional; can be `implicit` or `specified`), `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `AWS_SESSION_TOKEN`
+Azure | `azure` | N/A (see [Example file URLs](#example-file-urls) | `AZURE_ACCOUNT_KEY`, `AZURE_ACCOUNT_NAME`
+Google Cloud [2](#considerations) | `gs` | Bucket name | `AUTH` (optional; can be `default`, `implicit`, or `specified`), `CREDENTIALS`
+HTTP [3](#considerations) | `http` | Remote host | N/A
+NFS/Local [4](#considerations) | `nodelocal` | `nodeID` or `self` [5](#considerations) (see [Example file URLs](#example-file-urls)) | N/A
+S3-compatible services [6](#considerations) | `s3` | Bucket name | `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `AWS_SESSION_TOKEN`, `AWS_REGION` [7](#considerations) (optional), `AWS_ENDPOINT`
+
+{{site.data.alerts.callout_info}}
+The location parameters often contain special characters that need to be URI-encoded. Use Javascript's [`encodeURIComponent`](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/encodeURIComponent) function or Go language's [`url.QueryEscape`](https://golang.org/pkg/net/url/#QueryEscape) function to URI-encode the parameters. Other languages provide similar functions to URI-encode special characters.
+{{site.data.alerts.end}}
+
+{{site.data.alerts.callout_info}}
+If your environment requires an HTTP or HTTPS proxy server for outgoing connections, you can set the standard `HTTP_PROXY` and `HTTPS_PROXY` environment variables when starting CockroachDB.
+
+ If you cannot run a full proxy, you can disable external HTTP(S) access (as well as custom HTTP(S) endpoints) when performing bulk operations (e.g., [`BACKUP`]({% link {{ page.version.version }}/backup.md %}), [`RESTORE`]({% link {{ page.version.version }}/restore.md %}), etc.) by using the [`--external-io-disable-http` flag]({% link {{ page.version.version }}/cockroach-start.md %}#security). You can also disable the use of implicit credentials when accessing external cloud storage services for various bulk operations by using the [`--external-io-disable-implicit-credentials` flag]({% link {{ page.version.version }}/cockroach-start.md %}#security).
+{{site.data.alerts.end}}
+
+
+
+- 1 If the `AUTH` parameter is not provided, AWS connections default to `specified` and the access keys must be provided in the URI parameters. If the `AUTH` parameter is `implicit`, the access keys can be omitted and [the credentials will be loaded from the environment](https://docs.aws.amazon.com/sdk-for-go/api/aws/session/).
+
+- 2 If the `AUTH` parameter is not specified, the `cloudstorage.gs.default.key` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) will be used if it is non-empty, otherwise the `implicit` behavior is used. If the `AUTH` parameter is `implicit`, all GCS connections use Google's [default authentication strategy](https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application). If the `AUTH` parameter is `default`, the `cloudstorage.gs.default.key` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) must be set to the contents of a [service account file](https://cloud.google.com/docs/authentication/production#obtaining_and_providing_service_account_credentials_manually) which will be used during authentication. If the `AUTH` parameter is `specified`, GCS connections are authenticated on a per-statement basis, which allows the JSON key object to be sent in the `CREDENTIALS` parameter. The JSON key object should be Base64-encoded (using the standard encoding in [RFC 4648](https://tools.ietf.org/html/rfc4648)).
+
+- 3 You can create your own HTTP server with [Caddy or nginx]({% link {{ page.version.version }}/use-a-local-file-server.md %}). A custom root CA can be appended to the system's default CAs by setting the `cloudstorage.http.custom_ca` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}), which will be used when verifying certificates from HTTPS URLs.
+
+- 4 The file system backup location on the NFS drive is relative to the path specified by the `--external-io-dir` flag set while [starting the node]({% link {{ page.version.version }}/cockroach-start.md %}). If the flag is set to `disabled`, then imports from local directories and NFS drives are disabled.
+
+- 5 Using a `nodeID` is required and the data files will be in the `extern` directory of the specified node. In most cases (including single-node clusters), using `nodelocal://1/` is sufficient. Use `self` if you do not want to specify a `nodeID`, and the individual data files will be in the `extern` directories of arbitrary nodes; however, to work correctly, each node must have the [`--external-io-dir` flag]({% link {{ page.version.version }}/cockroach-start.md %}#general) point to the same NFS mount or other network-backed, shared storage.
+
+- 6 A custom root CA can be appended to the system's default CAs by setting the `cloudstorage.http.custom_ca` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}), which will be used when verifying certificates from an S3-compatible service.
+
+- 7 The `AWS_REGION` parameter is optional since it is not a required parameter for most S3-compatible services. Specify the parameter only if your S3-compatible service requires it.
+
+#### Example file URLs
+
+Location | Example
+-------------+----------------------------------------------------------------------------------
+Amazon S3 | `s3://acme-co/employees?AWS_ACCESS_KEY_ID=123&AWS_SECRET_ACCESS_KEY=456`
+Azure | `azure-blob://employees?AZURE_ACCOUNT_KEY=123&AZURE_ACCOUNT_NAME=acme-co`
+Google Cloud | `gs://acme-co`
+HTTP | `http://localhost:8080/employees`
+NFS/Local | `nodelocal://1/path/employees`, `nodelocal://self/nfsmount/backups/employees` [5](#considerations)
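+
+For example, a [`BACKUP`]({% link {{ page.version.version }}/backup.md %}) statement might reference one of these locations as follows. The database name, bucket, and credentials are illustrative placeholders:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+BACKUP DATABASE movr INTO 's3://acme-co/backups?AWS_ACCESS_KEY_ID=123&AWS_SECRET_ACCESS_KEY=456' AS OF SYSTEM TIME '-10s';
+~~~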
diff --git a/src/current/_includes/v25.1/cdc/filter-show-changefeed-jobs-columns.md b/src/current/_includes/v25.1/cdc/filter-show-changefeed-jobs-columns.md
new file mode 100644
index 00000000000..39471ac538d
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/filter-show-changefeed-jobs-columns.md
@@ -0,0 +1,11 @@
+You can filter the columns that `SHOW CHANGEFEED JOBS` displays using a `SELECT` statement:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SELECT job_id, sink_uri, status, format FROM [SHOW CHANGEFEED JOBS] WHERE job_id = 997306743028908033;
+~~~
+~~~
+ job_id | sink_uri | status | format
+---------------------+------------------+----------+---------
+ 997306743028908033 | external://kafka | running | json
+~~~
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/kafka-acks.md b/src/current/_includes/v25.1/cdc/kafka-acks.md
new file mode 100644
index 00000000000..dec1ba68392
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/kafka-acks.md
@@ -0,0 +1 @@
+You must also set `acks` to `ALL` in your [server-side Kafka configuration](https://kafka.apache.org/documentation/#producerconfigs_acks) for this to provide high durability delivery.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/kafka-vpc-limitation.md b/src/current/_includes/v25.1/cdc/kafka-vpc-limitation.md
new file mode 100644
index 00000000000..4f27e1b4777
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/kafka-vpc-limitation.md
@@ -0,0 +1 @@
+[VPC Peering]({% link cockroachcloud/network-authorization.md %}#vpc-peering) and [AWS PrivateLink]({% link cockroachcloud/network-authorization.md %}#aws-privatelink) in CockroachDB {{ site.data.products.advanced }} clusters do **not** support connecting to a [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka) sink's internal IP addresses for [changefeeds]({% link {{ page.version.version }}/change-data-capture-overview.md %}). To connect to a Kafka sink from CockroachDB {{ site.data.products.advanced }}, it is necessary to expose the Kafka cluster's external IP address and open ports with firewall rules to allow access from a CockroachDB {{ site.data.products.advanced }} cluster.
diff --git a/src/current/_includes/v25.1/cdc/lagging-ranges.md b/src/current/_includes/v25.1/cdc/lagging-ranges.md
new file mode 100644
index 00000000000..8316c347dda
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/lagging-ranges.md
@@ -0,0 +1,12 @@
+Use the `changefeed.lagging_ranges` metric to track the number of [ranges]({% link {{ page.version.version }}/architecture/overview.md %}#range) that are behind in a changefeed. This is calculated based on the [changefeed options]({% link {{ page.version.version }}/create-changefeed.md %}#options):
+
+- `lagging_ranges_threshold` sets a duration from the present that determines the length of time a range is considered to be lagging behind, which will then track in the [`lagging_ranges`]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#lagging-ranges-metric) metric. Note that ranges undergoing an [initial scan]({% link {{ page.version.version }}/create-changefeed.md %}#initial-scan) for longer than the threshold duration are considered to be lagging. Starting a changefeed with an initial scan on a large table will likely increment the metric for each range in the table. As ranges complete the initial scan, the number of ranges lagging behind will decrease.
+ - **Default:** `3m`
+- `lagging_ranges_polling_interval` sets the interval rate for when lagging ranges are checked and the `lagging_ranges` metric is updated. Polling adds latency to the `lagging_ranges` metric being updated. For example, if a range falls behind by 3 minutes, the metric may not update until an additional minute afterward.
+ - **Default:** `1m`
+
+Use the `changefeed.total_ranges` metric to monitor the number of ranges that are watched by [aggregator processors]({% link {{ page.version.version }}/how-does-an-enterprise-changefeed-work.md %}) participating in the changefeed job. If you're experiencing lagging ranges, `changefeed.total_ranges` may indicate that the number of ranges watched by aggregator processors in the job is unbalanced. You may want to try [pausing]({% link {{ page.version.version }}/pause-job.md %}) the changefeed and then [resuming]({% link {{ page.version.version }}/resume-job.md %}) it, so that the changefeed replans the work in the cluster. `changefeed.total_ranges` shares the same polling interval as the `changefeed.lagging_ranges` metric, which is controlled by the `lagging_ranges_polling_interval` option.
+
+{{site.data.alerts.callout_success}}
+You can use the [`metrics_label`]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#using-changefeed-metrics-labels) option to track the `lagging_ranges` and `total_ranges` metric per changefeed.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/list-cloud-changefeed-uris.md b/src/current/_includes/v25.1/cdc/list-cloud-changefeed-uris.md
new file mode 100644
index 00000000000..6c9849b111e
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/list-cloud-changefeed-uris.md
@@ -0,0 +1,6 @@
+Location | Example
+-------------+----------------------------------------------------------------------------------
+Amazon S3 | `'s3://{BUCKET NAME}/{PATH}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}'`
+Azure Blob Storage | `'azure://{CONTAINER NAME}/{PATH}?AZURE_ACCOUNT_NAME={ACCOUNT NAME}&AZURE_ACCOUNT_KEY={URL-ENCODED KEY}'`
+Google Cloud | `'gs://{BUCKET NAME}/{PATH}?AUTH=specified&CREDENTIALS={ENCODED KEY}'`
+HTTP | `'file-http(s)://localhost:8080/{PATH}'` or `'http(s)://localhost:8080/{PATH}'` **Note:** Using `http(s)` without the `file-` prefix is deprecated as a [changefeed sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) scheme. There is continued support for `http(s)`, but it will be removed in a future release. We recommend implementing the `file-http(s)` scheme for changefeed messages.
\ No newline at end of file
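+
+As a sketch, a changefeed pointed at one of these locations might look like the following. The table name and URI values are placeholders:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE CHANGEFEED FOR TABLE movr.users
+  INTO 's3://{BUCKET NAME}/{PATH}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}'
+  WITH resolved = '10s';
+~~~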
diff --git a/src/current/_includes/v25.1/cdc/message-format-list.md b/src/current/_includes/v25.1/cdc/message-format-list.md
new file mode 100644
index 00000000000..7f77dbba575
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/message-format-list.md
@@ -0,0 +1,6 @@
+By default, changefeeds emit messages in JSON format. You can use a different format by [creating a changefeed](create-changefeed.html) with the [`format`](create-changefeed.html#format) option and specifying one of the following:
+
+- `json`
+- `csv`
+- `avro`
+- `parquet`
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/metrics-labels.md b/src/current/_includes/v25.1/cdc/metrics-labels.md
new file mode 100644
index 00000000000..6f97eaffcdd
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/metrics-labels.md
@@ -0,0 +1,8 @@
+To measure metrics per changefeed, you can define a "metrics label" for one or more changefeeds. Each labeled changefeed will increment each [changefeed metric]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#metrics) under its label. Metrics label information is sent with time-series metrics to `http://{host}:{http-port}/_status/vars`, viewable via the [Prometheus endpoint]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#prometheus-endpoint). An aggregated metric across all changefeeds is also measured.
+
+It is necessary to consider the following when applying metrics labels to changefeeds:
+
+- The `server.child_metrics.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) must be set to `true` before using the `metrics_label` option. `server.child_metrics.enabled` is enabled by default in {{ site.data.products.standard }} and {{ site.data.products.basic }}.
+- Metrics label information is sent to the `_status/vars` endpoint, but will **not** show up in [`debug.zip`]({% link {{ page.version.version }}/cockroach-debug-zip.md %}) or the [DB Console]({% link {{ page.version.version }}/ui-overview.md %}).
+- Introducing labels to isolate a changefeed's metrics can significantly increase cardinality. To prevent cardinality explosion, there is a limit of 1024 unique labels. When labels are applied to high-cardinality data (data with many unique values), each labeled changefeed produces an additional metric series for every metric, and this data grows over time. This can affect performance as the per-changefeed metric-series data accumulates under each label.
+- The maximum length of a metrics label is 128 bytes.
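+
+For example, assuming a `movr.rides` table and an `external://kafka` connection (both illustrative), you might enable child metrics and attach a label to a changefeed as follows:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SET CLUSTER SETTING server.child_metrics.enabled = true;
+CREATE CHANGEFEED FOR TABLE movr.rides INTO 'external://kafka' WITH metrics_label = 'rides_cf';
+~~~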
diff --git a/src/current/_includes/v25.1/cdc/modify-changefeed.md b/src/current/_includes/v25.1/cdc/modify-changefeed.md
new file mode 100644
index 00000000000..fde29d8687e
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/modify-changefeed.md
@@ -0,0 +1,9 @@
+To modify an {{ site.data.products.enterprise }} changefeed, [pause]({% link {{ page.version.version }}/create-and-configure-changefeeds.md %}#pause) the job and then use:
+
+~~~ sql
+ALTER CHANGEFEED job_id {ADD table | DROP table | SET option | UNSET option};
+~~~
+
+You can add new table targets, remove them, set new [changefeed options]({% link {{ page.version.version }}/create-changefeed.md %}#options), and unset them.
+
+For more information, see [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}).
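+
+For example, assuming a paused changefeed with a hypothetical job ID and a `movr.rides` table, a modification might look like the following sketch:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+ALTER CHANGEFEED 685724608744325121 ADD movr.rides SET resolved = '10s';
+~~~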
diff --git a/src/current/_includes/v25.1/cdc/msk-dedicated-support.md b/src/current/_includes/v25.1/cdc/msk-dedicated-support.md
new file mode 100644
index 00000000000..9c3c119b838
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/msk-dedicated-support.md
@@ -0,0 +1 @@
+If you would like to connect a changefeed running on a CockroachDB Dedicated cluster to an Amazon MSK Serverless cluster, contact your Cockroach Labs account team.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/msk-iam-policy-role-step.md b/src/current/_includes/v25.1/cdc/msk-iam-policy-role-step.md
new file mode 100644
index 00000000000..0758d426419
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/msk-iam-policy-role-step.md
@@ -0,0 +1,51 @@
+1. In the AWS Management Console, go to the [IAM console](https://console.aws.amazon.com/iam/), select **Policies** from the navigation, and then **Create Policy**.
+1. Using the **JSON** tab option, update the policy with the following JSON. These permissions will allow you to connect to the cluster, manage topics, and consume messages. You may want to adjust the permissions to suit your permission model. For more details on the available permissions, refer to the AWS documentation on [IAM Access Control](https://docs.aws.amazon.com/msk/latest/developerguide/iam-access-control.html#kafka-actions) for MSK.
+
+ Replace the instances of `arn:aws:kafka:{region}:{account ID}:cluster/{msk-cluster-name}` with the MSK ARN from your cluster's summary page and add `/*` to the end, like the following:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~json
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "kafka-cluster:Connect",
+ "kafka-cluster:AlterCluster",
+ "kafka-cluster:DescribeCluster"
+ ],
+ "Resource": [
+ "arn:aws:kafka:{region}:{account ID}:cluster/{msk-cluster-name}/*"
+ ]
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "kafka-cluster:*Topic",
+ "kafka-cluster:WriteData",
+ "kafka-cluster:ReadData"
+ ],
+ "Resource": [
+ "arn:aws:kafka:{region}:{account ID}:cluster/{msk-cluster-name}/*"
+ ]
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "kafka-cluster:AlterGroup",
+ "kafka-cluster:DescribeGroup"
+ ],
+ "Resource": [
+ "arn:aws:kafka:{region}:{account ID}:cluster/{msk-cluster-name}/*"
+ ]
+ }
+ ]
+ }
+ ~~~
+
+1. Once you have added your policy, add a policy name (for example, `msk-policy`), click **Next**, and **Create policy**.
+1. Return to the [IAM console](https://console.aws.amazon.com/iam/), select **Roles** from the navigation, and then **Create role**.
+1. Select **AWS service** for the **Trusted entity type**. For **Use case**, select **EC2** from the dropdown. Click **Next**.
+1. On the **Add permissions** page, search for the IAM policy (`msk-policy`) you just created. Click **Next**.
+1. Name the role (for example, `msk-role`) and click **Create role**.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/msk-tutorial-crdb-setup.md b/src/current/_includes/v25.1/cdc/msk-tutorial-crdb-setup.md
new file mode 100644
index 00000000000..40de46f2af7
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/msk-tutorial-crdb-setup.md
@@ -0,0 +1,33 @@
+1. (Optional) On the EC2 instance running CockroachDB, run the [Movr]({% link {{ page.version.version }}/movr.md %}) application workload to set up some data for your changefeed.
+
+ Create the schema for the workload:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~shell
+ cockroach workload init movr
+ ~~~
+
+ Then run the workload:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~shell
+ cockroach workload run movr --duration=1m
+ ~~~
+
+1. Start a SQL session. For details on the available flags, refer to the [`cockroach sql`]({% link {{ page.version.version }}/cockroach-sql.md %}) page.
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ cockroach sql --insecure
+ ~~~
+
+ {{site.data.alerts.callout_info}}
+ To set your {{ site.data.products.enterprise }} license, refer to the [Licensing FAQs]({% link {{ page.version.version }}/licensing-faqs.md %}#set-a-license) page.
+ {{site.data.alerts.end}}
+
+1. Enable the `kv.rangefeed.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ SET CLUSTER SETTING kv.rangefeed.enabled = true;
+ ~~~
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/mux-rangefeed.md b/src/current/_includes/v25.1/cdc/mux-rangefeed.md
new file mode 100644
index 00000000000..964918545df
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/mux-rangefeed.md
@@ -0,0 +1,3 @@
+`MuxRangefeed` is enabled by default.
+
+`MuxRangefeed` is a subsystem that improves the performance of rangefeeds at scale by significantly reducing the overhead of running them. Without `MuxRangefeed`, the number of RPC streams is proportional to the number of ranges in a table; a large table could have tens of thousands of ranges. With `MuxRangefeed`, the number of RPC streams is instead proportional to the number of nodes in the cluster.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/note-changefeed-message-page.md b/src/current/_includes/v25.1/cdc/note-changefeed-message-page.md
new file mode 100644
index 00000000000..4570458c8be
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/note-changefeed-message-page.md
@@ -0,0 +1 @@
+For an overview of the messages emitted from changefeeds, see the [Changefeed Messages]({% link {{ page.version.version }}/changefeed-messages.md %}) page.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/oauth-description.md b/src/current/_includes/v25.1/cdc/oauth-description.md
new file mode 100644
index 00000000000..a12ea818927
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/oauth-description.md
@@ -0,0 +1 @@
+[OAuth 2.0](https://oauth.net/2/) authentication uses credentials managed by a third-party provider (IdP) to authenticate with Kafka instead of requiring you to provide your Kafka cluster credentials directly in a [`CREATE CHANGEFEED`]({% link {{ page.version.version }}/create-changefeed.md %}) statement. Your provider's authentication server will issue a temporary token, giving you flexibility to apply access rules on the credentials that your IdP provides.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/options-table-note.md b/src/current/_includes/v25.1/cdc/options-table-note.md
new file mode 100644
index 00000000000..7ae80055e08
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/options-table-note.md
@@ -0,0 +1 @@
+This table shows the parameters for changefeeds to a specific sink. The `CREATE CHANGEFEED` page provides a list of all the available [options]({% link {{ page.version.version }}/create-changefeed.md %}#options).
diff --git a/src/current/_includes/v25.1/cdc/print-key.md b/src/current/_includes/v25.1/cdc/print-key.md
new file mode 100644
index 00000000000..ab0b0924d30
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/print-key.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+This example only prints the value. To print both the key and value of each message in the changefeed (e.g., to observe what happens with `DELETE`s), use the `--property print.key=true` flag.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/cdc/privilege-model.md b/src/current/_includes/v25.1/cdc/privilege-model.md
new file mode 100644
index 00000000000..e7b8153004c
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/privilege-model.md
@@ -0,0 +1,43 @@
+{{site.data.alerts.callout_info}}
+Starting in v22.2, CockroachDB introduces a new [system-level privilege model]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges) that provides finer control over a user's privilege to work with the database, including creating and managing changefeeds.
+
+There is continued support for the [legacy privilege model](#legacy-privilege-model) for changefeeds in v23.1; however, it **will be removed** in a future release of CockroachDB. We recommend implementing the new privilege model that follows in this section for all changefeeds.
+{{site.data.alerts.end}}
+
+You can [grant]({% link {{ page.version.version }}/grant.md %}#grant-privileges-on-specific-tables-in-a-database) a user the `CHANGEFEED` privilege to allow them to create changefeeds on a specific table:
+
+{% include_cached copy-clipboard.html %}
+~~~sql
+GRANT CHANGEFEED ON TABLE example_table TO user;
+~~~
+
+When you grant a user the `CHANGEFEED` privilege on a set of tables, they can:
+
+- Create changefeeds on the target tables even if the user does **not** have the [`CONTROLCHANGEFEED` role option]({% link {{ page.version.version }}/alter-role.md %}#role-options) or the `SELECT` privilege on the tables.
+- Manage the changefeed jobs running on the tables using the [`SHOW CHANGEFEED JOB`]({% link {{ page.version.version }}/show-jobs.md %}#show-changefeed-jobs), [`PAUSE JOB`]({% link {{ page.version.version }}/pause-job.md %}), [`RESUME JOB`]({% link {{ page.version.version }}/resume-job.md %}), and [`CANCEL JOB`](cancel-job.html) commands.
+
+These users will be able to create changefeeds, but they will not be able to run a `SELECT` query on that data directly. However, they could still read this data indirectly if they have read access to the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}).
+
+{% include {{ page.version.version }}/cdc/ext-conn-cluster-setting.md %}
+
+### Privilege model
+
+The following summarizes the operations users can run when they have changefeed privileges on a table:
+
+Granted privileges | Usage
+-------------------+-------
+`CHANGEFEED` | Create changefeeds on tables. Manage changefeed jobs on tables.
+`CHANGEFEED` + [`USAGE`]({% link {{ page.version.version }}/create-external-connection.md %}#required-privileges) on external connection | Create changefeeds on tables to an external connection URI. Manage changefeed jobs on tables. **Note:** If you need to manage access to changefeed sink URIs, set the `changefeed.permissions.require_external_connection_sink.enabled=true` cluster setting. This will mean that users with these privileges can **only** create changefeeds on external connections.
+`SELECT` | Create a sinkless changefeed that emits messages to a SQL client.
+**Deprecated** `CONTROLCHANGEFEED` role option + `SELECT` | Create changefeeds on tables.
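+
+For example, to require that changefeeds use external connections as described in the table above, you might set the cluster setting and then grant `USAGE` on a hypothetical `kafka_sink` connection:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SET CLUSTER SETTING changefeed.permissions.require_external_connection_sink.enabled = true;
+GRANT USAGE ON EXTERNAL CONNECTION kafka_sink TO user;
+~~~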
+
+You can add `CHANGEFEED` to the user or role's [default privileges]({% link {{ page.version.version }}/security-reference/authorization.md %}#default-privileges) with [`ALTER DEFAULT PRIVILEGES`]({% link {{ page.version.version }}/alter-default-privileges.md %}#grant-default-privileges-to-a-specific-role):
+
+{% include_cached copy-clipboard.html %}
+~~~sql
+ALTER DEFAULT PRIVILEGES GRANT CHANGEFEED ON TABLES TO user;
+~~~
+
+{{site.data.alerts.callout_info}}
+Users with the `CONTROLCHANGEFEED` role option must have `SELECT` on each table, even if they are also granted the `CHANGEFEED` privilege. The `CONTROLCHANGEFEED` role option will be deprecated in a future release.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/pts-gc-monitoring.md b/src/current/_includes/v25.1/cdc/pts-gc-monitoring.md
new file mode 100644
index 00000000000..11a2c4d1fd0
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/pts-gc-monitoring.md
@@ -0,0 +1,6 @@
+You can monitor changefeed jobs for [protected timestamp]({% link {{ page.version.version }}/architecture/storage-layer.md %}#protected-timestamps) usage. We recommend setting up {% if page.name == "monitor-and-debug-changefeeds.md" %} monitoring {% else %} [monitoring]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}) {% endif %}for the following metrics:
+
+- `jobs.changefeed.protected_age_sec`: Tracks the age of the oldest [protected timestamp]({% link {{ page.version.version }}/architecture/storage-layer.md %}#protected-timestamps) record protected by changefeed jobs. We recommend monitoring if `protected_age_sec` is greater than [`gc.ttlseconds`]({% link {{ page.version.version }}/configure-replication-zones.md %}#gc-ttlseconds). As `protected_age_sec` increases, garbage accumulation increases. [Garbage collection]({% link {{ page.version.version }}/architecture/storage-layer.md %}#garbage-collection) will not progress on a table, database, or cluster if the protected timestamp record is present.
+- `jobs.changefeed.currently_paused`: Tracks the number of changefeed jobs currently considered [paused]({% link {{ page.version.version }}/pause-job.md %}). Since paused changefeed jobs can accumulate garbage, it is important to [monitor the number of paused changefeeds]({% link {{ page.version.version }}/pause-job.md %}#monitoring-paused-jobs).
+- `jobs.changefeed.expired_pts_records`: Tracks the number of expired [protected timestamp]({% link {{ page.version.version }}/architecture/storage-layer.md %}#protected-timestamps) records owned by changefeed jobs. You can monitor this metric in conjunction with the [`gc_protect_expires_after` option]({% link {{ page.version.version }}/create-changefeed.md %}#gc-protect-expires-after).
+- `jobs.changefeed.protected_record_count`: Tracks the number of [protected timestamp]({% link {{ page.version.version }}/architecture/storage-layer.md %}#protected-timestamps) records held by changefeed jobs.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/recommendation-monitoring-pts.md b/src/current/_includes/v25.1/cdc/recommendation-monitoring-pts.md
new file mode 100644
index 00000000000..c802caf5cd3
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/recommendation-monitoring-pts.md
@@ -0,0 +1 @@
+Cockroach Labs recommends monitoring your changefeeds to track [retryable errors]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#changefeed-retry-errors) and [protected timestamp]({% link {{ page.version.version }}/architecture/storage-layer.md %}#protected-timestamps) usage. Refer to the [Monitor and Debug Changefeeds]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}) page for more information.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/schedule-query-example.md b/src/current/_includes/v25.1/cdc/schedule-query-example.md
new file mode 100644
index 00000000000..7a692f21eb4
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/schedule-query-example.md
@@ -0,0 +1,14 @@
+This example creates a nightly export of some filtered table data with a [scheduled changefeed]({% link {{ page.version.version }}/create-schedule-for-changefeed.md %}) that will run just after midnight every night. The changefeed uses [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) to query the table and filter the data it will send to the sink:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE SCHEDULE sf_skateboard FOR CHANGEFEED INTO 'external://cloud-sink' WITH format=csv
+ AS SELECT current_location, id, type, status FROM vehicles
+ WHERE city = 'san francisco' AND type = 'skateboard'
+ RECURRING '1 0 * * *' WITH SCHEDULE OPTIONS on_execution_failure=retry, on_previous_running=start;
+~~~
+
+The [schedule options]({% link {{ page.version.version }}/create-schedule-for-changefeed.md %}#schedule-options) control the schedule's behavior:
+
+- If it runs into a failure, `on_execution_failure=retry` will ensure that the schedule retries the changefeed immediately.
+- If the previous scheduled changefeed is still running, `on_previous_running=start` will start a new changefeed at the defined cadence.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/schema-registry-metric.md b/src/current/_includes/v25.1/cdc/schema-registry-metric.md
new file mode 100644
index 00000000000..b9482feafdc
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/schema-registry-metric.md
@@ -0,0 +1 @@
+Use the `changefeed.schema_registry.retry_count` metric to measure the number of request retries performed when sending requests to the schema registry. For more detail on monitoring changefeeds, refer to [Monitor and Debug Changefeeds]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}).
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/schema-registry-timeout.md b/src/current/_includes/v25.1/cdc/schema-registry-timeout.md
new file mode 100644
index 00000000000..eec8371f282
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/schema-registry-timeout.md
@@ -0,0 +1 @@
+Use the {% if page.name == "create-changefeed.md" %} `timeout={duration}` query parameter {% else %} [`timeout={duration}` query parameter]({% link {{ page.version.version }}/create-changefeed.md %}#confluent-schema-registry) {% endif %}([duration string](https://pkg.go.dev/time#ParseDuration)) in your Confluent Schema Registry URI to change the default timeout for contacting the schema registry. By default, the timeout is 30 seconds.
\ No newline at end of file
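+
+For example, a changefeed using a hypothetical registry host might extend the default timeout as in the following sketch:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE CHANGEFEED FOR TABLE movr.users
+  INTO 'external://kafka'
+  WITH format = avro, confluent_schema_registry = 'https://{REGISTRY HOST}:8081?timeout=90s';
+~~~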
diff --git a/src/current/_includes/v25.1/cdc/show-changefeed-job-retention.md b/src/current/_includes/v25.1/cdc/show-changefeed-job-retention.md
new file mode 100644
index 00000000000..74706999656
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/show-changefeed-job-retention.md
@@ -0,0 +1 @@
+All changefeed jobs will display regardless of whether or when they completed. You can define a retention time for completed jobs, after which they are deleted, by using the `jobs.retention_time` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}).
\ No newline at end of file
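+
+For example, to keep completed jobs for one day (an illustrative value), you might run:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SET CLUSTER SETTING jobs.retention_time = '24h';
+~~~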
diff --git a/src/current/_includes/v25.1/cdc/show-changefeed-job.md b/src/current/_includes/v25.1/cdc/show-changefeed-job.md
new file mode 100644
index 00000000000..1515b2eeb95
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/show-changefeed-job.md
@@ -0,0 +1,24 @@
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SHOW CHANGEFEED JOBS;
+~~~
+~~~
+ job_id | description | ...
++----------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------+ ...
+ 685724608744325121 | CREATE CHANGEFEED FOR TABLE mytable INTO 'kafka://localhost:9092' WITH confluent_schema_registry = 'http://localhost:8081', format = 'avro', resolved, updated | ...
+ 685723987509116929 | CREATE CHANGEFEED FOR TABLE mytable INTO 'kafka://localhost:9092' WITH confluent_schema_registry = 'http://localhost:8081', format = 'avro', resolved, updated | ...
+(2 rows)
+~~~
+
+To show an individual {{ site.data.products.enterprise }} changefeed:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SHOW CHANGEFEED JOB {job_id};
+~~~
+~~~
+ job_id | description | user_name | status | running_status | created | started | finished | modified | high_water_timestamp | error | sink_uri | full_table_names | topics | format
+---------------------+--------------------------------------------------------------------------------------+-----------+---------+------------------------------------------+----------------------------+----------------------------+----------+----------------------------+--------------------------------+-------+----------------+---------------------+--------+----------
+ 866218332400680961 | CREATE CHANGEFEED FOR TABLE movr.users INTO 'external://aws' WITH format = 'parquet' | root | running | running: resolved=1684438482.937939878,0 | 2023-05-18 14:14:16.323465 | 2023-05-18 14:14:16.360245 | NULL | 2023-05-18 19:35:16.120407 | 1684438482937939878.0000000000 | | external://aws | {movr.public.users} | NULL | parquet
+(1 row)
+~~~
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/sink-URI-external-connection.md b/src/current/_includes/v25.1/cdc/sink-URI-external-connection.md
new file mode 100644
index 00000000000..90ab96f315f
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/sink-URI-external-connection.md
@@ -0,0 +1 @@
+You can create an external connection to represent a changefeed sink URI. This allows you to specify the external connection's name in statements rather than the provider-specific URI. For detail on using external connections, see the [`CREATE EXTERNAL CONNECTION`]({% link {{ page.version.version }}/create-external-connection.md %}) page.
\ No newline at end of file
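+
+As a sketch, assuming a hypothetical Kafka broker address and connection name:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE EXTERNAL CONNECTION kafka_sink AS 'kafka://broker.example.com:9092';
+CREATE CHANGEFEED FOR TABLE movr.users INTO 'external://kafka_sink';
+~~~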
diff --git a/src/current/_includes/v25.1/cdc/sink-configuration-detail.md b/src/current/_includes/v25.1/cdc/sink-configuration-detail.md
new file mode 100644
index 00000000000..ed96a0ead52
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/sink-configuration-detail.md
@@ -0,0 +1,28 @@
+{{site.data.alerts.callout_danger}}
+Setting either `Messages` or `Bytes` to a non-zero value without also setting `Frequency` will cause the sink to treat `Frequency` as infinite, which is invalid and will cause an error, since messages could sit in a batch indefinitely if the other conditions never trigger. If either `Messages` or `Bytes` has a non-zero value, a non-zero value for `Frequency` **must** also be provided.
+{{site.data.alerts.end}}
+
+Some complexities to consider when setting `Flush` fields for batching:
+
+- When all batching parameters are zero (`"Messages"`, `"Bytes"`, and `"Frequency"`), the sink will interpret this configuration as "send a batch every time a message is available." This would be the same as not providing any configuration at all:
+
+ ~~~
+ {
+ "Flush": {
+ "Messages": 0,
+ "Bytes": 0,
+ "Frequency": "0s"
+ }
+ }
+ ~~~
+
+- If one or more fields are set to non-zero values, the sink will interpret any fields with a zero value as infinity. For example, in the following configuration, the sink will send a batch whenever the size reaches 100 messages, **or** when 5 seconds have passed since the batch was populated with its first message. `Bytes` is unset, so the batch size is unlimited and no flush will be triggered due to batch size:
+
+ ~~~
+ {
+ "Flush": {
+ "Messages": 100,
+ "Frequency": "5s"
+ }
+ }
+ ~~~
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/sink-list.md b/src/current/_includes/v25.1/cdc/sink-list.md
new file mode 100644
index 00000000000..6468b3d317c
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/sink-list.md
@@ -0,0 +1,8 @@
+- {% if page.name == "changefeed-sinks.md" %} [Amazon MSK](#amazon-msk) {% else %} [Amazon MSK]({% link {{ page.version.version }}/changefeed-sinks.md %}#amazon-msk) {% endif %}
+- {% if page.name == "changefeed-sinks.md" %} [Apache Pulsar](#apache-pulsar) (in Preview) {% else %} [Apache Pulsar]({% link {{ page.version.version }}/changefeed-sinks.md %}#apache-pulsar) (in Preview) {% endif %}
+- {% if page.name == "changefeed-sinks.md" %} [Azure Event Hubs](#azure-event-hubs) {% else %} [Azure Event Hubs]({% link {{ page.version.version }}/changefeed-sinks.md %}#azure-event-hubs) {% endif %}
+- {% if page.name == "changefeed-sinks.md" %} [Cloud Storage](#cloud-storage-sink) / HTTP {% else %} [Cloud Storage]({% link {{ page.version.version }}/changefeed-sinks.md %}#cloud-storage-sink) / HTTP {% endif %}
+- {% if page.name == "changefeed-sinks.md" %} [Confluent Cloud](#confluent-cloud) {% else %} [Confluent Cloud]({% link {{ page.version.version }}/changefeed-sinks.md %}#confluent-cloud) {% endif %}
+- {% if page.name == "changefeed-sinks.md" %} [Google Cloud Pub/Sub](#google-cloud-pub-sub) {% else %} [Google Cloud Pub/Sub]({% link {{ page.version.version }}/changefeed-sinks.md %}#google-cloud-pub-sub) {% endif %}
+- {% if page.name == "changefeed-sinks.md" %} [Kafka](#kafka) {% else %} [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka) {% endif %}
+- {% if page.name == "changefeed-sinks.md" %} [Webhook](#webhook-sink) {% else %} [Webhook]({% link {{ page.version.version }}/changefeed-sinks.md %}#webhook-sink) {% endif %}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/sql-cluster-settings-example.md b/src/current/_includes/v25.1/cdc/sql-cluster-settings-example.md
new file mode 100644
index 00000000000..fa2887967a1
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/sql-cluster-settings-example.md
@@ -0,0 +1,27 @@
+1. As the `root` user, open the [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --insecure
+ ~~~
+
+1. Set your organization name and [{{ site.data.products.enterprise }} license]({% link {{ page.version.version }}/licensing-faqs.md %}#types-of-licenses) key:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > SET CLUSTER SETTING cluster.organization = '';
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > SET CLUSTER SETTING enterprise.license = '';
+ ~~~
+
+1. Enable the `kv.rangefeed.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > SET CLUSTER SETTING kv.rangefeed.enabled = true;
+ ~~~
+
+ {% include {{ page.version.version }}/cdc/cdc-cloud-rangefeed.md %}
diff --git a/src/current/_includes/v25.1/cdc/tutorial-privilege-check.md b/src/current/_includes/v25.1/cdc/tutorial-privilege-check.md
new file mode 100644
index 00000000000..4ad9801b37a
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/tutorial-privilege-check.md
@@ -0,0 +1 @@
+The `CHANGEFEED` privilege to create and manage changefeed jobs. Refer to [Required privileges]({% link {{ page.version.version }}/create-changefeed.md %}#required-privileges) for more details.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/types-udt-composite-general.md b/src/current/_includes/v25.1/cdc/types-udt-composite-general.md
new file mode 100644
index 00000000000..f702cfec5cb
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/types-udt-composite-general.md
@@ -0,0 +1 @@
+Changefeed types are not fully integrated with [user-defined composite types]({% link {{ page.version.version }}/create-type.md %}). Running changefeeds with user-defined composite types is in [Preview]({% link {{ page.version.version }}/cockroachdb-feature-availability.md %}#feature-availability-phases). Certain changefeed types do not support user-defined composite types. Refer to the change data capture [Known Limitations]({% link {{ page.version.version }}/create-and-configure-changefeeds.md %}#known-limitations) for more detail.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/cdc/url-encoding.md b/src/current/_includes/v25.1/cdc/url-encoding.md
new file mode 100644
index 00000000000..eb12a94bbe0
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/url-encoding.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+Parameters should always be URI-encoded before they are included in the changefeed's URI, as they often contain special characters. Use JavaScript's [encodeURIComponent](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/encodeURIComponent) function or Go language's [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape) function to URI-encode the parameters. Other languages provide similar functions to URI-encode special characters.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/cdc/virtual-computed-column-cdc.md b/src/current/_includes/v25.1/cdc/virtual-computed-column-cdc.md
new file mode 100644
index 00000000000..60910bb817a
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/virtual-computed-column-cdc.md
@@ -0,0 +1 @@
+As of v22.1, changefeeds filter out [`VIRTUAL` computed columns]({% link {{ page.version.version }}/computed-columns.md %}) from events by default. This is a [backward-incompatible change]({% link releases/v22.1.md %}#v22-1-0-backward-incompatible-changes). To maintain the changefeed behavior in previous versions where [`NULL`]({% link {{ page.version.version }}/null-handling.md %}) values are emitted for virtual computed columns, see the [`virtual_columns`]({% link {{ page.version.version }}/create-changefeed.md %}#virtual-columns) option for more detail.
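+
+For example, assuming a `movr.rides` table and an `external://kafka` connection (both illustrative), a changefeed that emits `NULL` for virtual computed columns might be created as follows:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE CHANGEFEED FOR TABLE movr.rides INTO 'external://kafka' WITH virtual_columns = 'null';
+~~~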
diff --git a/src/current/_includes/v25.1/cdc/webhook-beta.md b/src/current/_includes/v25.1/cdc/webhook-beta.md
new file mode 100644
index 00000000000..5d27a27585e
--- /dev/null
+++ b/src/current/_includes/v25.1/cdc/webhook-beta.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+The webhook sink is currently in **beta** — see [usage considerations]({% link {{ page.version.version }}/changefeed-sinks.md %}#webhook-sink), available [parameters]({% link {{ page.version.version }}/create-changefeed.md %}#parameters), and [options]({% link {{ page.version.version }}/create-changefeed.md %}#options) for more information.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/child-metrics-table.md b/src/current/_includes/v25.1/child-metrics-table.md
new file mode 100644
index 00000000000..2b1a16f092a
--- /dev/null
+++ b/src/current/_includes/v25.1/child-metrics-table.md
@@ -0,0 +1,27 @@
+{% assign metrics = site.data.metrics.child-metrics | where_exp: "metrics", "metrics.feature contains feature" | sort: "child_metric_id" %}
+{% comment %} Fetch child-metrics for given feature. {% endcomment %}
+
+Following is a list of the metrics that have child metrics:
+
+<table>
+    <thead>
+        <tr>
+            <td><b>CockroachDB Metric Name</b></td>
+            <td><b>{% if feature == "ldr" %}Description{% else %}Description When Aggregated{% endif %}</b></td>
+            <td><b>Type</b></td>
+            <td><b>Unit</b></td>
+        </tr>
+    </thead>
+    <tbody>
+    {% for m in metrics %} {% comment %} Iterate through the metrics. {% endcomment %}
+    {% assign metrics-list = site.data.metrics.metrics-list | where: "metric", m.child_metric_id %}
+    {% comment %} Get the row from the metrics-list with the given child_metric_id. {% endcomment %}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/client-transaction-retry.md b/src/current/_includes/v25.1/client-transaction-retry.md
new file mode 100644
index 00000000000..4c65eebea7f
--- /dev/null
+++ b/src/current/_includes/v25.1/client-transaction-retry.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+With the default `SERIALIZABLE` [isolation level]({% link {{ page.version.version }}/transactions.md %}#isolation-levels), CockroachDB may require the client to [retry a transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-retries) in case of read/write [contention]({{ link_prefix }}performance-best-practices-overview.html#understanding-and-avoiding-transaction-contention). CockroachDB provides a [generic retry function](transaction-retry-error-reference.html#client-side-retry-handling) that runs inside a transaction and retries it as needed. The code sample below shows how it is used.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/computed-columns/add-computed-column.md b/src/current/_includes/v25.1/computed-columns/add-computed-column.md
new file mode 100644
index 00000000000..5eff580e575
--- /dev/null
+++ b/src/current/_includes/v25.1/computed-columns/add-computed-column.md
@@ -0,0 +1,55 @@
+In this example, create a table:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> CREATE TABLE x (
+ a INT NULL,
+ b INT NULL AS (a * 2) STORED,
+ c INT NULL AS (a + 4) STORED,
+ FAMILY "primary" (a, b, rowid, c)
+ );
+~~~
+
+Then, insert a row of data:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> INSERT INTO x VALUES (6);
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SELECT * FROM x;
+~~~
+
+~~~
++---+----+----+
+| a | b | c |
++---+----+----+
+| 6 | 12 | 10 |
++---+----+----+
+(1 row)
+~~~
+
+Now add another virtual computed column to the table:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> ALTER TABLE x ADD COLUMN d INT AS (a // 2) VIRTUAL;
+~~~
+
+The `d` column is added to the table and computed from the `a` column divided by 2.
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SELECT * FROM x;
+~~~
+
+~~~
++---+----+----+---+
+| a | b | c | d |
++---+----+----+---+
+| 6 | 12 | 10 | 3 |
++---+----+----+---+
+(1 row)
+~~~
diff --git a/src/current/_includes/v25.1/computed-columns/alter-computed-column.md b/src/current/_includes/v25.1/computed-columns/alter-computed-column.md
new file mode 100644
index 00000000000..d51d64a1df4
--- /dev/null
+++ b/src/current/_includes/v25.1/computed-columns/alter-computed-column.md
@@ -0,0 +1,76 @@
+To alter the formula for a computed column, you must [`DROP`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) and [`ADD`]({% link {{ page.version.version }}/alter-table.md %}#add-column) the column back with the new definition. Take the following table for instance:
+
+{% include_cached copy-clipboard.html %}
+~~~sql
+> CREATE TABLE x (
+a INT NULL,
+b INT NULL AS (a * 2) STORED,
+c INT NULL AS (a + 4) STORED,
+FAMILY "primary" (a, b, rowid, c)
+);
+~~~
+~~~
+CREATE TABLE
+
+
+Time: 4ms total (execution 4ms / network 0ms)
+~~~
+
+Add a computed column `d`:
+
+{% include_cached copy-clipboard.html %}
+~~~sql
+> ALTER TABLE x ADD COLUMN d INT AS (a // 2) STORED;
+~~~
+~~~
+ALTER TABLE
+
+
+Time: 199ms total (execution 199ms / network 0ms)
+~~~
+
+If you try to alter it, you'll get an error:
+
+{% include_cached copy-clipboard.html %}
+~~~sql
+> ALTER TABLE x ALTER COLUMN d INT AS (a // 3) STORED;
+~~~
+~~~
+invalid syntax: statement ignored: at or near "int": syntax error
+SQLSTATE: 42601
+DETAIL: source SQL:
+ALTER TABLE x ALTER COLUMN d INT AS (a // 3) STORED
+ ^
+HINT: try \h ALTER TABLE
+~~~
+
+However, you can drop it and then add it with the new definition:
+
+{% include_cached copy-clipboard.html %}
+~~~sql
+> SET sql_safe_updates = false;
+> ALTER TABLE x DROP COLUMN d;
+> ALTER TABLE x ADD COLUMN d INT AS (a // 3) STORED;
+> SET sql_safe_updates = true;
+~~~
+~~~
+SET
+
+
+Time: 1ms total (execution 0ms / network 0ms)
+
+ALTER TABLE
+
+
+Time: 195ms total (execution 195ms / network 0ms)
+
+ALTER TABLE
+
+
+Time: 186ms total (execution 185ms / network 0ms)
+
+SET
+
+
+Time: 0ms total (execution 0ms / network 0ms)
+~~~
diff --git a/src/current/_includes/v25.1/computed-columns/convert-computed-column.md b/src/current/_includes/v25.1/computed-columns/convert-computed-column.md
new file mode 100644
index 00000000000..2be9bf72587
--- /dev/null
+++ b/src/current/_includes/v25.1/computed-columns/convert-computed-column.md
@@ -0,0 +1,108 @@
+You can convert a stored, computed column into a regular column by using `ALTER TABLE`.
+
+In this example, create a simple table with a computed column:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> CREATE TABLE office_dogs (
+ id INT PRIMARY KEY,
+ first_name STRING,
+ last_name STRING,
+ full_name STRING AS (CONCAT(first_name, ' ', last_name)) STORED
+ );
+~~~
+
+Then, insert a few rows of data:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> INSERT INTO office_dogs (id, first_name, last_name) VALUES
+ (1, 'Petee', 'Hirata'),
+ (2, 'Carl', 'Kimball'),
+ (3, 'Ernie', 'Narayan');
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SELECT * FROM office_dogs;
+~~~
+
+~~~
++----+------------+-----------+---------------+
+| id | first_name | last_name | full_name |
++----+------------+-----------+---------------+
+| 1 | Petee | Hirata | Petee Hirata |
+| 2 | Carl | Kimball | Carl Kimball |
+| 3 | Ernie | Narayan | Ernie Narayan |
++----+------------+-----------+---------------+
+(3 rows)
+~~~
+
+The `full_name` column is computed from the `first_name` and `last_name` columns without the need to define a [view]({% link {{ page.version.version }}/views.md %}). You can view the column details with the [`SHOW COLUMNS`]({% link {{ page.version.version }}/show-columns.md %}) statement:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SHOW COLUMNS FROM office_dogs;
+~~~
+
+~~~
++-------------+-----------+-------------+----------------+------------------------------------+-------------+
+| column_name | data_type | is_nullable | column_default | generation_expression | indices |
++-------------+-----------+-------------+----------------+------------------------------------+-------------+
+| id | INT | false | NULL | | {"primary"} |
+| first_name | STRING | true | NULL | | {} |
+| last_name | STRING | true | NULL | | {} |
+| full_name | STRING | true | NULL | concat(first_name, ' ', last_name) | {} |
++-------------+-----------+-------------+----------------+------------------------------------+-------------+
+(4 rows)
+~~~
+
+Now, convert the computed column (`full_name`) to a regular column:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> ALTER TABLE office_dogs ALTER COLUMN full_name DROP STORED;
+~~~
+
+Check that the computed column was converted:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SHOW COLUMNS FROM office_dogs;
+~~~
+
+~~~
++-------------+-----------+-------------+----------------+-----------------------+-------------+
+| column_name | data_type | is_nullable | column_default | generation_expression | indices |
++-------------+-----------+-------------+----------------+-----------------------+-------------+
+| id | INT | false | NULL | | {"primary"} |
+| first_name | STRING | true | NULL | | {} |
+| last_name | STRING | true | NULL | | {} |
+| full_name | STRING | true | NULL | | {} |
++-------------+-----------+-------------+----------------+-----------------------+-------------+
+(4 rows)
+~~~
+
+The computed column is now a regular column and can be updated as such:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> INSERT INTO office_dogs (id, first_name, last_name, full_name) VALUES (4, 'Lola', 'McDog', 'This is not computed');
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SELECT * FROM office_dogs;
+~~~
+
+~~~
++----+------------+-----------+----------------------+
+| id | first_name | last_name | full_name |
++----+------------+-----------+----------------------+
+| 1 | Petee | Hirata | Petee Hirata |
+| 2 | Carl | Kimball | Carl Kimball |
+| 3 | Ernie | Narayan | Ernie Narayan |
+| 4 | Lola | McDog | This is not computed |
++----+------------+-----------+----------------------+
+(4 rows)
+~~~
diff --git a/src/current/_includes/v25.1/computed-columns/jsonb.md b/src/current/_includes/v25.1/computed-columns/jsonb.md
new file mode 100644
index 00000000000..3851d463245
--- /dev/null
+++ b/src/current/_includes/v25.1/computed-columns/jsonb.md
@@ -0,0 +1,70 @@
+In this example, create a table with a `JSONB` column and a stored computed column:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> CREATE TABLE student_profiles (
+ id STRING PRIMARY KEY AS (profile->>'id') STORED,
+ profile JSONB
+);
+~~~
+
+Create a computed column after you create the table:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> ALTER TABLE student_profiles ADD COLUMN age INT AS ( (profile->>'age')::INT) STORED;
+~~~
+
+Then, insert a few rows of data:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> INSERT INTO student_profiles (profile) VALUES
+ ('{"id": "d78236", "name": "Arthur Read", "age": "16", "school": "PVPHS", "credits": 120, "sports": "none"}'),
+ ('{"name": "Buster Bunny", "age": "15", "id": "f98112", "school": "THS", "credits": 67, "clubs": "MUN"}'),
+ ('{"name": "Ernie Narayan", "school" : "Brooklyn Tech", "id": "t63512", "sports": "Track and Field", "clubs": "Chess"}');
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SELECT * FROM student_profiles;
+~~~
+~~~
++--------+---------------------------------------------------------------------------------------------------------------------+------+
+| id | profile | age |
+---------+---------------------------------------------------------------------------------------------------------------------+------+
+| d78236 | {"age": "16", "credits": 120, "id": "d78236", "name": "Arthur Read", "school": "PVPHS", "sports": "none"} | 16 |
+| f98112 | {"age": "15", "clubs": "MUN", "credits": 67, "id": "f98112", "name": "Buster Bunny", "school": "THS"} | 15 |
+| t63512 | {"clubs": "Chess", "id": "t63512", "name": "Ernie Narayan", "school": "Brooklyn Tech", "sports": "Track and Field"} | NULL |
++--------+---------------------------------------------------------------------------------------------------------------------+------|
+~~~
+
+The primary key `id` is computed as a field from the `profile` column. Additionally, the `age` column is computed from the `profile` column's data.
+
+This example shows how to add a stored computed column with a [coerced type]({% link {{ page.version.version }}/scalar-expressions.md %}#explicit-type-coercions):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE TABLE json_data (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ json_info JSONB
+);
+INSERT INTO json_data (json_info) VALUES ('{"amount": "123.45"}');
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+ALTER TABLE json_data ADD COLUMN amount DECIMAL AS ((json_info->>'amount')::DECIMAL) STORED;
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SELECT * FROM json_data;
+~~~
+
+~~~
+ id | json_info | amount
+---------------------------------------+----------------------+---------
+ e7c3d706-1367-4d77-bfb4-386dfdeb10f9 | {"amount": "123.45"} | 123.45
+(1 row)
+~~~
diff --git a/src/current/_includes/v25.1/computed-columns/secondary-index.md b/src/current/_includes/v25.1/computed-columns/secondary-index.md
new file mode 100644
index 00000000000..8b78325e695
--- /dev/null
+++ b/src/current/_includes/v25.1/computed-columns/secondary-index.md
@@ -0,0 +1,63 @@
+In this example, create a table with a virtual computed column and an index on that column:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> CREATE TABLE gymnastics (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ athlete STRING,
+ vault DECIMAL,
+ bars DECIMAL,
+ beam DECIMAL,
+ floor DECIMAL,
+ combined_score DECIMAL AS (vault + bars + beam + floor) VIRTUAL,
+ INDEX total (combined_score DESC)
+ );
+~~~
+
+Then, insert a few rows of data:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> INSERT INTO gymnastics (athlete, vault, bars, beam, floor) VALUES
+ ('Simone Biles', 15.933, 14.800, 15.300, 15.800),
+ ('Gabby Douglas', 0, 15.766, 0, 0),
+ ('Laurie Hernandez', 15.100, 0, 15.233, 14.833),
+ ('Madison Kocian', 0, 15.933, 0, 0),
+ ('Aly Raisman', 15.833, 0, 15.000, 15.366);
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SELECT * FROM gymnastics;
+~~~
+~~~
++--------------------------------------+------------------+--------+--------+--------+--------+----------------+
+| id | athlete | vault | bars | beam | floor | combined_score |
++--------------------------------------+------------------+--------+--------+--------+--------+----------------+
+| 3fe11371-6a6a-49de-bbef-a8dd16560fac | Aly Raisman | 15.833 | 0 | 15.000 | 15.366 | 46.199 |
+| 56055a70-b4c7-4522-909b-8f3674b705e5 | Madison Kocian | 0 | 15.933 | 0 | 0 | 15.933 |
+| 69f73fd1-da34-48bf-aff8-71296ce4c2c7 | Gabby Douglas | 0 | 15.766 | 0 | 0 | 15.766 |
+| 8a7b730b-668d-4845-8d25-48bda25114d6 | Laurie Hernandez | 15.100 | 0 | 15.233 | 14.833 | 45.166 |
+| b2c5ca80-21c2-4853-9178-b96ce220ea4d | Simone Biles | 15.933 | 14.800 | 15.300 | 15.800 | 61.833 |
++--------------------------------------+------------------+--------+--------+--------+--------+----------------+
+~~~
+
+Now, run a query using the secondary index:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SELECT athlete, combined_score FROM gymnastics ORDER BY combined_score DESC;
+~~~
+~~~
++------------------+----------------+
+| athlete | combined_score |
++------------------+----------------+
+| Simone Biles | 61.833 |
+| Aly Raisman | 46.199 |
+| Laurie Hernandez | 45.166 |
+| Madison Kocian | 15.933 |
+| Gabby Douglas | 15.766 |
++------------------+----------------+
+~~~
+
+The athlete with the highest combined score of 61.833 is Simone Biles.
diff --git a/src/current/_includes/v25.1/computed-columns/simple.md b/src/current/_includes/v25.1/computed-columns/simple.md
new file mode 100644
index 00000000000..3538a44f0d1
--- /dev/null
+++ b/src/current/_includes/v25.1/computed-columns/simple.md
@@ -0,0 +1,40 @@
+In this example, let's create a simple table with a computed column:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> CREATE TABLE users (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ city STRING,
+ first_name STRING,
+ last_name STRING,
+ full_name STRING AS (CONCAT(first_name, ' ', last_name)) STORED,
+ address STRING,
+ credit_card STRING,
+ dl STRING UNIQUE CHECK (LENGTH(dl) < 8)
+);
+~~~
+
+Then, insert a few rows of data:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> INSERT INTO users (first_name, last_name) VALUES
+ ('Lola', 'McDog'),
+ ('Carl', 'Kimball'),
+ ('Ernie', 'Narayan');
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SELECT * FROM users;
+~~~
+~~~
+ id | city | first_name | last_name | full_name | address | credit_card | dl
++--------------------------------------+------+------------+-----------+---------------+---------+-------------+------+
+ 5740da29-cc0c-47af-921c-b275d21d4c76 | NULL | Ernie | Narayan | Ernie Narayan | NULL | NULL | NULL
+ e7e0b748-9194-4d71-9343-cd65218848f0 | NULL | Lola | McDog | Lola McDog | NULL | NULL | NULL
+ f00e4715-8ca7-4d5a-8de5-ef1d5d8092f3 | NULL | Carl | Kimball | Carl Kimball | NULL | NULL | NULL
+(3 rows)
+~~~
+
+The `full_name` column is computed from the `first_name` and `last_name` columns without the need to define a [view]({% link {{ page.version.version }}/views.md %}).
diff --git a/src/current/_includes/v25.1/computed-columns/virtual.md b/src/current/_includes/v25.1/computed-columns/virtual.md
new file mode 100644
index 00000000000..4c6718d7552
--- /dev/null
+++ b/src/current/_includes/v25.1/computed-columns/virtual.md
@@ -0,0 +1,41 @@
+In this example, create a table with a `JSONB` column and virtual computed columns:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> CREATE TABLE student_profiles (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ profile JSONB,
+ full_name STRING AS (concat_ws(' ',profile->>'firstName', profile->>'lastName')) VIRTUAL,
+ birthday TIMESTAMP AS (parse_timestamp(profile->>'birthdate')) VIRTUAL
+);
+~~~
+
+Then, insert a few rows of data:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> INSERT INTO student_profiles (profile) VALUES
+ ('{"id": "d78236", "firstName": "Arthur", "lastName": "Read", "birthdate": "2010-01-25", "school": "PVPHS", "credits": 120, "sports": "none"}'),
+ ('{"firstName": "Buster", "lastName": "Bunny", "birthdate": "2011-11-07", "id": "f98112", "school": "THS", "credits": 67, "clubs": "MUN"}'),
+ ('{"firstName": "Ernie", "lastName": "Narayan", "school" : "Brooklyn Tech", "id": "t63512", "sports": "Track and Field", "clubs": "Chess"}');
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SELECT * FROM student_profiles;
+~~~
+~~~
+ id | profile | full_name | birthday
+---------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------+---------------+----------------------
+ 0e420282-105d-473b-83e2-3b082e7033e4 | {"birthdate": "2011-11-07", "clubs": "MUN", "credits": 67, "firstName": "Buster", "id": "f98112", "lastName": "Bunny", "school": "THS"} | Buster Bunny | 2011-11-07 00:00:00
+ 6e9b77cd-ec67-41ae-b346-7b3d89902c72 | {"birthdate": "2010-01-25", "credits": 120, "firstName": "Arthur", "id": "d78236", "lastName": "Read", "school": "PVPHS", "sports": "none"} | Arthur Read | 2010-01-25 00:00:00
+ f74b21e3-dc1e-49b7-a648-3c9b9024a70f | {"clubs": "Chess", "firstName": "Ernie", "id": "t63512", "lastName": "Narayan", "school": "Brooklyn Tech", "sports": "Track and Field"} | Ernie Narayan | NULL
+(3 rows)
+
+
+Time: 2ms total (execution 2ms / network 0ms)
+~~~
+
+The virtual column `full_name` is computed as a field from the `profile` column's data. The first name and last name are concatenated and separated by a single whitespace character using the [`concat_ws` string function]({% link {{ page.version.version }}/functions-and-operators.md %}#string-and-byte-functions).
+
+The virtual column `birthday` is parsed as a `TIMESTAMP` value from the `profile` column's `birthdate` string value. The [`parse_timestamp` function]({% link {{ page.version.version }}/functions-and-operators.md %}) is used to parse strings in `TIMESTAMP` format.
diff --git a/src/current/_includes/v25.1/connect/cockroach-workload-parameters.md b/src/current/_includes/v25.1/connect/cockroach-workload-parameters.md
new file mode 100644
index 00000000000..68e11059b9e
--- /dev/null
+++ b/src/current/_includes/v25.1/connect/cockroach-workload-parameters.md
@@ -0,0 +1 @@
+Unlike other [`cockroach` commands]({% link {{ page.version.version }}/cockroach-commands.md %}), the `cockroach workload` command does not support connection or security flags. Instead, you must specify a [connection string]({% link {{ page.version.version }}/connection-parameters.md %}) at the end of the command.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/connect/connection-url.md b/src/current/_includes/v25.1/connect/connection-url.md
new file mode 100644
index 00000000000..ae994bb3047
--- /dev/null
+++ b/src/current/_includes/v25.1/connect/connection-url.md
@@ -0,0 +1,19 @@
+
+Set a `DATABASE_URL` environment variable to your connection string.
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+export DATABASE_URL="{connection string}"
+~~~
+
+
+
+
+Set a `DATABASE_URL` environment variable to your connection string.
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$env:DATABASE_URL = "{connection string}"
+~~~
+
+
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/connect/core-note.md b/src/current/_includes/v25.1/connect/core-note.md
new file mode 100644
index 00000000000..7b701cafb80
--- /dev/null
+++ b/src/current/_includes/v25.1/connect/core-note.md
@@ -0,0 +1,7 @@
+{{site.data.alerts.callout_info}}
+The connection information shown on this page uses [client certificate and key authentication]({% link {{ page.version.version }}/authentication.md %}#client-authentication) to connect to a secure, CockroachDB {{ site.data.products.core }} cluster.
+
+To connect to a CockroachDB {{ site.data.products.core }} cluster with client certificate and key authentication, you must first [generate server and client certificates]({% link {{ page.version.version }}/authentication.md %}#using-digital-certificates-with-cockroachdb).
+
+For instructions on starting a secure cluster, see [Start a Local Cluster (Secure)]({% link {{ page.version.version }}/secure-a-cluster.md %}).
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/connect/jdbc-connection-url.md b/src/current/_includes/v25.1/connect/jdbc-connection-url.md
new file mode 100644
index 00000000000..c055a390b4e
--- /dev/null
+++ b/src/current/_includes/v25.1/connect/jdbc-connection-url.md
@@ -0,0 +1,19 @@
+Set a `JDBC_DATABASE_URL` environment variable to your JDBC connection string.
+
+
diff --git a/src/current/_includes/v25.1/dedicated-pci-compliance.md b/src/current/_includes/v25.1/dedicated-pci-compliance.md
new file mode 100644
index 00000000000..59dd7790daf
--- /dev/null
+++ b/src/current/_includes/v25.1/dedicated-pci-compliance.md
@@ -0,0 +1,7 @@
+{{site.data.alerts.callout_info}}
+CockroachDB {{ site.data.products.dedicated }} clusters comply with the Payment Card Industry Data Security Standard (PCI DSS). Compliance is certified by a PCI Qualified Security Assessor (QSA).
+
+To achieve compliance with PCI DSS on a CockroachDB {{ site.data.products.dedicated }} cluster, you must enable all required features in your CockroachDB {{ site.data.products.cloud }} organization and your cluster, and you must take additional steps to ensure that your organization's applications and procedures comply with PCI DSS. For details, refer to [PCI DSS Compliance in CockroachDB {{ site.data.products.dedicated }} advanced](https://cockroachlabs.com/docs/cockroachcloud/pci-dss.html).
+
+To learn more about achieving PCI DSS compliance with CockroachDB {{ site.data.products.dedicated }}, contact your Cockroach Labs account team.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/demo_movr.md b/src/current/_includes/v25.1/demo_movr.md
new file mode 100644
index 00000000000..5a8a431193a
--- /dev/null
+++ b/src/current/_includes/v25.1/demo_movr.md
@@ -0,0 +1,6 @@
+Start the [MovR database]({% link {{ page.version.version }}/movr.md %}) on a 3-node CockroachDB demo cluster with a larger data set.
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+cockroach demo movr --num-histories 250000 --num-promo-codes 250000 --num-rides 125000 --num-users 12500 --num-vehicles 3750 --nodes 3
+~~~
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/essential-alerts.md b/src/current/_includes/v25.1/essential-alerts.md
new file mode 100644
index 00000000000..f71d81dd2f5
--- /dev/null
+++ b/src/current/_includes/v25.1/essential-alerts.md
@@ -0,0 +1,533 @@
+{% if include.deployment == 'self-hosted' %}
+## Platform
+
+### High CPU
+
+A node with high CPU utilization, an *overloaded* node, has a limited ability to process the user workload and increases the risk of cluster instability.
+
+**Metric**
+ [`sys.cpu.combined.percent-normalized`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sys-cpu-combined-percent-normalized)
+ [`sys.cpu.host.combined.percent-normalized`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sys-cpu-host-combined-percent-normalized)
+
+**Rule**
+ Set alerts for each node for each of the listed metrics:
+ WARNING: Metric greater than `0.80` for `4 hours`
+ CRITICAL: Metric greater than `0.90` for `1 hour`
+
+**Action**
+
+- Refer to [CPU Usage]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#cpu-usage) and [Workload Concurrency]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#workload-concurrency).
+
+- In the DB Console, navigate to **Metrics**, [**Hardware** dashboard]({% link {{ page.version.version }}/ui-hardware-dashboard.md %}) for the cluster and check for high values on the [**CPU Percent** graph]({% link {{ page.version.version }}/ui-hardware-dashboard.md %}#cpu-percent) and the [**Host CPU Percent** graph]({% link {{ page.version.version }}/ui-hardware-dashboard.md %}#host-cpu-percent).
+
+- In the DB Console, navigate to **Metrics**, [**SQL** dashboard]({% link {{ page.version.version }}/ui-sql-dashboard.md %}) for the cluster and check for high values on the [**Active SQL Statements** graph]({% link {{ page.version.version }}/ui-sql-dashboard.md %}#active-sql-statements). This graph shows the true concurrency of the workload, which may exceed the cluster capacity planning guidance of no more than 4 active statements per vCPU or core.
+
+- A persistently high CPU utilization of all nodes in a CockroachDB cluster suggests the current compute resources may be insufficient to support the user workload's concurrency requirements. If confirmed, the number of processors (vCPUs or cores) in the CockroachDB cluster needs to be adjusted to sustain the required level of workload concurrency. For a prompt resolution, either add cluster nodes or throttle the workload concurrency, for example, by reducing the number of concurrent connections to not exceed 4 active statements per vCPU or core.
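+
+As a quick cross-check of workload concurrency outside the DB Console, a minimal query sketch such as the following can approximate the number of active statements; it assumes SQL access to the cluster and uses the `crdb_internal.cluster_queries` virtual table:
+
+{% include_cached copy-clipboard.html %}
+```sql
+-- Approximate the cluster-wide number of currently active statements.
+-- Compare the result against the guidance of no more than
+-- 4 active statements per vCPU or core.
+SELECT count(*) AS active_statements
+FROM crdb_internal.cluster_queries;
+```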
+
+### Hot node (hot spot)
+
+Unbalanced utilization of CockroachDB nodes in a cluster may negatively affect the cluster's performance and stability, with some nodes getting overloaded while others remain relatively underutilized.
+
+**Metric**
+ [`sys.cpu.combined.percent-normalized`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sys-cpu-combined-percent-normalized)
+ [`sys.cpu.host.combined.percent-normalized`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sys-cpu-host-combined-percent-normalized)
+
+**Rule**
+ Set alerts for each of the listed metrics:
+ WARNING: The max CPU utilization across all nodes exceeds the cluster's median CPU utilization by `30` for `2 hours`
+
+**Action**
+
+- Refer to [Hot spots]({% link {{ page.version.version }}/performance-recipes.md %}#hot-spots).
+
+### Node memory utilization
+
+One node with high memory utilization is a cluster stability risk. High memory utilization is a prelude to a node's [out-of-memory (OOM) crash]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#out-of-memory-oom-crash), in which the OS terminates the process when the system is critically low on memory. An OOM condition is not expected to occur if a CockroachDB cluster is provisioned and sized per [Cockroach Labs guidance]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#memory-planning).
+
+**Metric**
+ [`sys.rss`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sys-rss)
+
+**Rule**
+ Set alerts for each node:
+ WARNING: `sys.rss` greater than `0.80` for `4 hours`
+ CRITICAL: `sys.rss` greater than `0.90` for `1 hour`
+
+**Action**
+
+- Provision all CockroachDB VMs or machines with [sufficient RAM]({% link {{ page.version.version }}/recommended-production-settings.md %}#memory).
+
+### Node storage performance
+
+Under-configured or under-provisioned disk storage is a common root cause of inconsistent CockroachDB cluster performance and could also lead to cluster instability. Refer to [Disk IOPS]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#disk-iops).
+
+**Metric**
+ [`sys.host.disk.iopsinprogress`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sys-host-disk-iopsinprogress)
+
+**Rule**
+ WARNING: `sys.host.disk.iopsinprogress` greater than `10` for `10 seconds`
+ CRITICAL: `sys.host.disk.iopsinprogress` greater than `20` for `10 seconds`
+
+**Action**
+
+- Provision enough storage capacity for CockroachDB data, and configure your volumes to maximize disk I/O. Refer to [Storage and disk I/O]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#storage-and-disk-i-o).
+
+### Version mismatch
+
+All CockroachDB cluster nodes should run the exact same executable, with an identical build label. This warning guards against an operational error in which some nodes were not upgraded.
+
+**Metric**
+ `build.timestamp`
+
+**Rule**
+ Set alerts for each node:
+ WARNING: `build.timestamp` not the same across cluster nodes for more than `4 hours`
+
+**Action**
+
+- Ensure all cluster nodes are running exactly the same CockroachDB version, including the patch release version number.
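+
+To compare the build running on each node from SQL, a minimal sketch such as the following can be used; it assumes the `build_tag` column of the `crdb_internal.gossip_nodes` virtual table:
+
+{% include_cached copy-clipboard.html %}
+```sql
+-- Every row should report the same build tag after an upgrade completes.
+SELECT node_id, build_tag
+FROM crdb_internal.gossip_nodes
+ORDER BY node_id;
+```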
+
+### High open file descriptor count
+
+Send an alert when a cluster is getting close to the open file descriptor limit.
+
+**Metric**
+ `sys.fd.open`
+ `sys.fd.softlimit`
+
+**Rule**
+ Set alerts for each node:
+ WARNING: `sys_fd_open` / `sys_fd_softlimit` greater than `0.8` for `10 minutes`
+
+**Action**
+
+- Refer to [File descriptors limit]({% link {{ page.version.version }}/recommended-production-settings.md %}#file-descriptors-limit).
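+
+To spot-check current file descriptor usage against the soft limit from a SQL session, a minimal sketch such as the following can be used; it assumes the metric names appear in their dotted form in the `crdb_internal.node_metrics` virtual table:
+
+{% include_cached copy-clipboard.html %}
+```sql
+-- Compare open file descriptors to the soft limit on the gateway node.
+SELECT name, value
+FROM crdb_internal.node_metrics
+WHERE name IN ('sys.fd.open', 'sys.fd.softlimit');
+```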
+{% endif %}
+
+## Storage
+
+### Node storage capacity
+
+A CockroachDB node will not be able to operate if there is no free disk space on a CockroachDB [store]({% link {{ page.version.version }}/cockroach-start.md %}#store) volume.
+
+**Metric**
+ [`capacity`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#capacity)
+ [`capacity.available`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#capacity-available)
+
+**Rule**
+ Set alerts for each node:
+ WARNING: `capacity.available`/`capacity` is less than `0.30` for `24 hours`
+ CRITICAL: `capacity.available`/`capacity` is less than `0.10` for `1 hour`
+
+**Action**
+
+- Refer to [Storage Capacity]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#storage-capacity).
+- Increase the size of CockroachDB node storage capacity. CockroachDB storage volumes should not be utilized more than 60% (40% free space).
+- In a "disk full" situation, you may be able to get a node "unstuck" by removing the [automatically created emergency ballast file]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#automatic-ballast-files).
+
+{% if include.deployment == 'self-hosted' %}
+### Write stalls
+
+A high `write-stalls` value means CockroachDB is unable to write to disk within an acceptable time. The node is experiencing a disk latency issue and is not responding to writes.
+
+**Metric**
+ [`storage.write-stalls`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#storage-write-stalls)
+
+**Rule**
+ Set alerts for each node:
+ WARNING: `storage.write-stalls` is greater than or equal to `1` per minute
+ CRITICAL: `storage.write-stalls` is greater than or equal to `1` per second
+
+**Action**
+
+- Refer to [Disk stalls]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#disk-stalls).
+{% endif %}
+
+{% if include.deployment == 'self-hosted' %}
+## Health
+
+### Node restarting too frequently
+
+Send an alert if a node has restarted more than once in the last 10 minutes. Calculate this using the number of times the `sys.uptime` metric was reset back to zero.
+
+**Metric**
+ [`sys.uptime`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sys-uptime)
+
+**Rule**
+ Set alerts for each node:
+ WARNING: `sys.uptime` resets greater than `1` in the last `10 minutes`
+
+**Action**
+
+- Refer to [Node process restarts]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#node-process-restarts).
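+
+To confirm when each node process last started, a minimal sketch such as the following can be used; it assumes the `started_at` and `is_live` columns of the `crdb_internal.gossip_nodes` virtual table:
+
+{% include_cached copy-clipboard.html %}
+```sql
+-- A very recent started_at value indicates a recent restart.
+SELECT node_id, started_at, is_live
+FROM crdb_internal.gossip_nodes
+ORDER BY started_at DESC;
+```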
+
+### Node LSM storage health
+
+CockroachDB uses the [Pebble]({% link {{ page.version.version }}/architecture/storage-layer.md %}#pebble) storage engine that uses a [Log-structured Merge-tree (LSM tree)]({% link {{ page.version.version }}/architecture/storage-layer.md %}#log-structured-merge-trees) to manage data storage. The health of an LSM tree can be measured by the [*read amplification*]({% link {{ page.version.version }}/architecture/storage-layer.md %}#inverted-lsms), which is the average number of [SST files]({% link {{ page.version.version }}/architecture/storage-layer.md %}#log-structured-merge-trees) being checked per read operation. A value in the single digits is characteristic of a healthy LSM tree. A value in the double, triple, or quadruple digits suggests an [inverted LSM]({% link {{ page.version.version }}/architecture/storage-layer.md %}#inverted-lsms). A node reporting a high read amplification is an indication of a problem on that node that is likely to affect the workload.
+
+**Metric**
+ `rocksdb.read-amplification`
+
+**Rule**
+ Set alerts for each node:
+ WARNING: `rocksdb.read-amplification` greater than `50` for `1 hour`
+ CRITICAL: `rocksdb.read-amplification` greater than `150` for `15 minutes`
+
+**Action**
+
+- Refer to [LSM Health]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#lsm-health).
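+
+To spot-check read amplification from a SQL session, a minimal sketch such as the following can be used; it assumes the metric is exposed under the name `rocksdb.read-amplification` in the `crdb_internal.node_metrics` virtual table:
+
+{% include_cached copy-clipboard.html %}
+```sql
+-- Current read amplification reported by this node's stores;
+-- single-digit values indicate a healthy LSM tree.
+SELECT store_id, value AS read_amplification
+FROM crdb_internal.node_metrics
+WHERE name = 'rocksdb.read-amplification';
+```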
+
+## Expiration of license and certificates
+
+### Enterprise license expiration
+
+Avoid [license]({% link {{ page.version.version }}/licensing-faqs.md %}#types-of-licenses) expiration to prevent any disruption to feature access.
+
+**Metric**
+ [`seconds.until.enterprise.license.expiry`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#seconds-until-enterprise-license-expiry)
+
+**Rule**
+ WARNING: `seconds.until.enterprise.license.expiry` is greater than `0` and less than `1814400` seconds (3 weeks)
+ CRITICAL: `seconds.until.enterprise.license.expiry` is greater than `0` and less than `259200` seconds (3 days)
+
+**Action**
+
+[Renew the enterprise license]({% link {{ page.version.version }}/licensing-faqs.md %}#renew-an-expired-license).
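+
+To verify and renew the license from SQL, a minimal sketch such as the following can be used; `{license key}` is a placeholder for the key issued by Cockroach Labs:
+
+{% include_cached copy-clipboard.html %}
+```sql
+-- Check the currently configured license.
+SHOW CLUSTER SETTING enterprise.license;
+
+-- Apply a renewed license key ({license key} is a placeholder).
+SET CLUSTER SETTING enterprise.license = '{license key}';
+```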
+
+### Security certificate expiration
+
+Avoid [security certificate]({% link {{ page.version.version }}/cockroach-cert.md %}) expiration.
+
+**Metric**
+ [`security.certificate.expiration.ca`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#security-certificate-expiration-ca)
+ [`security.certificate.expiration.client-ca`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#security-certificate-expiration-client-ca)
+ [`security.certificate.expiration.ui`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#security-certificate-expiration-ui)
+ [`security.certificate.expiration.ui-ca`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#security-certificate-expiration-ui-ca)
+ [`security.certificate.expiration.node`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#security-certificate-expiration-node)
+ [`security.certificate.expiration.node-client`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#security-certificate-expiration-node-client)
+
+**Rule**
+ Set alerts for each of the listed metrics:
+ WARNING: Metric is greater than `0` and less than `1814400` seconds (3 weeks) until certificate expiration
+ CRITICAL: Metric is greater than `0` and less than `259200` seconds (3 days) until certificate expiration
+
+**Action**
+
+[Rotate the expiring certificates]({% link {{ page.version.version }}/rotate-certificates.md %}).
+{% endif %}
+
+{% if include.deployment == 'self-hosted' %}
+## KV distributed
+
+{{site.data.alerts.callout_info}}
+During [rolling maintenance]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}) or planned cluster resizing, the nodes' state and count will be changing. **Mute KV distributed alerts described in the following sections during routine maintenance procedures** to avoid unnecessary distractions.
+{{site.data.alerts.end}}
+
+### Heartbeat latency
+
+Monitor node liveness heartbeat latency for early signs of cluster instability. A heartbeat latency exceeding 1 second is a sign of instability.
+
+**Metric**
+ [`liveness.heartbeatlatency`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#liveness-heartbeatlatency)
+
+**Rule**
+ WARNING: `liveness.heartbeatlatency` greater than `0.5s`
+ CRITICAL: `liveness.heartbeatlatency` greater than `3s`
+
+**Action**
+
+- Refer to [Node liveness issues]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#node-liveness-issues).
+
+### Live node count change
+
+Send an alert when the number of live nodes reported by a node is inconsistent with the rest of the cluster. The `liveness.livenodes` metric reports the number of live nodes in the cluster (0 if the reporting node is not itself live) and is a critical metric for tracking cluster membership.
+
+**Metric**
+ [`liveness.livenodes`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#liveness-livenodes)
+
+**Rule**
+ Set alerts for each node:
+ WARNING: max(`liveness.livenodes`) for the cluster - min(`liveness.livenodes`) for node > `0` for `2 minutes`
+ CRITICAL: max(`liveness.livenodes`) for the cluster - min(`liveness.livenodes`) for node > `0` for `5 minutes`
+
+**Action**
+
+- Refer to [Node liveness issues]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#node-liveness-issues).
+
+### Intent buildup
+
+Send an alert when very large transactions are [locking]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#write-intents) millions of keys (rows). A common example is a transaction with a [`DELETE`]({% link {{ page.version.version }}/delete.md %}) that affects a large number of rows. Transactions with an excessively large scope are often inadvertent, perhaps due to a non-selective filter and a specific data distribution that was not anticipated by an application developer.
+
+Transactions that create a large number of [write intents]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#write-intents) could have a negative effect on the workload's performance. These transactions may create locking contention, thus limiting concurrency. This would reduce throughput, and in extreme cases, lead to stalled workloads.
+
+**Metric**
+ `intentcount`
+
+**Rule**
+ WARNING: `intentcount` greater than `10,000,000` for `2 minutes`
+ CRITICAL: `intentcount` greater than `10,000,000` for `5 minutes`
+ For tighter transaction scope scrutiny, lower the `intentcount` threshold that triggers an alert.
+
+**Action**
+
+- Identify the large-scope transactions that acquire many locks. Consider reducing the scope of large transactions by implementing them as several smaller transactions. For example, if the alert is triggered by a large-scope `DELETE`, consider "paging" `DELETE`s that target thousands of records instead of millions, as shown in the sketch after this list. This is often the most effective resolution; however, it generally requires an application-level [refactoring]({% link {{ page.version.version }}/bulk-update-data.md %}).
+- After reviewing the workload, you may conclude that the potential performance impact of letting transactions acquire a large number of intents is not a concern. For example, a large delete of obsolete, unused data may have no concurrency implications, and the elapsed time to execute that transaction may be acceptable. In that case, taking no action can be a valid way to handle this alert.
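+
+The following is a minimal sketch of a "paged" delete; the `events` table and `created_at` filter are hypothetical placeholders, and the statement is repeated until it affects `0` rows:
+
+{% include_cached copy-clipboard.html %}
+```sql
+-- Delete in batches instead of one large-scope DELETE that would
+-- acquire millions of write intents. The table name and filter are
+-- hypothetical; adjust both for your schema. Repeat until 0 rows are deleted.
+DELETE FROM events
+WHERE created_at < '2024-01-01'
+LIMIT 10000;
+```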
+{% endif %}
+
+{% if include.deployment == 'self-hosted' %}
+## KV replication
+
+### Unavailable ranges
+
+Send an alert when the number of ranges with fewer live replicas than needed for quorum is non-zero for too long.
+
+**Metric**
+ [`ranges.unavailable`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#ranges-unavailable)
+
+**Rule**
+ WARNING: `ranges.unavailable` greater than `0` for `10 minutes`
+
+**Action**
+
+- Refer to [Replication issues]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#replication-issues).
+
+### Tripped replica circuit breakers
+
+Send an alert when a replica stops serving traffic due to other replicas being offline for too long.
+
+**Metric**
+ `kv.replica_circuit_breaker.num_tripped_replicas`
+
+**Rule**
+ WARNING: `kv.replica_circuit_breaker.num_tripped_replicas` greater than `0` for `10 minutes`
+
+**Action**
+
+- Refer to [Per-replica circuit breakers]({% link {{ page.version.version }}/architecture/replication-layer.md %}#per-replica-circuit-breakers) and [Replication issues]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#replication-issues).
+
+### Under-replicated ranges
+
+Send an alert when the number of ranges with replication below the replication factor is non-zero for too long.
+
+**Metric**
+ [`ranges.underreplicated`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#ranges-underreplicated)
+
+**Rule**
+ WARNING: `ranges.underreplicated` greater than `0` for `1 hour`
+
+**Action**
+
+- Refer to [Replication issues]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#replication-issues).
+
+### Requests stuck in raft
+
+Send an alert when requests are taking a very long time in replication. An (evaluated) request has to pass through the replication layer, notably the quota pool and raft. If it fails to do so within a highly permissive duration, the gauge is incremented (and decremented again once the request is either applied or returns an error). A nonzero value indicates range or replica unavailability, and should be investigated.
+
+**Metric**
+ `requests.slow.raft`
+
+**Rule**
+ WARNING: `requests.slow.raft` greater than `0` for `10 minutes`
+
+**Action**
+
+- Refer to [Raft]({% link {{ page.version.version }}/architecture/replication-layer.md %}#raft) and [Replication issues]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#replication-issues).
+{% endif %}
+
+## SQL
+
+### Node not executing SQL
+
+Send an alert when a node is not executing SQL despite having connections. `sql.conns` shows the number of connections as well as the distribution, or balancing, of connections across cluster nodes. An imbalance can lead to nodes becoming overloaded.
+
+**Metric**
+ [`sql.conns`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sql-conns)
+ `sql.query.count`
+
+**Rule**
+ Set alerts for each node:
+ WARNING: `sql.conns` greater than `0` while `sql.query.count` equals `0`
+
+**Action**
+
+- Refer to [Connection Pooling]({% link {{ page.version.version }}/connection-pooling.md %}).
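+
+To check how evenly connections are balanced across nodes, a minimal sketch such as the following can be used; it assumes the `node_id` column of the `crdb_internal.cluster_sessions` virtual table:
+
+{% include_cached copy-clipboard.html %}
+```sql
+-- Count open SQL sessions per gateway node; a heavy skew toward a few
+-- nodes suggests imbalanced connection pooling.
+SELECT node_id, count(*) AS open_sessions
+FROM crdb_internal.cluster_sessions
+GROUP BY node_id
+ORDER BY open_sessions DESC;
+```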
+
+### SQL query failure
+
+Send an alert when the query failure count exceeds a user-determined threshold based on their application's SLA.
+
+**Metric**
+ [`sql.failure.count`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sql-failure-count)
+
+**Rule**
+ WARNING: `sql.failure.count` is greater than a threshold (based on the user’s application SLA)
+
+**Action**
+
+- Use the [**Insights** page]({% link {{ page.version.version }}/ui-insights-page.md %}) to find failed executions with their error code to troubleshoot or use application-level logs, if instrumented, to determine the cause of error.
+
+### SQL queries experiencing high latency
+
+Send an alert when the query latency exceeds a user-determined threshold based on their application’s SLA.
+
+**Metric**
+ [`sql.service.latency`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sql-service-latency)
+ [`sql.conn.latency`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#sql-conn-latency)
+
+**Rule**
+ WARNING: (p99 or p90 of `sql.service.latency` plus average of `sql.conn.latency`) is greater than a threshold (based on the user’s application SLA)
+
+**Action**
+
+- Apply the time range of the alert to the [**SQL Activity** pages]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#sql-activity-pages) to investigate. Use the [**Statements** page]({% link {{ page.version.version }}/ui-statements-page.md %}) P90 Latency and P99 latency columns to correlate [statement fingerprints]({% link {{ page.version.version }}/ui-statements-page.md %}#sql-statement-fingerprints) with this alert.
+
+{% if include.deployment == 'self-hosted' %}
+## Backup
+
+### Backup failure
+
+Even though CockroachDB is a distributed product, you still need to ensure that backups complete successfully.
+
+**Metric**
+ [`schedules.BACKUP.failed`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#schedules-BACKUP-failed)
+
+**Rule**
+ Set alerts for each node:
+ WARNING: `schedules.BACKUP.failed` is greater than `0`
+
+**Action**
+
+- Refer to [Backup and Restore Monitoring]({% link {{ page.version.version }}/backup-and-restore-monitoring.md %}).
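+
+To find the failed backup jobs behind the alert, a minimal sketch such as the following can be used:
+
+{% include_cached copy-clipboard.html %}
+```sql
+-- List recently failed backup jobs with their error messages.
+SELECT job_id, status, error, created
+FROM [SHOW JOBS]
+WHERE job_type = 'BACKUP' AND status = 'failed'
+ORDER BY created DESC;
+```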
+{% endif %}
+
+## Changefeeds
+
+{{site.data.alerts.callout_info}}
+During [rolling maintenance]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}), [changefeed jobs]({% link {{ page.version.version }}/change-data-capture-overview.md %}) restart following node restarts. **Mute changefeed alerts described in the following sections during routine maintenance procedures** to avoid unnecessary distractions.
+{{site.data.alerts.end}}
+
+### Changefeed failure
+
+Changefeeds can suffer permanent failures, which the [jobs system]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}) will not try to restart. Any increase in this metric counter should prompt investigative action.
+
+**Metric**
+ [`changefeed.failures`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#changefeed-failures)
+
+**Rule**
+ CRITICAL: If `changefeed.failures` is greater than `0`
+
+**Action**
+
+1. If the alert is triggered during cluster maintenance, mute it. Otherwise, start the investigation with the following query:
+
+ {% include_cached copy-clipboard.html %}
+ ```sql
+ SELECT job_id, status, ((high_water_timestamp/1000000000)::INT::TIMESTAMP) - NOW() AS "changefeed latency", created, LEFT(description, 60), high_water_timestamp FROM crdb_internal.jobs WHERE job_type = 'CHANGEFEED' AND status IN ('running', 'paused', 'pause-requested') ORDER BY created DESC;
+ ```
+
+2. If the cluster is not undergoing maintenance, check the health of [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) endpoints. If the sink is [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka), check for sink connection errors such as `ERROR: connecting to kafka: path.to.cluster:port: kafka: client has run out of available brokers to talk to (Is your cluster reachable?)`.
+
+### Frequent changefeed restarts
+
+Changefeeds automatically restart in case of transient errors. However, too many restarts outside of a routine maintenance procedure may indicate a systemic condition and should be investigated.
+
+**Metric**
+ [`changefeed.error_retries`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#changefeed-error-retries)
+
+**Rule**
+ WARNING: If `changefeed.error_retries` is greater than `50` for more than `15 minutes`
+
+**Action**
+
+- Follow the action for a [changefeed failure](#changefeed-failure).
+
+### Changefeed falling behind
+
+A changefeed has fallen behind, as measured by the end-to-end lag between a change being committed and that change being applied at the destination. This can be due to cluster capacity or changefeed sink availability.
+
+**Metric**
+ [`changefeed.commit_latency`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#changefeed-commit-latency)
+
+**Rule**
+ WARNING: `changefeed.commit_latency` is greater than `10 minutes`
+ CRITICAL: `changefeed.commit_latency` is greater than `15 minutes`
+
+**Action**
+
+1. In the DB Console, navigate to **Metrics**, [**Changefeeds** dashboard]({% link {{ page.version.version }}/ui-cdc-dashboard.md %}) for the cluster and check the maximum values on the [**Commit Latency** graph]({% link {{ page.version.version }}/ui-cdc-dashboard.md %}#commit-latency). Alternatively, individual changefeed latency can be verified by using the following SQL query:
+
+ {% include_cached copy-clipboard.html %}
+ ```sql
+ SELECT job_id, status, ((high_water_timestamp/1000000000)::INT::TIMESTAMP) - NOW() AS "changefeed latency", created, LEFT(description, 60), high_water_timestamp FROM crdb_internal.jobs WHERE job_type = 'CHANGEFEED' AND status IN ('running', 'paused', 'pause-requested') ORDER BY created DESC;
+ ```
+
+2. Copy the `job_id` for the changefeed job with highest `changefeed latency` and pause the job:
+
+ {% include_cached copy-clipboard.html %}
+ ```sql
+ PAUSE JOB 681491311976841286;
+ ```
+
+3. Check the status of the pause request by running the query from step 1. If the job status is `pause-requested`, check again in a few minutes.
+
+4. After the job is `paused`, resume the job.
+
+ {% include_cached copy-clipboard.html %}
+ ```sql
+ RESUME JOB 681491311976841286;
+ ```
+
+5. If the changefeed latency does not progress after these steps due to lack of cluster resources or availability of the changefeed sink, [contact Support](https://support.cockroachlabs.com).
+
+### Changefeed has been paused a long time
+
+Changefeed jobs should not be paused for a long time because [the protected timestamp prevents garbage collection]({% link {{ page.version.version }}/protect-changefeed-data.md %}). This alert guards against the operational error of inadvertently leaving a changefeed paused.
+
+**Metric**
+ [`jobs.changefeed.currently_paused`]({% link {{ page.version.version }}/essential-metrics-{{ include.deployment }}.md %}#changefeed-currently-paused)
+
+**Rule**
+ WARNING: `jobs.changefeed.currently_paused` is greater than `0` for more than `15 minutes`
+ CRITICAL: `jobs.changefeed.currently_paused` is greater than `0` for more than `60 minutes`
+
+**Action**
+
+1. Check the status of each changefeed using the following SQL query:
+
+ {% include_cached copy-clipboard.html %}
+ ```sql
+ SELECT job_id, status, ((high_water_timestamp/1000000000)::INT::TIMESTAMP) - NOW() AS "changefeed latency",created, LEFT(description, 60), high_water_timestamp FROM crdb_internal.jobs WHERE job_type = 'CHANGEFEED' AND status IN ('running', 'paused','pause-requested') ORDER BY created DESC;
+ ```
+
+2. If all the changefeeds have status as `running`, one or more changefeeds may have run into an error and recovered. In the DB Console, navigate to **Metrics**, [**Changefeeds** dashboard]({% link {{ page.version.version }}/ui-cdc-dashboard.md %}) for the cluster and check the [**Changefeed Restarts** graph]({% link {{ page.version.version }}/ui-cdc-dashboard.md %}#changefeed-restarts).
+
+3. Resume paused changefeed(s) with the `job_id` using:
+
+ {% include_cached copy-clipboard.html %}
+ ```sql
+ RESUME JOB 681491311976841286;
+ ```
+
+### Changefeed experiencing high latency
+
+Send an alert when the maximum latency of any running changefeed exceeds a specified threshold, which is less than the [`gc.ttlseconds`]({% link {{ page.version.version }}/configure-replication-zones.md %}#replication-zone-variables) variable set in the cluster. This alert ensures that the changefeed progresses faster than the garbage collection TTL, preventing a changefeed's protected timestamp from delaying garbage collection.
+
+**Metric**
+ [`changefeed.checkpoint_progress`]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#metrics)
+
+**Rule**
+ WARNING: (current time minus `changefeed.checkpoint_progress`) is greater than a threshold (that is less than `gc.ttlseconds` variable)
+
+**Action**
+
+- Refer to [Monitor and Debug Changefeeds]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#recommended-changefeed-metrics-to-track).
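+
+To confirm the `gc.ttlseconds` value that the latency threshold must stay below, a minimal sketch such as the following can be used:
+
+{% include_cached copy-clipboard.html %}
+```sql
+-- List zone configurations; the relevant gc.ttlseconds value appears in
+-- the configuration that applies to the tables watched by the changefeed.
+SHOW ZONE CONFIGURATIONS;
+```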
+
+## See also
+
+- [Events to alert on]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#events-to-alert-on)
+- [Common Issues to Monitor]({% link {{ page.version.version }}/common-issues-to-monitor.md %})
+{% if include.deployment == 'self-hosted' %}
+- [Essential Metrics for CockroachDB Self-Hosted Deployments]({% link {{ page.version.version }}/essential-metrics-self-hosted.md %})
+{% elsif include.deployment == 'advanced' %}
+- [Essential Metrics for CockroachDB Advanced Deployments]({% link {{ page.version.version }}/essential-metrics-advanced.md %})
+{% endif %}
+
diff --git a/src/current/_includes/v25.1/essential-metrics.md b/src/current/_includes/v25.1/essential-metrics.md
new file mode 100644
index 00000000000..41039d31a75
--- /dev/null
+++ b/src/current/_includes/v25.1/essential-metrics.md
@@ -0,0 +1,197 @@
+These essential CockroachDB metrics enable you to build custom dashboards with the following tools:
+{% if include.deployment == 'self-hosted' %}
+* [Grafana]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}#step-5-visualize-metrics-in-grafana)
+* [Datadog Integration]({% link {{ page.version.version }}/datadog.md %}) - The [**Datadog Integration Metric Name**](https://docs.datadoghq.com/integrations/cockroachdb/?tab=host#metrics) column lists the corresponding Datadog metric which requires the `cockroachdb.` prefix.
+{% elsif include.deployment == 'advanced' %}
+* [Datadog integration]({% link cockroachcloud/tools-page.md %}#monitor-cockroachdb-cloud-with-datadog) - The [**Datadog Integration Metric Name**](https://docs.datadoghq.com/integrations/cockroachdb_dedicated/#metrics) column lists the corresponding Datadog metric which requires the `crdb_dedicated.` prefix.
+* [Metrics export]({% link cockroachcloud/export-metrics-advanced.md %})
+{% endif %}
+
+The **Usage** column explains why each metric is important to visualize in a custom dashboard and how to make both practical and actionable use of the metric in a production deployment.
+
+## Platform
+
+| CockroachDB Metric Name | Datadog Integration Metric Name | Description | Usage |
+| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ |
+| sys.cpu.combined.percent-normalized | sys.cpu.combined.percent.normalized | Current user+system CPU percentage consumed by the CRDB process, normalized by number of cores | This metric gives the CPU utilization percentage by the CockroachDB process. If it is equal to 1 (or 100%), then the CPU is overloaded. The CockroachDB process should not be running with over 80% utilization for extended periods of time (hours). This metric is used in the DB Console [**CPU Percent** graph]({% link {{ page.version.version }}/ui-hardware-dashboard.md %}#cpu-percent). |
+| sys.cpu.host.combined.percent-normalized | NOT AVAILABLE | Current user+system CPU percentage consumed by all processes on the host OS, normalized by number of cores. If the CRDB process is run in a containerized environment, the host OS is the container since the CRDB process cannot inspect CPU usage beyond the container. | This metric gives the CPU utilization percentage of the underlying server, virtual server or container hosting the CockroachDB process. It includes CockroachDB process and non-CockroachDB process usage. If it is equal to 1 (or 100%), then the CPU is overloaded. The CockroachDB process should not be running in an environment with an overloaded state for extended periods of time (hours). This metric is used in the DB Console **Host CPU Percent** graph. |
+| sys.cpu.user.percent | sys.cpu.user.percent | Current user CPU percentage consumed by the CRDB process | This metric gives the CPU usage percentage at the user level by the CockroachDB process only. This is similar to the Linux `top` command output. The metric value can be more than 1 (or 100%) on multi-core systems. It is best to combine user and system metrics. |
+| sys.cpu.sys.percent | sys.cpu.sys.percent | Current system CPU percentage consumed by the CRDB process | This metric gives the CPU usage percentage at the system (Linux kernel) level by the CockroachDB process only. This is similar to the Linux `top` command output. The metric value can be more than 1 (or 100%) on multi-core systems. It is best to combine user and system metrics. |
+| sys.rss | sys.rss | Current process memory (RSS) | This metric gives the amount of RAM used by the CockroachDB process. Persistently low values over an extended period of time suggest there is underutilized memory that can be put to work with adjusted [settings for `--cache` or `--max_sql_memory`]({% link {{ page.version.version }}/recommended-production-settings.md %}#cache-and-sql-memory-size) or both. Conversely, a high utilization, even if a temporary spike, indicates an increased risk of [Out-of-memory (OOM) crash]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#out-of-memory-oom-crash) (particularly since the [swap is generally disabled]({% link {{ page.version.version }}/recommended-production-settings.md %}#memory)). |
+| sql.mem.root.current | {% if include.deployment == 'self-hosted' %}sql.mem.root.current |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Current sql statement memory usage for root | This metric shows how memory set aside for temporary materializations, such as hash tables and intermediary result sets, is utilized. Use this metric to optimize memory allocations based on long term observations. The maximum amount is set with [`--max_sql_memory`]({% link {{ page.version.version }}/recommended-production-settings.md %}#cache-and-sql-memory-size). If the utilization of sql memory is persistently low, perhaps some portion of this memory allocation can be shifted to [`--cache`]({% link {{ page.version.version }}/recommended-production-settings.md %}#cache-and-sql-memory-size). |
+| sys.host.disk.write.bytes | {% if include.deployment == 'self-hosted' %}sys.host.disk.write.bytes |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Bytes written to all disks since this process started | This metric reports the effective storage device write throughput (MB/s) rate. To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric. |
+| sys.host.disk.write.count | {% if include.deployment == 'self-hosted' %}sys.host.disk.write |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Disk write operations across all disks since this process started | This metric reports the effective storage device write IOPS rate. To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric. |
+| sys.host.disk.read.bytes | {% if include.deployment == 'self-hosted' %}sys.host.disk.read.bytes |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Bytes read from all disks since this process started | This metric reports the effective storage device read throughput (MB/s) rate. To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric. |
+| sys.host.disk.read.count | {% if include.deployment == 'self-hosted' %}sys.host.disk.read |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Disk read operations across all disks since this process started | This metric reports the effective storage device read IOPS rate. To confirm that storage is sufficiently provisioned, assess the I/O performance rates (IOPS and MBPS) in the context of the sys.host.disk.iopsinprogress metric. |
+| sys.host.disk.iopsinprogress | {% if include.deployment == 'self-hosted' %}sys.host.disk.iopsinprogress |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} IO operations currently in progress on this host | This metric gives the average queue length of the storage device. It characterizes the storage device's performance capability. All I/O performance metrics are Linux counters and correspond to the `avgqu-sz` in the Linux `iostat` command output. You need to view the device queue graph in the context of the actual read/write IOPS and MBPS metrics that show the actual device utilization. If the device is not keeping up, the queue will grow. Values over 10 are bad. Values around 5 mean the device is working hard trying to keep up. For internal (on chassis) [NVMe](https://www.wikipedia.org/wiki/NVM_Express) devices, the queue values are typically 0. For network connected devices, such as [AWS EBS volumes](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html), the normal operating range of values is 1 to 2. Spikes in values are OK. They indicate an I/O spike where the device fell behind and then caught up. End users may experience inconsistent response times, but there should be no cluster stability issues. If the queue is greater than 5 for an extended period of time and IOPS or MBPS are low, then the storage is most likely not provisioned per Cockroach Labs guidance. In AWS EBS, it is commonly an EBS type, such as gp2, not suitable as database primary storage. If I/O is low and the queue is low, the most likely scenario is that the CPU is lacking and not driving I/O. One such case is a cluster whose nodes have only 2 vCPUs, which is not a supported [sizing]({% link {{ page.version.version }}/recommended-production-settings.md %}#sizing) for production deployments. There are quite a few background processes in the database that take CPU away from the workload, so the workload is just not getting the CPU. Review [storage and disk I/O]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#storage-and-disk-i-o). |
+| sys.host.net.recv.bytes | sys.host.net.recv.bytes | Bytes received on all network interfaces since this process started | This metric gives the node's ingress/egress network transfer rates for flat sections which may indicate insufficiently provisioned networking or high error rates. CockroachDB is using a reliable TCP/IP protocol, so errors result in delivery retries that create a "slow network" effect. |
+| sys.host.net.send.bytes | sys.host.net.send.bytes | Bytes sent on all network interfaces since this process started | This metric gives the node's ingress/egress network transfer rates for flat sections which may indicate insufficiently provisioned networking or high error rates. CockroachDB is using a reliable TCP/IP protocol, so errors result in delivery retries that create a "slow network" effect. |
+| clock-offset.meannanos | clock.offset.meannanos | Mean clock offset with other nodes | This metric gives the node's clock skew. In a well-configured environment, the actual clock skew would be in the sub-millisecond range. A skew exceeding 5 ms is likely due to an NTP service misconfiguration. Reducing the actual clock skew reduces the probability of uncertainty-related conflicts and corresponding retries, which has a positive impact on workload performance. Conversely, a larger actual clock skew increases the probability of retries due to uncertainty conflicts, with potentially measurable adverse effects on workload performance. |
+
+## Storage
+
+| CockroachDB Metric Name | Datadog Integration Metric Name | Description | Usage |
+| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ |
+| capacity | {% if include.deployment == 'self-hosted' %}capacity.total |{% elsif include.deployment == 'advanced' %}capacity |{% endif %} Total storage capacity | This metric gives total storage capacity. Measurements should comply with the following rule: CockroachDB storage volumes should not be utilized more than 60% (40% free space). |
+| capacity.available | capacity.available | Available storage capacity | This metric gives available storage capacity. Measurements should comply with the following rule: CockroachDB storage volumes should not be utilized more than 60% (40% free space). |
+| capacity.used | capacity.used | Used storage capacity | This metric gives used storage capacity. Measurements should comply with the following rule: CockroachDB storage volumes should not be utilized more than 60% (40% free space). |
+| storage.write-stalls | {% if include.deployment == 'self-hosted' %}storage.write.stalls |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of instances of intentional write stalls to backpressure incoming writes | This metric reports actual disk stall events. Ideally, investigate all reports of disk stalls. As a practical guideline, one stall per minute is not likely to have a material impact on workload beyond an occasional increase in response time. However, one stall per second should be viewed as problematic and investigated actively. It is particularly problematic if the rate persists over an extended period of time, and worse, if it is increasing. |
+| rocksdb.compactions | rocksdb.compactions.total | Number of SST compactions | This metric reports the number of a node's [LSM compactions]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#lsm-health). If the number of compactions remains elevated while the LSM health does not improve, compactions are not keeping up with the workload. If the condition persists for an extended period, the cluster will initially exhibit performance issues that will eventually escalate into stability issues. |
+| rocksdb.block.cache.hits | rocksdb.block.cache.hits | Count of block cache hits | This metric gives hits to block cache which is reserved memory. It is allocated upon the start of a node process by the [`--cache` flag]({% link {{ page.version.version }}/cockroach-start.md %}#general) and never shrinks. By observing block cache hits and misses, you can fine-tune memory allocations in the node process for the demands of the workload. |
+| rocksdb.block.cache.misses | rocksdb.block.cache.misses | Count of block cache misses | This metric gives misses to block cache which is reserved memory. It is allocated upon the start of a node process by the [`--cache` flag]({% link {{ page.version.version }}/cockroach-start.md %}#general) and never shrinks. By observing block cache hits and misses, you can fine-tune memory allocations in the node process for the demands of the workload. |
+
+## Health
+
+| CockroachDB Metric Name | Datadog Integration Metric Name | Description | Usage |
+| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ |
+| sys.uptime | sys.uptime | Process uptime | This metric measures the length of time, in seconds, that the CockroachDB process has been running. Monitor this metric to detect events such as node restarts, which may require investigation or intervention. |
+| admission.io.overload | {% if include.deployment == 'self-hosted' %}admission.io.overload |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} 1-normalized float indicating whether IO admission control considers the store as overloaded with respect to compaction out of L0 (considers sub-level and file counts). | If the value of this metric exceeds 1, then it indicates overload. You can also look at the metrics `storage.l0-num-files`, `storage.l0-sublevels` or `rocksdb.read-amplification` directly. A healthy LSM shape is defined as “read-amp < 20” and “L0-files < 1000”, looking at [cluster settings]({% link {{ page.version.version }}/cluster-settings.md %}) `admission.l0_sub_level_count_overload_threshold` and `admission.l0_file_count_overload_threshold` respectively. |
+| admission.wait_durations.kv-p75 | {% if include.deployment == 'self-hosted' %}admission.wait.durations.kv |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Wait time durations for requests that waited | This metric shows whether the CPU utilization-based admission control feature is working effectively or is potentially overaggressive. This is a latency histogram of how much delay was added to the workload due to throttling by CPU control. If you observe waits of over 100ms for over 5 seconds while excess CPU capacity was available, then admission control is overly aggressive. |
+| admission.wait_durations.kv-stores-p75 | {% if include.deployment == 'self-hosted' %}admission.wait.durations.kv_stores |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Wait time durations for requests that waited | This metric shows whether the I/O utilization-based admission control feature is working effectively or is potentially overaggressive. This is a latency histogram of how much delay was added to the workload due to throttling by I/O control. If you observe waits of over 100ms for over 5 seconds while excess I/O capacity was available, then admission control is overly aggressive. |
+| sys.runnable.goroutines.per.cpu | {% if include.deployment == 'self-hosted' %}sys.runnable.goroutines.per_cpu |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Average number of goroutines that are waiting to run, normalized by number of cores | If this metric has a value over 30, it indicates a CPU overload. If the condition lasts a short period of time (a few seconds), the database users are likely to experience inconsistent response times. If the condition persists for an extended period of time (tens of seconds, or minutes) the cluster may start developing stability issues. Review [CPU planning]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#cpu). |
+
+{% if include.deployment == 'self-hosted' %}
+## Network
+
+| CockroachDB Metric Name | Datadog Integration Metric Name | Description | Usage |
+| ------------------------------------------------------ | --------------------------------------------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ |
+| rpc.connection.avg_round_trip_latency | rpc.connection.avg_round_trip_latency | Sum of exponentially weighted moving average of round-trip latencies, as measured through a gRPC RPC. Dividing this gauge by `rpc.connection.healthy` gives an approximation of average latency, but the top-level round-trip-latency histogram is more useful. Instead, users should consult the label families of this metric if they are available (which requires Prometheus and the cluster setting `server.child_metrics.enabled`); these provide per-peer moving averages. This metric does not track failed connections. A failed connection's contribution is reset to zero. | This metric is helpful in understanding general network issues outside of CockroachDB that could be impacting the user’s workload. |
+| rpc.connection.failures | rpc.connection.failures.count | Counter of failed connections. This includes both the event in which a healthy connection terminates as well as unsuccessful reconnection attempts. Connections that are terminated as part of local node shutdown are excluded. Decommissioned peers are excluded. | See Description. |
+| rpc.connection.healthy | rpc.connection.healthy | Gauge of current connections in a healthy state (i.e., bidirectionally connected and heartbeating). | See Description. |
+| rpc.connection.healthy_nanos | rpc.connection.healthy_nanos | Gauge of nanoseconds of healthy connection time. On the Prometheus endpoint scraped when the cluster setting `server.child_metrics.enabled` is set, this gauge allows you to see the duration for which a given peer has been connected in a healthy state. | This can be useful for monitoring the stability and health of connections within your CockroachDB cluster. |
+| rpc.connection.heartbeats | rpc.connection.heartbeats.count | Counter of successful heartbeats. | See Description. |
+| rpc.connection.unhealthy | rpc.connection.unhealthy | Gauge of current connections in an unhealthy state (not bidirectionally connected or heartbeating). | If the value of this metric is greater than 0, this could indicate a network partition. |
+| rpc.connection.unhealthy_nanos | rpc.connection.unhealthy_nanos | Gauge of nanoseconds of unhealthy connection time. On the Prometheus endpoint scraped when the cluster setting `server.child_metrics.enabled` is set, this gauge allows you to see the duration for which a given peer has been unreachable. | If this duration is greater than 0, this could indicate how long a network partition has been occurring. |
+{% endif %}
+
+{% if include.deployment == 'self-hosted' %}
+## Expiration of license and certificates
+
+| CockroachDB Metric Name | Datadog Integration Metric Name | Description | Usage |
+| ----------------------------------------------------- | ---------------------------------------------------------- | ------------------------------------------------------------ | ------------------------------------------------------------ |
+| seconds.until.enterprise.license.expiry | seconds.until.enterprise.license.expiry | Seconds until enterprise license expiry (0 if no license present or running without enterprise features) | See Description. |
+| security.certificate.expiration.ca | security.certificate_expiration.ca | Expiration for the CA certificate. 0 means no certificate or error | See Description. |
+| security.certificate.expiration.client-ca | security.certificate_expiration.client_ca | Expiration for the client CA certificate. 0 means no certificate or error| See Description. |
+| security.certificate.expiration.ui | security.certificate_expiration.ui | Expiration for the UI certificate. 0 means no certificate or error| See Description. |
+| security.certificate.expiration.ui-ca | security.certificate_expiration.ui_ca | Expiration for the UI CA certificate. 0 means no certificate or error| See Description. |
+| security.certificate.expiration.node | security.certificate_expiration.node | Expiration for the node certificate. 0 means no certificate or error| See Description. |
+| security.certificate.expiration.node-client | security.certificate_expiration.node_client | Expiration for the node's client certificate. 0 means no certificate or error| See Description. |
+{% endif %}
+
+## KV distributed
+
+| CockroachDB Metric Name | Datadog Integration Metric Name | Description | Usage |
+| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ |
+| liveness.heartbeatlatency | {% if include.deployment == 'self-hosted' %}liveness.heartbeatlatency-p90 |{% elsif include.deployment == 'advanced' %}liveness.heartbeatlatency |{% endif %} Node liveness heartbeat latency | If this metric exceeds 1 second, it is a sign of cluster instability. |
+| liveness.livenodes | liveness.livenodes | Number of live nodes in the cluster (will be 0 if this node is not itself live) | This is a critical metric that tracks the live nodes in the cluster. |
+| distsender.rpc.sent.nextreplicaerror | distsender.rpc.sent.nextreplicaerror | Number of replica-addressed RPCs sent due to per-replica errors | [RPC](architecture/overview.html#overview) errors do not necessarily indicate a problem. This metric tracks remote procedure calls that return a status value other than "success". A non-success status of an RPC should not be misconstrued as a network transport issue. It is database code logic executed on another cluster node. The non-success status is a result of an orderly execution of an RPC that reports a specific logical condition. |
+| distsender.errors.notleaseholder | distsender.errors.notleaseholder | Number of NotLeaseHolderErrors encountered from replica-addressed RPCs | Errors of this type are normal during elastic cluster topology changes when leaseholders are actively rebalancing. They are automatically retried. However, they may create occasional response time spikes; in that case, this metric may help explain the cause. |
+
+## KV replication
+
+| CockroachDB Metric Name | Datadog Integration Metric Name | Description | Usage |
+| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ |
+| leases.transfers.success | leases.transfers.success | Number of successful lease transfers | A high number of [lease](architecture/replication-layer.html#leases) transfers is neither a negative nor a positive signal; rather, it reflects elastic cluster activities. For example, this metric is high during cluster topology changes. A high value is often the reason for NotLeaseHolderErrors, which are normal and expected during rebalancing. Observing this metric may confirm the cause of such errors. |
+| rebalancing.queriespersecond | {% if include.deployment == 'self-hosted' %}rebalancing.queriespersecond |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of kv-level requests received per second by the store, considering the last 30 minutes, as used in rebalancing decisions. | This metric shows hotspots along the queries per second (QPS) dimension. It provides insights into the ongoing rebalancing activities. |
+| ranges | ranges | Number of ranges | This metric provides a measure of the scale of the data size. |
+| replicas | {% if include.deployment == 'self-hosted' %}replicas.total |{% elsif include.deployment == 'advanced' %}replicas |{% endif %} Number of replicas | This metric provides an essential characterization of the data distribution across cluster nodes. |
+| replicas.leaseholders | replicas.leaseholders | Number of lease holders | This metric provides an essential characterization of the data processing points across cluster nodes. |
+| ranges.underreplicated | ranges.underreplicated | Number of ranges with fewer live replicas than the replication target | This metric is an indicator of [replication issues]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#replication-issues). It shows whether the cluster has data that is not conforming to resilience goals. The next step is to determine the corresponding database object, such as the table or index, of these under-replicated ranges and whether the under-replication is temporarily expected. Use the statement `SELECT table_name, index_name FROM [SHOW RANGES WITH INDEXES] WHERE range_id = {id of under-replicated range};` (see the example following this table). |
+| ranges.unavailable | ranges.unavailable | Number of ranges with fewer live replicas than needed for quorum | This metric is an indicator of [replication issues]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#replication-issues). It shows whether the cluster is unhealthy and can impact workload. If an entire range is unavailable, then it will be unable to process queries. |
+| queue.replicate.replacedecommissioningreplica.error | {% if include.deployment == 'self-hosted' %}queue.replicate.replacedecommissioningreplica.error.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of failed decommissioning replica replacements processed by the replicate queue | Refer to [Decommission the node]({% link {{ page.version.version }}/node-shutdown.md %}?filters=decommission#decommission-the-node). |
+| range.splits | {% if include.deployment == 'self-hosted' %}range.splits.total |{% elsif include.deployment == 'advanced' %}range.splits |{% endif %} Number of range splits | This metric indicates how fast a workload is scaling up. Spikes can indicate resource hot spots since the [split heuristic is based on QPS]({% link {{ page.version.version }}/load-based-splitting.md %}#control-load-based-splitting-threshold). To understand whether hot spots are an issue and with which tables and indexes they are occurring, correlate this metric with other metrics such as CPU usage (for example, `sys.cpu.combined.percent-normalized`), or use the [**Hot Ranges** page]({% link {{ page.version.version }}/ui-hot-ranges-page.md %}). |
+| range.merges | {% if include.deployment == 'self-hosted' %}range.merges.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of range merges | This metric indicates how fast a workload is scaling down. Merges are CockroachDB's [optimization for performance](architecture/distribution-layer.html#range-merges). This metric indicates that there have been deletes in the workload. |
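+
+For example, to map an under-replicated range back to its table and index (as noted in the `ranges.underreplicated` row above), you can run a query like the following; the range ID `123` is a hypothetical placeholder:
+
+~~~ sql
+-- Replace 123 with the ID of the under-replicated range.
+SELECT table_name, index_name
+FROM [SHOW RANGES WITH INDEXES]
+WHERE range_id = 123;
+~~~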
+
+## SQL
+
+| CockroachDB Metric Name | Datadog Integration Metric Name | Description | Usage |
+| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ |
+| sql.conns | sql.conns | Number of active SQL connections | This metric shows the number of connections as well as the distribution, or balancing, of connections across cluster nodes. An imbalance can lead to nodes becoming overloaded. Review [Connection Pooling]({% link {{ page.version.version }}/connection-pooling.md %}). |
+| sql.new_conns | {% if include.deployment == 'self-hosted' %}sql.new_conns.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of new connection attempts. | The rate of this metric shows how frequently new connections are being established. This can be useful in determining if a high rate of incoming new connections is causing additional load on the server due to a misconfigured application. |
+| sql.txns.open | sql.txns.open | Number of currently open user SQL transactions | This metric should roughly correspond to the number of cores * 4. If this metric is consistently larger, scale out the cluster. |
+| sql.statements.active | sql.statements.active | Number of currently active user SQL statements | This high-level metric reflects workload volume. |
+| sql.failure.count | {% if include.deployment == 'self-hosted' %}sql.failure |{% elsif include.deployment == 'advanced' %}sql.failure.count |{% endif %} Number of statements resulting in a planning or runtime error | This metric is a high-level indicator of workload and application degradation with query failures. Use the [Insights page]({% link {{ page.version.version }}/ui-insights-page.md %}) to find failed executions with their error code to troubleshoot or use application-level logs, if instrumented, to determine the cause of error. |
+| sql.full.scan.count | {% if include.deployment == 'self-hosted' %}sql.full.scan |{% elsif include.deployment == 'advanced' %}sql.full.scan.count |{% endif %} Number of full table or index scans | This metric is a high-level indicator of potentially suboptimal query plans in the workload that may require index tuning and maintenance. To identify the [statements with a full table scan]({% link {{ page.version.version }}/performance-recipes.md %}#statements-with-full-table-scans), use the `SHOW FULL TABLE SCANS` statement (see the example following this table) or the [**SQL Activity Statements** page]({% link {{ page.version.version }}/ui-statements-page.md %}) with the corresponding metric time frame. The **Statements** page also includes [explain plans]({% link {{ page.version.version }}/ui-statements-page.md %}#explain-plans) and [index recommendations]({% link {{ page.version.version }}/ui-statements-page.md %}#insights). Not all full scans are necessarily bad, especially over smaller tables. |
+| sql.insert.count | sql.insert.count | Number of SQL INSERT statements successfully executed | This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the [**SQL Activity** pages]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#sql-activity-pages) to investigate interesting outliers or patterns. For example, on the [**Transactions** page]({% link {{ page.version.version }}/ui-transactions-page.md %}) and the [**Statements** page]({% link {{ page.version.version }}/ui-statements-page.md %}), sort on the Execution Count column. To find problematic sessions, on the [**Sessions** page]({% link {{ page.version.version }}/ui-sessions-page.md %}), sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. |
+| sql.update.count | sql.update.count | Number of SQL UPDATE statements successfully executed | This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the [**SQL Activity** pages]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#sql-activity-pages) to investigate interesting outliers or patterns. For example, on the [**Transactions** page]({% link {{ page.version.version }}/ui-transactions-page.md %}) and the [**Statements** page]({% link {{ page.version.version }}/ui-statements-page.md %}), sort on the Execution Count column. To find problematic sessions, on the [**Sessions** page]({% link {{ page.version.version }}/ui-sessions-page.md %}), sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. |
+| sql.delete.count | sql.delete.count | Number of SQL DELETE statements successfully executed | This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the [**SQL Activity** pages]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#sql-activity-pages) to investigate interesting outliers or patterns. For example, on the [**Transactions** page]({% link {{ page.version.version }}/ui-transactions-page.md %}) and the [**Statements** page]({% link {{ page.version.version }}/ui-statements-page.md %}), sort on the Execution Count column. To find problematic sessions, on the [**Sessions** page]({% link {{ page.version.version }}/ui-sessions-page.md %}), sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. |
+| sql.select.count | sql.select.count | Number of SQL SELECT statements successfully executed | This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the [**SQL Activity** pages]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#sql-activity-pages) to investigate interesting outliers or patterns. For example, on the [**Transactions** page]({% link {{ page.version.version }}/ui-transactions-page.md %}) and the [**Statements** page]({% link {{ page.version.version }}/ui-statements-page.md %}), sort on the Execution Count column. To find problematic sessions, on the [**Sessions** page]({% link {{ page.version.version }}/ui-sessions-page.md %}), sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. |
+| sql.ddl.count | sql.ddl.count | Number of SQL DDL statements successfully executed | This high-level metric reflects workload volume. Monitor this metric to identify abnormal application behavior or patterns over time. If abnormal patterns emerge, apply the metric's time range to the [**SQL Activity** pages]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#sql-activity-pages) to investigate interesting outliers or patterns. For example, on the [**Transactions** page]({% link {{ page.version.version }}/ui-transactions-page.md %}) and the [**Statements** page]({% link {{ page.version.version }}/ui-statements-page.md %}), sort on the Execution Count column. To find problematic sessions, on the [**Sessions** page]({% link {{ page.version.version }}/ui-sessions-page.md %}), sort on the Transaction Count column. Find the sessions with high transaction counts and trace back to a user or application. |
+| sql.txn.begin.count | sql.txn.begin.count | Number of SQL transaction BEGIN statements successfully executed | This metric reflects workload volume by counting explicit [transactions]({% link {{ page.version.version }}/transactions.md %}). Use this metric to determine whether explicit transactions can be refactored as implicit transactions (individual statements). |
+| sql.txn.commit.count | sql.txn.commit.count | Number of SQL transaction COMMIT statements successfully executed | This metric shows the number of [transactions]({% link {{ page.version.version }}/transactions.md %}) that completed successfully. This metric can be used as a proxy to measure the number of successful explicit transactions. |
+| sql.txn.rollback.count | sql.txn.rollback.count | Number of SQL transaction ROLLBACK statements successfully executed | This metric shows the number of orderly transaction [rollbacks]({% link {{ page.version.version }}/rollback-transaction.md %}). A persistently high number of rollbacks may negatively impact the workload performance and needs to be investigated. |
+| sql.txn.abort.count | sql.txn.abort.count | Number of SQL transaction abort errors | This high-level metric reflects workload performance. A persistently high number of SQL transaction abort errors may negatively impact the workload performance and needs to be investigated. |
+| sql.service.latency-p90, sql.service.latency-p99 | sql.service.latency | Latency of SQL request execution | These high-level metrics reflect workload performance. Monitor these metrics to understand latency over time. If abnormal patterns emerge, apply the metric's time range to the [**SQL Activity** pages]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#sql-activity-pages) to investigate interesting outliers or patterns. The [**Statements page**]({% link {{ page.version.version }}/ui-statements-page.md %}) has P90 Latency and P99 latency columns to enable correlation with this metric. |
+| sql.txn.latency-p90, sql.txn.latency-p99 | sql.txn.latency | Latency of SQL transactions | These high-level metrics provide a latency histogram of all executed SQL transactions. These metrics provide an overview of the current SQL workload. |
+| txnwaitqueue.deadlocks_total | {% if include.deployment == 'self-hosted' %}txnwaitqueue.deadlocks.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of deadlocks detected by the transaction wait queue | Alert on this metric if its value is greater than zero, especially if transaction throughput is lower than expected. Applications should be able to detect and recover from deadlock errors. However, transaction performance and throughput can be maximized if the application logic avoids deadlock conditions in the first place, for example, by keeping transactions as short as possible. |
+| sql.distsql.contended_queries.count | {% if include.deployment == 'self-hosted' %}sql.distsql.contended.queries |{% elsif include.deployment == 'advanced' %}sql.distsql.contended.queries |{% endif %} Number of SQL queries that experienced contention | This metric is incremented whenever there is a non-trivial amount of contention experienced by a statement, whether from read-write or write-write conflicts. Monitor this metric to correlate possible workload performance issues to contention conflicts. |
+| sql.conn.latency-p90, sql.conn.latency-p99 | sql.conn.latency | Latency to establish and authenticate a SQL connection | These metrics characterize the database connection latency which can affect the application performance, for example, by having slow startup times. |
+| txn.restarts.serializable | txn.restarts.serializable | Number of restarts due to a forwarded commit timestamp and isolation=SERIALIZABLE | This metric is one measure of the impact of contention conflicts on workload performance. For guidance on contention conflicts, review [transaction contention best practices]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention) and [performance tuning recipes]({% link {{ page.version.version }}/performance-recipes.md %}#transaction-contention). Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. |
+| txn.restarts.writetooold | txn.restarts.writetooold | Number of restarts due to a concurrent writer committing first | This metric is one measure of the impact of contention conflicts on workload performance. For guidance on contention conflicts, review [transaction contention best practices]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention) and [performance tuning recipes]({% link {{ page.version.version }}/performance-recipes.md %}#transaction-contention). Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. |
+| txn.restarts.writetoooldmulti | {% if include.deployment == 'self-hosted' %}txn.restarts.writetoooldmulti.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of restarts due to multiple concurrent writers committing first | This metric is one measure of the impact of contention conflicts on workload performance. For guidance on contention conflicts, review [transaction contention best practices]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention) and [performance tuning recipes]({% link {{ page.version.version }}/performance-recipes.md %}#transaction-contention). Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. |
+| txn.restarts.unknown | {% if include.deployment == 'self-hosted' %}txn.restarts.unknown.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of restarts due to unknown reasons | This metric is one measure of the impact of contention conflicts on workload performance. For guidance on contention conflicts, review [transaction contention best practices]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention) and [performance tuning recipes]({% link {{ page.version.version }}/performance-recipes.md %}#transaction-contention). Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. |
+| txn.restarts.txnpush | {% if include.deployment == 'self-hosted' %}txn.restarts.txnpush.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of restarts due to a transaction push failure | This metric is one measure of the impact of contention conflicts on workload performance. For guidance on contention conflicts, review [transaction contention best practices]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention) and [performance tuning recipes]({% link {{ page.version.version }}/performance-recipes.md %}#transaction-contention). Tens of restarts per minute may be a high value, a signal of an elevated degree of contention in the workload, which should be investigated. |
+| txn.restarts.txnaborted | {% if include.deployment == 'self-hosted' %}txn.restarts.txnaborted.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of restarts due to an abort by a concurrent transaction | The errors tracked by this metric are generally due to deadlocks. Deadlocks can often be prevented with a considered transaction design. Identify the conflicting transactions involved in the deadlocks, then, if possible, redesign the business logic implementation prone to deadlocks. |
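+
+For example, one quick way to list recent statements that performed full table scans (referenced in the `sql.full.scan.count` row above):
+
+~~~ sql
+-- Lists recently executed queries that used a full table scan.
+SHOW FULL TABLE SCANS;
+~~~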
+
+## Table Statistics
+
+| CockroachDB Metric Name | Datadog Integration Metric Name | Description | Usage |
+| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ |
+| jobs.auto_create_stats.resume_failed | {% if include.deployment == 'self-hosted' %}jobs.auto.create.stats.resume_failed.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of auto_create_stats jobs which failed with a non-retryable error | This metric is a high-level indicator that automatic creation of [table statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) is failing. Failed statistics creation can lead to the query optimizer running with stale statistics. Stale statistics can cause suboptimal query plans to be selected, leading to poor query performance. |
+| jobs.auto_create_stats.currently_running | {% if include.deployment == 'self-hosted' %}jobs.auto.create.stats.currently_running |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of auto_create_stats jobs currently running | This metric tracks the number of active automatically generated statistics jobs that could also be consuming resources. Ensure that foreground SQL traffic is not impacted by correlating this metric with SQL latency and query volume metrics. |
+| jobs.auto_create_stats.currently_paused | {% if include.deployment == 'self-hosted' %}jobs.auto.create.stats.currently_paused |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of auto_create_stats jobs currently considered Paused | This metric is a high-level indicator that automatically generated statistics jobs are paused which can lead to the query optimizer running with stale statistics. Stale statistics can cause suboptimal query plans to be selected leading to poor query performance. |
+| jobs.create_stats.currently_running | {% if include.deployment == 'self-hosted' %}jobs.create.stats.currently_running |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of create_stats jobs currently running | This metric tracks the number of active create statistics jobs that may be consuming resources. Ensure that foreground SQL traffic is not impacted by correlating this metric with SQL latency and query volume metrics. |
+
+## Backup and Restore
+
+| CockroachDB Metric Name | Datadog Integration Metric Name | Description | Usage |
+| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ |
+| jobs.backup.currently_running | {% if include.deployment == 'self-hosted' %}jobs.backup.currently_running |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of backup jobs currently running | See Description. |
+| jobs.backup.currently_paused | {% if include.deployment == 'self-hosted' %}jobs.backup.currently_paused |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of backup jobs currently considered Paused | Monitor and alert on this metric to safeguard against an inadvertent operational error of leaving a backup job in a paused state for an extended period of time. A paused job can hold resources, impact concurrency, or have other negative consequences. A paused backup job may break the [recovery point objective (RPO)]({% link {{ page.version.version }}/backup.md %}#performance). |
+| schedules.BACKUP.failed | {% if include.deployment == 'self-hosted' %}schedules.backup.failed |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of BACKUP jobs failed | Monitor this metric and investigate backup job failures. |
+| schedules.BACKUP.last-completed-time | {% if include.deployment == 'self-hosted' %}schedules.backup.last_completed_time |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} The Unix timestamp of the most recently completed backup by a schedule specified as maintaining this metric | Monitor this metric to ensure that backups are meeting the [recovery point objective (RPO)]({% link {{ page.version.version }}/disaster-recovery-overview.md %}). Each node exports the time that it last completed a backup on behalf of the schedule. If a node is restarted, it will report `0` until it completes a backup. If all nodes are restarted, `max()` is `0` until a node completes a backup. To make use of this metric, first, from each node, take the maximum over a rolling window equal to or greater than the backup frequency, and then take the maximum of those values across nodes. For example, with a backup frequency of 60 minutes, monitor `time() - max_across_nodes(max_over_time(schedules_BACKUP_last_completed_time, 60min))`. |
+
+## Changefeeds
+
+If [changefeeds]({% link {{ page.version.version }}/change-data-capture-overview.md %}) are created in a CockroachDB cluster, monitor these additional metrics in your custom dashboards:
+
+| CockroachDB Metric Name | Datadog Integration Metric Name | Description | Usage |
+| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ |
+| changefeed.running | changefeed.running | Number of currently running changefeeds, including sinkless | This metric tracks the total number of all running changefeeds. |
+| jobs.changefeed.currently_paused | {% if include.deployment == 'self-hosted' %}jobs.changefeed.currently_paused |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of changefeed jobs currently considered Paused | Monitor and alert on this metric to safeguard against an inadvertent operational error of leaving a changefeed job in a paused state for an extended period of time. Changefeed jobs should not be paused for a long time because the [protected timestamp prevents garbage collection]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#protected-timestamp-and-garbage-collection-monitoring). |
+| changefeed.failures | changefeed.failures | Total number of changefeed jobs which have failed | This metric tracks the permanent changefeed job failures that the jobs system will not try to restart. Any increase in this counter should be investigated. An alert on this metric is recommended. |
+| changefeed.error_retries | changefeed.error.retries | Total retryable errors encountered by all changefeeds | This metric tracks transient changefeed errors. Alert on "too many" errors, such as 50 retries in 15 minutes. For example, during a rolling upgrade this counter will increase because the changefeed jobs will restart following node restarts. There is an exponential backoff, up to 10 minutes. However, if there is no rolling upgrade or other cluster maintenance in progress and the error rate is high, investigate the changefeed job. |
+| changefeed.emitted_messages | changefeed.emitted.messages | Messages emitted by all feeds | This metric provides a useful context when assessing the state of changefeeds. This metric characterizes the rate of changes being streamed from the CockroachDB cluster. |
+| changefeed.emitted_bytes | {% if include.deployment == 'self-hosted' %}changefeed.emitted_bytes.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Bytes emitted by all feeds | This metric provides a useful context when assessing the state of changefeeds. This metric characterizes the throughput bytes being streamed from the CockroachDB cluster. |
+| changefeed.commit_latency | changefeed.commit.latency | The difference between the event MVCC timestamp and the time it was acknowledged by the downstream sink. If the sink batches events, then the difference between the oldest event in the batch and acknowledgement is recorded. Latency during backfill is excluded.| This metric provides a useful context when assessing the state of changefeeds. This metric characterizes the end-to-end lag between a committed change and that change applied at the destination. |
+| jobs.changefeed.protected_age_sec | {% if include.deployment == 'self-hosted' %}jobs.changefeed.protected_age_sec |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} The age of the oldest PTS record protected by changefeed jobs | [Changefeeds use protected timestamps to protect the data from being garbage collected]({% link {{ page.version.version }}/monitor-and-debug-changefeeds.md %}#protected-timestamp-and-garbage-collection-monitoring). Ensure the protected timestamp age does not significantly exceed the [GC TTL zone configuration]({% link {{ page.version.version }}/configure-replication-zones.md %}#replication-zone-variables). Alert on this metric if the protected timestamp age is greater than 3 times the GC TTL. |
+
+## Row-Level TTL
+
+If [Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}) is configured for any table in a CockroachDB cluster, monitor these additional metrics in your custom dashboards:
+
+| CockroachDB Metric Name | Datadog Integration Metric Name | Description | Usage |
+| ----------------------------------------------------- | {% if include.deployment == 'self-hosted' %}------ |{% elsif include.deployment == 'advanced' %}---- |{% endif %} ------------------------------------------------------------ | ------------------------------------------------------------ |
+| jobs.row_level_ttl.resume_completed | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.resume_completed.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of row_level_ttl jobs which successfully resumed to completion | If Row Level TTL is enabled, this metric should be nonzero and correspond to the `ttl_cron` setting that was chosen. If this metric is zero, it means the job is not running. |
+| jobs.row_level_ttl.resume_failed | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.resume_failed.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of row_level_ttl jobs which failed with a non-retryable error | This metric should remain at zero. Repeated errors means the Row Level TTL job is not deleting data. |
+| jobs.row_level_ttl.rows_selected | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.rows_selected.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of rows selected for deletion by the row level TTL job. | Correlate this metric with the metric `jobs.row_level_ttl.rows_deleted` to ensure all the rows that should be deleted are actually getting deleted. |
+| jobs.row_level_ttl.rows_deleted | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.rows_deleted.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of rows deleted by the row level TTL job. | Correlate this metric with the metric `jobs.row_level_ttl.rows_selected` to ensure all the rows that should be deleted are actually getting deleted. |
+| jobs.row_level_ttl.currently_paused | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.currently_paused |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of row_level_ttl jobs currently considered Paused | Monitor this metric to ensure the Row Level TTL job does not remain paused inadvertently for an extended period. |
+| jobs.row_level_ttl.currently_running | {% if include.deployment == 'self-hosted' %}jobs.row.level.ttl.currently_running |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of row_level_ttl jobs currently running | Monitor this metric to ensure there are not too many Row Level TTL jobs running at the same time. Generally, this metric should be in the low single digits. |
+| schedules.scheduled-row-level-ttl-executor.failed | {% if include.deployment == 'self-hosted' %}schedules.scheduled.row.level.ttl.executor_failed.count |{% elsif include.deployment == 'advanced' %}NOT AVAILABLE |{% endif %} Number of scheduled-row-level-ttl-executor jobs failed | Monitor this metric to ensure the Row Level TTL job is running. If it is non-zero, it means the job could not be created. |
+| jobs.row_level_ttl.span_total_duration | NOT AVAILABLE | Duration for processing a span during row level TTL. | See Description. |
+| jobs.row_level_ttl.select_duration | NOT AVAILABLE | Duration for select requests during row level TTL. | See Description. |
+| jobs.row_level_ttl.delete_duration | NOT AVAILABLE | Duration for delete requests during row level TTL. | See Description. |
+| jobs.row_level_ttl.num_active_spans | NOT AVAILABLE | Number of active spans the TTL job is deleting from. | See Description. |
+| jobs.row_level_ttl.total_rows | NOT AVAILABLE | Approximate number of rows on the TTL table. | See Description. |
+| jobs.row_level_ttl.total_expired_rows | NOT AVAILABLE | Approximate number of rows that have expired the TTL on the TTL table. | See Description. |
+
+## See also
+
+- [Available Metrics]({% link {{ page.version.version }}/metrics.md %}#available-metrics)
+- [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %})
+- [Visualize metrics in Grafana]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}#step-5-visualize-metrics-in-grafana)
+- [Custom Chart Debug Page]({% link {{ page.version.version }}/ui-custom-chart-debug-page.md %})
+- [Cluster API]({% link {{ page.version.version }}/cluster-api.md %})
+- [Essential Alerts]({% link {{ page.version.version }}/essential-alerts-{{ include.deployment}}.md %})
+- [CockroachDB Source Code - DB Console metrics to graphs mappings (in *.tsx files)](https://github.com/cockroachdb/cockroach/tree/master/pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards)
diff --git a/src/current/_includes/v25.1/faq/auto-generate-unique-ids.md b/src/current/_includes/v25.1/faq/auto-generate-unique-ids.md
new file mode 100644
index 00000000000..ebe3262e6df
--- /dev/null
+++ b/src/current/_includes/v25.1/faq/auto-generate-unique-ids.md
@@ -0,0 +1,111 @@
+To auto-generate unique row identifiers, you can use the `gen_random_uuid()`, `uuid_v4()`, or `unique_rowid()` [functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions).
+
+To use the [`UUID`]({% link {{ page.version.version }}/uuid.md %}) column with the `gen_random_uuid()` [function]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions) as the [default value]({% link {{ page.version.version }}/default-value.md %}):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE TABLE users (
+ id UUID NOT NULL DEFAULT gen_random_uuid(),
+ city STRING NOT NULL,
+ name STRING NULL,
+ address STRING NULL,
+ credit_card STRING NULL,
+ CONSTRAINT "primary" PRIMARY KEY (city ASC, id ASC),
+ FAMILY "primary" (id, city, name, address, credit_card)
+);
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+INSERT INTO users (name, city) VALUES ('Petee', 'new york'), ('Eric', 'seattle'), ('Dan', 'seattle');
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SELECT * FROM users;
+~~~
+
+~~~
+ id | city | name | address | credit_card
++--------------------------------------+----------+-------+---------+-------------+
+ cf8ee4e2-cd74-449a-b6e6-a0fb2017baa4 | new york | Petee | NULL | NULL
+ 2382564e-702f-42d9-a139-b6df535ae00a | seattle | Eric | NULL | NULL
+ 7d27e40b-263a-4891-b29b-d59135e55650 | seattle | Dan | NULL | NULL
+(3 rows)
+~~~
+
+Alternatively, you can use the [`BYTES`]({% link {{ page.version.version }}/bytes.md %}) column with the `uuid_v4()` function as the default value:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE TABLE users2 (
+ id BYTES DEFAULT uuid_v4(),
+ city STRING NOT NULL,
+ name STRING NULL,
+ address STRING NULL,
+ credit_card STRING NULL,
+ CONSTRAINT "primary" PRIMARY KEY (city ASC, id ASC),
+ FAMILY "primary" (id, city, name, address, credit_card)
+);
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+INSERT INTO users2 (name, city) VALUES ('Anna', 'new york'), ('Jonah', 'seattle'), ('Terry', 'chicago');
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SELECT * FROM users2;
+~~~
+
+~~~
+ id | city | name | address | credit_card
++------------------------------------------------+----------+-------+---------+-------------+
+ 4\244\277\323/\261M\007\213\275*\0060\346\025z | chicago | Terry | NULL | NULL
+ \273*t=u.F\010\274f/}\313\332\373a | new york | Anna | NULL | NULL
+ \004\\\364nP\024L)\252\364\222r$\274O0 | seattle | Jonah | NULL | NULL
+(3 rows)
+~~~
+
+In either case, generated IDs will be 128-bit, sufficiently large to generate unique values. Once the table grows beyond a single key-value range's [default size]({% link {{ page.version.version }}/configure-replication-zones.md %}#range-max-bytes), new IDs will be scattered across all of the table's ranges and, therefore, likely across different nodes. This means that multiple nodes will share in the load.
+
+This approach has the disadvantage of creating a primary key that may not be useful in a query directly, which can require a join with another table or a secondary index.
+
+If it is important for generated IDs to be stored in the same key-value range, you can use an [integer type]({% link {{ page.version.version }}/int.md %}) with the `unique_rowid()` [function]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions) as the default value, either explicitly or via the [`SERIAL` pseudo-type]({% link {{ page.version.version }}/serial.md %}):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE TABLE users3 (
+ id INT DEFAULT unique_rowid(),
+ city STRING NOT NULL,
+ name STRING NULL,
+ address STRING NULL,
+ credit_card STRING NULL,
+ CONSTRAINT "primary" PRIMARY KEY (city ASC, id ASC),
+ FAMILY "primary" (id, city, name, address, credit_card)
+);
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+INSERT INTO users3 (name, city) VALUES ('Blake', 'chicago'), ('Hannah', 'seattle'), ('Bobby', 'seattle');
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SELECT * FROM users3;
+~~~
+
+~~~
+ id | city | name | address | credit_card
++--------------------+---------+--------+---------+-------------+
+ 469048192112197633 | chicago | Blake | NULL | NULL
+ 469048192112263169 | seattle | Hannah | NULL | NULL
+ 469048192112295937 | seattle | Bobby | NULL | NULL
+(3 rows)
+~~~
+
+Upon insert or upsert, the `unique_rowid()` function generates a default value from the timestamp and ID of the node executing the insert. Such time-ordered values are likely to be globally unique except in cases where a very large number of IDs (100,000+) are generated per node per second. Also, there can be gaps and the order is not completely guaranteed.
+
+To understand the differences between the `UUID` and `unique_rowid()` options, see the [SQL FAQs]({% link {{ page.version.version }}/sql-faqs.md %}#what-are-the-differences-between-uuid-sequences-and-unique_rowid). For further background on UUIDs, see [What is a UUID, and Why Should You Care?](https://www.cockroachlabs.com/blog/what-is-a-uuid/).
diff --git a/src/current/_includes/v25.1/faq/clock-synchronization-effects.md b/src/current/_includes/v25.1/faq/clock-synchronization-effects.md
new file mode 100644
index 00000000000..e335a97fc3e
--- /dev/null
+++ b/src/current/_includes/v25.1/faq/clock-synchronization-effects.md
@@ -0,0 +1,31 @@
+CockroachDB requires moderate levels of clock synchronization to preserve data consistency. For this reason, when a node detects that its clock is out of sync with at least half of the other nodes in the cluster by 80% of the maximum offset allowed, it spontaneously shuts down. This offset defaults to 500ms but can be changed via the [`--max-offset`]({% link {{ page.version.version }}/cockroach-start.md %}#flags-max-offset) flag when starting each node.
+
+Regardless of clock skew, [`SERIALIZABLE`]({% link {{ page.version.version }}/demo-serializable.md %}) and [`READ COMMITTED`]({% link {{ page.version.version }}/read-committed.md %}) transactions both serve globally consistent ("non-stale") reads and [commit atomically]({% link {{ page.version.version }}/developer-basics.md %}#how-transactions-work-in-cockroachdb). However, skew outside the configured clock offset bounds can result in violations of single-key linearizability between causally dependent transactions. It's therefore important to prevent clocks from drifting too far by running [NTP](http://www.ntp.org/) or other clock synchronization software on each node.
+
+In very rare cases, CockroachDB can momentarily run with a stale clock. This can happen when using vMotion, which can suspend a VM running CockroachDB, migrate it to different hardware, and resume it. This will cause CockroachDB to be out of sync for a short period before it jumps to the correct time. During this window, it would be possible for a client to read stale data and write data derived from stale reads. By enabling the `server.clock.forward_jump_check_enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}), you can be alerted when the CockroachDB clock jumps forward, indicating it had been running with a stale clock. To protect against this on vMotion, however, use the [`--clock-device`](cockroach-start.html#general) flag to specify a [PTP hardware clock](https://www.kernel.org/doc/html/latest/driver-api/ptp.html) for CockroachDB to use when querying the current time. When doing so, you should not enable `server.clock.forward_jump_check_enabled` because forward jumps will be expected and harmless. For more information on how `--clock-device` interacts with vMotion, see [this blog post](https://core.vmware.com/blog/cockroachdb-vmotion-support-vsphere-7-using-precise-timekeeping).
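+
+For example, if you are not using `--clock-device`, you can turn on the forward-jump check with the cluster setting named above; a minimal sketch:
+
+~~~ sql
+-- Detect unexpected forward clock jumps (see the paragraph above).
+-- Do not enable this when using --clock-device, where forward jumps are expected and harmless.
+SET CLUSTER SETTING server.clock.forward_jump_check_enabled = true;
+~~~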
+
+{{site.data.alerts.callout_danger}}
+In CockroachDB versions prior to v22.2.13, and in v23.1 versions prior to v23.1.9, the [`--clock-device`](cockroach-start.html#general) flag had a bug that could cause it to generate timestamps in the far future. This could cause nodes to crash due to incorrect timestamps, or in the worst case irreversibly advance the cluster's HLC clock into the far future. This bug is fixed in CockroachDB v23.2.
+{{site.data.alerts.end}}
+
+### Considerations
+
+When setting up clock synchronization:
+
+- All nodes in the cluster must be synced to the same time source, or to different sources that implement leap second smearing in the same way. For example, Google and Amazon have time sources that are compatible with each other (they implement [leap second smearing](https://developers.google.com/time/smear) in the same way), but are incompatible with the default NTP pool (which does not implement leap second smearing).
+- For nodes running in AWS, we recommend [Amazon Time Sync Service](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html#configure-amazon-time-service). For nodes running in GCP, we recommend [Google's internal NTP service](https://cloud.google.com/compute/docs/instances/configure-ntp#configure_ntp_for_your_instances). For nodes running elsewhere, we recommend [Google Public NTP](https://developers.google.com/time/). Note that the Google and Amazon time services can be mixed with each other, but they cannot be mixed with other time services (unless you have verified leap second behavior). Either all of your nodes should use the Google and Amazon services, or none of them should.
+- If you do not want to use the Google or Amazon time sources, you can use [`chrony`](https://chrony.tuxfamily.org/index.html) and enable client-side leap smearing, unless the time source you're using already does server-side smearing. In most cases, we recommend the Google Public NTP time source because it handles smearing the leap second. If you use a different NTP time source that doesn't smear the leap second, you must configure client-side smearing manually and do so in the same way on each machine.
+- Do not run more than one clock sync service on VMs where `cockroach` is running.
+- {% include {{ page.version.version }}/misc/multiregion-max-offset.md %}
+
+### Tutorials
+
+For guidance on synchronizing clocks, see the tutorial for your deployment environment:
+
+Environment | Featured Approach
+------------|---------------------
+[On-Premises]({% link {{ page.version.version }}/deploy-cockroachdb-on-premises.md %}#step-1-synchronize-clocks) | Use NTP with Google's external NTP service.
+[AWS]({% link {{ page.version.version }}/deploy-cockroachdb-on-aws.md %}#step-3-synchronize-clocks) | Use the Amazon Time Sync Service.
+[Azure]({% link {{ page.version.version }}/deploy-cockroachdb-on-microsoft-azure.md %}#step-3-synchronize-clocks) | Disable Hyper-V time synchronization and use NTP with Google's external NTP service.
+[Digital Ocean]({% link {{ page.version.version }}/deploy-cockroachdb-on-digital-ocean.md %}#step-2-synchronize-clocks) | Use NTP with Google's external NTP service.
+[GCE]({% link {{ page.version.version }}/deploy-cockroachdb-on-google-cloud-platform.md %}#step-3-synchronize-clocks) | Use NTP with Google's internal NTP service.
diff --git a/src/current/_includes/v25.1/faq/clock-synchronization-monitoring.md b/src/current/_includes/v25.1/faq/clock-synchronization-monitoring.md
new file mode 100644
index 00000000000..c3022ad1a32
--- /dev/null
+++ b/src/current/_includes/v25.1/faq/clock-synchronization-monitoring.md
@@ -0,0 +1,8 @@
+As explained in more detail [in our monitoring documentation]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#prometheus-endpoint), each CockroachDB node exports a wide variety of metrics at `http://<host>:<http-port>/_status/vars` in the format used by the popular Prometheus timeseries database. Two of these metrics export how close each node's clock is to the clock of all other nodes:
+
+Metric | Definition
+-------|-----------
+`clock_offset_meannanos` | The mean difference between the node's clock and other nodes' clocks in nanoseconds
+`clock_offset_stddevnanos` | The standard deviation of the difference between the node's clock and other nodes' clocks in nanoseconds
+
+As described in [the above answer](#what-happens-when-node-clocks-are-not-properly-synchronized), a node will shut down if the mean offset of its clock from the other nodes' clocks exceeds 80% of the maximum offset allowed. It's recommended to monitor the `clock_offset_meannanos` metric and alert if it's approaching the 80% threshold of your cluster's configured max offset.
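+
+You can also inspect these values from SQL via the `crdb_internal.node_metrics` virtual table. This is a rough sketch; note that the internal metric names may use the `clock-offset.*` form rather than the underscore form exported to Prometheus:
+
+~~~ sql
+-- Show the current clock-offset metrics for the node you are connected to.
+SELECT name, value
+FROM crdb_internal.node_metrics
+WHERE name LIKE '%clock-offset%';
+~~~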
diff --git a/src/current/_includes/v25.1/faq/differences-between-numberings.md b/src/current/_includes/v25.1/faq/differences-between-numberings.md
new file mode 100644
index 00000000000..80f7fe26d50
--- /dev/null
+++ b/src/current/_includes/v25.1/faq/differences-between-numberings.md
@@ -0,0 +1,11 @@
+
+| Property | UUID generated with `uuid_v4()` | INT generated with `unique_rowid()` | Sequences |
+|--------------------------------------|-----------------------------------------|-----------------------------------------------|--------------------------------|
+| Size | 16 bytes | 8 bytes | 1 to 8 bytes |
+| Ordering properties | Unordered | Highly time-ordered | Highly time-ordered |
+| Performance cost at generation | Small, scalable | Small, scalable | Variable, can cause [contention]({{ link_prefix }}performance-best-practices-overview.html#understanding-and-avoiding-transaction-contention) |
+| Value distribution | Uniformly distributed (128 bits) | Contains time and space (node ID) components | Dense, small values |
+| Data locality | Maximally distributed | Values generated close in time are co-located | Highly local |
+| `INSERT` latency when used as key | Small, insensitive to concurrency | Small, but increases with concurrent INSERTs | Higher |
+| `INSERT` throughput when used as key | Highest | Limited by max throughput on 1 node | Limited by max throughput on 1 node |
+| Read throughput when used as key | Highest (maximal parallelism) | Limited | Limited |
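+
+To make the comparison above concrete, here is a minimal sketch of the three approaches as column defaults (hypothetical table names; `gen_random_uuid()` is used here for the `UUID` column):
+
+~~~ sql
+-- UUID: uniformly distributed 128-bit values, maximally spread across ranges.
+CREATE TABLE t_uuid (id UUID PRIMARY KEY DEFAULT gen_random_uuid(), payload STRING);
+
+-- INT from unique_rowid(): 64-bit, highly time-ordered values.
+CREATE TABLE t_rowid (id INT PRIMARY KEY DEFAULT unique_rowid(), payload STRING);
+
+-- Sequence: dense, small values; generation can become a contention point.
+CREATE SEQUENCE t_seq_id;
+CREATE TABLE t_seq (id INT PRIMARY KEY DEFAULT nextval('t_seq_id'), payload STRING);
+~~~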
diff --git a/src/current/_includes/v25.1/faq/sequential-numbers.md b/src/current/_includes/v25.1/faq/sequential-numbers.md
new file mode 100644
index 00000000000..aca5750d0a7
--- /dev/null
+++ b/src/current/_includes/v25.1/faq/sequential-numbers.md
@@ -0,0 +1,8 @@
+Sequential numbers can be generated in CockroachDB using the `unique_rowid()` built-in function or using [SQL sequences]({% link {{ page.version.version }}/create-sequence.md %}). However, note the following considerations:
+
+- Unless you need roughly-ordered numbers, use [`UUID`]({% link {{ page.version.version }}/uuid.md %}) values instead. See the [previous
+FAQ](#how-do-i-auto-generate-unique-row-ids-in-cockroachdb) for details.
+- [Sequences]({% link {{ page.version.version }}/create-sequence.md %}) produce **unique** values. However, not all values are guaranteed to be produced (e.g., when a transaction is canceled after it consumes a value) and the values may be slightly reordered (e.g., when a transaction that
+consumes a lower sequence number commits after a transaction that consumes a higher number).
+- For maximum performance, avoid using sequences or `unique_rowid()` to generate row IDs or indexed columns. Values generated in these ways are logically close to each other and can cause [contention]({{ link_prefix }}performance-best-practices-overview.html#understanding-and-avoiding-transaction-contention) on a few data ranges during inserts. Instead, prefer [`UUID`]({% link {{ page.version.version }}/uuid.md %}) identifiers. If a sequential key is unavoidable, see the sketch after this list.
+- {% include {{page.version.version}}/performance/use-hash-sharded-indexes.md %}
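+
+If a sequential key genuinely cannot be avoided, a hash-sharded primary key spreads the write load across ranges. A minimal sketch (hypothetical table name):
+
+~~~ sql
+-- The hash-sharded primary key distributes sequential unique_rowid() values
+-- across buckets, avoiding a single hot range during inserts.
+CREATE TABLE events (
+  id INT NOT NULL DEFAULT unique_rowid(),
+  payload STRING,
+  PRIMARY KEY (id) USING HASH
+);
+~~~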
diff --git a/src/current/_includes/v25.1/faq/sequential-transactions.md b/src/current/_includes/v25.1/faq/sequential-transactions.md
new file mode 100644
index 00000000000..21e4a8c212b
--- /dev/null
+++ b/src/current/_includes/v25.1/faq/sequential-transactions.md
@@ -0,0 +1,19 @@
+Most use cases that ask for a strong time-based write ordering can be solved with other, more distribution-friendly
+solutions instead. For example, CockroachDB's [time travel queries (`AS OF SYSTEM
+TIME`)](https://www.cockroachlabs.com/blog/time-travel-queries-select-witty_subtitle-the_future/) support the following:
+
+- Paginating through all the changes to a table or dataset
+- Determining the order of changes to data over time
+- Determining the state of data at some point in the past
+- Determining the changes to data between two points of time
+
+Note also that the values generated by `unique_rowid()`, described in the previous FAQ entries, provide an approximate time ordering.
+
+However, if your application absolutely requires strong time-based write ordering, it is possible to create a strictly monotonic counter in CockroachDB that increases over time as follows:
+
+- Initially: `CREATE TABLE cnt(val INT PRIMARY KEY); INSERT INTO cnt(val) VALUES(1);`
+- In each transaction: `INSERT INTO cnt(val) SELECT max(val)+1 FROM cnt RETURNING val;`
+
+This will cause [`INSERT`]({% link {{ page.version.version }}/insert.md %}) transactions to conflict with each other and effectively force the transactions to commit one at a time throughout the cluster, which in turn guarantees the values generated in this way are strictly increasing over time without gaps. The caveat is that performance is severely limited as a result.
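+
+For illustration, a minimal sketch of how an application transaction might claim the next value from this counter (table and column names from the steps above):
+
+~~~ sql
+-- Each transaction conflicts with concurrent writers on the single cnt row,
+-- so commits are forced into a strict, gap-free order at the cost of throughput.
+BEGIN;
+INSERT INTO cnt (val) SELECT max(val) + 1 FROM cnt RETURNING val;
+-- ... use the returned value as the strictly increasing ordering key ...
+COMMIT;
+~~~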
+
+If you find yourself interested in this problem, please [contact us]({% link {{ page.version.version }}/support-resources.md %}) and describe your situation. We would be glad to help you find alternative solutions and possibly extend CockroachDB to better match your needs.
diff --git a/src/current/_includes/v25.1/faq/simulate-key-value-store.md b/src/current/_includes/v25.1/faq/simulate-key-value-store.md
new file mode 100644
index 00000000000..13beebeb957
--- /dev/null
+++ b/src/current/_includes/v25.1/faq/simulate-key-value-store.md
@@ -0,0 +1,13 @@
+CockroachDB is a distributed SQL database built on a transactional and strongly-consistent key-value store. Although it is not possible to access the key-value store directly, you can mirror direct access using a "simple" table of two columns, with one set as the primary key:
+
+~~~ sql
+> CREATE TABLE kv (k INT PRIMARY KEY, v BYTES);
+~~~
+
+When such a "simple" table has no indexes or foreign keys, [`INSERT`]({% link {{ page.version.version }}/insert.md %})/[`UPSERT`]({% link {{ page.version.version }}/upsert.md %})/[`UPDATE`]({% link {{ page.version.version }}/update.md %})/[`DELETE`](delete.html) statements translate to key-value operations with minimal overhead (single digit percent slowdowns). For example, the following `UPSERT` to add or replace a row in the table would translate into a single key-value Put operation:
+
+~~~ sql
+> UPSERT INTO kv VALUES (1, b'hello');
+~~~
+
+This SQL table approach also offers you a well-defined query language, a known transaction model, and the flexibility to add more columns to the table if the need arises.
diff --git a/src/current/_includes/v25.1/faq/what-is-crdb.md b/src/current/_includes/v25.1/faq/what-is-crdb.md
new file mode 100644
index 00000000000..28857ed61fa
--- /dev/null
+++ b/src/current/_includes/v25.1/faq/what-is-crdb.md
@@ -0,0 +1,7 @@
+CockroachDB is a [distributed SQL](https://www.cockroachlabs.com/blog/what-is-distributed-sql/) database built on a transactional and strongly-consistent key-value store. It **scales** horizontally; **survives** disk, machine, rack, and even datacenter failures with minimal latency disruption and no manual intervention; supports **strongly-consistent** ACID transactions; and provides a familiar **SQL** API for structuring, manipulating, and querying data.
+
+CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the [source code](https://github.com/cockroachdb/cockroach) is freely available.
+
+{{site.data.alerts.callout_success}}
+For a deeper dive into CockroachDB's capabilities and how it fits into the database landscape, take the free [**Intro to Distributed SQL and CockroachDB**](https://university.cockroachlabs.com/courses/course-v1:crl+intro-to-distributed-sql-and-cockroachdb+self-paced/about) course on Cockroach University.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/filter-tabs/crdb-kubernetes.md b/src/current/_includes/v25.1/filter-tabs/crdb-kubernetes.md
new file mode 100644
index 00000000000..db7f18ff324
--- /dev/null
+++ b/src/current/_includes/v25.1/filter-tabs/crdb-kubernetes.md
@@ -0,0 +1,4 @@
+{% assign tab_names_html = "Secure;Insecure" %}
+{% assign html_page_filenames = "orchestrate-a-local-cluster-with-kubernetes.html;orchestrate-a-local-cluster-with-kubernetes-insecure.html" %}
+
+{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %}
diff --git a/src/current/_includes/v25.1/filter-tabs/crdb-single-kubernetes.md b/src/current/_includes/v25.1/filter-tabs/crdb-single-kubernetes.md
new file mode 100644
index 00000000000..409bdc1855c
--- /dev/null
+++ b/src/current/_includes/v25.1/filter-tabs/crdb-single-kubernetes.md
@@ -0,0 +1,4 @@
+{% assign tab_names_html = "Secure;Insecure" %}
+{% assign html_page_filenames = "deploy-cockroachdb-with-kubernetes.html;deploy-cockroachdb-with-kubernetes-insecure.html" %}
+
+{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %}
diff --git a/src/current/_includes/v25.1/filter-tabs/crud-go.md b/src/current/_includes/v25.1/filter-tabs/crud-go.md
new file mode 100644
index 00000000000..a69d0e4435c
--- /dev/null
+++ b/src/current/_includes/v25.1/filter-tabs/crud-go.md
@@ -0,0 +1,4 @@
+{% assign tab_names_html = "Use pgx;Use GORM;Use lib/pq;Use upper/db" %}
+{% assign html_page_filenames = "build-a-go-app-with-cockroachdb.html;build-a-go-app-with-cockroachdb-gorm.html;build-a-go-app-with-cockroachdb-pq.html;build-a-go-app-with-cockroachdb-upperdb.html" %}
+
+{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %}
diff --git a/src/current/_includes/v25.1/filter-tabs/crud-java.md b/src/current/_includes/v25.1/filter-tabs/crud-java.md
new file mode 100644
index 00000000000..5cbdf749e09
--- /dev/null
+++ b/src/current/_includes/v25.1/filter-tabs/crud-java.md
@@ -0,0 +1,4 @@
+{% assign tab_names_html = "Use JDBC;Use Hibernate;Use jOOQ;Use MyBatis-Spring" %}
+{% assign html_page_filenames = "build-a-java-app-with-cockroachdb.html;build-a-java-app-with-cockroachdb-hibernate.html;build-a-java-app-with-cockroachdb-jooq.html;build-a-spring-app-with-cockroachdb-mybatis.html" %}
+
+{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %}
diff --git a/src/current/_includes/v25.1/filter-tabs/crud-js.md b/src/current/_includes/v25.1/filter-tabs/crud-js.md
new file mode 100644
index 00000000000..bb319ed88c1
--- /dev/null
+++ b/src/current/_includes/v25.1/filter-tabs/crud-js.md
@@ -0,0 +1,4 @@
+{% assign tab_names_html = "Use node-postgres;Use Sequelize;Use Knex.js;Use Prisma;Use TypeORM" %}
+{% assign html_page_filenames = "build-a-nodejs-app-with-cockroachdb.html;build-a-nodejs-app-with-cockroachdb-sequelize.html;build-a-nodejs-app-with-cockroachdb-knexjs.html;build-a-nodejs-app-with-cockroachdb-prisma.html;build-a-typescript-app-with-cockroachdb.html" %}
+
+{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %}
diff --git a/src/current/_includes/v25.1/filter-tabs/crud-python.md b/src/current/_includes/v25.1/filter-tabs/crud-python.md
new file mode 100644
index 00000000000..e721cc92405
--- /dev/null
+++ b/src/current/_includes/v25.1/filter-tabs/crud-python.md
@@ -0,0 +1,4 @@
+{% assign tab_names_html = "Use psycopg3;Use psycopg2;Use SQLAlchemy;Use Django;Use asyncpg" %}
+{% assign html_page_filenames = "build-a-python-app-with-cockroachdb-psycopg3.html;build-a-python-app-with-cockroachdb.html;build-a-python-app-with-cockroachdb-sqlalchemy.html;build-a-python-app-with-cockroachdb-django.html;build-a-python-app-with-cockroachdb-asyncpg.html" %}
+
+{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %}
diff --git a/src/current/_includes/v25.1/filter-tabs/crud-ruby.md b/src/current/_includes/v25.1/filter-tabs/crud-ruby.md
new file mode 100644
index 00000000000..5fc13aa697b
--- /dev/null
+++ b/src/current/_includes/v25.1/filter-tabs/crud-ruby.md
@@ -0,0 +1,4 @@
+{% assign tab_names_html = "Use pg;Use ActiveRecord" %}
+{% assign html_page_filenames = "build-a-ruby-app-with-cockroachdb.html;build-a-ruby-app-with-cockroachdb-activerecord.html" %}
+
+{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %}
diff --git a/src/current/_includes/v25.1/filter-tabs/crud-spring.md b/src/current/_includes/v25.1/filter-tabs/crud-spring.md
new file mode 100644
index 00000000000..bd4f66f19a7
--- /dev/null
+++ b/src/current/_includes/v25.1/filter-tabs/crud-spring.md
@@ -0,0 +1,4 @@
+{% assign tab_names_html = "Use JDBC;Use JPA" %}
+{% assign html_page_filenames = "build-a-spring-app-with-cockroachdb-jdbc.html;build-a-spring-app-with-cockroachdb-jpa.html" %}
+
+{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %}
diff --git a/src/current/_includes/v25.1/filter-tabs/deploy-crdb-aws.md b/src/current/_includes/v25.1/filter-tabs/deploy-crdb-aws.md
new file mode 100644
index 00000000000..706e5d85b8f
--- /dev/null
+++ b/src/current/_includes/v25.1/filter-tabs/deploy-crdb-aws.md
@@ -0,0 +1,4 @@
+{% assign tab_names_html = "Secure;Insecure" %}
+{% assign html_page_filenames = "deploy-cockroachdb-on-aws.html;deploy-cockroachdb-on-aws-insecure.html" %}
+
+{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %}
diff --git a/src/current/_includes/v25.1/filter-tabs/deploy-crdb-do.md b/src/current/_includes/v25.1/filter-tabs/deploy-crdb-do.md
new file mode 100644
index 00000000000..02e44afee30
--- /dev/null
+++ b/src/current/_includes/v25.1/filter-tabs/deploy-crdb-do.md
@@ -0,0 +1,4 @@
+{% assign tab_names_html = "Secure;Insecure" %}
+{% assign html_page_filenames = "deploy-cockroachdb-on-digital-ocean.html;deploy-cockroachdb-on-digital-ocean-insecure.html" %}
+
+{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %}
diff --git a/src/current/_includes/v25.1/filter-tabs/deploy-crdb-gce.md b/src/current/_includes/v25.1/filter-tabs/deploy-crdb-gce.md
new file mode 100644
index 00000000000..5799dfec9f0
--- /dev/null
+++ b/src/current/_includes/v25.1/filter-tabs/deploy-crdb-gce.md
@@ -0,0 +1,4 @@
+{% assign tab_names_html = "Secure;Insecure" %}
+{% assign html_page_filenames = "deploy-cockroachdb-on-google-cloud-platform.html;deploy-cockroachdb-on-google-cloud-platform-insecure.html" %}
+
+{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %}
diff --git a/src/current/_includes/v25.1/filter-tabs/deploy-crdb-ma.md b/src/current/_includes/v25.1/filter-tabs/deploy-crdb-ma.md
new file mode 100644
index 00000000000..3f1162b426c
--- /dev/null
+++ b/src/current/_includes/v25.1/filter-tabs/deploy-crdb-ma.md
@@ -0,0 +1,4 @@
+{% assign tab_names_html = "Secure;Insecure" %}
+{% assign html_page_filenames = "deploy-cockroachdb-on-microsoft-azure.html;deploy-cockroachdb-on-microsoft-azure-insecure.html" %}
+
+{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %}
diff --git a/src/current/_includes/v25.1/filter-tabs/deploy-crdb-op.md b/src/current/_includes/v25.1/filter-tabs/deploy-crdb-op.md
new file mode 100644
index 00000000000..fdf35c61162
--- /dev/null
+++ b/src/current/_includes/v25.1/filter-tabs/deploy-crdb-op.md
@@ -0,0 +1,4 @@
+{% assign tab_names_html = "Secure;Insecure" %}
+{% assign html_page_filenames = "deploy-cockroachdb-on-premises.html;deploy-cockroachdb-on-premises-insecure.html" %}
+
+{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %}
diff --git a/src/current/_includes/v25.1/filter-tabs/perf-bench-tpc-c.md b/src/current/_includes/v25.1/filter-tabs/perf-bench-tpc-c.md
new file mode 100644
index 00000000000..1394f916add
--- /dev/null
+++ b/src/current/_includes/v25.1/filter-tabs/perf-bench-tpc-c.md
@@ -0,0 +1,4 @@
+{% assign tab_names_html = "Local;Local (Multi-Region);Small;Medium;Large" %}
+{% assign html_page_filenames = "performance-benchmarking-with-tpcc-local.html;performance-benchmarking-with-tpcc-local-multiregion.html;performance-benchmarking-with-tpcc-small.html;performance-benchmarking-with-tpcc-medium.html;performance-benchmarking-with-tpcc-large.html" %}
+
+{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %}
diff --git a/src/current/_includes/v25.1/filter-tabs/security-cert.md b/src/current/_includes/v25.1/filter-tabs/security-cert.md
new file mode 100644
index 00000000000..0832e618021
--- /dev/null
+++ b/src/current/_includes/v25.1/filter-tabs/security-cert.md
@@ -0,0 +1,4 @@
+{% assign tab_names_html = "Use cockroach cert;Use OpenSSL;Use custom CA" %}
+{% assign html_page_filenames = "cockroach-cert.html;create-security-certificates-openssl.html;create-security-certificates-custom-ca.html" %}
+
+{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %}
diff --git a/src/current/_includes/v25.1/filter-tabs/start-a-cluster.md b/src/current/_includes/v25.1/filter-tabs/start-a-cluster.md
new file mode 100644
index 00000000000..92a688078cb
--- /dev/null
+++ b/src/current/_includes/v25.1/filter-tabs/start-a-cluster.md
@@ -0,0 +1,4 @@
+{% assign tab_names_html = "Secure;Insecure" %}
+{% assign html_page_filenames = "secure-a-cluster.html;start-a-local-cluster.html" %}
+
+{% include filter-tabs.md tab_names=tab_names_html page_filenames=html_page_filenames page_folder=page.version.version %}
diff --git a/src/current/_includes/v25.1/finalization-required/119894.md b/src/current/_includes/v25.1/finalization-required/119894.md
new file mode 100644
index 00000000000..f2b393c3c0e
--- /dev/null
+++ b/src/current/_includes/v25.1/finalization-required/119894.md
@@ -0,0 +1 @@
+[Splits](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#range-splits) no longer hold [latches](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#latch-manager) for time proportional to the range size while computing [MVCC](https://cockroachlabs.com/docs/{{ include.version }}/architecture/storage-layer#mvcc) statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. As a side effect, the resulting statistics are no longer 100% accurate because they may not correctly distribute writes concurrent with the split. To mitigate this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. [#119894](https://github.com/cockroachdb/cockroach/pull/119894)
diff --git a/src/current/_includes/v25.1/import-export-auth.md b/src/current/_includes/v25.1/import-export-auth.md
new file mode 100644
index 00000000000..fc3f2938cb4
--- /dev/null
+++ b/src/current/_includes/v25.1/import-export-auth.md
@@ -0,0 +1,9 @@
+The following examples make use of:
+
+- Amazon S3 connection strings. For guidance on connecting to other storage options or using other authentication parameters instead, read [Use Cloud Storage]({% link {{ page.version.version }}/use-cloud-storage.md %}#example-file-urls).
+- The **default** `AUTH=specified` parameter. For guidance on using `AUTH=implicit` authentication with Amazon S3 buckets instead, read [Cloud Storage Authentication]({% link {{ page.version.version }}/cloud-storage-authentication.md %}).
+
+Also, note the following features for connecting and authenticating to cloud storage:
+
+- External connections, which allow you to represent an external storage or sink URI. You can then specify the external connection's name in statements rather than the provider-specific URI. For detail on using external connections, see the [`CREATE EXTERNAL CONNECTION`]({% link {{ page.version.version }}/create-external-connection.md %}) page and the sketch after this list.
+- Assume role authentication, which allows you to limit the control specific users have over your storage buckets. See [Assume role authentication]({% link {{ page.version.version }}/cloud-storage-authentication.md %}) for more information.
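+
+For example, a minimal sketch of creating and then using an external connection (the connection name, bucket, and credential placeholders are illustrative):
+
+~~~ sql
+CREATE EXTERNAL CONNECTION backup_bucket
+  AS 's3://{BUCKET NAME}?AWS_ACCESS_KEY_ID={KEY ID}&AWS_SECRET_ACCESS_KEY={SECRET KEY}';
+
+BACKUP INTO 'external://backup_bucket';
+~~~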
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/install-docker-steps.md b/src/current/_includes/v25.1/install-docker-steps.md
new file mode 100644
index 00000000000..09126119a2c
--- /dev/null
+++ b/src/current/_includes/v25.1/install-docker-steps.md
@@ -0,0 +1,57 @@
+{% comment %}This include is used in install-cockroachdb-*.md{% endcomment %}
+{% capture deployment_link %}
+{% if page.name contains "mac" %}[Deploy a local container in Docker]({% link {{ page.version.version }}/start-a-local-cluster-in-docker-mac.md %})
+{% elsif page.name contains "windows" %}[Deploy a local container in Docker]({% link {{ page.version.version }}/start-a-local-cluster-in-docker-windows.md %})
+{% else %}[Deploy a local container in Docker]({% link {{ page.version.version }}/start-a-local-cluster-in-docker-linux.md %})
+{% endif %}
+{% endcapture %}
+
+{{site.data.alerts.callout_danger}}
+Running a stateful application like CockroachDB in Docker is more complex and error-prone than most uses of Docker. Unless you are very experienced with Docker, we recommend starting with a different installation and deployment method.
+{{site.data.alerts.end}}
+
+CockroachDB's Docker images are [multi-platform images](https://docs.docker.com/build/building/multi-platform/) that contain binaries for both Intel and ARM. Multi-platform images do not take up additional space on your Docker host.
+
+Experimental images are not qualified for production use and not eligible for support or uptime SLA commitments.
+
+1. Install a container runtime, such as [Docker Desktop](https://docs.docker.com/desktop/).
+1. Verify that the runtime service is installed correctly and running in the background. Refer to the runtime's documentation. For Docker, start a terminal and run `docker version`. If you get an error, verify your installation and try again.
+1. Visit [Docker Hub](https://hub.docker.com/r/cockroachdb/cockroach) and decide which image tag to pull. Releases are rolled out gradually. Docker images for a new release are published when other binary artifacts are published. The following tag formats are commonly used, although other tags are available.
+
+    | Tag | Example | Description |
+    |-----|---------|-------------|
+    | An exact patch | `{{ page.version.name }}` | Pins a cluster to an exact patch. The cluster is upgraded to a newer patch or major version only when you pull a newer tag. |
+    | Latest patch within a major version | `latest-{{ page.version.version }}` | Automatically updates a cluster to the latest patch of the version you specify. This tag is recommended in production, because it keeps your cluster updated within a major version but does not automatically upgrade your cluster to a new major version. |
+    | Latest patch within the latest major version | `latest` | This is the default if you do not specify a tag. It updates your cluster automatically to each new patch and major version, and is not recommended in production. |
+
+ Copy the tag you want to pull.
+
+1. Pull the image. Replace `{TAG}` with the tag from the previous step.
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ docker pull cockroachdb/cockroach:{TAG}
+ ~~~
+
+1. Start a cluster by starting a container on each node using `docker run`. The default command is `cockroach start`. Pass your desired flags as the final arguments. For details, refer to {{ deployment_link | strip }}.
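+
+For illustration only, a single-node insecure container for local testing might be started as follows; the container name and port mappings here are assumptions, and the linked guide above has the full, supported procedure. Replace `{TAG}` as in the previous steps:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+docker run -d --name=roach-single -p 26257:26257 -p 8080:8080 cockroachdb/cockroach:{TAG} start-single-node --insecure
+~~~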
diff --git a/src/current/_includes/v25.1/json/json-sample.go b/src/current/_includes/v25.1/json/json-sample.go
new file mode 100644
index 00000000000..d5953a71ee2
--- /dev/null
+++ b/src/current/_includes/v25.1/json/json-sample.go
@@ -0,0 +1,79 @@
+package main
+
+import (
+ "database/sql"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "time"
+
+ _ "github.com/lib/pq"
+)
+
+func main() {
+ db, err := sql.Open("postgres", "user=maxroach dbname=jsonb_test sslmode=disable port=26257")
+ if err != nil {
+ panic(err)
+ }
+
+ // The Reddit API wants us to tell it where to start from. The first request
+ // we just say "null" to say "from the start", subsequent requests will use
+ // the value received from the last call.
+ after := "null"
+
+ for i := 0; i < 41; i++ {
+ after, err = makeReq(db, after)
+ if err != nil {
+ panic(err)
+ }
+ // Reddit limits to 30 requests per minute, so do not do any more than that.
+ time.Sleep(2 * time.Second)
+ }
+}
+
+func makeReq(db *sql.DB, after string) (string, error) {
+ // First, make a request to reddit using the appropriate "after" string.
+ client := &http.Client{}
+	req, err := http.NewRequest("GET", fmt.Sprintf("https://www.reddit.com/r/programming.json?after=%s", after), nil)
+	if err != nil {
+		return "", err
+	}
+
+	req.Header.Add("User-Agent", `Go`)
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return "", err
+	}
+	// Close the response body once we are done reading it.
+	defer resp.Body.Close()
+
+ res, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return "", err
+ }
+
+ // We've gotten back our JSON from reddit, we can use a couple SQL tricks to
+ // accomplish multiple things at once.
+ // The JSON reddit returns looks like this:
+ // {
+ // "data": {
+ // "children": [ ... ]
+ // },
+ // "after": ...
+ // }
+ // We structure our query so that we extract the `children` field, and then
+ // expand that and insert each individual element into the database as a
+ // separate row. We then return the "after" field so we know how to make the
+ // next request.
+ r, err := db.Query(`
+ INSERT INTO jsonb_test.programming (posts)
+ SELECT json_array_elements($1->'data'->'children')
+ RETURNING $1->'data'->'after'`,
+ string(res))
+ if err != nil {
+ return "", err
+ }
+
+	// Since we did a RETURNING, we need to grab the result of our query.
+	defer r.Close()
+	var newAfter string
+	if r.Next() {
+		if err := r.Scan(&newAfter); err != nil {
+			return "", err
+		}
+	}
+
+ return newAfter, nil
+}
diff --git a/src/current/_includes/v25.1/json/json-sample.py b/src/current/_includes/v25.1/json/json-sample.py
new file mode 100644
index 00000000000..49e302613e0
--- /dev/null
+++ b/src/current/_includes/v25.1/json/json-sample.py
@@ -0,0 +1,44 @@
+import json
+import psycopg2
+import requests
+import time
+
+conn = psycopg2.connect(database="jsonb_test", user="maxroach", host="localhost", port=26257)
+conn.set_session(autocommit=True)
+cur = conn.cursor()
+
+# The Reddit API wants us to tell it where to start from. The first request
+# we just say "null" to say "from the start"; subsequent requests will use
+# the value received from the last call.
+url = "https://www.reddit.com/r/programming.json"
+after = {"after": "null"}
+
+for n in range(41):
+ # First, make a request to reddit using the appropriate "after" string.
+ req = requests.get(url, params=after, headers={"User-Agent": "Python"})
+
+ # Decode the JSON and set "after" for the next request.
+ resp = req.json()
+ after = {"after": str(resp['data']['after'])}
+
+ # Convert the JSON to a string to send to the database.
+ data = json.dumps(resp)
+
+ # The JSON reddit returns looks like this:
+ # {
+ # "data": {
+ # "children": [ ... ]
+ # },
+ # "after": ...
+ # }
+ # We structure our query so that we extract the `children` field, and then
+ # expand that and insert each individual element into the database as a
+ # separate row.
+ cur.execute("""INSERT INTO jsonb_test.programming (posts)
+ SELECT json_array_elements(%s->'data'->'children')""", (data,))
+
+ # Reddit limits to 30 requests per minute, so do not do any more than that.
+ time.sleep(2)
+
+cur.close()
+conn.close()
diff --git a/src/current/_includes/v25.1/known-limitations/admission-control-limitations.md b/src/current/_includes/v25.1/known-limitations/admission-control-limitations.md
new file mode 100644
index 00000000000..26bff1a2dd7
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/admission-control-limitations.md
@@ -0,0 +1,3 @@
+Admission control works on the level of each node, not at the cluster level. The admission control system queues requests until the operations are processed or the request exceeds the timeout value (for example by using [`SET statement_timeout`]({% link {{ page.version.version }}/set-vars.md %}#supported-variables)). If you specify aggressive timeout values, the system may operate correctly but have low throughput as the operations exceed the timeout value while only completing part of the work. There is no mechanism for preemptively rejecting requests when the work queues are long.
+
+Organizing operations by priority can mean that higher priority operations consume all the available resources while lower priority operations remain in the queue until the operation times out.
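+
+For example, a statement timeout like the one mentioned above can be set per session; the `10s` value here is arbitrary:
+
+~~~ sql
+SET statement_timeout = '10s';
+~~~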
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/alter-changefeed-cdc-queries.md b/src/current/_includes/v25.1/known-limitations/alter-changefeed-cdc-queries.md
new file mode 100644
index 00000000000..56dd7eeaacd
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/alter-changefeed-cdc-queries.md
@@ -0,0 +1 @@
+{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. [#83033](https://github.com/cockroachdb/cockroach/issues/83033)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/alter-changefeed-limitations.md b/src/current/_includes/v25.1/known-limitations/alter-changefeed-limitations.md
new file mode 100644
index 00000000000..a183f2964f4
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/alter-changefeed-limitations.md
@@ -0,0 +1,8 @@
+- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) statement, as shown in the example after this list. [#77171](https://github.com/cockroachdb/cockroach/issues/77171)
+- CockroachDB does not keep track of the [`initial_scan`]({% link {{ page.version.version }}/create-changefeed.md %}#initial-scan) option applied to tables when it is set to `yes` or `only`. For example:
+
+ ~~~ sql
+ ALTER CHANGEFEED {job_ID} ADD table WITH initial_scan = 'yes';
+ ~~~
+
+ This will trigger an initial scan of the table and the changefeed will track `table`. The changefeed will **not** track `initial_scan` specified as an option, so it will not display in the output or after a `SHOW CHANGEFEED JOB` statement.
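+
+For example, a typical sequence pauses the changefeed, alters it, and then resumes it (using the same `{job_ID}` placeholder as above; the `resolved` option is only an illustration):
+
+~~~ sql
+PAUSE JOB {job_ID};
+ALTER CHANGEFEED {job_ID} SET resolved = '10s';
+RESUME JOB {job_ID};
+~~~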
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/alter-column-limitations.md b/src/current/_includes/v25.1/known-limitations/alter-column-limitations.md
new file mode 100644
index 00000000000..fb889befe8d
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/alter-column-limitations.md
@@ -0,0 +1,12 @@
+You cannot alter the data type of a column if:
+
+- The column is part of an [index]({% link {{ page.version.version }}/indexes.md %}).
+- The column has [`CHECK` constraints]({% link {{ page.version.version }}/check.md %}).
+- The column owns a [sequence]({% link {{ page.version.version }}/create-sequence.md %}).
+- The column has a [`DEFAULT` expression]({% link {{ page.version.version }}/default-value.md %}). This will result in an `ERROR: ... column ... cannot also have a DEFAULT expression` with `SQLSTATE: 42P16`. A workaround is sketched after this list.
+- The `ALTER COLUMN TYPE` statement is part of a combined `ALTER TABLE` statement.
+- The `ALTER COLUMN TYPE` statement is inside an [explicit transaction]({% link {{ page.version.version }}/begin-transaction.md %}).
+
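+As a workaround for the `DEFAULT` restriction above, you can drop the default, change the type, and then restore the default. A minimal sketch, assuming a hypothetical table `t` with a `VARCHAR` column `c`:
+
+~~~ sql
+ALTER TABLE t ALTER COLUMN c DROP DEFAULT;
+ALTER TABLE t ALTER COLUMN c TYPE STRING;
+ALTER TABLE t ALTER COLUMN c SET DEFAULT 'unknown';
+~~~
+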
+{{site.data.alerts.callout_info}}
+Most `ALTER COLUMN TYPE` changes are finalized asynchronously. Schema changes on the table with the altered column may be restricted, and writes to the altered column may be rejected until the schema change is finalized.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/alter-type-limitations.md b/src/current/_includes/v25.1/known-limitations/alter-type-limitations.md
new file mode 100644
index 00000000000..fa25e47f962
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/alter-type-limitations.md
@@ -0,0 +1,2 @@
+- When running the [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}) statement, you can only reference a user-defined type from the database that contains the type.
+- You can only [cancel]({% link {{ page.version.version }}/cancel-job.md %}) `ALTER TYPE` [schema change jobs]({% link {{ page.version.version }}/online-schema-changes.md %}) that drop values. This is because when you drop a value, CockroachDB searches through every row that could contain the type's value, which could take a long time. All other `ALTER TYPE` schema change jobs are [non-cancellable]({% link {{ page.version.version }}/cancel-job.md %}#known-limitations).
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/alter-view-limitations.md b/src/current/_includes/v25.1/known-limitations/alter-view-limitations.md
new file mode 100644
index 00000000000..642bed6ce08
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/alter-view-limitations.md
@@ -0,0 +1,4 @@
+`ALTER VIEW` does not currently support:
+
+- Changing the [`SELECT`]({% link {{ page.version.version }}/select-clause.md %}) statement executed by a view. Instead, you must drop the existing view and create a new view.
+- Renaming a view that other views depend on. This feature may be added in the future. [#10083](https://github.com/cockroachdb/cockroach/issues/10083)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/aost-limitations.md b/src/current/_includes/v25.1/known-limitations/aost-limitations.md
new file mode 100644
index 00000000000..811c884d08d
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/aost-limitations.md
@@ -0,0 +1 @@
+CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. [#30955](https://github.com/cockroachdb/cockroach/issues/30955)
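+
+For example (hypothetical table `t`):
+
+~~~ sql
+-- Supported: the timestamp is a constant embedded in the statement text.
+SELECT * FROM t AS OF SYSTEM TIME '-10s';
+
+-- Not supported: binding the timestamp as a placeholder, e.g.,
+-- SELECT * FROM t AS OF SYSTEM TIME $1;
+~~~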
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/cancel-job-limitations.md b/src/current/_includes/v25.1/known-limitations/cancel-job-limitations.md
new file mode 100644
index 00000000000..23080976a2a
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/cancel-job-limitations.md
@@ -0,0 +1,8 @@
+- To avoid transaction states that cannot properly [roll back]({% link {{ page.version.version }}/rollback-transaction.md %}), the following statements cannot be cancelled with [`CANCEL JOB`]({% link {{ page.version.version }}/cancel-job.md %}):
+
+ - `DROP` statements (e.g., [`DROP TABLE`]({% link {{ page.version.version }}/drop-table.md %})).
+ - `ALTER ... RENAME` statements (e.g., [`ALTER TABLE ... RENAME TO`]({% link {{ page.version.version }}/alter-table.md %}#rename-to)).
+ - [`CREATE TABLE ... AS`]({% link {{ page.version.version }}/create-table-as.md %}) statements.
+ - [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}) statements, except for those that drop values.
+
+- When an Enterprise [`RESTORE`]({% link {{ page.version.version }}/restore.md %}) is canceled, partially restored data is properly cleaned up. This can have a minor, temporary impact on cluster performance.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/cannot-refresh-materialized-views-inside-transactions.md b/src/current/_includes/v25.1/known-limitations/cannot-refresh-materialized-views-inside-transactions.md
new file mode 100644
index 00000000000..b0aaf728177
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/cannot-refresh-materialized-views-inside-transactions.md
@@ -0,0 +1,27 @@
+- CockroachDB cannot refresh {% if page.name == "views.md" %} materialized views {% else %} [materialized views]({% link {{ page.version.version }}/views.md %}#materialized-views) {% endif %} inside [explicit transactions]({% link {{ page.version.version }}/begin-transaction.md %}). Trying to refresh a materialized view inside an explicit transaction will result in an error.
+ 1. Start [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) with the sample `bank` data set:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ cockroach demo bank
+ ~~~
+ 1. Create the materialized view described in [Usage]({% link {{ page.version.version }}/views.md %}#usage).
+ 1. Start a new multi-statement transaction with [`BEGIN TRANSACTION`]({% link {{ page.version.version }}/begin-transaction.md %}):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ BEGIN TRANSACTION;
+ ~~~
+ 1. Inside the open transaction, attempt to [refresh the view]({% link {{ page.version.version }}/refresh.md %}). This will result in an error.
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ REFRESH MATERIALIZED VIEW overdrawn_accounts;
+ ~~~
+
+ ~~~
+ ERROR: cannot refresh view in an explicit transaction
+ SQLSTATE: 25000
+ ~~~
+
+ [#66008](https://github.com/cockroachdb/cockroach/issues/66008)
diff --git a/src/current/_includes/v25.1/known-limitations/cdc-queries-column-families.md b/src/current/_includes/v25.1/known-limitations/cdc-queries-column-families.md
new file mode 100644
index 00000000000..505a8c9700e
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/cdc-queries-column-families.md
@@ -0,0 +1 @@
+Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. [#127761](https://github.com/cockroachdb/cockroach/issues/127761)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/cdc-queries.md b/src/current/_includes/v25.1/known-limitations/cdc-queries.md
new file mode 100644
index 00000000000..2839eba5eda
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/cdc-queries.md
@@ -0,0 +1,7 @@
+- You can only apply CDC queries on a single table in each statement.
+- Some [stable functions]({% link {{ page.version.version }}/functions-and-operators.md %}#built-in-functions), notably functions that return MVCC timestamps, are overridden to return the MVCC timestamp of the event, e.g., `transaction_timestamp` or `statement_timestamp`. Additionally, some [time-based functions]({% link {{ page.version.version }}/functions-and-operators.md %}#date-and-time-functions), such as `now()`, are not supported. We recommend using the `transaction_timestamp()` function or the {% if page.name == "cdc-queries.md" %} `crdb_internal_mvcc_timestamp` {% else %}[`crdb_internal_mvcc_timestamp`]({% link {{ page.version.version }}/cdc-queries.md %}#crdb-internal-mvcc-timestamp) {% endif %} column instead, as sketched in the example after this list.
+- The following are not permitted in CDC queries:
+ - [Volatile functions]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility).
+ - Sub-select queries.
+ - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). [#98237](https://github.com/cockroachdb/cockroach/issues/98237)
+- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. [#83835](https://github.com/cockroachdb/cockroach/issues/83835)
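+
+For example, a rough sketch of a CDC query that emits the event's MVCC timestamp, assuming a hypothetical table `t` with columns `id` and `status` and a sink named `external://my_sink`:
+
+~~~ sql
+CREATE CHANGEFEED INTO 'external://my_sink'
+  AS SELECT id, status, crdb_internal_mvcc_timestamp FROM t;
+~~~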
diff --git a/src/current/_includes/v25.1/known-limitations/cdc.md b/src/current/_includes/v25.1/known-limitations/cdc.md
new file mode 100644
index 00000000000..a473e94367c
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/cdc.md
@@ -0,0 +1,8 @@
+- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). [#73435](https://github.com/cockroachdb/cockroach/issues/73435)
+- {% include {{page.version.version}}/cdc/kafka-vpc-limitation.md %}
+- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#insecure-tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. [#73431](https://github.com/cockroachdb/cockroach/issues/73431)
+- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. [#73432](https://github.com/cockroachdb/cockroach/issues/73432)
+- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved) options on the same changefeed will cause an error when using the following [sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. [#79452](https://github.com/cockroachdb/cockroach/issues/79452)
+- {% include {{page.version.version}}/cdc/types-udt-composite-general.md %} The following limitations apply:
+ - {% include {{page.version.version}}/cdc/avro-udt-composite.md %}
+ - {% include {{page.version.version}}/cdc/csv-udt-composite.md %}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/changefeed-column-family-message.md b/src/current/_includes/v25.1/known-limitations/changefeed-column-family-message.md
new file mode 100644
index 00000000000..41744b9b4b4
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/changefeed-column-family-message.md
@@ -0,0 +1 @@
+When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. [#127736](https://github.com/cockroachdb/cockroach/issues/127736)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/compression-level-kafka-config.md b/src/current/_includes/v25.1/known-limitations/compression-level-kafka-config.md
new file mode 100644
index 00000000000..0635319c5af
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/compression-level-kafka-config.md
@@ -0,0 +1 @@
+Changefeeds created in v24.3 of CockroachDB that emit to [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka), or changefeeds created in earlier versions with the `changefeed.new_kafka_sink.enabled` cluster setting enabled, do not support negative compression level values for `GZIP` compression in the [`kafka_sink_config = {... "CompressionLevel" = ...}`]({% link {{ page.version.version }}/changefeed-sinks.md %}#compressionlevel) option field. [#136492](https://github.com/cockroachdb/cockroach/issues/136492)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/copy-syntax.md b/src/current/_includes/v25.1/known-limitations/copy-syntax.md
new file mode 100644
index 00000000000..e64a075dcac
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/copy-syntax.md
@@ -0,0 +1,5 @@
+CockroachDB does not yet support the following `COPY` syntax:
+
+ - `COPY ... WITH FREEZE`. [#85573](https://github.com/cockroachdb/cockroach/issues/85573)
+ - `COPY ... WITH QUOTE`. [#85574](https://github.com/cockroachdb/cockroach/issues/85574)
+ - `COPY ... FROM ... WHERE `. [#54580](https://github.com/cockroachdb/cockroach/issues/54580)
diff --git a/src/current/_includes/v25.1/known-limitations/create-statistics-aost-limitation.md b/src/current/_includes/v25.1/known-limitations/create-statistics-aost-limitation.md
new file mode 100644
index 00000000000..09f86f51c48
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/create-statistics-aost-limitation.md
@@ -0,0 +1 @@
+The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. [#96430](https://github.com/cockroachdb/cockroach/issues/96430)
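+
+For example, a sketch of collecting statistics at an explicit timestamp with `CREATE STATISTICS` (the statistics name and table are hypothetical):
+
+~~~ sql
+CREATE STATISTICS my_stats FROM t AS OF SYSTEM TIME '-30s';
+~~~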
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/create-table-as-limitations.md b/src/current/_includes/v25.1/known-limitations/create-table-as-limitations.md
new file mode 100644
index 00000000000..9f837eb074c
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/create-table-as-limitations.md
@@ -0,0 +1 @@
+The [primary key]({% link {{ page.version.version }}/primary-key.md %}) of tables created with `CREATE TABLE ... AS` is not automatically derived from the query results. You must specify new primary keys at table creation. For examples, see [Specify a primary key]({% link {{ page.version.version }}/create-table-as.md %}#specify-a-primary-key).
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/data-domiciling-limitations.md b/src/current/_includes/v25.1/known-limitations/data-domiciling-limitations.md
new file mode 100644
index 00000000000..509a9c9599f
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/data-domiciling-limitations.md
@@ -0,0 +1,4 @@
+- When columns are [indexed]({% link {{ page.version.version }}/indexes.md %}), a subset of data from the indexed columns may appear in [meta ranges]({% link {{ page.version.version }}/architecture/distribution-layer.md %}#meta-ranges) or other system tables. CockroachDB synchronizes these system ranges and system tables across nodes. This synchronization does not respect any multi-region settings applied via either the [multi-region SQL statements]({% link {{ page.version.version }}/multiregion-overview.md %}), or the low-level [zone configs]({% link {{ page.version.version }}/configure-replication-zones.md %}) mechanism.
+- [Zone configs]({% link {{ page.version.version }}/configure-replication-zones.md %}) can be used for data placement but these features were historically built for performance, not for domiciling. The replication system's top priority is to prevent the loss of data and it may override the zone configurations if necessary to ensure data durability. For more information, see [Replication Controls]({% link {{ page.version.version }}/configure-replication-zones.md %}#types-of-constraints).
+- If your [log files]({% link {{ page.version.version }}/logging-overview.md %}) are kept in the region where they were generated, there is some cross-region leakage (like the system tables described previously), but the majority of user data that makes it into the logs is going to be homed in that region. If that's not strong enough, you can use the [log redaction functionality]({% link {{ page.version.version }}/configure-logs.md %}#redact-logs) to strip all raw data from the logs. You can also limit your log retention entirely.
+- If you start a node with a [`--locality`]({% link {{ page.version.version }}/cockroach-start.md %}#locality) flag that says the node is in region _A_, but the node is actually running in some region _B_, data domiciling based on the inferred node placement will not work. A CockroachDB node only knows its locality based on the text supplied to the `--locality` flag; it cannot ensure that it is actually running in that physical location.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/datadog-self-hosted-limitations.md b/src/current/_includes/v25.1/known-limitations/datadog-self-hosted-limitations.md
new file mode 100644
index 00000000000..215f712404c
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/datadog-self-hosted-limitations.md
@@ -0,0 +1 @@
+The integration of your CockroachDB {{ site.data.products.core }} cluster with Datadog only supports displaying cluster-wide averages of reported metrics. Filtering by a specific node is unsupported.
diff --git a/src/current/_includes/v25.1/known-limitations/drop-column-partial-index.md b/src/current/_includes/v25.1/known-limitations/drop-column-partial-index.md
new file mode 100644
index 00000000000..9fd1811cc43
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/drop-column-partial-index.md
@@ -0,0 +1 @@
+CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). [#97813](https://github.com/cockroachdb/cockroach/issues/97813).
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/drop-owned-by-limitations.md b/src/current/_includes/v25.1/known-limitations/drop-owned-by-limitations.md
new file mode 100644
index 00000000000..95685f6adf1
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/drop-owned-by-limitations.md
@@ -0,0 +1,13 @@
+- [`ENUM`]({% link {{ page.version.version }}/enum.md %}) types are not dropped.
+- [`DROP OWNED BY`]({% link {{ page.version.version }}/drop-owned-by.md %}) drops all owned objects as well as any [grants]({% link {{ page.version.version }}/grant.md %}) on objects not owned by the [role]({% link {{ page.version.version }}/security-reference/authorization.md %}#roles).
+- If the [role]({% link {{ page.version.version }}/security-reference/authorization.md %}#roles) for which you are trying to `DROP OWNED BY` was granted a [system-level privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges) (i.e., using the [`GRANT SYSTEM ...`]({% link {{ page.version.version }}/grant.md %}#grant-system-level-privileges-on-the-entire-cluster) statement), the following error will be signalled:
+
+ ~~~
+ ERROR: cannot perform drop owned by if role has synthetic privileges; foo has entries in system.privileges
+ SQLSTATE: 0A000
+ HINT: perform REVOKE SYSTEM ... for the relevant privileges foo has in system.privileges
+ ~~~
+
+ The phrase "synthetic privileges" in the error message refers to [system-level privileges]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges).
+
+    The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`]({% link {{ page.version.version }}/show-system-grants.md %}) and then use [`REVOKE SYSTEM ...`]({% link {{ page.version.version }}/revoke.md %}#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. [#88149](https://github.com/cockroachdb/cockroach/issues/88149)
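+
+    A hedged sketch of that workaround for the role `foo` from the error above, where `VIEWACTIVITY` stands in for whichever system privileges the first statement reports:
+
+    ~~~ sql
+    SHOW SYSTEM GRANTS FOR foo;
+    REVOKE SYSTEM VIEWACTIVITY FROM foo;
+    DROP OWNED BY foo;
+    ~~~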
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/drop-single-partition.md b/src/current/_includes/v25.1/known-limitations/drop-single-partition.md
new file mode 100644
index 00000000000..ddda733e09e
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/drop-single-partition.md
@@ -0,0 +1 @@
+CockroachDB does not currently support dropping a single partition from a table. In order to remove partitions, you can [repartition]({% unless page.name == "partitioning.md" %}{% link {{ page.version.version }}/partitioning.md %}{% endunless %}#repartition-a-table) the table.
diff --git a/src/current/_includes/v25.1/known-limitations/drop-trigger-limitations.md b/src/current/_includes/v25.1/known-limitations/drop-trigger-limitations.md
new file mode 100644
index 00000000000..90745f7e17a
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/drop-trigger-limitations.md
@@ -0,0 +1 @@
+[`DROP TRIGGER`]({% link {{ page.version.version }}/drop-trigger.md %}) with `CASCADE` is not supported. [#128151](https://github.com/cockroachdb/cockroach/issues/128151)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/drop-unique-index-from-create-table.md b/src/current/_includes/v25.1/known-limitations/drop-unique-index-from-create-table.md
new file mode 100644
index 00000000000..ebe7750ee62
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/drop-unique-index-from-create-table.md
@@ -0,0 +1 @@
+[`UNIQUE` indexes]({% link {{ page.version.version }}/create-index.md %}) created as part of a [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}) statement cannot be removed without using [`CASCADE`]({% unless page.name == "drop-index.md" %}drop-index.html{% endunless %}#remove-an-index-and-dependent-objects-with-cascade). Unique indexes created with [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %}) do not have this limitation.
diff --git a/src/current/_includes/v25.1/known-limitations/expression-index-limitations.md b/src/current/_includes/v25.1/known-limitations/expression-index-limitations.md
new file mode 100644
index 00000000000..c0e94185948
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/expression-index-limitations.md
@@ -0,0 +1,43 @@
+- The expression cannot reference columns outside the index's table.
+- Functional expression output must be determined by the input arguments. For example, you can't use the [volatile function]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility) `now()` to create an index because its output depends on more than just the function arguments.
+- CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). [#67900](https://github.com/cockroachdb/cockroach/issues/67900)
+- CockroachDB does not support expressions as `ON CONFLICT` targets. This means that unique {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} cannot be selected as arbiters for [`INSERT .. ON CONFLICT`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause) statements. For example:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ CREATE TABLE t (a INT, b INT, UNIQUE INDEX ((a + b)));
+ ~~~
+
+ ~~~
+ CREATE TABLE
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ INSERT INTO t VALUES (1, 2) ON CONFLICT ((a + b)) DO NOTHING;
+ ~~~
+
+ ~~~
+ invalid syntax: statement ignored: at or near "(": syntax error
+ SQLSTATE: 42601
+ DETAIL: source SQL:
+ INSERT INTO t VALUES (1, 2) ON CONFLICT ((a + b)) DO NOTHING
+ ^
+ HINT: try \h INSERT
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ INSERT INTO t VALUES (1, 2) ON CONFLICT ((a + b)) DO UPDATE SET a = 10;
+ ~~~
+
+ ~~~
+ invalid syntax: statement ignored: at or near "(": syntax error
+ SQLSTATE: 42601
+ DETAIL: source SQL:
+ INSERT INTO t VALUES (1, 2) ON CONFLICT ((a + b)) DO UPDATE SET a = 10
+ ^
+ HINT: try \h INSERT
+ ~~~
+
+ [#67893](https://github.com/cockroachdb/cockroach/issues/67893)
diff --git a/src/current/_includes/v25.1/known-limitations/failover-stop-application.md b/src/current/_includes/v25.1/known-limitations/failover-stop-application.md
new file mode 100644
index 00000000000..7035b280fa7
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/failover-stop-application.md
@@ -0,0 +1 @@
+After a failover, there is no mechanism to stop applications from connecting to the original primary cluster. It is necessary to redirect application traffic manually, such as by using a network load balancer or adjusting DNS records.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/follower-reads-limitations.md b/src/current/_includes/v25.1/known-limitations/follower-reads-limitations.md
new file mode 100644
index 00000000000..f9ace7d02f6
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/follower-reads-limitations.md
@@ -0,0 +1,67 @@
+##### Exact staleness reads and long-running writes
+
+Long-running write transactions will create [write intents]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#write-intents) with a timestamp near when the transaction began. When an exact staleness follower read encounters a write intent, it will often end up in a ["transaction wait queue"]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#txnwaitqueue), waiting for the operation to complete; however, this runs counter to the benefit exact staleness reads provide.
+
+To counteract this, you can issue all follower reads in explicit [transactions set with `HIGH` priority]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities):
+
+```sql
+BEGIN PRIORITY HIGH AS OF SYSTEM TIME follower_read_timestamp();
+SELECT ...
+SELECT ...
+COMMIT;
+```
+
+##### Exact staleness read timestamps must be far enough in the past
+
+If an exact staleness read is not using an [`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}) value far enough in the past, CockroachDB cannot perform a follower read. Instead, the read must access the [leaseholder replica]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-leaseholder). This adds network latency if the leaseholder is not the closest replica to the gateway node. Most users will [use the `follower_read_timestamp()` function]({% link {{ page.version.version }}/follower-reads.md %}#run-queries-that-use-exact-staleness-follower-reads) to get a timestamp far enough in the past that there is a high probability of getting a follower read.
+
+##### Bounded staleness read limitations
+
+Bounded staleness reads have the following limitations:
+
+- They must be used in a [single-statement (aka implicit) transaction]({% link {{ page.version.version }}/transactions.md %}#individual-statements).
+- They must read from a single row.
+- They must not require an [index]({% link {{ page.version.version }}/indexes.md %}) [join]({% link {{ page.version.version }}/joins.md %}). In other words, the index used by the read query must be either a [primary]({% link {{ page.version.version }}/primary-key.md %}) [index]({% link {{ page.version.version }}/indexes.md %}), or some other index that covers the entire query by [`STORING`]({% link {{ page.version.version }}/create-index.md %}#store-columns) all columns.
+
+For example, let's look at a read query that cannot be served as a bounded staleness read. We will use a [demo cluster]({% link {{ page.version.version }}/cockroach-demo.md %}), which automatically loads the [MovR dataset]({% link {{ page.version.version }}/movr.md %}).
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+cockroach demo
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SELECT code FROM promo_codes AS OF SYSTEM TIME with_max_staleness('10s') LIMIT 1;
+ERROR: unimplemented: cannot use bounded staleness for queries that may touch more than one row or require an index join
+SQLSTATE: 0A000
+HINT: You have attempted to use a feature that is not yet implemented.
+See: https://go.crdb.dev/issue-v/67562/v23.2
+~~~
+
+As noted by the error message, this query cannot be served as a bounded staleness read because in this case it would touch more than one row. Even though we used a [`LIMIT 1` clause]({% link {{ page.version.version }}/limit-offset.md %}), the query would still have to touch more than one row in order to filter out the additional results.
+
+We can verify that more than one row would be touched by issuing [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %}) on the same query, but without the [`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}) clause:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+EXPLAIN SELECT code FROM promo_codes LIMIT 1;
+~~~
+
+~~~
+ info
+-------------------------------------------------------------------------------
+ distribution: full
+ vectorized: true
+
+ • scan
+ estimated row count: 1 (0.10% of the table; stats collected 1 minute ago)
+ table: promo_codes@primary
+ spans: LIMITED SCAN
+ limit: 1
+(8 rows)
+~~~
+
+The output verifies that this query performs a scan of the primary [index]({% link {{ page.version.version }}/indexes.md %}) on the `promo_codes` table, which is why it cannot be used for a bounded staleness read.
+
+For an example showing how to successfully perform a bounded staleness read, see [Run queries that use bounded staleness follower reads]({% link {{ page.version.version }}/follower-reads.md %}#run-queries-that-use-bounded-staleness-follower-reads).
diff --git a/src/current/_includes/v25.1/known-limitations/forecasted-stats-limitations.md b/src/current/_includes/v25.1/known-limitations/forecasted-stats-limitations.md
new file mode 100644
index 00000000000..c8753124a96
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/forecasted-stats-limitations.md
@@ -0,0 +1,9 @@
+- The following [cluster settings]({% link {{ page.version.version }}/cluster-settings.md %}) do not immediately take effect, and instead only take effect when new statistics are collected for a table.
+
+ - [`sql.stats.forecasts.max_decrease`]({% link {{ page.version.version }}/cluster-settings.md %}#setting-sql-stats-forecasts-max-decrease)
+ - [`sql.stats.forecasts.min_goodness_of_fit`]({% link {{ page.version.version }}/cluster-settings.md %}#setting-sql-stats-forecasts-min-goodness-of-fit)
+ - [`sql.stats.forecasts.min_observations`]({% link {{ page.version.version }}/cluster-settings.md %}#setting-sql-stats-forecasts-min-observations)
+
+ Although [`SHOW STATISTICS WITH FORECAST`]({% link {{ page.version.version }}/show-statistics.md %}#display-forecasted-statistics) shows the settings taking effect immediately, they do not actually take effect until new statistics are collected (as can be verified with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %})).
+
+ As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. [#123852](https://github.com/cockroachdb/cockroach/issues/123852)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/full-text-search-unsupported.md b/src/current/_includes/v25.1/known-limitations/full-text-search-unsupported.md
new file mode 100644
index 00000000000..7b5a83f2cae
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/full-text-search-unsupported.md
@@ -0,0 +1,14 @@
+- Aspects of [text search configurations]({% link {{ page.version.version }}/full-text-search.md %}#text-search-configuration) other than the specified dictionary.
+- `websearch_to_tsquery()` built-in function.
+- `tsquery_phrase()` built-in function.
+- `ts_rank_cd()` built-in function.
+- `setweight()` built-in function.
+- Inverted joins on `TSVECTOR` values.
+- `tsvector || tsvector` comparisons.
+- `tsquery || tsquery` comparisons.
+- `tsquery && tsquery` comparisons.
+- `tsquery <-> tsquery` comparisons.
+- `!! tsquery` comparisons.
+- `tsquery @> tsquery` and `tsquery <@ tsquery` comparisons.
+
+[#41288](https://github.com/cockroachdb/cockroach/issues/41288)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/generic-query-plan-limitations.md b/src/current/_includes/v25.1/known-limitations/generic-query-plan-limitations.md
new file mode 100644
index 00000000000..e28e66d5f32
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/generic-query-plan-limitations.md
@@ -0,0 +1,2 @@
+- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. [#128916](https://github.com/cockroachdb/cockroach/issues/128916)
+- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. [#128911](https://github.com/cockroachdb/cockroach/issues/128911)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/grant-revoke-schema-changes.md b/src/current/_includes/v25.1/known-limitations/grant-revoke-schema-changes.md
new file mode 100644
index 00000000000..2a2d076f108
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/grant-revoke-schema-changes.md
@@ -0,0 +1,19 @@
+User/role management operations (such as [`GRANT`]({% link {{ page.version.version }}/grant.md %}) and [`REVOKE`]({% link {{ page.version.version }}/revoke.md %})) are [schema changes]({% link {{ page.version.version }}/online-schema-changes.md %}). As such, they inherit the [limitations of schema changes]({% link {{ page.version.version }}/online-schema-changes.md %}#known-limitations).
+
+For example, schema changes wait for concurrent [transactions]({% link {{ page.version.version }}/transactions.md %}) using the same resources as the schema changes to complete. In the case of [role memberships]({% link {{ page.version.version }}/security-reference/authorization.md %}#roles) being modified inside a transaction, most transactions need access to the set of role memberships. Using the default settings, role modifications require schema leases to expire, which can take up to 5 minutes.
+
+This means that [long-running transactions]({% link {{ page.version.version }}/query-behavior-troubleshooting.md %}#hanging-or-stuck-queries) elsewhere in the system can cause user/role management operations inside transactions to take several minutes to complete. This can have a cascading effect. When a user/role management operation inside a transaction takes a long time to complete, it can in turn block all user-initiated transactions being run by your application, since the user/role management operation in the transaction has to commit before any other transactions that access role memberships (i.e., most transactions) can make progress.
+
+If you want user/role management operations to finish more quickly, and do not care whether concurrent transactions will immediately see the side effects of those operations, set the [session variable]({% link {{ page.version.version }}/set-vars.md %}) `allow_role_memberships_to_change_during_transaction` to `true`.
+
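+For example, a minimal way to enable the variable for the current session only is:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SET allow_role_memberships_to_change_during_transaction = true;
+~~~
+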
+When this session variable is enabled, any user/role management operations issued in the current session will only need to wait for the completion of statements in other sessions where `allow_role_memberships_to_change_during_transaction` is not enabled.
+
+To accelerate user/role management operations across your entire application, you have the following options:
+
+1. Set the session variable in all sessions by [passing it in the client connection string]({% link {{ page.version.version }}/connection-parameters.md %}#supported-options-parameters).
+1. Apply the `allow_role_memberships_to_change_during_transaction` setting globally to an entire cluster using the [`ALTER ROLE ALL`]({% link {{ page.version.version }}/alter-role.md %}) statement:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ ALTER ROLE ALL SET allow_role_memberships_to_change_during_transaction = true;
+ ~~~
diff --git a/src/current/_includes/v25.1/known-limitations/import-high-disk-contention.md b/src/current/_includes/v25.1/known-limitations/import-high-disk-contention.md
new file mode 100644
index 00000000000..e5abd405038
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/import-high-disk-contention.md
@@ -0,0 +1,6 @@
+[`IMPORT INTO`]({% link {{ page.version.version }}/import-into.md %}) can sometimes fail with a "context canceled" error, or can restart itself many times without ever finishing. If this is happening, it is likely due to a high amount of disk contention. This can be mitigated by setting the `kv.bulk_io_write.max_rate` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to a value below your max disk write speed. For example, to set it to 10MB/s, execute:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SET CLUSTER SETTING kv.bulk_io_write.max_rate = '10MB';
+~~~
diff --git a/src/current/_includes/v25.1/known-limitations/import-into-limitations.md b/src/current/_includes/v25.1/known-limitations/import-into-limitations.md
new file mode 100644
index 00000000000..6a7cce6f727
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/import-into-limitations.md
@@ -0,0 +1,11 @@
+[`IMPORT INTO`]({% link {{ page.version.version }}/import-into.md %}) has the following limitations:
+
+- While importing into an existing table, the table is taken offline.
+- After importing into an existing table, [constraints]({% link {{ page.version.version }}/constraints.md %}) will be un-validated and need to be [re-validated]({% link {{ page.version.version }}/alter-table.md %}#validate-constraint).
+- Imported rows must not conflict with existing rows in the table or any unique secondary indexes.
+- `IMPORT INTO` works for only a single existing table.
+- `IMPORT INTO` can sometimes fail with a "context canceled" error, or can restart itself many times without ever finishing. If this is happening, it is likely due to a high amount of disk contention. This can be mitigated by setting the `kv.bulk_io_write.max_rate` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to a value below your max disk write speed. For example, to set it to 10MB/s, execute:
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ SET CLUSTER SETTING kv.bulk_io_write.max_rate = '10MB';
+ ~~~
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/jsonb-limitations.md b/src/current/_includes/v25.1/known-limitations/jsonb-limitations.md
new file mode 100644
index 00000000000..81f2da52861
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/jsonb-limitations.md
@@ -0,0 +1 @@
+- You cannot use [primary key]({% link {{ page.version.version }}/primary-key.md %}), [foreign key]({% link {{ page.version.version }}/foreign-key.md %}), and [unique]({% link {{ page.version.version }}/unique.md %}) [constraints]({% link {{ page.version.version }}/constraints.md %}) on `JSONB` values.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/ldr-column-families.md b/src/current/_includes/v25.1/known-limitations/ldr-column-families.md
new file mode 100644
index 00000000000..2a7c3bbba52
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/ldr-column-families.md
@@ -0,0 +1 @@
+Replicating tables cannot contain [column families]({% link {{ page.version.version }}/column-families.md %}). [#133562](https://github.com/cockroachdb/cockroach/issues/133562)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/ldr-composite-primary.md b/src/current/_includes/v25.1/known-limitations/ldr-composite-primary.md
new file mode 100644
index 00000000000..ac897af35a7
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/ldr-composite-primary.md
@@ -0,0 +1 @@
+The [primary key]({% link {{ page.version.version }}/primary-key.md %}) in replicating tables cannot contain composite types. [#133572](https://github.com/cockroachdb/cockroach/issues/133572)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/ldr-indexes.md b/src/current/_includes/v25.1/known-limitations/ldr-indexes.md
new file mode 100644
index 00000000000..0bf7f60c2d4
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/ldr-indexes.md
@@ -0,0 +1 @@
+Replicating tables cannot contain an [index]({% link {{ page.version.version }}/indexes.md %}) that requires expression evaluation before insertion. [#133560](https://github.com/cockroachdb/cockroach/issues/133560)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/ldr-sequences.md b/src/current/_includes/v25.1/known-limitations/ldr-sequences.md
new file mode 100644
index 00000000000..4e39f3630e3
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/ldr-sequences.md
@@ -0,0 +1 @@
+Replicating tables cannot reference [sequences]({% link {{ page.version.version }}/create-sequence.md %}). [#132303](https://github.com/cockroachdb/cockroach/issues/132303)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/ldr-triggers.md b/src/current/_includes/v25.1/known-limitations/ldr-triggers.md
new file mode 100644
index 00000000000..55f8e885b97
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/ldr-triggers.md
@@ -0,0 +1 @@
+Replicating tables cannot reference triggers. [#132301](https://github.com/cockroachdb/cockroach/issues/132301)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/ldr-udfs.md b/src/current/_includes/v25.1/known-limitations/ldr-udfs.md
new file mode 100644
index 00000000000..fb642f14751
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/ldr-udfs.md
@@ -0,0 +1 @@
+Replicating tables cannot reference [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). [#132302](https://github.com/cockroachdb/cockroach/issues/132302)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/locality-optimized-search-limited-records.md b/src/current/_includes/v25.1/known-limitations/locality-optimized-search-limited-records.md
new file mode 100644
index 00000000000..7a2be1ca3ef
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/locality-optimized-search-limited-records.md
@@ -0,0 +1 @@
+- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} works only for queries selecting a limited number of records (up to 100,000 unique keys).
diff --git a/src/current/_includes/v25.1/known-limitations/locality-optimized-search-virtual-computed-columns.md b/src/current/_includes/v25.1/known-limitations/locality-optimized-search-virtual-computed-columns.md
new file mode 100644
index 00000000000..d6acf418aa8
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/locality-optimized-search-virtual-computed-columns.md
@@ -0,0 +1 @@
+- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns]({% link {{ page.version.version }}/computed-columns.md %}#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column]({% link {{ page.version.version }}/computed-columns.md %}#stored-computed-columns), as sketched below. Locality optimized search does not work for queries that use partitioned unique [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}). [#68129](https://github.com/cockroachdb/cockroach/issues/68129)
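+
+  As an illustration of the workaround, a stored computed column is declared with the `STORED` keyword. A minimal sketch with hypothetical table and column names:
+
+  {% include_cached copy-clipboard.html %}
+  ~~~ sql
+  CREATE TABLE t (
+    a INT,
+    b INT AS (a + 1) STORED  -- stored, rather than virtual, computed column
+  );
+  ~~~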
diff --git a/src/current/_includes/v25.1/known-limitations/logging-limitations.md b/src/current/_includes/v25.1/known-limitations/logging-limitations.md
new file mode 100644
index 00000000000..78c70d14234
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/logging-limitations.md
@@ -0,0 +1 @@
+Log files can only be accessed in the DB Console if they are stored in the same directory as the file sink for the `DEV` channel.
diff --git a/src/current/_includes/v25.1/known-limitations/materialized-views-no-stats.md b/src/current/_includes/v25.1/known-limitations/materialized-views-no-stats.md
new file mode 100644
index 00000000000..02f2bd787c4
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/materialized-views-no-stats.md
@@ -0,0 +1 @@
+- The optimizer may not select the optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. [#78181](https://github.com/cockroachdb/cockroach/issues/78181)
diff --git a/src/current/_includes/v25.1/known-limitations/multiple-arbiter-indexes.md b/src/current/_includes/v25.1/known-limitations/multiple-arbiter-indexes.md
new file mode 100644
index 00000000000..c9861623314
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/multiple-arbiter-indexes.md
@@ -0,0 +1 @@
+CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. [#53170](https://github.com/cockroachdb/cockroach/issues/53170)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/online-schema-changes-limitations.md b/src/current/_includes/v25.1/known-limitations/online-schema-changes-limitations.md
new file mode 100644
index 00000000000..1e7b619fdf8
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/online-schema-changes-limitations.md
@@ -0,0 +1,25 @@
+##### Schema changes within transactions
+
+Most schema changes should not be performed within an explicit transaction with multiple statements, as they do not have the same atomicity guarantees as other SQL statements. Execute schema changes either as single statements (as an implicit transaction), or in an explicit transaction consisting of the single schema change statement. There are some exceptions to this, detailed below.
+
+Schema changes keep your data consistent at all times, but they do not run inside [transactions][txns] in the general case. Making schema changes transactional would mean requiring a given schema change to propagate across all the nodes of a cluster. This would block all user-initiated transactions being run by your application, since the schema change would have to commit before any other transactions could make progress. This would prevent the cluster from servicing reads and writes during the schema change, requiring application downtime.
+
+{{site.data.alerts.callout_success}}
+Some tools and applications may be able to work around CockroachDB's lack of transactional schema changes by [enabling a setting that automatically commits before running schema changes inside transactions]({% link {{ page.version.version }}/online-schema-changes.md %}#enable-automatic-commit-before-running-schema-changes-inside-transactions).
+{{site.data.alerts.end}}
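+
+For example, assuming the setting referred to in the callout is the `autocommit_before_ddl` [session variable]({% link {{ page.version.version }}/set-vars.md %}#autocommit-before-ddl), a minimal sketch to enable it for the current session is:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SET autocommit_before_ddl = on;
+~~~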
+
+Some schema change operations can be run within explicit, multi-statement transactions. `CREATE TABLE` and `CREATE INDEX` statements can be run within the same transaction with the same atomicity guarantees as other SQL statements. There are no performance or rollback issues when using these statements within a multi-statement transaction.
+
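+For example, the following sketch (with hypothetical table and column names) creates a table, indexes it, and inserts a row inside one explicit transaction, with the usual atomicity guarantees:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+BEGIN;
+CREATE TABLE orders (id INT PRIMARY KEY, total DECIMAL);
+CREATE INDEX ON orders (total);
+INSERT INTO orders (id, total) VALUES (1, 19.99);
+COMMIT;
+~~~
+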
+{% include {{ page.version.version }}/known-limitations/schema-changes-within-transactions.md %}
+
+##### Schema change DDL statements inside a multi-statement transaction can fail while other statements succeed
+
+{% include {{ page.version.version }}/known-limitations/schema-change-ddl-inside-multi-statement-transactions.md %}
+
+##### No online schema changes if primary key change in progress
+
+You cannot start an online schema change on a table if a [primary key change]({% link {{ page.version.version }}/alter-table.md %}#alter-primary-key) is currently in progress on the same table.
+
+##### No online schema changes between executions of prepared statements
+
+{% include {{ page.version.version }}/known-limitations/schema-changes-between-prepared-statements.md %}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/partitioning-with-placeholders.md b/src/current/_includes/v25.1/known-limitations/partitioning-with-placeholders.md
new file mode 100644
index 00000000000..7abc2e1744a
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/partitioning-with-placeholders.md
@@ -0,0 +1 @@
+When defining a [table partition]({% link {{ page.version.version }}/partitioning.md %}), either during table creation or table alteration, it is not possible to use placeholders in the `PARTITION BY` clause.
diff --git a/src/current/_includes/v25.1/known-limitations/per-replica-circuit-breaker-limitations.md b/src/current/_includes/v25.1/known-limitations/per-replica-circuit-breaker-limitations.md
new file mode 100644
index 00000000000..0abc2b55fec
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/per-replica-circuit-breaker-limitations.md
@@ -0,0 +1,4 @@
+[Per-replica circuit breakers]({% link {{ page.version.version }}/architecture/replication-layer.md %}#per-replica-circuit-breakers) have the following limitations:
+
+- They cannot prevent requests from hanging when the node's [liveness range]({% link {{ page.version.version }}/architecture/replication-layer.md %}#epoch-based-leases-table-data) is unavailable. For more information about troubleshooting a cluster that's having node liveness issues, see [Node liveness issues]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#node-liveness-issues).
+- They are not tripped if _all_ replicas of a range [become unavailable]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#db-console-shows-under-replicated-unavailable-ranges), because the circuit breaker mechanism operates per-replica. This means at least one replica needs to be available to receive the request in order for the breaker to trip.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/physical-cluster-replication.md b/src/current/_includes/v25.1/known-limitations/physical-cluster-replication.md
new file mode 100644
index 00000000000..abd0fc2b10b
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/physical-cluster-replication.md
@@ -0,0 +1,5 @@
+- Physical cluster replication is supported only on CockroachDB {{ site.data.products.core }} in new clusters on v23.2 or above. Physical cluster replication cannot be enabled on clusters that have been upgraded from a previous version of CockroachDB.
+- The primary and standby clusters must have the same [zone configurations]({% link {{ page.version.version }}/configure-replication-zones.md %}).
+- Failing back to the primary cluster after a failover is a manual process. Refer to [Fail back to the primary cluster]({% link {{ page.version.version }}/failover-replication.md %}#fail-back-to-the-primary-cluster). In addition, after failover, to continue using physical cluster replication, you must configure it again.
+- Before failover to the standby, the standby cluster does not support running [backups]({% link {{ page.version.version }}/backup-and-restore-overview.md %}) or [changefeeds]({% link {{ page.version.version }}/change-data-capture-overview.md %}).
+- Large data imports, such as those produced by [`RESTORE`]({% link {{ page.version.version }}/restore.md %}) or [`IMPORT INTO`]({% link {{ page.version.version }}/import-into.md %}), may dramatically increase [replication lag]({% link {{ page.version.version }}/physical-cluster-replication-technical-overview.md %}#failover-and-promotion-process).
diff --git a/src/current/_includes/v25.1/known-limitations/plpgsql-limitations.md b/src/current/_includes/v25.1/known-limitations/plpgsql-limitations.md
new file mode 100644
index 00000000000..83e47a0bdaa
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/plpgsql-limitations.md
@@ -0,0 +1,26 @@
+- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. [#121605](https://github.com/cockroachdb/cockroach/issues/121605)
+- PL/pgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. [#114676](https://github.com/cockroachdb/cockroach/issues/114676)
+- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). [#114701](https://github.com/cockroachdb/cockroach/issues/114701)
+- The following statements are not supported:
+ - `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops. [#105246](https://github.com/cockroachdb/cockroach/issues/105246)
+ - `RETURN NEXT` and `RETURN QUERY`. [#117744](https://github.com/cockroachdb/cockroach/issues/117744)
+ - `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `CASE`. [#117744](https://github.com/cockroachdb/cockroach/issues/117744)
+- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). [#111446](https://github.com/cockroachdb/cockroach/issues/111446)
+- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). [#106237](https://github.com/cockroachdb/cockroach/issues/106237)
+- `RAISE` statements message the client directly, and do not produce log output. [#117750](https://github.com/cockroachdb/cockroach/issues/117750)
+- `ASSERT` debugging checks are not supported. [#117744](https://github.com/cockroachdb/cockroach/issues/117744)
+- `RECORD` parameters and variables are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). [#105713](https://github.com/cockroachdb/cockroach/issues/105713)
+- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. [#117508](https://github.com/cockroachdb/cockroach/issues/117508)
+- Syntax for accessing members of composite types without parentheses is not supported. [#114687](https://github.com/cockroachdb/cockroach/issues/114687)
+- `NOT NULL` variable declarations are not supported. [#105243](https://github.com/cockroachdb/cockroach/issues/105243)
+- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. [#111479](https://github.com/cockroachdb/cockroach/issues/111479)
+- Cursors in PL/pgSQL cannot be declared with arguments. [#117746](https://github.com/cockroachdb/cockroach/issues/117746)
+- `OPEN FOR EXECUTE` is not supported for opening cursors. [#117744](https://github.com/cockroachdb/cockroach/issues/117744)
+- The `print_strict_params` option is not supported in PL/pgSQL. [#123671](https://github.com/cockroachdb/cockroach/issues/123671)
+- The `FOUND` local variable, which checks whether a statement affected any rows, is not supported in PL/pgSQL. [#122306](https://github.com/cockroachdb/cockroach/issues/122306)
+- By default, when a PL/pgSQL variable conflicts with a column name, CockroachDB resolves the ambiguity by treating it as a column reference rather than a variable reference. This behavior differs from PostgreSQL, where an ambiguous column error is reported, and it is possible to change the `plpgsql.variable_conflict` setting in order to prefer either columns or variables. [#115680](https://github.com/cockroachdb/cockroach/issues/115680)
+- It is not possible to define a `RECORD`-returning PL/pgSQL function that returns different-typed expressions from different `RETURN` statements. CockroachDB requires a consistent return type for `RECORD`-returning functions. [#115384](https://github.com/cockroachdb/cockroach/issues/115384)
+- Variables cannot be declared with an associated collation using the `COLLATE` keyword. [#105245](https://github.com/cockroachdb/cockroach/issues/105245)
+- Variables cannot be accessed using the `label.var_name` pattern. [#122322](https://github.com/cockroachdb/cockroach/issues/122322)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/read-committed-limitations.md b/src/current/_includes/v25.1/known-limitations/read-committed-limitations.md
new file mode 100644
index 00000000000..63f83b15dd8
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/read-committed-limitations.md
@@ -0,0 +1,6 @@
+- Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions when the [`autocommit_before_ddl` session setting]({% link {{page.version.version}}/set-vars.md %}#autocommit-before-ddl) is set to `off`, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. [#114778](https://github.com/cockroachdb/cockroach/issues/114778)
+- Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. [#112488](https://github.com/cockroachdb/cockroach/issues/112488)
+- Because locks acquired by [foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks, [`SELECT FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}), and [`SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) are fully replicated under `READ COMMITTED` isolation, some queries experience a delay for Raft replication.
+- [Foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks are not performed in parallel under `READ COMMITTED` isolation.
+- [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements are less optimized under `READ COMMITTED` isolation than under `SERIALIZABLE` isolation. Under `READ COMMITTED` isolation, `SELECT FOR UPDATE` and `SELECT FOR SHARE` usually perform an extra lookup join for every locked table when compared to the same queries under `SERIALIZABLE`. In addition, some optimization steps (such as de-correlation of correlated [subqueries]({% link {{ page.version.version }}/subqueries.md %})) are not currently performed on these queries.
+- Regardless of isolation level, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements in CockroachDB do not prevent insertion of new rows matching the search condition (i.e., [phantom reads]({% link {{ page.version.version }}/read-committed.md %}#non-repeatable-reads-and-phantom-reads)). This matches PostgreSQL behavior at all isolation levels. [#120673](https://github.com/cockroachdb/cockroach/issues/120673)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/restore-multiregion-match.md b/src/current/_includes/v25.1/known-limitations/restore-multiregion-match.md
new file mode 100644
index 00000000000..20ddbb0c930
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/restore-multiregion-match.md
@@ -0,0 +1,50 @@
+[`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) and [`REGIONAL BY ROW`]({% link {{ page.version.version }}/table-localities.md %}#regional-by-row-tables) tables can be restored **only** if the regions of the backed-up table match those of the target database. All of the following must be true for `RESTORE` to be successful:
+
+ * The [regions]({% link {{ page.version.version }}/multiregion-overview.md %}#database-regions) of the source database and the regions of the destination database have the same set of regions.
+ * The regions were added to each of the databases in the same order.
+ * The databases have the same [primary region]({% link {{ page.version.version }}/alter-database.md %}#set-primary-region).
+
+ The following example would be considered as having **mismatched** regions because the database regions were not added in the same order and the primary regions do not match.
+
+ Running on the source database:
+
+ ~~~ sql
+ ALTER DATABASE source_database SET PRIMARY REGION "us-east1";
+ ~~~
+ ~~~ sql
+ ALTER DATABASE source_database ADD region "us-west1";
+ ~~~
+
+ Running on the destination database:
+
+ ~~~ sql
+ ALTER DATABASE destination_database SET PRIMARY REGION "us-west1";
+ ~~~
+ ~~~ sql
+ ALTER DATABASE destination_database ADD region "us-east1";
+ ~~~
+
+ In addition, the following scenario has mismatched regions between the databases since the regions were not added to the database in the same order.
+
+ Running on the source database:
+
+ ~~~ sql
+ ALTER DATABASE source_database SET PRIMARY REGION "us-east1";
+ ~~~
+ ~~~ sql
+ ALTER DATABASE source_database ADD region "us-west1";
+ ~~~
+
+ Running on the destination database:
+
+ ~~~ sql
+ ALTER DATABASE destination_database SET PRIMARY REGION "us-west1";
+ ~~~
+ ~~~ sql
+ ALTER DATABASE destination_database ADD region "us-east1";
+ ~~~
+ ~~~ sql
+ ALTER DATABASE destination_database SET PRIMARY REGION "us-east1";
+ ~~~
+
+ [#71071](https://github.com/cockroachdb/cockroach/issues/71071)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/restore-tables-non-multi-reg.md b/src/current/_includes/v25.1/known-limitations/restore-tables-non-multi-reg.md
new file mode 100644
index 00000000000..5390f2d09ee
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/restore-tables-non-multi-reg.md
@@ -0,0 +1 @@
+Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. [#71502](https://github.com/cockroachdb/cockroach/issues/71502)
diff --git a/src/current/_includes/v25.1/known-limitations/restore-udf.md b/src/current/_includes/v25.1/known-limitations/restore-udf.md
new file mode 100644
index 00000000000..a4a4bc080fe
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/restore-udf.md
@@ -0,0 +1 @@
+`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. [#118195](https://github.com/cockroachdb/cockroach/issues/118195)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/routine-limitations.md b/src/current/_includes/v25.1/known-limitations/routine-limitations.md
new file mode 100644
index 00000000000..4718c6c7abf
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/routine-limitations.md
@@ -0,0 +1,10 @@
+- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. [#122264](https://github.com/cockroachdb/cockroach/issues/122264)
+- Routines cannot be created if they reference temporary tables. [#121375](https://github.com/cockroachdb/cockroach/issues/121375)
+- Routines cannot be created with unnamed `INOUT` parameters. For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. [#121251](https://github.com/cockroachdb/cockroach/issues/121251)
+- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. [#121247](https://github.com/cockroachdb/cockroach/issues/121247)
+- Routines cannot be created with an `OUT` parameter of type `RECORD`. [#123448](https://github.com/cockroachdb/cockroach/issues/123448)
+- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or stored procedures. [#110080](https://github.com/cockroachdb/cockroach/issues/110080)
+- Polymorphic types cannot be cast to other types (e.g., `TEXT`) within routine parameters. [#123536](https://github.com/cockroachdb/cockroach/issues/123536)
+- Routine parameters and return types cannot be declared using the `ANYENUM` polymorphic type, which is able to match any [`ENUM`]({% link {{ page.version.version }}/enum.md %}) type. [#123048](https://github.com/cockroachdb/cockroach/issues/123048)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/row-level-ttl-limitations.md b/src/current/_includes/v25.1/known-limitations/row-level-ttl-limitations.md
new file mode 100644
index 00000000000..54b1e3ee66e
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/row-level-ttl-limitations.md
@@ -0,0 +1,5 @@
+- Any queries you run against tables with Row-Level TTL enabled (or against tables that have [foreign keys]({% link {{page.version.version}}/foreign-key.md %}) that reference TTL-enabled tables) do not filter out expired rows from the result set (this includes [`UPDATE`s]({% link {{ page.version.version }}/update.md %}) and [`DELETE`s]({% link {{ page.version.version }}/delete.md %})). This feature may be added in a future release. For now, follow the instructions in [Filter out expired rows from a selection query]({% link {{ page.version.version }}/row-level-ttl.md %}#filter-out-expired-rows-from-a-selection-query).
+- Enabling Row-Level TTL on a table with multiple [secondary indexes]({% link {{ page.version.version }}/indexes.md %}) can have negative performance impacts on a cluster, including increased [latency]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#service-latency) and [contention]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention). This is particularly true for large tables with terabytes of data and billions of rows that are split up into multiple ranges across separate nodes.
+ - Increased latency may occur because secondary indexes aren't necessarily stored on the same underlying [ranges]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-range) as a table's [primary indexes]({% link {{ page.version.version }}/indexes.md %}). Further, the secondary indexes' ranges may have [leaseholders]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-leaseholder) located on different nodes than the primary index.
+ - Increased contention may occur because [intents]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#write-intents) must be written as part of performing the deletions.
+ - Finally, secondary indexes can also have a negative impact on the overall performance of [TTL jobs]({% link {{ page.version.version }}/row-level-ttl.md %}#view-running-ttl-jobs). According to internal testing, the [TTL job processing rate]({% link {{ page.version.version }}/ui-ttl-dashboard.md %}#processing-rate) is worse on tables with secondary indexes. If you encounter this situation, decreasing the [`ttl_delete_batch_size` storage parameter]({% link {{ page.version.version }}/row-level-ttl.md %}#param-ttl-delete-batch-size) may help by decreasing the number of ranges that need to be accessed by the job.
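+
+  For example, assuming a TTL-enabled table named `events` (a hypothetical name), a minimal sketch that lowers this storage parameter:
+
+  {% include_cached copy-clipboard.html %}
+  ~~~ sql
+  ALTER TABLE events SET (ttl_delete_batch_size = 50);
+  ~~~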
diff --git a/src/current/_includes/v25.1/known-limitations/schema-change-ddl-inside-multi-statement-transactions.md b/src/current/_includes/v25.1/known-limitations/schema-change-ddl-inside-multi-statement-transactions.md
new file mode 100644
index 00000000000..9a3d47b140f
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/schema-change-ddl-inside-multi-statement-transactions.md
@@ -0,0 +1,62 @@
+Most schema change [DDL](https://wikipedia.org/wiki/Data_definition_language#ALTER_statement) statements that run inside a multi-statement transaction with non-DDL statements can fail at [`COMMIT`]({% link {{ page.version.version }}/commit-transaction.md %}) time, even if other statements in the transaction succeed. This leaves such transactions in a "partially committed, partially aborted" state that may require manual intervention to determine whether the DDL statements succeeded.
+
+Some DDL statements do not have this limitation. `CREATE TABLE` and `CREATE INDEX` statements have the same atomicity guarantees as other statements within a transaction.
+
+If such a failure occurs, CockroachDB will emit a CockroachDB-specific error code, `XXA00`, and the following error message:
+
+~~~
+transaction committed but schema change aborted with error:
+HINT: Some of the non-DDL statements may have committed successfully, but some of the DDL statement(s) failed.
+Manual inspection may be required to determine the actual state of the database.
+~~~
+
+{{site.data.alerts.callout_danger}}
+If you must execute schema change DDL statements inside a multi-statement transaction, we **strongly recommend** checking for this error code and handling it appropriately every time you execute such transactions.
+{{site.data.alerts.end}}
+
+This error will occur in various scenarios, including but not limited to:
+
+- Creating a unique index fails because values aren't unique.
+- The evaluation of a computed value fails.
+- Adding a constraint (or a column with a constraint) fails because the constraint is violated for the default/computed values in the column.
+
+To see an example of this error, start by creating the following table and inserting a few rows.
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE TABLE t(x INT);
+INSERT INTO t(x) VALUES (1), (2), (3);
+~~~
+
+Then, enter the following multi-statement transaction, which will trigger the error.
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+BEGIN;
+ALTER TABLE t ADD CONSTRAINT unique_x UNIQUE(x);
+INSERT INTO t(x) VALUES (3);
+COMMIT;
+~~~
+
+~~~
+pq: transaction committed but schema change aborted with error: (23505): duplicate key value (x)=(3) violates unique constraint "unique_x"
+HINT: Some of the non-DDL statements may have committed successfully, but some of the DDL statement(s) failed.
+Manual inspection may be required to determine the actual state of the database.
+~~~
+
+In this example, the [`INSERT`]({% link {{ page.version.version }}/insert.md %}) statement committed, but the [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}) statement adding a [`UNIQUE` constraint]({% link {{ page.version.version }}/unique.md %}) failed. We can verify this by looking at the data in table `t` and seeing that the additional non-unique value `3` was successfully inserted.
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SELECT * FROM t;
+~~~
+
+~~~
+ x
++---+
+ 1
+ 2
+ 3
+ 3
+(4 rows)
+~~~
diff --git a/src/current/_includes/v25.1/known-limitations/schema-changes-between-prepared-statements.md b/src/current/_includes/v25.1/known-limitations/schema-changes-between-prepared-statements.md
new file mode 100644
index 00000000000..736fe99df61
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/schema-changes-between-prepared-statements.md
@@ -0,0 +1,33 @@
+When the schema of a table targeted by a prepared statement changes after the prepared statement is created, future executions of the prepared statement could result in an error. For example, adding a column to a table referenced in a prepared statement with a `SELECT *` clause will result in an error:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE TABLE users (id INT PRIMARY KEY);
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+PREPARE prep1 AS SELECT * FROM users;
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+ALTER TABLE users ADD COLUMN name STRING;
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+INSERT INTO users VALUES (1, 'Max Roach');
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+EXECUTE prep1;
+~~~
+
+~~~
+ERROR: cached plan must not change result type
+SQLSTATE: 0A000
+~~~
+
+Therefore, when possible, explicitly list result columns instead of using `SELECT *` in prepared statements.
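+
+For example, a prepared statement that names its result columns explicitly (using a new statement name, `prep2`, chosen for illustration) is not affected when additional columns are later added to the table:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+PREPARE prep2 AS SELECT id, name FROM users;
+~~~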
diff --git a/src/current/_includes/v25.1/known-limitations/schema-changes-within-transactions.md b/src/current/_includes/v25.1/known-limitations/schema-changes-within-transactions.md
new file mode 100644
index 00000000000..407d45d02c7
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/schema-changes-within-transactions.md
@@ -0,0 +1,9 @@
+Within a single [transaction]({% link {{ page.version.version }}/transactions.md %}):
+
+- You can run schema changes inside the same transaction as a [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}) statement. For more information, see [Run schema changes inside a transaction with `CREATE TABLE`]({% link {{ page.version.version }}/online-schema-changes.md %}#run-schema-changes-inside-a-transaction-with-create-table). However, a `CREATE TABLE` statement containing [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) clauses cannot be followed by statements that reference the new table.
+- [Schema change DDL statements inside a multi-statement transaction can fail while other statements succeed](#schema-change-ddl-statements-inside-a-multi-statement-transaction-can-fail-while-other-statements-succeed).
+- [`DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) can result in data loss if one of the other schema changes in the transaction fails or is canceled. To work around this, move the `DROP COLUMN` statement to its own explicit transaction or run it in a single statement outside the existing transaction.
+
+{{site.data.alerts.callout_info}}
+If a schema change within a transaction fails, manual intervention may be needed to determine which statement has failed. After determining which schema change(s) failed, you can then retry the schema change.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/known-limitations/select-for-update-limitations.md b/src/current/_includes/v25.1/known-limitations/select-for-update-limitations.md
new file mode 100644
index 00000000000..3597c510ead
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/select-for-update-limitations.md
@@ -0,0 +1,10 @@
+By default under `SERIALIZABLE` isolation, locks acquired using `SELECT ... FOR UPDATE` and `SELECT ... FOR SHARE` are implemented as fast, in-memory [unreplicated locks]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#unreplicated-locks). If a [lease transfer]({% link {{ page.version.version }}/architecture/replication-layer.md %}#epoch-based-leases-table-data) or [range split/merge]({% link {{ page.version.version }}/architecture/distribution-layer.md %}#range-merges) occurs on a range held by an unreplicated lock, the lock is dropped. The following behaviors can occur:
+
+- The desired ordering of concurrent accesses to one or more rows of a table expressed by your use of `SELECT ... FOR UPDATE` may not be preserved (that is, a transaction _B_ against some table _T_ that was supposed to wait behind another transaction _A_ operating on _T_ may not wait for transaction _A_).
+- The transaction that acquired the (now dropped) unreplicated lock may fail to commit, leading to [transaction retry errors with code `40001`]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}) and the [`restart transaction` error message]({% link {{ page.version.version }}/common-errors.md %}#restart-transaction).
+
+When running under `SERIALIZABLE` isolation, `SELECT ... FOR UPDATE` and `SELECT ... FOR SHARE` locks should be thought of as best-effort, and should not be relied upon for correctness. Note that [serialization]({% link {{ page.version.version }}/demo-serializable.md %}) is preserved despite this limitation. This limitation is fixed when the `enable_durable_locking_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}#enable-durable-locking-for-serializable) is set to `true`.
+
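+For example, a minimal sketch that enables this session setting for the current session:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SET enable_durable_locking_for_serializable = true;
+~~~
+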
+{{site.data.alerts.callout_info}}
+This limitation does **not** apply to [`READ COMMITTED`]({% link {{ page.version.version }}/read-committed.md %}) transactions.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/set-transaction-no-rollback.md b/src/current/_includes/v25.1/known-limitations/set-transaction-no-rollback.md
new file mode 100644
index 00000000000..414cbac6282
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/set-transaction-no-rollback.md
@@ -0,0 +1,17 @@
+{% if page.name == "set-vars.md" %} `SET` {% else %} [`SET`]({% link {{ page.version.version }}/set-vars.md %}) {% endif %} statements are not properly rolled back by [`ROLLBACK`]({% link {{ page.version.version }}/rollback-transaction.md %}) within a transaction. For example, in the following transaction, showing the `TIME ZONE` [variable]({% link {{ page.version.version }}/set-vars.md %}#supported-variables) does not return `2` as expected after the rollback:
+
+~~~ sql
+SET TIME ZONE +2;
+BEGIN;
+SET TIME ZONE +3;
+ROLLBACK;
+SHOW TIME ZONE;
+~~~
+
+~~~
+timezone
+------------
+3
+~~~
+
+[#69396](https://github.com/cockroachdb/cockroach/issues/69396)
diff --git a/src/current/_includes/v25.1/known-limitations/show-backup-symlink.md b/src/current/_includes/v25.1/known-limitations/show-backup-symlink.md
new file mode 100644
index 00000000000..38ba86fb28f
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/show-backup-symlink.md
@@ -0,0 +1 @@
+[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. [#70260](https://github.com/cockroachdb/cockroach/issues/70260)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/sql-cursors.md b/src/current/_includes/v25.1/known-limitations/sql-cursors.md
new file mode 100644
index 00000000000..4c047aa9603
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/sql-cursors.md
@@ -0,0 +1,24 @@
+CockroachDB implements SQL {% if page.name == "known-limitations.md" %} [cursor]({% link {{ page.version.version }}/cursors.md %}) {% else %} cursor {% endif %} support with the following limitations:
+
+- `DECLARE` only supports forward cursors. Reverse cursors created with `DECLARE SCROLL` are not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102)
+- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. [#77102](https://github.com/cockroachdb/cockroach/issues/77102)
+- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. [#77099](https://github.com/cockroachdb/cockroach/issues/77099)
+- `WITH HOLD`, which allows keeping a cursor open for longer than a transaction by writing its results into a buffer, is accepted as valid syntax within a single transaction but is not supported. It acts as a no-op: the cursor does not live outside its parent transaction, so you must close the cursor within the transaction that created it. [#77101](https://github.com/cockroachdb/cockroach/issues/77101)
+ - This syntax is accepted (but does not have any effect):
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ BEGIN;
+ DECLARE test_cur CURSOR WITH HOLD FOR SELECT * FROM foo ORDER BY bar;
+ CLOSE test_cur;
+ COMMIT;
+ ~~~
+ - This syntax is not accepted, and will result in an error:
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ BEGIN;
+ DECLARE test_cur CURSOR WITH HOLD FOR SELECT * FROM foo ORDER BY bar;
+ COMMIT; -- This will fail with an error because CLOSE test_cur was not called inside the transaction.
+ ~~~
+- Scrollable cursor (also known as reverse `FETCH`) is not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102)
+- [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. [#77103](https://github.com/cockroachdb/cockroach/issues/77103)
+- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. [#77104](https://github.com/cockroachdb/cockroach/issues/77104)
diff --git a/src/current/_includes/v25.1/known-limitations/srid-4326-limitations.md b/src/current/_includes/v25.1/known-limitations/srid-4326-limitations.md
new file mode 100644
index 00000000000..b556a9fbecd
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/srid-4326-limitations.md
@@ -0,0 +1 @@
+Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. [#55903](https://github.com/cockroachdb/cockroach/issues/55903)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/stats-refresh-upgrade.md b/src/current/_includes/v25.1/known-limitations/stats-refresh-upgrade.md
new file mode 100644
index 00000000000..3d5a8d26325
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/stats-refresh-upgrade.md
@@ -0,0 +1 @@
+- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}). [#54816](https://github.com/cockroachdb/cockroach/issues/54816)
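+
+  For example, a minimal sketch that manually refreshes statistics for a hypothetical table named `orders`:
+
+  {% include_cached copy-clipboard.html %}
+  ~~~ sql
+  CREATE STATISTICS orders_stats FROM orders;
+  ~~~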
diff --git a/src/current/_includes/v25.1/known-limitations/stored-proc-limitations.md b/src/current/_includes/v25.1/known-limitations/stored-proc-limitations.md
new file mode 100644
index 00000000000..b2ba1b61562
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/stored-proc-limitations.md
@@ -0,0 +1,3 @@
+- `COMMIT` and `ROLLBACK` statements are not supported within nested procedures. [#122266](https://github.com/cockroachdb/cockroach/issues/122266)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/trigger-limitations.md b/src/current/_includes/v25.1/known-limitations/trigger-limitations.md
new file mode 100644
index 00000000000..fb1c4685480
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/trigger-limitations.md
@@ -0,0 +1,3 @@
+- A [trigger function]({% link {{ page.version.version }}/triggers.md %}#trigger-function) that is used in an existing trigger cannot be replaced with `CREATE OR REPLACE` syntax. To use `CREATE OR REPLACE`, first [drop any triggers]({% link {{ page.version.version }}/drop-trigger.md %}) that are using the function. [#134555](https://github.com/cockroachdb/cockroach/issues/134555)
+- Hidden columns are not visible to triggers. [#133331](https://github.com/cockroachdb/cockroach/issues/133331)
+- {% include {{ page.version.version }}/known-limitations/drop-trigger-limitations.md %}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/trigram-unsupported-syntax.md b/src/current/_includes/v25.1/known-limitations/trigram-unsupported-syntax.md
new file mode 100644
index 00000000000..494730c7ae8
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/trigram-unsupported-syntax.md
@@ -0,0 +1,9 @@
+- `word_similarity()` built-in function.
+- `strict_word_similarity()` built-in function.
+- `%>` and `<%` comparisons and acceleration.
+- `<<%` and `%>>` comparisons and acceleration.
+- `<->`, `<<->`, `<->>`, `<<<->`, and `<->>>` comparisons.
+- Acceleration on [regex string matching]({% link {{ page.version.version }}/scalar-expressions.md %}#string-matching-using-posix-regular-expressions).
+- `%` comparisons, `show_trgm`, and trigram index creation on [collated strings]({% link {{ page.version.version }}/collate.md %}).
+
+[#41285](https://github.com/cockroachdb/cockroach/issues/41285)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/udf-limitations.md b/src/current/_includes/v25.1/known-limitations/udf-limitations.md
new file mode 100644
index 00000000000..57011914407
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/udf-limitations.md
@@ -0,0 +1,10 @@
+- A `RECORD`-returning UDF cannot be created unless its root block contains a `RETURN` statement, because the `RETURN` statement is what restricts the wildcard `RECORD` type to a concrete type. [#122945](https://github.com/cockroachdb/cockroach/issues/122945)
+- User-defined functions are not currently supported in:
+ - Expressions (column, index, constraint) in tables. [#87699](https://github.com/cockroachdb/cockroach/issues/87699)
+ - Views. [#87699](https://github.com/cockroachdb/cockroach/issues/87699)
+- User-defined functions cannot call themselves recursively. [#93049](https://github.com/cockroachdb/cockroach/issues/93049)
+- [Common table expressions]({% link {{ page.version.version }}/common-table-expressions.md %}) (CTE), recursive or non-recursive, are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}) (UDF). That is, you cannot use a `WITH` clause in the body of a UDF. [#92961](https://github.com/cockroachdb/cockroach/issues/92961)
+- The `setval` function cannot be resolved when used inside UDF bodies. [#110860](https://github.com/cockroachdb/cockroach/issues/110860)
+- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) in UDFs is not supported. [#108184](https://github.com/cockroachdb/cockroach/issues/108184)
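+
+For example, a minimal sketch of the unsupported CTE pattern noted above (the `users` table and its `active` column are hypothetical):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+-- Errors: the UDF body contains a WITH clause.
+CREATE FUNCTION active_user_count() RETURNS INT LANGUAGE SQL AS $$
+  WITH a AS (SELECT count(*) AS c FROM users WHERE active) SELECT c FROM a;
+$$;
+~~~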
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/known-limitations/vectorized-engine-limitations.md b/src/current/_includes/v25.1/known-limitations/vectorized-engine-limitations.md
new file mode 100644
index 00000000000..daea59ebf88
--- /dev/null
+++ b/src/current/_includes/v25.1/known-limitations/vectorized-engine-limitations.md
@@ -0,0 +1,2 @@
+- The vectorized engine does not support queries containing a join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). [#38018](https://github.com/cockroachdb/cockroach/issues/38018)
+- The vectorized engine does not support [working with spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}). Queries with [geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) or [spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}) will revert to the row-oriented execution engine.
diff --git a/src/current/_includes/v25.1/ldr/create_logical_replication_stream_stmt.html b/src/current/_includes/v25.1/ldr/create_logical_replication_stream_stmt.html
new file mode 100644
index 00000000000..f0b742f619e
--- /dev/null
+++ b/src/current/_includes/v25.1/ldr/create_logical_replication_stream_stmt.html
@@ -0,0 +1,171 @@
+
+
+
+
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/ldr/immediate-description.md b/src/current/_includes/v25.1/ldr/immediate-description.md
new file mode 100644
index 00000000000..eb87361a009
--- /dev/null
+++ b/src/current/_includes/v25.1/ldr/immediate-description.md
@@ -0,0 +1 @@
+Attempts to replicate the changed row directly into the destination table, without re-running constraint validations. It does not support writing into tables with [foreign key]({% link {{ page.version.version }}/foreign-key.md %}) constraints.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/ldr/multiple-tables.md b/src/current/_includes/v25.1/ldr/multiple-tables.md
new file mode 100644
index 00000000000..0522fb605be
--- /dev/null
+++ b/src/current/_includes/v25.1/ldr/multiple-tables.md
@@ -0,0 +1 @@
+There are tradeoffs between replicating one table per LDR job and multiple tables in one LDR job. Multiple tables in one LDR job can be easier to operate; for example, pausing and resuming the single job stops and resumes LDR for all of its tables. However, observability is then only available at the job level. Replicating one table per LDR job allows for table-level observability.
diff --git a/src/current/_includes/v25.1/ldr/show-logical-replication-responses.md b/src/current/_includes/v25.1/ldr/show-logical-replication-responses.md
new file mode 100644
index 00000000000..0316fe9e6d9
--- /dev/null
+++ b/src/current/_includes/v25.1/ldr/show-logical-replication-responses.md
@@ -0,0 +1,9 @@
+Field | Response
+---------+----------
+`job_id` | The job's ID. Use with [`CANCEL JOB`]({% link {{ page.version.version }}/cancel-job.md %}), [`PAUSE JOB`]({% link {{ page.version.version }}/pause-job.md %}), [`RESUME JOB`]({% link {{ page.version.version }}/resume-job.md %}), [`SHOW JOB`]({% link {{ page.version.version }}/show-jobs.md %}).
+`status` | The job's current state. Possible values: `pending`, `paused`, `pause-requested`, `failed`, `succeeded`, `canceled`, `cancel-requested`, `running`, `retry-running`, `retry-reverting`, `reverting`, `revert-failed`. Refer to [Jobs status]({% link {{ page.version.version }}/show-jobs.md %}#job-status) for a description of each status.
+`targets` | The fully qualified name of the table(s) that are part of the LDR job.
+`replicated_time` | The latest [timestamp]({% link {{ page.version.version }}/timestamp.md %}) at which the destination cluster has consistent data. This time advances automatically as long as the LDR job proceeds without error. `replicated_time` is updated periodically (every 30s).
+`replication_start_time` | The start time of the LDR job.
+`conflict_resolution_type` | The type of [conflict resolution]({% link {{ page.version.version }}/manage-logical-data-replication.md %}#conflict-resolution): `LWW` (last write wins).
+`description` | Description of the job including the replicating table(s) and the source cluster connection.
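+
+For example, a minimal usage sketch that returns fields from the preceding table (the exact set of returned fields depends on the options used):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SHOW LOGICAL REPLICATION JOBS;
+~~~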
diff --git a/src/current/_includes/v25.1/ldr/show_logical_replication_jobs_stmt.html b/src/current/_includes/v25.1/ldr/show_logical_replication_jobs_stmt.html
new file mode 100644
index 00000000000..50cef32cc22
--- /dev/null
+++ b/src/current/_includes/v25.1/ldr/show_logical_replication_jobs_stmt.html
@@ -0,0 +1,55 @@
+
+
+
+
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/ldr/validated-description.md b/src/current/_includes/v25.1/ldr/validated-description.md
new file mode 100644
index 00000000000..4b7bb9a8b18
--- /dev/null
+++ b/src/current/_includes/v25.1/ldr/validated-description.md
@@ -0,0 +1 @@
+Attempts to apply the write in a similar way to a user-run query, which would re-run all constraint validations relevant to the destination table(s). If the change violates foreign key dependencies, unique constraints, or other constraints, the row will be put in the [dead letter queue (DLQ)]({% link {{ page.version.version }}/manage-logical-data-replication.md %}#dead-letter-queue-dlq) instead. Like the [SQL layer]({% link {{ page.version.version }}/architecture/sql-layer.md %}), `validated` mode does not recognize deletion tombstones. As a result, an update to the same key from cluster A will successfully apply on cluster B, even if that key was deleted on cluster B before the LDR job streamed the cluster A update to the key.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/leader-leases-intro.md b/src/current/_includes/v25.1/leader-leases-intro.md
new file mode 100644
index 00000000000..e2e62cc0ccb
--- /dev/null
+++ b/src/current/_includes/v25.1/leader-leases-intro.md
@@ -0,0 +1 @@
+CockroachDB offers an improved leasing system rebuilt atop a stronger form of [Raft]({% link {{ page.version.version }}/architecture/replication-layer.md %}#raft) leadership that ensures that the Raft leader is **always** the range's leaseholder. This new type of lease is called a _Leader lease_, and supersedes [epoch-based leases]({% link {{ page.version.version }}/architecture/replication-layer.md %}#epoch-based-leases-table-data) and [expiration-based leases]({% link {{ page.version.version }}/architecture/replication-layer.md %}#expiration-based-leases-meta-and-system-ranges) while combining the performance of the former with the resilience of the latter. **Leader leases are not enabled by default.**
diff --git a/src/current/_includes/v25.1/metric-names-serverless.md b/src/current/_includes/v25.1/metric-names-serverless.md
new file mode 100644
index 00000000000..33fab10e2a8
--- /dev/null
+++ b/src/current/_includes/v25.1/metric-names-serverless.md
@@ -0,0 +1,244 @@
+Name | Description
+-----|-----
+`addsstable.applications` | Number of SSTable ingestions applied (i.e., applied by Replicas)
+`addsstable.copies` | Number of SSTable ingestions that required copying files during application
+`addsstable.proposals` | Number of SSTable ingestions proposed (i.e., sent to Raft by lease holders)
+`admission.wait_sum.kv-stores` | Total wait time in micros
+`admission.wait_sum.kv` | Total wait time in micros
+`admission.wait_sum.sql-kv-response` | Total wait time in micros
+`admission.wait_sum.sql-sql-response` | Total wait time in micros
+`capacity.available` | Available storage capacity
+`capacity.reserved` | Capacity reserved for snapshots
+`capacity.used` | Used storage capacity
+`capacity` | Total storage capacity
+`changefeed.backfill_count` | Number of changefeeds currently executing backfill
+`changefeed.backfill_pending_ranges` | Number of ranges in an ongoing backfill that are yet to be fully emitted
+`changefeed.commit_latency` | Event commit latency: a difference between event MVCC timestamp and the time it was acknowledged by the downstream sink. If the sink batches events, then the difference between the earliest event in the batch and acknowledgement is recorded; Excludes latency during backfill
+`changefeed.emitted_messages` | Messages emitted by all feeds
+`changefeed.error_retries` | Total retryable errors encountered by all changefeeds
+`changefeed.failures` | Total number of changefeed jobs which have failed
+`changefeed.max_behind_nanos` | Largest commit-to-emit duration of any running feed
+`changefeed.message_size_hist` | Message size histogram
+`changefeed.running` | Number of currently running changefeeds, including sinkless
+`clock-offset.meannanos` | Mean clock offset with other nodes
+`clock-offset.stddevnanos` | Stddev clock offset with other nodes
+`distsender.batches.partial` | Number of partial batches processed after being divided on range boundaries
+`distsender.batches` | Number of batches processed
+`distsender.errors.notleaseholder` | Number of NotLeaseHolderErrors encountered from replica-addressed RPCs
+`distsender.rpc.sent.local` | Number of replica-addressed RPCs sent through the local-server optimization
+`distsender.rpc.sent.nextreplicaerror` | Number of replica-addressed RPCs sent due to per-replica errors
+`distsender.rpc.sent` | Number of replica-addressed RPCs sent
+`exec.error` | Number of batch KV requests that failed to execute on this node. This count excludes transaction restart/abort errors. However, it will include other errors expected during normal operation, such as ConditionFailedError. This metric is thus not an indicator of KV health.
+`exec.latency` | Latency of batch KV requests (including errors) executed on this node. This measures requests already addressed to a single replica, from the moment at which they arrive at the internal gRPC endpoint to the moment at which the response (or an error) is returned. This latency includes in particular commit waits, conflict resolution and replication, and end-users can easily produce high measurements via long-running transactions that conflict with foreground traffic. This metric thus does not provide a good signal for understanding the health of the KV layer.
+`exec.success` | Number of batch KV requests executed successfully on this node. A request is considered to have executed 'successfully' if it either returns a result or a transaction restart/abort error.
+`gcbytesage` | Cumulative age of non-live data
+`gossip.bytes.received` | Number of received gossip bytes
+`gossip.bytes.sent` | Number of sent gossip bytes
+`gossip.connections.incoming` | Number of active incoming gossip connections
+`gossip.connections.outgoing` | Number of active outgoing gossip connections
+`gossip.connections.refused` | Number of refused incoming gossip connections
+`gossip.infos.received` | Number of received gossip Info objects
+`gossip.infos.sent` | Number of sent gossip Info objects
+`intentage` | Cumulative age of intents
+`intentbytes` | Number of bytes in intent KV pairs
+`intentcount` | Count of intent keys
+`jobs.changefeed.resume_retry_error` | Number of changefeed jobs which failed with a retryable error
+`keybytes` | Number of bytes taken up by keys
+`keycount` | Count of all keys
+`leases.epoch` | Number of replica leaseholders using epoch-based leases
+`leases.error` | Number of failed lease requests
+`leases.expiration` | Number of replica leaseholders using expiration-based leases
+`leases.success` | Number of successful lease requests
+`leases.transfers.error` | Number of failed lease transfers
+`leases.transfers.success` | Number of successful lease transfers
+`livebytes` | Number of bytes of live data (keys plus values)
+`livecount` | Count of live keys
+`liveness.epochincrements` | Number of times this node has incremented its liveness epoch
+`liveness.heartbeatfailures` | Number of failed node liveness heartbeats from this node
+`liveness.heartbeatlatency` | Node liveness heartbeat latency
+`liveness.heartbeatsuccesses` | Number of successful node liveness heartbeats from this node
+`liveness.livenodes` | Number of live nodes in the cluster (will be 0 if this node is not itself live)
+`queue.consistency.pending` | Number of pending replicas in the consistency checker queue
+`queue.consistency.process.failure` | Number of replicas which failed processing in the consistency checker queue
+`queue.consistency.process.success` | Number of replicas successfully processed by the consistency checker queue
+`queue.consistency.processingnanos` | Nanoseconds spent processing replicas in the consistency checker queue
+`queue.gc.info.abortspanconsidered` | Number of AbortSpan entries eligible for removal based on their ages
+`queue.gc.info.abortspangcnum` | Number of AbortSpan entries fit for removal
+`queue.gc.info.abortspanscanned` | Number of transactions present in the AbortSpan scanned from the engine
+`queue.gc.info.intentsconsidered` | Number of intents eligible to be considered because they are at least two hours old
+`queue.gc.info.intenttxns` | Number of associated distinct transactions
+`queue.gc.info.numkeysaffected` | Number of keys with data that is eligible for garbage collection
+`queue.gc.info.pushtxn` | Number of attempted pushes
+`queue.gc.info.resolvesuccess` | Number of successful intent resolutions
+`queue.gc.info.resolvetotal` | Number of attempted intent resolutions
+`queue.gc.info.transactionspangcaborted` | Number of entries eligible for garbage collection that correspond to aborted txns
+`queue.gc.info.transactionspangccommitted` | Number of entries eligible for garbage collection that correspond to committed txns
+`queue.gc.info.transactionspangcpending` | Number of entries eligible for garbage collection that correspond to pending txns
+`queue.gc.info.transactionspanscanned` | Number of entries in transaction spans scanned from the engine
+`queue.gc.pending` | Number of pending replicas in the MVCC garbage collection queue
+`queue.gc.process.failure` | Number of replicas which failed processing in the MVCC garbage collection queue
+`queue.gc.process.success` | Number of replicas successfully processed by the MVCC garbage collection queue
+`queue.gc.processingnanos` | Nanoseconds spent processing replicas in the MVCC garbage collection queue
+`queue.raftlog.pending` | Number of pending replicas in the Raft log queue
+`queue.raftlog.process.failure` | Number of replicas which failed processing in the Raft log queue
+`queue.raftlog.process.success` | Number of replicas successfully processed by the Raft log queue
+`queue.raftlog.processingnanos` | Nanoseconds spent processing replicas in the Raft log queue
+`queue.raftsnapshot.pending` | Number of pending replicas in the Raft repair queue
+`queue.raftsnapshot.process.failure` | Number of replicas which failed processing in the Raft repair queue
+`queue.raftsnapshot.process.success` | Number of replicas successfully processed by the Raft repair queue
+`queue.raftsnapshot.processingnanos` | Nanoseconds spent processing replicas in the Raft repair queue
+`queue.replicagc.pending` | Number of pending replicas in the replica queue
+`queue.replicagc.process.failure` | Number of replicas which failed processing in the replica garbage collection queue
+`queue.replicagc.process.success` | Number of replicas successfully processed by the replica garbage collection queue
+`queue.replicagc.processingnanos` | Nanoseconds spent processing replicas in the replica garbage collection queue
+`queue.replicagc.removereplica` | Number of replica removals attempted by the replica garbage collection queue
+`queue.replicate.addreplica` | Number of replica additions attempted by the replicate queue
+`queue.replicate.pending` | Number of pending replicas in the replicate queue
+`queue.replicate.process.failure` | Number of replicas which failed processing in the replicate queue
+`queue.replicate.process.success` | Number of replicas successfully processed by the replicate queue
+`queue.replicate.processingnanos` | Nanoseconds spent processing replicas in the replicate queue
+`queue.replicate.purgatory` | Number of replicas in the replicate queue's purgatory, awaiting allocation options
+`queue.replicate.rebalancereplica` | Number of replica rebalancer-initiated additions attempted by the replicate queue
+`queue.replicate.removedeadreplica` | Number of dead replica removals attempted by the replicate queue (typically in response to a node outage)
+`queue.replicate.removereplica` | Number of replica removals attempted by the replicate queue (typically in response to a rebalancer-initiated addition)
+`queue.replicate.transferlease` | Number of range lease transfers attempted by the replicate queue
+`queue.split.pending` | Number of pending replicas in the split queue
+`queue.split.process.failure` | Number of replicas which failed processing in the split queue
+`queue.split.process.success` | Number of replicas successfully processed by the split queue
+`queue.split.processingnanos` | Nanoseconds spent processing replicas in the split queue
+`queue.tsmaintenance.pending` | Number of pending replicas in the time series maintenance queue
+`queue.tsmaintenance.process.failure` | Number of replicas which failed processing in the time series maintenance queue
+`queue.tsmaintenance.process.success` | Number of replicas successfully processed by the time series maintenance queue
+`queue.tsmaintenance.processingnanos` | Nanoseconds spent processing replicas in the time series maintenance queue
+`raft.commandsapplied` | Count of Raft commands applied. This measurement is taken on the Raft apply loops of all Replicas (leaders and followers alike), meaning that it does not measure the number of Raft commands *proposed* (in the hypothetical extreme case, all Replicas may apply all commands through snapshots, thus not increasing this metric at all). Instead, it is a proxy for how much work is being done advancing the Replica state machines on this node.
+`raft.heartbeats.pending` | Number of pending heartbeats and responses waiting to be coalesced
+`raft.process.commandcommit.latency` | Latency histogram for applying a batch of Raft commands to the state machine. This metric is misnamed: it measures the latency for *applying* a batch of committed Raft commands to a Replica state machine. This requires only non-durable I/O (except for replication configuration changes). Note that a "batch" in this context is really a sub-batch of the batch received for application during Raft ready handling. The 'raft.process.applycommitted.latency' histogram is likely more suitable in most cases, as it measures the total latency across all sub-batches (i.e., the sum of commandcommit.latency for a complete batch).
+`raft.process.logcommit.latency` | Latency histogram for committing Raft log entries to stable storage. This measures the latency of durably committing a group of newly received Raft entries as well as the HardState entry to disk. This excludes any data processing, i.e., we measure purely the commit latency of the resulting Engine write. Homogeneous bands of p50-p99 latencies (in the presence of regular Raft traffic), make it likely that the storage layer is healthy. Spikes in the latency bands can either hint at the presence of large sets of Raft entries being received, or at performance issues at the storage layer.
+`raft.process.tickingnanos` | Nanoseconds spent in store.processRaft() processing replica.Tick()
+`raft.process.workingnanos` | Nanoseconds spent in store.processRaft() working. This is the sum of the measurements passed to the raft.process.handleready.latency histogram.
+`raft.rcvd.app` | Number of MsgApp messages received by this store
+`raft.rcvd.appresp` | Number of MsgAppResp messages received by this store
+`raft.rcvd.dropped` | Number of incoming Raft messages dropped (due to queue length or size)
+`raft.rcvd.heartbeat` | Number of (coalesced, if enabled) MsgHeartbeat messages received by this store
+`raft.rcvd.heartbeatresp` | Number of (coalesced, if enabled) MsgHeartbeatResp messages received by this store
+`raft.rcvd.prevote` | Number of MsgPreVote messages received by this store
+`raft.rcvd.prevoteresp` | Number of MsgPreVoteResp messages received by this store
+`raft.rcvd.prop` | Number of MsgProp messages received by this store
+`raft.rcvd.snap` | Number of MsgSnap messages received by this store
+`raft.rcvd.timeoutnow` | Number of MsgTimeoutNow messages received by this store
+`raft.rcvd.transferleader` | Number of MsgTransferLeader messages received by this store
+`raft.rcvd.vote` | Number of MsgVote messages received by this store
+`raft.rcvd.voteresp` | Number of MsgVoteResp messages received by this store
+`raft.ticks` | Number of Raft ticks queued
+`raftlog.behind` | Number of Raft log entries followers on other stores are behind. This gauge provides a view of the aggregate number of log entries the Raft leaders on this node think the followers are behind. Since a Raft leader may not always have a good estimate for this information for all of its followers, and since followers are expected to be behind (when they are not required as part of a quorum) *and* the aggregate thus scales like the count of such followers, it is difficult to meaningfully interpret this metric.
+`raftlog.truncated` | Number of Raft log entries truncated
+`range.adds` | Number of range additions
+`range.raftleadertransfers` | Number of Raft leader transfers
+`range.removes` | Number of range removals
+`range.snapshots.generated` | Number of generated snapshots
+`range.snapshots.recv-in-progress` | Number of non-empty snapshots in progress on a receiver store
+`range.snapshots.recv-queue` | Number of queued non-empty snapshots on a receiver store
+`range.snapshots.recv-total-in-progress` | Number of empty and non-empty snapshots in progress on a receiver store
+`range.snapshots.send-in-progress` | Number of non-empty snapshots in progress on a sender store
+`range.snapshots.send-queue` | Number of queued non-empty snapshots on a sender store
+`range.snapshots.send-total-in-progress` | Number of empty and non-empty snapshots in progress on a sender store
+`range.splits` | Number of range splits
+`ranges.overreplicated` | Number of ranges with more live replicas than the replication target
+`ranges.unavailable` | Number of ranges with fewer live replicas than needed for quorum
+`ranges.underreplicated` | Number of ranges with fewer live replicas than the replication target
+`ranges` | Number of ranges
+`rebalancing.writespersecond` | Number of keys written (i.e., applied by raft) per second to the store, averaged over a large time period as used in rebalancing decisions
+`replicas.leaders_not_leaseholders` | Number of replicas that are Raft leaders whose range lease is held by another store
+`replicas.leaders` | Number of Raft leaders
+`replicas.leaseholders` | Number of lease holders
+`replicas.quiescent` | Number of quiesced replicas
+`replicas.reserved` | Number of replicas reserved for snapshots
+`replicas` | Number of replicas
+`requests.backpressure.split` | Number of backpressured writes waiting on a range split. A range will backpressure (roughly) non-system traffic when the range is above the configured size until the range splits. When the rate of this metric is nonzero over extended periods of time, it should be investigated why splits are not occurring.
+`requests.slow.distsender` | Number of replica-bound RPCs currently stuck or retrying for a long time. Note that this is not a good signal for KV health. The remote side of the RPCs tracked here may experience contention, so an end user can easily cause values for this metric to be emitted by leaving a transaction open for a long time and contending with it using a second transaction.
+`requests.slow.lease` | Number of requests that have been stuck for a long time acquiring a lease. A nonzero value usually indicates range or replica unavailability, and should be investigated. Commonly, `requests.slow.raft` is also a nonzero value, which indicates that the lease requests are not getting a timely response from the replication layer.
+`requests.slow.raft` | Number of requests that have been stuck for a long time in the replication layer. An (evaluated) request has to pass through the replication layer, notably the quota pool and Raft. If it fails to do so within a highly permissive duration, this metric is incremented (and decremented again once the request is either applied or returns an error). A nonzero value indicates range or replica unavailability, and should be investigated.
+`rocksdb.block.cache.hits` | Count of block cache hits
+`rocksdb.block.cache.misses` | Count of block cache misses
+`rocksdb.block.cache.pinned-usage` | Bytes pinned by the block cache
+`rocksdb.block.cache.usage` | Bytes used by the block cache
+`rocksdb.bloom.filter.prefix.checked` | Number of times the bloom filter was checked
+`rocksdb.bloom.filter.prefix.useful` | Number of times the bloom filter helped avoid iterator creation
+`rocksdb.compactions` | Number of table compactions
+`rocksdb.flushes` | Number of table flushes
+`rocksdb.memtable.total-size` | Current size of memtable in bytes
+`rocksdb.num-sstables` | Number of storage engine SSTables
+`rocksdb.read-amplification` | Number of disk reads per query
+`rocksdb.table-readers-mem-estimate` | Memory used by index and filter blocks
+`round-trip-latency` | Distribution of round-trip latencies with other nodes
+`sql.bytesin` | Number of sql bytes received
+`sql.bytesout` | Number of sql bytes sent
+`sql.conn.latency` | Latency to establish and authenticate a SQL connection
+`sql.conns` | Number of active sql connections
+`sql.ddl.count` | Number of SQL DDL statements successfully executed
+`sql.delete.count` | Number of SQL DELETE statements successfully executed
+`sql.distsql.contended_queries.count` | Number of SQL queries that experienced contention
+`sql.distsql.exec.latency` | Latency of DistSQL statement execution
+`sql.distsql.flows.active` | Number of distributed SQL flows currently active
+`sql.distsql.flows.total` | Number of distributed SQL flows executed
+`sql.distsql.queries.active` | Number of SQL queries currently active
+`sql.distsql.queries.total` | Number of SQL queries executed
+`sql.distsql.select.count` | Number of DistSQL SELECT statements
+`sql.distsql.service.latency` | Latency of DistSQL request execution
+`sql.exec.latency` | Latency of SQL statement execution
+`sql.failure.count` | Number of statements resulting in a planning or runtime error
+`sql.full.scan.count` | Number of full table or index scans
+`sql.insert.count` | Number of SQL INSERT statements successfully executed
+`sql.mem.distsql.current` | Current sql statement memory usage for distsql
+`sql.mem.distsql.max` | Memory usage per sql statement for distsql
+`sql.mem.internal.session.current` | Current sql session memory usage for internal
+`sql.mem.internal.session.max` | Memory usage per sql session for internal
+`sql.mem.internal.txn.current` | Current sql transaction memory usage for internal
+`sql.mem.internal.txn.max` | Memory usage per sql transaction for internal
+`sql.misc.count` | Number of other SQL statements successfully executed
+`sql.query.count` | Number of SQL queries executed
+`sql.select.count` | Number of SQL SELECT statements successfully executed
+`sql.service.latency` | Latency of SQL request execution
+`sql.statements.active` | Number of currently active user SQL statements
+`sql.txn.abort.count` | Number of SQL transaction abort errors
+`sql.txn.begin.count` | Number of SQL transaction BEGIN statements successfully executed
+`sql.txn.commit.count` | Number of SQL transaction COMMIT statements successfully executed
+`sql.txn.latency` | Latency of SQL transactions
+`sql.txn.rollback.count` | Number of SQL transaction ROLLBACK statements successfully executed
+`sql.txns.open` | Number of currently open user SQL transactions
+`sql.update.count` | Number of SQL UPDATE statements successfully executed
+`sys.cgo.allocbytes` | Current bytes of memory allocated by cgo
+`sys.cgo.totalbytes` | Total bytes of memory allocated by cgo, but not released
+`sys.cgocalls` | Total number of cgo calls
+`sys.cpu.combined.percent-normalized` | Current user+system cpu percentage, normalized 0-1 by number of cores
+`sys.cpu.sys.ns` | Total system cpu time
+`sys.cpu.sys.percent` | Current system cpu percentage
+`sys.cpu.user.ns` | Total user cpu time
+`sys.cpu.user.percent` | Current user cpu percentage
+`sys.fd.open` | Process open file descriptors
+`sys.fd.softlimit` | Process open FD soft limit
+`sys.gc.count` | Total number of garbage collection runs
+`sys.gc.pause.ns` | Total garbage collection pause
+`sys.gc.pause.percent` | Current garbage collection pause percentage
+`sys.go.allocbytes` | Current bytes of memory allocated by go
+`sys.go.totalbytes` | Total bytes of memory allocated by go, but not released
+`sys.goroutines` | Current number of goroutines
+`sys.host.net.recv.bytes` | Bytes received on all network interfaces since this process started
+`sys.host.net.send.bytes` | Bytes sent on all network interfaces since this process started
+`sys.rss` | Current process RSS
+`sys.uptime` | Process uptime
+`sysbytes` | Number of bytes in system KV pairs
+`syscount` | Count of system KV pairs
+`timeseries.write.bytes` | Total size in bytes of metric samples written to disk
+`timeseries.write.errors` | Total errors encountered while attempting to write metrics to disk
+`timeseries.write.samples` | Total number of metric samples written to disk
+`totalbytes` | Total number of bytes taken up by keys and values including non-live data
+`txn.aborts` | Number of aborted KV transactions
+`txn.commits1PC` | Number of KV transaction one-phase commit attempts
+`txn.commits` | Number of committed KV transactions (including 1PC)
+`txn.durations` | KV transaction durations
+`txn.restarts.serializable` | Number of restarts due to a forwarded commit timestamp and isolation=SERIALIZABLE
+`txn.restarts.writetooold` | Number of restarts due to a concurrent writer committing first
+`txn.restarts` | Number of restarted KV transactions
+`valbytes` | Number of bytes taken up by values
+`valcount` | Count of all values
diff --git a/src/current/_includes/v25.1/metric-names.md b/src/current/_includes/v25.1/metric-names.md
new file mode 100644
index 00000000000..c72864fd149
--- /dev/null
+++ b/src/current/_includes/v25.1/metric-names.md
@@ -0,0 +1,29 @@
+{% assign list1 = site.data.metrics.available-metrics-in-metrics-list %}
+{% assign list2 = site.data.metrics.available-metrics-not-in-metrics-list %}
+
+{% assign available_metrics_combined = list1 | concat: list2 %}
+{% assign available_metrics_sorted = available_metrics_combined | sort: "metric_id" %}
+
+
+
+
+
+CockroachDB Metric Name
+
+Description
+
+Type
+
+Unit
+
+
+
+ {% for m in available_metrics_sorted %} {% comment %} Iterate through the available_metrics. {% endcomment %}
+ {% assign metrics-list = site.data.metrics.metrics-list | where: "metric", m.metric_id %}
+ {% comment %} Get the row from the metrics-list with the given metric_id. {% endcomment %}
+
+
+{{ m.metric_id }}
+ {% comment %} Use the value from the metrics-list, if any, followed by the value in the available-metrics-not-in-metrics-list, if any. {% endcomment %}
+
diff --git a/src/current/_includes/v25.1/migration/load-data-copy-from.md b/src/current/_includes/v25.1/migration/load-data-copy-from.md
new file mode 100644
index 00000000000..32af9248920
--- /dev/null
+++ b/src/current/_includes/v25.1/migration/load-data-copy-from.md
@@ -0,0 +1 @@
+When migrating from PostgreSQL, you can use [`COPY FROM`]({% link {{ page.version.version }}/copy-from.md %}) to copy CSV or tab-delimited data to your CockroachDB tables. This option enables your tables to remain online and accessible. However, it is slower than using [`IMPORT INTO`]({% link {{ page.version.version }}/import-into.md %}).
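+
+For example, a minimal sketch (the `users` table and its columns are hypothetical); issue the statement from a session or driver that supports the `COPY` protocol, then stream the file contents:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+COPY users (id, name) FROM STDIN WITH CSV;
+~~~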
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/migration/load-data-import-into.md b/src/current/_includes/v25.1/migration/load-data-import-into.md
new file mode 100644
index 00000000000..174b1bd33f4
--- /dev/null
+++ b/src/current/_includes/v25.1/migration/load-data-import-into.md
@@ -0,0 +1 @@
+Use [`IMPORT INTO`]({% link {{ page.version.version }}/import-into.md %}) to migrate [CSV]({% link {{ page.version.version }}/migrate-from-csv.md %}), TSV, or [Avro]({% link {{ page.version.version }}/migrate-from-avro.md %}) data stored via [userfile]({% link {{ page.version.version }}/use-userfile-storage.md %}) or [cloud storage]({% link {{ page.version.version }}/use-cloud-storage.md %}) into pre-existing tables on CockroachDB. This option achieves the highest throughput, but [requires taking the CockroachDB tables **offline**]({% link {{ page.version.version }}/import-into.md %}#considerations) to achieve its import speed.
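+
+For example, a minimal sketch (the table, columns, and storage URI are hypothetical):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+IMPORT INTO users (id, name)
+    CSV DATA ('gs://bucket-name/users.csv?AUTH=implicit');
+~~~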
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/migration/load-data-third-party.md b/src/current/_includes/v25.1/migration/load-data-third-party.md
new file mode 100644
index 00000000000..b8f0cf70f17
--- /dev/null
+++ b/src/current/_includes/v25.1/migration/load-data-third-party.md
@@ -0,0 +1 @@
+Use a [third-party data migration tool]({% link {{ page.version.version }}/third-party-database-tools.md %}#data-migration-tools) (e.g., [AWS DMS]({% link {{ page.version.version }}/aws-dms.md %}), [Qlik]({% link {{ page.version.version }}/qlik.md %}), [Striim](striim.html)) to load the data.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/misc/assume-role-description.md b/src/current/_includes/v25.1/misc/assume-role-description.md
new file mode 100644
index 00000000000..ea6ca882975
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/assume-role-description.md
@@ -0,0 +1 @@
+Pass the [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of the role to assume. Use in combination with `AUTH=implicit` or `specified`. `external_id`: Use as part of the `ASSUME_ROLE` value to specify the [external ID](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) for third-party access to your S3 bucket.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/misc/auth-intro-examples.md b/src/current/_includes/v25.1/misc/auth-intro-examples.md
new file mode 100644
index 00000000000..27b7fb4484c
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/auth-intro-examples.md
@@ -0,0 +1,3 @@
+These examples use the **default** `AUTH=specified` parameter. For more detail on how to use `implicit` authentication with Amazon S3 buckets, read [Use Cloud Storage for Bulk Operations — Authentication]({% link {{ page.version.version }}/cloud-storage-authentication.md %}).
+
+CockroachDB supports assume role authentication. This allows you to limit the control specific users have over your storage buckets. See [Assume role authentication]({% link {{ page.version.version }}/cloud-storage-authentication.md %}) for more information.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/misc/available-capacity-metric.md b/src/current/_includes/v25.1/misc/available-capacity-metric.md
new file mode 100644
index 00000000000..d3bb8ffae1e
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/available-capacity-metric.md
@@ -0,0 +1 @@
+If you are testing your deployment locally with multiple CockroachDB nodes running on a single machine (this is [not recommended in production]({% link {{ page.version.version }}/recommended-production-settings.md %}#topology)), you must explicitly [set the store size]({% link {{ page.version.version }}/cockroach-start.md %}#store) per node in order to display the correct capacity. Otherwise, the machine's actual disk capacity will be counted as a separate store for each node, thus inflating the computed capacity.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/misc/aws-locations.md b/src/current/_includes/v25.1/misc/aws-locations.md
new file mode 100644
index 00000000000..8b073c1f230
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/aws-locations.md
@@ -0,0 +1,18 @@
+| Location | SQL Statement |
+| ------ | ------ |
+| US East (N. Virginia) | `INSERT into system.locations VALUES ('region', 'us-east-1', 37.478397, -76.453077)`|
+| US East (Ohio) | `INSERT into system.locations VALUES ('region', 'us-east-2', 40.417287, -76.453077)` |
+| US West (N. California) | `INSERT into system.locations VALUES ('region', 'us-west-1', 38.837522, -120.895824)` |
+| US West (Oregon) | `INSERT into system.locations VALUES ('region', 'us-west-2', 43.804133, -120.554201)` |
+| Canada (Central) | `INSERT into system.locations VALUES ('region', 'ca-central-1', 56.130366, -106.346771)` |
+| EU (Frankfurt) | `INSERT into system.locations VALUES ('region', 'eu-central-1', 50.110922, 8.682127)` |
+| EU (Ireland) | `INSERT into system.locations VALUES ('region', 'eu-west-1', 53.142367, -7.692054)` |
+| EU (London) | `INSERT into system.locations VALUES ('region', 'eu-west-2', 51.507351, -0.127758)` |
+| EU (Paris) | `INSERT into system.locations VALUES ('region', 'eu-west-3', 48.856614, 2.352222)` |
+| Asia Pacific (Tokyo) | `INSERT into system.locations VALUES ('region', 'ap-northeast-1', 35.689487, 139.691706)` |
+| Asia Pacific (Seoul) | `INSERT into system.locations VALUES ('region', 'ap-northeast-2', 37.566535, 126.977969)` |
+| Asia Pacific (Osaka-Local) | `INSERT into system.locations VALUES ('region', 'ap-northeast-3', 34.693738, 135.502165)` |
+| Asia Pacific (Singapore) | `INSERT into system.locations VALUES ('region', 'ap-southeast-1', 1.352083, 103.819836)` |
+| Asia Pacific (Sydney) | `INSERT into system.locations VALUES ('region', 'ap-southeast-2', -33.86882, 151.209296)` |
+| Asia Pacific (Mumbai) | `INSERT into system.locations VALUES ('region', 'ap-south-1', 19.075984, 72.877656)` |
+| South America (São Paulo) | `INSERT into system.locations VALUES ('region', 'sa-east-1', -23.55052, -46.633309)` |
diff --git a/src/current/_includes/v25.1/misc/azure-blob.md b/src/current/_includes/v25.1/misc/azure-blob.md
new file mode 100644
index 00000000000..3449e639f7a
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/azure-blob.md
@@ -0,0 +1 @@
+For [changefeeds]({% link {{ page.version.version }}/changefeed-sinks.md %}), you must use the `azure://` scheme. For all other jobs, the `azure://` and `azure-storage://` schemes are also supported for backward compatibility, though `azure-blob://` is recommended.
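+
+For example, a minimal sketch of a non-changefeed job using the recommended scheme (the container, path, and credential placeholders are hypothetical):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+BACKUP INTO 'azure-blob://{container}/{path}?AZURE_ACCOUNT_NAME={account name}&AZURE_ACCOUNT_KEY={url-encoded key}';
+~~~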
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/misc/azure-env-param.md b/src/current/_includes/v25.1/misc/azure-env-param.md
new file mode 100644
index 00000000000..62b6b01293e
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/azure-env-param.md
@@ -0,0 +1 @@
+The [Azure environment](https://learn.microsoft.com/azure/deployment-environments/concept-environments-key-concepts#environments) that the storage account belongs to. The accepted values are: `AZURECHINACLOUD`, `AZUREGERMANCLOUD`, `AZUREPUBLICCLOUD`, and [`AZUREUSGOVERNMENTCLOUD`](https://learn.microsoft.com/azure/azure-government/documentation-government-developer-guide). These are cloud environments that meet security, compliance, and data privacy requirements for the respective instance of Azure cloud. If the parameter is not specified, it will default to `AZUREPUBLICCLOUD`.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/misc/azure-locations.md b/src/current/_includes/v25.1/misc/azure-locations.md
new file mode 100644
index 00000000000..7119ff8b7cb
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/azure-locations.md
@@ -0,0 +1,30 @@
+| Location | SQL Statement |
+| -------- | ------------- |
+| eastasia (East Asia) | `INSERT into system.locations VALUES ('region', 'eastasia', 22.267, 114.188)` |
+| southeastasia (Southeast Asia) | `INSERT into system.locations VALUES ('region', 'southeastasia', 1.283, 103.833)` |
+| centralus (Central US) | `INSERT into system.locations VALUES ('region', 'centralus', 41.5908, -93.6208)` |
+| eastus (East US) | `INSERT into system.locations VALUES ('region', 'eastus', 37.3719, -79.8164)` |
+| eastus2 (East US 2) | `INSERT into system.locations VALUES ('region', 'eastus2', 36.6681, -78.3889)` |
+| westus (West US) | `INSERT into system.locations VALUES ('region', 'westus', 37.783, -122.417)` |
+| northcentralus (North Central US) | `INSERT into system.locations VALUES ('region', 'northcentralus', 41.8819, -87.6278)` |
+| southcentralus (South Central US) | `INSERT into system.locations VALUES ('region', 'southcentralus', 29.4167, -98.5)` |
+| northeurope (North Europe) | `INSERT into system.locations VALUES ('region', 'northeurope', 53.3478, -6.2597)` |
+| westeurope (West Europe) | `INSERT into system.locations VALUES ('region', 'westeurope', 52.3667, 4.9)` |
+| japanwest (Japan West) | `INSERT into system.locations VALUES ('region', 'japanwest', 34.6939, 135.5022)` |
+| japaneast (Japan East) | `INSERT into system.locations VALUES ('region', 'japaneast', 35.68, 139.77)` |
+| brazilsouth (Brazil South) | `INSERT into system.locations VALUES ('region', 'brazilsouth', -23.55, -46.633)` |
+| australiaeast (Australia East) | `INSERT into system.locations VALUES ('region', 'australiaeast', -33.86, 151.2094)` |
+| australiasoutheast (Australia Southeast) | `INSERT into system.locations VALUES ('region', 'australiasoutheast', -37.8136, 144.9631)` |
+| southindia (South India) | `INSERT into system.locations VALUES ('region', 'southindia', 12.9822, 80.1636)` |
+| centralindia (Central India) | `INSERT into system.locations VALUES ('region', 'centralindia', 18.5822, 73.9197)` |
+| westindia (West India) | `INSERT into system.locations VALUES ('region', 'westindia', 19.088, 72.868)` |
+| canadacentral (Canada Central) | `INSERT into system.locations VALUES ('region', 'canadacentral', 43.653, -79.383)` |
+| canadaeast (Canada East) | `INSERT into system.locations VALUES ('region', 'canadaeast', 46.817, -71.217)` |
+| uksouth (UK South) | `INSERT into system.locations VALUES ('region', 'uksouth', 50.941, -0.799)` |
+| ukwest (UK West) | `INSERT into system.locations VALUES ('region', 'ukwest', 53.427, -3.084)` |
+| westcentralus (West Central US) | `INSERT into system.locations VALUES ('region', 'westcentralus', 40.890, -110.234)` |
+| westus2 (West US 2) | `INSERT into system.locations VALUES ('region', 'westus2', 47.233, -119.852)` |
+| koreacentral (Korea Central) | `INSERT into system.locations VALUES ('region', 'koreacentral', 37.5665, 126.9780)` |
+| koreasouth (Korea South) | `INSERT into system.locations VALUES ('region', 'koreasouth', 35.1796, 129.0756)` |
+| francecentral (France Central) | `INSERT into system.locations VALUES ('region', 'francecentral', 46.3772, 2.3730)` |
+| francesouth (France South) | `INSERT into system.locations VALUES ('region', 'francesouth', 43.8345, 2.1972)` |
diff --git a/src/current/_includes/v25.1/misc/basic-terms.md b/src/current/_includes/v25.1/misc/basic-terms.md
new file mode 100644
index 00000000000..f168f878c63
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/basic-terms.md
@@ -0,0 +1,36 @@
+### Cluster
+A group of interconnected CockroachDB nodes that function as a single distributed SQL database server. Nodes collaboratively organize transactions, and rebalance workload and data storage to optimize performance and fault-tolerance.
+
+Each cluster has its own authorization hierarchy, meaning that users and roles must be defined on that specific cluster.
+
+A CockroachDB cluster can be run in CockroachDB Cloud, within a customer [Organization]({% link {{ page.version.version }}/architecture/glossary.md %}#organization), or can be self-hosted.
+
+### Node
+An individual instance of CockroachDB. One or more nodes form a cluster.
+
+### Range
+
+CockroachDB stores all user data (tables, indexes, etc.) and almost all system data in a sorted map of key-value pairs. This keyspace is divided into contiguous chunks called _ranges_, such that every key is found in one range.
+
+From a SQL perspective, a table and its secondary indexes initially map to a single range, where each key-value pair in the range represents a single row in the table (also called the _primary index_ because the table is sorted by the primary key) or a single row in a secondary index. As soon as the size of a range reaches [the default range size]({% link {{ page.version.version }}/configure-replication-zones.md %}#range-max-bytes), it is [split into two ranges]({% link {{ page.version.version }}/architecture/distribution-layer.md %}#range-splits). This process continues for these new ranges as the table and its indexes continue growing.
+
+### Replica
+
+A copy of a range stored on a node. By default, there are three [replicas]({% link {{ page.version.version }}/configure-replication-zones.md %}#num_replicas) of each range on different nodes.
+
+### Leaseholder
+
+The replica that holds the "range lease." This replica receives and coordinates all read and write requests for the range.
+
+For most types of tables and queries, the leaseholder is the only replica that can serve consistent reads (reads that return "the latest" data).
+
+### Raft protocol
+
+The [consensus protocol]({% link {{ page.version.version }}/architecture/replication-layer.md %}#raft) employed in CockroachDB that ensures that your data is safely stored on multiple nodes and that those nodes agree on the current state even if some of them are temporarily disconnected.
+
+### Raft leader
+
+For each range, the replica that is the "leader" for write requests. The leader uses the Raft protocol to ensure that a majority of replicas (the leader and enough followers) agree, based on their Raft logs, before committing the write. The Raft leader is almost always the same replica as the leaseholder.
+
+### Raft log
+A time-ordered log of writes to a range that its replicas have agreed on. This log exists on-disk with each replica and is the range's source of truth for consistent replication.
diff --git a/src/current/_includes/v25.1/misc/beta-release-warning.md b/src/current/_includes/v25.1/misc/beta-release-warning.md
new file mode 100644
index 00000000000..c228f650d04
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/beta-release-warning.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_danger}}
+Beta releases are intended for testing and experimentation only. Beta releases are not recommended for production use, as they can lead to data corruption, cluster unavailability, performance issues, etc.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/misc/beta-warning.md b/src/current/_includes/v25.1/misc/beta-warning.md
new file mode 100644
index 00000000000..4a5b9e3c6ae
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/beta-warning.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_danger}}
+**This is a beta feature.** It is currently undergoing continued testing. Please [file a GitHub issue]({% link {{ page.version.version }}/file-an-issue.md %}) with us if you identify a bug.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/misc/bulk-permission-note.md b/src/current/_includes/v25.1/misc/bulk-permission-note.md
new file mode 100644
index 00000000000..cb008696def
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/bulk-permission-note.md
@@ -0,0 +1 @@
+We recommend using [cloud storage]({% link {{ page.version.version }}/use-cloud-storage.md %}). You also need to ensure that the permissions at your storage destination are configured for the operation. See [Storage Permissions]({% link {{ page.version.version }}/use-cloud-storage.md %}#storage-permissions) for a list of the necessary permissions that each bulk operation requires.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/misc/cert-auth-using-x509-subject.md b/src/current/_includes/v25.1/misc/cert-auth-using-x509-subject.md
new file mode 100644
index 00000000000..281feb1986f
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/cert-auth-using-x509-subject.md
@@ -0,0 +1 @@
+If you manage your own Certificate Authority (CA) infrastructure, CockroachDB supports mapping between the Subject field of your [X.509 certificates](https://en.wikipedia.org/wiki/X.509) and SQL [roles]({% link {{ page.version.version }}/security-reference/authorization.md %}#roles). For more information, see [Certificate-based authentication using multiple values from the X.509 Subject field]({% link {{page.version.version}}/certificate-based-authentication-using-the-x509-subject-field.md %}).
diff --git a/src/current/_includes/v25.1/misc/chrome-localhost.md b/src/current/_includes/v25.1/misc/chrome-localhost.md
new file mode 100644
index 00000000000..d794ff339d0
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/chrome-localhost.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+If you are using Google Chrome, and you are getting an error about not being able to reach `localhost` because its certificate has been revoked, go to chrome://flags/#allow-insecure-localhost, enable "Allow invalid certificates for resources loaded from localhost", and then restart the browser. Enabling this Chrome feature degrades security for all sites running on `localhost`, not just CockroachDB's DB Console, so be sure to enable the feature only temporarily.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/misc/csv-import-callout.md b/src/current/_includes/v25.1/misc/csv-import-callout.md
new file mode 100644
index 00000000000..60555c5d0b6
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/csv-import-callout.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+The column order in your schema must match the column order in the file being imported.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/misc/customizing-the-savepoint-name.md b/src/current/_includes/v25.1/misc/customizing-the-savepoint-name.md
new file mode 100644
index 00000000000..6a00f8f6d8c
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/customizing-the-savepoint-name.md
@@ -0,0 +1,5 @@
+Set the `force_savepoint_restart` [session variable]({% link {{ page.version.version }}/set-vars.md %}#supported-variables) to `true` to enable using a custom name for the [retry savepoint]({% link {{ page.version.version }}/advanced-client-side-transaction-retries.md %}#retry-savepoints).
+
+Once this variable is set, the [`SAVEPOINT`]({% link {{ page.version.version }}/savepoint.md %}) statement will accept any name for the retry savepoint, not just `cockroach_restart`. In addition, it causes every savepoint name to be equivalent to `cockroach_restart`, therefore disallowing the use of [nested transactions]({% link {{ page.version.version }}/transactions.md %}#nested-transactions).
+
+This feature exists to support applications that want to use the [advanced client-side transaction retry protocol]({% link {{ page.version.version }}/advanced-client-side-transaction-retries.md %}), but cannot customize the name of savepoints to be `cockroach_restart`. For example, this may be necessary because you are using an ORM that requires its own names for savepoints.
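+
+For example, a minimal sketch of the retry protocol with a custom savepoint name once the variable is set (the `accounts` table and its statement are hypothetical):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SET force_savepoint_restart = true;
+
+BEGIN;
+SAVEPOINT retry_txn;  -- any name now behaves like cockroach_restart
+UPSERT INTO accounts (id, balance) VALUES (1, 100);
+RELEASE SAVEPOINT retry_txn;
+COMMIT;
+~~~
+
+If a statement in the transaction returns a retry error, issue `ROLLBACK TO SAVEPOINT retry_txn` and retry the statements, as described in the [advanced client-side transaction retry protocol]({% link {{ page.version.version }}/advanced-client-side-transaction-retries.md %}).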
diff --git a/src/current/_includes/v25.1/misc/database-terms.md b/src/current/_includes/v25.1/misc/database-terms.md
new file mode 100644
index 00000000000..78663985607
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/database-terms.md
@@ -0,0 +1,30 @@
+### Consistency
+The requirement that a transaction must change affected data only in allowed ways. CockroachDB uses "consistency" in both the sense of [ACID semantics](https://en.wikipedia.org/wiki/ACID) and the [CAP theorem](https://wikipedia.org/wiki/CAP_theorem), albeit less formally than either definition.
+
+### Isolation
+The degree to which a transaction may be affected by other transactions running at the same time. CockroachDB provides the [`SERIALIZABLE`](https://wikipedia.org/wiki/Serializability) and `READ COMMITTED` isolation levels. For more information, see [Isolation levels]({% link {{ page.version.version }}/transactions.md %}#isolation-levels).
+
+### Consensus
+ The process of reaching agreement on whether a transaction is committed or aborted. CockroachDB uses the [Raft consensus protocol](#architecture-raft). In CockroachDB, when a range receives a write, a quorum of nodes containing replicas of the range acknowledge the write. This means your data is safely stored and a majority of nodes agree on the database's current state, even if some of the nodes are offline.
+
+When a write does not achieve consensus, forward progress halts to maintain consistency within the cluster.
+
+### Replication
+The process of creating and distributing copies of data, as well as ensuring that those copies remain consistent. CockroachDB requires all writes to propagate to a [quorum](https://wikipedia.org/wiki/Quorum_%28distributed_computing%29) of copies of the data before being considered committed. This ensures the consistency of your data.
+
+### Transaction
+A set of operations performed on a database that satisfy the requirements of [ACID semantics](https://en.wikipedia.org/wiki/ACID). This is a crucial feature for a consistent system to ensure developers can trust the data in their database. For more information about how transactions work in CockroachDB, see [Transaction Layer]({% link {{ page.version.version }}/architecture/transaction-layer.md %}).
+
+### Transaction contention
+ A [state of conflict]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention) that occurs when:
+
+- A [transaction]({% link {{ page.version.version }}/transactions.md %}) is unable to complete due to another concurrent or recent transaction attempting to write to the same data. This is also called *lock contention*.
+- A transaction is [automatically retried]({% link {{ page.version.version }}/transactions.md %}#automatic-retries) because it could not be placed into a [serializable ordering]({% link {{ page.version.version }}/demo-serializable.md %}) among all of the currently executing transactions. This is also called a *serialization conflict*. If the automatic retry is not possible or fails, a [*transaction retry error*](../transaction-retry-error-reference.html) is emitted to the client, requiring a client application running under `SERIALIZABLE` isolation to [retry the transaction](../transaction-retry-error-reference.html#client-side-retry-handling).
+
+Steps should be taken to [reduce transaction contention]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#reduce-transaction-contention) in the first place.
+
+### Multi-active availability
+A consensus-based notion of high availability that lets each node in the cluster handle reads and writes for a subset of the stored data (on a per-range basis). This is in contrast to _active-passive replication_, in which the active node receives 100% of request traffic, and _active-active_ replication, in which all nodes accept requests but typically cannot guarantee that reads are both up-to-date and fast.
+
+### User
+A SQL user is an identity capable of executing SQL statements and performing other cluster actions against CockroachDB clusters. SQL users must authenticate with an option permitted on the cluster (username/password, single sign-on (SSO), or certificate). Note that a SQL/cluster user is distinct from a CockroachDB {{ site.data.products.cloud }} organization user.
diff --git a/src/current/_includes/v25.1/misc/debug-subcommands.md b/src/current/_includes/v25.1/misc/debug-subcommands.md
new file mode 100644
index 00000000000..25feb08481b
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/debug-subcommands.md
@@ -0,0 +1,5 @@
+While the `cockroach debug` command has a few subcommands, users are expected to use only the [`zip`]({% link {{ page.version.version }}/cockroach-debug-zip.md %}), [`encryption-active-key`]({% link {{ page.version.version }}/cockroach-debug-encryption-active-key.md %}), [`merge-logs`]({% link {{ page.version.version }}/cockroach-debug-merge-logs.md %}), [`list-files`](cockroach-debug-list-files.html), [`tsdump`](cockroach-debug-tsdump.html), and [`ballast`](cockroach-debug-ballast.html) subcommands.
+
+We recommend using the [`encryption-decrypt`]({% link {{ page.version.version }}/cockroach-debug-encryption-decrypt.md %}) and [`job-trace`]({% link {{ page.version.version }}/cockroach-debug-job-trace.md %}) subcommands only when directed by the [Cockroach Labs support team]({% link {{ page.version.version }}/support-resources.md %}).
+
+The other `debug` subcommands are useful only to Cockroach Labs. Output of `debug` commands may contain sensitive or secret information.
diff --git a/src/current/_includes/v25.1/misc/declarative-schema-changer-note.md b/src/current/_includes/v25.1/misc/declarative-schema-changer-note.md
new file mode 100644
index 00000000000..fb1374e2bee
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/declarative-schema-changer-note.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_danger}}
+`{{ page.title }}` now uses the [declarative schema changer]({% link {{ page.version.version }}/online-schema-changes.md %}#declarative-schema-changer) by default. Declarative schema changer statements and legacy schema changer statements operating on the same objects cannot exist within the same transaction. Either split the transaction into multiple transactions, or disable either the `sql.defaults.use_declarative_schema_changer` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) or the `use_declarative_schema_changer` [session variable]({% link {{ page.version.version }}/set-vars.md %}).
+{{site.data.alerts.end}}
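+
+For example, to fall back to the legacy schema changer, disable the declarative schema changer at the cluster level or for the current session only:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+-- Cluster-wide default:
+SET CLUSTER SETTING sql.defaults.use_declarative_schema_changer = 'off';
+-- Current session only:
+SET use_declarative_schema_changer = 'off';
+~~~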
diff --git a/src/current/_includes/v25.1/misc/delete-statistics.md b/src/current/_includes/v25.1/misc/delete-statistics.md
new file mode 100644
index 00000000000..6954194fc75
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/delete-statistics.md
@@ -0,0 +1,15 @@
+To delete statistics for all tables in all databases:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+DELETE FROM system.table_statistics WHERE true;
+~~~
+
+To delete a named set of statistics (e.g., one named "users_stats"), run a query like the following:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+DELETE FROM system.table_statistics WHERE name = 'users_stats';
+~~~
+
+For more information about the `DELETE` statement, see [`DELETE`]({% link {{ page.version.version }}/delete.md %}).
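+
+To confirm the result for a given table (the `users` table here is only an example), inspect its remaining statistics:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SHOW STATISTICS FOR TABLE users;
+~~~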
diff --git a/src/current/_includes/v25.1/misc/diagnostics-callout.html b/src/current/_includes/v25.1/misc/diagnostics-callout.html
new file mode 100644
index 00000000000..a969a8cf152
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/diagnostics-callout.html
@@ -0,0 +1 @@
+{{site.data.alerts.callout_info}}By default, each node of a CockroachDB cluster periodically shares anonymous usage details with Cockroach Labs. For an explanation of the details that get shared and how to opt out of reporting, see Diagnostics Reporting.{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/misc/enterprise-features.md b/src/current/_includes/v25.1/misc/enterprise-features.md
new file mode 100644
index 00000000000..5f9193e1ca9
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/enterprise-features.md
@@ -0,0 +1,32 @@
+## Cluster optimization
+
+Feature | Description
+--------+-------------------------
+[Read Committed isolation]({% link {{ page.version.version }}/read-committed.md %}) | Achieve predictable query performance at high workload concurrencies, but without guaranteed transaction serializability.
+[Follower Reads]({% link {{ page.version.version }}/follower-reads.md %}) | Reduce read latency in multi-region deployments by using the closest replica at the expense of reading slightly historical data.
+[Multi-Region Capabilities]({% link {{ page.version.version }}/multiregion-overview.md %}) | Row-level control over where your data is stored to help you reduce read and write latency and meet regulatory requirements.
+[PL/pgSQL]({% link {{ page.version.version }}/plpgsql.md %}) | Use a procedural language in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}) and [stored procedures]({% link {{ page.version.version }}/stored-procedures.md %}) to improve performance and enable more complex queries.
+[Node Map]({% link {{ page.version.version }}/enable-node-map.md %}) | Visualize the geographical distribution of a cluster by plotting its node localities on a world map.
+[Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) | Improve performance for prepared statements by enabling generic plans that eliminate most of the query latency attributed to planning.
+[`VECTOR` type]({% link {{ page.version.version }}/vector.md %}) | Represent data points in multi-dimensional space, using fixed-length arrays of floating-point numbers.
+
+## Recovery and streaming
+
+Feature | Description
+--------+-------------------------
+[`BACKUP`]({% link {{ page.version.version }}/backup.md %}) and restore capabilities | Taking and restoring [full backups]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}), [incremental backups]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}), [backups with revision history]({% link {{ page.version.version }}/take-backups-with-revision-history-and-restore-from-a-point-in-time.md %}), [locality-aware backups](take-and-restore-locality-aware-backups.html), and [encrypted backups](take-and-restore-encrypted-backups.html).
+[Changefeeds into a Configurable Sink]({% link {{ page.version.version }}/create-changefeed.md %}) | For every change in a configurable allowlist of tables, configure a changefeed to emit a record to a configurable sink: Apache Kafka, cloud storage, Google Cloud Pub/Sub, or a webhook sink. These records can be processed by downstream systems for reporting, caching, or full-text indexing.
+[Change Data Capture Queries]({% link {{ page.version.version }}/cdc-queries.md %}) | Use `SELECT` queries to filter and modify change data before sending it to a changefeed's sink.
+[Physical Cluster Replication]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}) | Send all data at the byte level from a primary cluster to an independent standby cluster. Existing data and ongoing changes on the active primary cluster, which is serving application data, replicate asynchronously to the passive standby cluster.
+
+## Security and IAM
+
+Feature | Description
+--------+-------------------------
+[Encryption at Rest]({% link {{ page.version.version }}/security-reference/encryption.md %}#encryption-at-rest-enterprise) | Enable automatic transparent encryption of a node's data on the local disk using AES in counter mode, with all key sizes allowed. This feature works together with CockroachDB's automatic encryption of data in transit.
+[Column-level encryption]({% link {{ page.version.version }}/column-level-encryption.md %}) | Encrypt specific columns within a table.
+[GSSAPI with Kerberos Authentication]({% link {{ page.version.version }}/gssapi_authentication.md %}) | Authenticate to your cluster using identities stored in an external enterprise directory system that supports Kerberos, such as Active Directory.
+[Cluster Single Sign-on (SSO)]({% link {{ page.version.version }}/sso-sql.md %}) | Grant SQL access to a cluster using JSON Web Tokens (JWTs) issued by an external identity provider (IdP) or custom JWT issuer.
+[Single Sign-on (SSO) for DB Console]({% link {{ page.version.version }}/sso-db-console.md %}) | Grant access to a cluster's DB Console interface using SSO through an IdP that supports OIDC.
+[Role-based SQL Audit Logs]({% link {{ page.version.version }}/role-based-audit-logging.md %}) | Enable logging of queries being executed against your system by specific users or roles.
+[Certificate-based authentication using multiple values from the X.509 Subject field]({% link {{ page.version.version }}/certificate-based-authentication-using-the-x509-subject-field.md %}) | Map SQL user [roles]({% link {{ page.version.version }}/security-reference/authorization.md %}#roles) to values in the Subject field of the [X.509 certificate](https://en.wikipedia.org/wiki/X.509) used for [TLS authentication]({% link {{ page.version.version }}/security-reference/transport-layer-security.md %}#what-is-transport-layer-security-tls).
diff --git a/src/current/_includes/v25.1/misc/explore-benefits-see-also.md b/src/current/_includes/v25.1/misc/explore-benefits-see-also.md
new file mode 100644
index 00000000000..2ad7178c808
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/explore-benefits-see-also.md
@@ -0,0 +1,7 @@
+- [Replication & Rebalancing]({% link {{ page.version.version }}/demo-replication-and-rebalancing.md %})
+- [CockroachDB Resilience]({% link {{ page.version.version }}/demo-cockroachdb-resilience.md %})
+- [Low Latency Multi-Region Deployment]({% link {{ page.version.version }}/demo-low-latency-multi-region-deployment.md %})
+- [Serializable Transactions]({% link {{ page.version.version }}/demo-serializable.md %})
+- [Cross-Cloud Migration]({% link {{ page.version.version }}/demo-automatic-cloud-migration.md %})
+- [Orchestration]({% link {{ page.version.version }}/orchestrate-a-local-cluster-with-kubernetes-insecure.md %})
+- [JSON Support]({% link {{ page.version.version }}/demo-json-support.md %})
diff --git a/src/current/_includes/v25.1/misc/external-connection-kafka.md b/src/current/_includes/v25.1/misc/external-connection-kafka.md
new file mode 100644
index 00000000000..2ffa2b599a1
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/external-connection-kafka.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+You can create an external connection to represent a Kafka sink URI. This allows you to specify the external connection's name in statements rather than the provider-specific URI. For detail on using external connections, see the [`CREATE EXTERNAL CONNECTION`]({% link {{ page.version.version }}/create-external-connection.md %}) page.
+{{site.data.alerts.end}}
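+
+For example, assuming a hypothetical broker address and connection name, you can register the Kafka URI once and then refer to it by name when creating a changefeed:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE EXTERNAL CONNECTION kafka_sink AS 'kafka://broker.example.com:9092';
+CREATE CHANGEFEED FOR TABLE movr.users INTO 'external://kafka_sink';
+~~~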
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/misc/external-connection-note.md b/src/current/_includes/v25.1/misc/external-connection-note.md
new file mode 100644
index 00000000000..f9bc7914ed8
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/external-connection-note.md
@@ -0,0 +1 @@
+You can create an external connection to represent an external storage or sink URI. This allows you to specify the external connection's name in statements rather than the provider-specific URI. For detail on using external connections, see the [`CREATE EXTERNAL CONNECTION`]({% link {{ page.version.version }}/create-external-connection.md %}) page.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/misc/external-io-privilege.md b/src/current/_includes/v25.1/misc/external-io-privilege.md
new file mode 100644
index 00000000000..c3f92f8e24e
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/external-io-privilege.md
@@ -0,0 +1 @@
+You can grant a user the `EXTERNALIOIMPLICITACCESS` [system-level privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges) to interact with external resources that require implicit access.
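+
+For example (`maria` is a placeholder username):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+GRANT SYSTEM EXTERNALIOIMPLICITACCESS TO maria;
+~~~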
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/misc/force-index-selection.md b/src/current/_includes/v25.1/misc/force-index-selection.md
new file mode 100644
index 00000000000..a2f70c98ee6
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/force-index-selection.md
@@ -0,0 +1,157 @@
+By using the explicit index annotation, you can override [CockroachDB's index selection](https://www.cockroachlabs.com/blog/index-selection-cockroachdb-2/) and use a specific [index]({% link {{ page.version.version }}/indexes.md %}) when reading from a named table.
+
+{{site.data.alerts.callout_info}}
+Index selection can impact [performance]({% link {{ page.version.version }}/performance-best-practices-overview.md %}), but does not change the result of a query.
+{{site.data.alerts.end}}
+
+##### Force index scan
+
+To force a scan of a specific index:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SELECT * FROM table@my_idx;
+~~~
+
+This is equivalent to the longer expression:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SELECT * FROM table@{FORCE_INDEX=my_idx};
+~~~
+
+##### Force reverse scan
+
+To force a reverse scan of a specific index:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SELECT * FROM table@{FORCE_INDEX=my_idx,DESC};
+~~~
+
+Forcing a reverse scan can help with [performance tuning]({% link {{ page.version.version }}/performance-best-practices-overview.md %}). To choose an index and its scan direction:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SELECT * FROM table@{FORCE_INDEX=idx[,DIRECTION]};
+~~~
+
+where the optional `DIRECTION` is either `ASC` (ascending) or `DESC` (descending).
+
+When a direction is specified, that scan direction is forced; otherwise the [cost-based optimizer]({% link {{ page.version.version }}/cost-based-optimizer.md %}) is free to choose the direction it calculates will result in the best performance.
+
+You can verify that the optimizer is choosing your desired scan direction using [`EXPLAIN (OPT)`]({% link {{ page.version.version }}/explain.md %}#opt-option). For example, given the table
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE TABLE kv (k INT PRIMARY KEY, v INT);
+~~~
+
+you can check the scan direction with:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+EXPLAIN (opt) SELECT * FROM kv@{FORCE_INDEX=primary,DESC};
+~~~
+
+~~~
+ text
++-------------------------------------+
+ scan kv,rev
+ └── flags: force-index=primary,rev
+(2 rows)
+~~~
+
+##### Force inverted index scan
+
+To force a scan of any [inverted index]({% link {{ page.version.version }}/inverted-indexes.md %}) of the hinted table:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SELECT * FROM table@{FORCE_INVERTED_INDEX};
+~~~
+
+The `FORCE_INVERTED_INDEX` hint does not allow specifying an inverted index. If no query plan can be generated, the query will result in the error:
+
+~~~
+ERROR: could not produce a query plan conforming to the FORCE_INVERTED_INDEX hint
+~~~
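+
+Conversely, a minimal sketch in which the hint can be satisfied (the `docs` table and its index are hypothetical), because the filter constrains an inverted index on the table:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE TABLE docs (j JSONB, INVERTED INDEX docs_j_idx (j));
+SELECT * FROM docs@{FORCE_INVERTED_INDEX} WHERE j->'a' = '1';
+~~~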
+
+##### Force partial index scan
+
+To force a [partial index scan]({% link {{ page.version.version }}/partial-indexes.md %}), your statement must have a `WHERE` clause that implies the partial index filter.
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE TABLE t (
+ a INT,
+ INDEX idx (a) WHERE a > 0);
+INSERT INTO t(a) VALUES (5);
+SELECT * FROM t@idx WHERE a > 0;
+~~~
+
+~~~
+CREATE TABLE
+
+Time: 13ms total (execution 12ms / network 0ms)
+
+INSERT 1
+
+Time: 22ms total (execution 21ms / network 0ms)
+
+ a
+-----
+ 5
+(1 row)
+
+Time: 1ms total (execution 1ms / network 0ms)
+~~~
+
+##### Force partial GIN index scan
+
+To force a [partial GIN index]({% link {{ page.version.version }}/inverted-indexes.md %}#partial-gin-indexes) scan, your statement must have a `WHERE` clause that:
+
+- Implies the partial index.
+- Constrains the GIN index scan.
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+DROP TABLE t;
+CREATE TABLE t (
+ j JSON,
+ INVERTED INDEX idx (j) WHERE j->'a' = '1');
+INSERT INTO t(j)
+ VALUES ('{"a": 1}'),
+ ('{"a": 3, "b": 2}'),
+ ('{"a": 1, "b": 2}');
+SELECT * FROM t@idx WHERE j->'a' = '1' AND j->'b' = '2';
+~~~
+
+~~~
+DROP TABLE
+
+Time: 68ms total (execution 22ms / network 45ms)
+
+CREATE TABLE
+
+Time: 10ms total (execution 10ms / network 0ms)
+
+INSERT 3
+
+Time: 22ms total (execution 22ms / network 0ms)
+
+ j
+--------------------
+ {"a": 1, "b": 2}
+(1 row)
+
+Time: 1ms total (execution 1ms / network 0ms)
+~~~
+
+##### Prevent full scan
+
+{% include {{ page.version.version }}/sql/no-full-scan.md %}
+
+{{site.data.alerts.callout_success}}
+For other ways to prevent full scans, refer to [Prevent the optimizer from planning full scans]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#prevent-the-optimizer-from-planning-full-scans).
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/misc/gce-locations.md b/src/current/_includes/v25.1/misc/gce-locations.md
new file mode 100644
index 00000000000..22122aae78d
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/gce-locations.md
@@ -0,0 +1,18 @@
+| Location | SQL Statement |
+| ------ | ------ |
+| us-east1 (South Carolina) | `INSERT into system.locations VALUES ('region', 'us-east1', 33.836082, -81.163727)` |
+| us-east4 (N. Virginia) | `INSERT into system.locations VALUES ('region', 'us-east4', 37.478397, -76.453077)` |
+| us-central1 (Iowa) | `INSERT into system.locations VALUES ('region', 'us-central1', 42.032974, -93.581543)` |
+| us-west1 (Oregon) | `INSERT into system.locations VALUES ('region', 'us-west1', 43.804133, -120.554201)` |
+| northamerica-northeast1 (Montreal) | `INSERT into system.locations VALUES ('region', 'northamerica-northeast1', 56.130366, -106.346771)` |
+| europe-west1 (Belgium) | `INSERT into system.locations VALUES ('region', 'europe-west1', 50.44816, 3.81886)` |
+| europe-west2 (London) | `INSERT into system.locations VALUES ('region', 'europe-west2', 51.507351, -0.127758)` |
+| europe-west3 (Frankfurt) | `INSERT into system.locations VALUES ('region', 'europe-west3', 50.110922, 8.682127)` |
+| europe-west4 (Netherlands) | `INSERT into system.locations VALUES ('region', 'europe-west4', 53.4386, 6.8355)` |
+| europe-west6 (Zürich) | `INSERT into system.locations VALUES ('region', 'europe-west6', 47.3769, 8.5417)` |
+| asia-east1 (Taiwan) | `INSERT into system.locations VALUES ('region', 'asia-east1', 24.0717, 120.5624)` |
+| asia-northeast1 (Tokyo) | `INSERT into system.locations VALUES ('region', 'asia-northeast1', 35.689487, 139.691706)` |
+| asia-southeast1 (Singapore) | `INSERT into system.locations VALUES ('region', 'asia-southeast1', 1.352083, 103.819836)` |
+| australia-southeast1 (Sydney) | `INSERT into system.locations VALUES ('region', 'australia-southeast1', -33.86882, 151.209296)` |
+| asia-south1 (Mumbai) | `INSERT into system.locations VALUES ('region', 'asia-south1', 19.075984, 72.877656)` |
+| southamerica-east1 (São Paulo) | `INSERT into system.locations VALUES ('region', 'southamerica-east1', -23.55052, -46.633309)` |
diff --git a/src/current/_includes/v25.1/misc/geojson_geometry_note.md b/src/current/_includes/v25.1/misc/geojson_geometry_note.md
new file mode 100644
index 00000000000..a023f205c20
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/geojson_geometry_note.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+The screenshots in these examples were generated using [geojson.io](http://geojson.io), but they are designed to showcase the shapes, not the map. Representing `GEOMETRY` data in GeoJSON can lead to unexpected results if using geometries with [SRIDs]({% link {{ page.version.version }}/architecture/glossary.md %}#srid) other than 4326 (as shown below).
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/misc/haproxy.md b/src/current/_includes/v25.1/misc/haproxy.md
new file mode 100644
index 00000000000..c94bd654466
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/haproxy.md
@@ -0,0 +1,39 @@
+By default, the generated configuration file is called `haproxy.cfg` and looks as follows, with the `server` addresses pre-populated correctly:
+
+ ~~~
+ global
+ maxconn 4096
+
+ defaults
+ mode tcp
+ # Timeout values should be configured for your specific use.
+ # See: https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#4-timeout%20connect
+ timeout connect 10s
+ timeout client 1m
+ timeout server 1m
+ # TCP keep-alive on client side. Server already enables them.
+ option clitcpka
+
+ listen psql
+ bind :26257
+ mode tcp
+ balance roundrobin
+ option httpchk GET /health?ready=1
+        server cockroach1 <node1 address>:26257 check port 8080
+        server cockroach2 <node2 address>:26257 check port 8080
+        server cockroach3 <node3 address>:26257 check port 8080
+ ~~~
+
+ The file is preset with the minimal [configurations](http://cbonte.github.io/haproxy-dconv/1.7/configuration.html) needed to work with your running cluster:
+
+ Field | Description
+ ------|------------
+ `timeout connect` `timeout client` `timeout server` | Timeout values that should be suitable for most deployments.
+ `bind` | The port that HAProxy listens on. This is the port clients will connect to and thus needs to be allowed by your network configuration.<br><br>This tutorial assumes HAProxy is running on a separate machine from CockroachDB nodes. If you run HAProxy on the same machine as a node (not recommended), you'll need to change this port, as `26257` is likely already being used by the CockroachDB node.
+ `balance` | The balancing algorithm. This is set to `roundrobin` to ensure that connections get rotated amongst nodes (connection 1 on node 1, connection 2 on node 2, etc.). Check the [HAProxy Configuration Manual](http://cbonte.github.io/haproxy-dconv/1.7/configuration.html#4-balance) for details about this and other balancing algorithms.
+ `option httpchk` | The HTTP endpoint that HAProxy uses to check node health. [`/health?ready=1`]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#health-ready-1) ensures that HAProxy doesn't direct traffic to nodes that are live but not ready to receive requests.
+ `server` | For each included node, this field specifies the address the node advertises to other nodes in the cluster, i.e., the address passed in the [`--advertise-addr` flag]({% link {{ page.version.version }}/cockroach-start.md %}#networking) on node startup. Make sure hostnames are resolvable and IP addresses are routable from HAProxy.
+
+ {{site.data.alerts.callout_info}}
+ For full details on these and other configuration settings, see the [HAProxy Configuration Manual](http://cbonte.github.io/haproxy-dconv/1.7/configuration.html).
+ {{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/misc/htpp-import-only.md b/src/current/_includes/v25.1/misc/htpp-import-only.md
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/src/current/_includes/v25.1/misc/import-perf.md b/src/current/_includes/v25.1/misc/import-perf.md
new file mode 100644
index 00000000000..34bee9acdb4
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/import-perf.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_success}}
+For best practices for optimizing import performance in CockroachDB, see [Import Performance Best Practices]({% link {{ page.version.version }}/import-performance-best-practices.md %}).
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/misc/index-storage-parameters.md b/src/current/_includes/v25.1/misc/index-storage-parameters.md
new file mode 100644
index 00000000000..e2aa6d76301
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/index-storage-parameters.md
@@ -0,0 +1,14 @@
+| Parameter name | Description | Data type | Default value
+|---------------------+----------------------|-----|------|
+| `bucket_count` | The number of buckets into which a [hash-sharded index]({% link {{ page.version.version }}/hash-sharded-indexes.md %}) will split. | Integer | The value of the `sql.defaults.default_hash_sharded_index_bucket_count` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}). |
+| `geometry_max_x` | The maximum X-value of the [spatial reference system]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-reference-system) for the object(s) being covered. This only needs to be set if you are using a custom [SRID]({% link {{ page.version.version }}/architecture/glossary.md %}#srid). | | Derived from SRID bounds, else `(1 << 31) -1`. |
+| `geometry_max_y` | The maximum Y-value of the [spatial reference system]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-reference-system) for the object(s) being covered. This only needs to be set if you are using a custom [SRID]({% link {{ page.version.version }}/architecture/glossary.md %}#srid). | | Derived from SRID bounds, else `(1 << 31) -1`. |
+| `geometry_min_x` | The minimum X-value of the [spatial reference system]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-reference-system) for the object(s) being covered. This only needs to be set if the default bounds of the SRID are too large/small for the given data, or SRID = 0 and you wish to use a smaller range (unfortunately this is currently not exposed, but is viewable on ). By default, SRID = 0 assumes `[-min int32, max int32]` ranges. | | Derived from SRID bounds, else `-(1 << 31)`. |
+| `geometry_min_y` | The minimum Y-value of the [spatial reference system]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-reference-system) for the object(s) being covered. This only needs to be set if you are using a custom [SRID]({% link {{ page.version.version }}/architecture/glossary.md %}#srid). | | Derived from SRID bounds, else `-(1 << 31)`. |
+| `s2_level_mod` | `s2_max_level` must be divisible by `s2_level_mod`. `s2_level_mod` must be between `1` and `3`. | Integer | `1` |
+| `s2_max_cells` | The maximum number of S2 cells used in the covering. Provides a limit on how much work is done exploring the possible coverings. Allowed values: `1-30`. You may want to use higher values for odd-shaped regions such as skinny rectangles. Used in [spatial indexes]({% link {{ page.version.version }}/spatial-indexes.md %}). | Integer | `4` |
+| `s2_max_level` | The maximum level of S2 cell used in the covering. Allowed values: `1-30`. Setting it to less than the default means that CockroachDB will be forced to generate coverings using larger cells. Used in [spatial indexes]({% link {{ page.version.version }}/spatial-indexes.md %}). | Integer | `30` |
+
+The following parameters are included for PostgreSQL compatibility and do not affect how CockroachDB runs:
+
+- `fillfactor`
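+
+As an illustration (the table and index names are hypothetical), storage parameters are supplied in a `WITH` clause when the index is created:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+-- Hash-sharded index with an explicit bucket count.
+CREATE TABLE events (id UUID PRIMARY KEY DEFAULT gen_random_uuid(), ts TIMESTAMP NOT NULL);
+CREATE INDEX events_ts_idx ON events (ts) USING HASH WITH (bucket_count = 8);
+
+-- Spatial index with a custom S2 covering configuration.
+CREATE TABLE parks (id INT PRIMARY KEY, boundary GEOMETRY);
+CREATE INDEX parks_boundary_idx ON parks USING GIST (boundary) WITH (s2_max_cells = 20, s2_level_mod = 2);
+~~~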
diff --git a/src/current/_includes/v25.1/misc/install-next-steps.html b/src/current/_includes/v25.1/misc/install-next-steps.html
new file mode 100644
index 00000000000..3783b5fccb0
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/install-next-steps.html
@@ -0,0 +1,16 @@
+
+
+If you're just getting started with CockroachDB:
+
diff --git a/src/current/_includes/v25.1/misc/linux-binary-prereqs.md b/src/current/_includes/v25.1/misc/linux-binary-prereqs.md
new file mode 100644
index 00000000000..541183fe71b
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/linux-binary-prereqs.md
@@ -0,0 +1 @@
+The CockroachDB binary for Linux requires glibc, libncurses, and tzdata, which are found by default on nearly all Linux distributions, with Alpine as the notable exception.
diff --git a/src/current/_includes/v25.1/misc/logging-defaults.md b/src/current/_includes/v25.1/misc/logging-defaults.md
new file mode 100644
index 00000000000..eabdd93755b
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/logging-defaults.md
@@ -0,0 +1,3 @@
+By default, this command logs messages to `stderr`. This includes events with `WARNING` [severity]({% link {{ page.version.version }}/logging.md %}#logging-levels-severities) and higher.
+
+If you need to troubleshoot this command's behavior, you can [customize its logging behavior]({% link {{ page.version.version }}/configure-logs.md %}).
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/misc/logging-flags.md b/src/current/_includes/v25.1/misc/logging-flags.md
new file mode 100644
index 00000000000..68106413559
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/logging-flags.md
@@ -0,0 +1,12 @@
+Flag | Description
+-----|------------
+`--log` | Configure logging parameters by specifying a YAML payload. For details, see [Configure logs]({% link {{ page.version.version }}/configure-logs.md %}#flag). If a YAML configuration is not specified, the [default configuration]({% link {{ page.version.version }}/configure-logs.md %}#default-logging-configuration) is used.<br><br>`--log-config-file` can also be used.<br><br>**Note:** The logging flags below cannot be combined with `--log`, but can be defined instead in the YAML payload.
+`--log-config-file` | Configure logging parameters by specifying a path to a YAML file. For details, see [Configure logs]({% link {{ page.version.version }}/configure-logs.md %}#flag). If a YAML configuration is not specified, the [default configuration]({% link {{ page.version.version }}/configure-logs.md %}#default-logging-configuration) is used.<br><br>`--log` can also be used.<br><br>**Note:** The logging flags below cannot be combined with `--log-config-file`, but can be defined instead in the YAML file.
+`--log-dir` | An alias for the [`--log`]({% link {{ page.version.version }}/configure-logs.md %}#flag) flag, for configuring the log directory where log files are stored and written to. Specifically, `--log-dir=XXX` is an alias for `--log='file-defaults: {dir: XXX}'`.<br><br>Setting `--log-dir` to a blank directory (`--log-dir=`) disables logging to files. Do not use `--log-dir=""`; this creates a new directory named `""` and stores log files in that directory.
+`--log-group-max-size` | An alias for the [`--log`]({% link {{ page.version.version }}/configure-logs.md %}#flag) flag, for configuring the maximum size for a logging group (for example, `cockroach`, `cockroach-sql-audit`, `cockroach-auth`, `cockroach-sql-exec`, `cockroach-pebble`), after which the oldest log file is deleted. `--log-group-max-size=XXX` is an alias for `--log='file-defaults: {max-group-size: XXX}'`. Accepts a valid file size, such as `--log-group-max-size=1GiB`.<br><br>**Default:** `100MiB`
+`--log-file-max-size` | An alias for [`--log`]({% link {{ page.version.version }}/configure-logs.md %}#flag), used to specify the maximum size that a log file can grow before a new log file is created. `--log-file-max-size=XXX` is an alias for `--log='file-defaults: {max-file-size: XXX}'`. Accepts a valid file size, such as `--log-file-max-size=2MiB`. **Requires** logging to files.<br><br>**Default:** `10MiB`
+`--log-file-verbosity` | An alias for [`--log`]({% link {{ page.version.version }}/configure-logs.md %}#flag), used to specify the minimum [severity level]({% link {{ page.version.version }}/logging.md %}#logging-levels-severities) of messages that are logged. `--log-file-verbosity=XXX` is an alias for `--log='file-defaults: {filter: XXX}'`. When a severity is specified, such as `--log-file-verbosity=WARNING`, log messages that are below the specified severity level are not written to the target log file. **Requires** logging to files.<br><br>**Default:** `INFO`
+`--logtostderr` | An alias for [`--log`]({% link {{ page.version.version }}/configure-logs.md %}#flag), used to optionally output log messages at or above the configured [severity level]({% link {{ page.version.version }}/logging.md %}#logging-levels-severities) to the `stderr` sink. `--logtostderr=XXX` is an alias for `--log='sinks: {stderr: {filter: XXX}}'`. Accepts a valid [severity level]({% link {{ page.version.version }}/logging.md %}#logging-levels-severities). If no value is specified, by default messages related to server commands are logged to `stderr` at `INFO` severity and above, and messages related to client commands are logged to `stderr` at `WARNING` severity and above.<br><br>Setting `--logtostderr=NONE` disables logging to `stderr`.<br><br>**Default:** `UNKNOWN`
+`--no-color` | An alias for the [`--log`]({% link {{ page.version.version }}/configure-logs.md %}#flag) flag, used to control whether log output to the `stderr` sink is colorized. `--no-color=XXX` is an alias for `--log='sinks: {stderr: {no-color: XXX}}'`. Accepts either `true` or `false`.<br><br>When set to `false`, messages logged to `stderr` are colorized based on [severity level]({% link {{ page.version.version }}/logging.md %}#logging-levels-severities).<br><br>**Default:** `false`
+`--redactable-logs` | An alias for the [`--log`]({% link {{ page.version.version }}/configure-logs.md %}#flag) flag, used to specify whether [redaction markers]({% link {{ page.version.version }}/configure-logs.md %}#redact-logs) are used in place of secret or sensitive information in log messages. `--redactable-logs=XXX` is an alias for `--log='file-defaults: {redactable: XXX}'`. Accepts `true` or `false`.<br><br>**Default:** `false`
+`--sql-audit-dir` | An alias for [`--log`]({% link {{ page.version.version }}/configure-logs.md %}#flag), used to optionally confine log output of the `SENSITIVE_ACCESS` [logging channel]({% link {{ page.version.version }}/logging-overview.md %}#logging-channels) to a separate directory. `--sql-audit-dir=XXX` is an alias for `--log='sinks: {file-groups: {sql-audit: {channels: SENSITIVE_ACCESS, dir: ...}}}'`.<br><br>Enabling `SENSITIVE_ACCESS` logs can negatively impact performance. As a result, we recommend using the `SENSITIVE_ACCESS` channel for security purposes only. For more information, refer to [Security and Audit Monitoring]({% link {{ page.version.version }}/logging-use-cases.md %}#security-and-audit-monitoring).
diff --git a/src/current/_includes/v25.1/misc/movr-live-demo.md b/src/current/_includes/v25.1/misc/movr-live-demo.md
new file mode 100644
index 00000000000..f8cfb24cb21
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/movr-live-demo.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_success}}
+For a live demo of the deployed example application, see [https://movr.cloud](https://movr.cloud).
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/misc/movr-schema.md b/src/current/_includes/v25.1/misc/movr-schema.md
new file mode 100644
index 00000000000..e838bcf4572
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/movr-schema.md
@@ -0,0 +1,12 @@
+The six tables in the `movr` database store user, vehicle, and ride data for MovR:
+
+Table | Description
+--------|----------------------------
+`users` | People registered for the service.
+`vehicles` | The pool of vehicles available for the service.
+`rides` | When and where users have rented a vehicle.
+`promo_codes` | Promotional codes for users.
+`user_promo_codes` | Promotional codes in use by users.
+`vehicle_location_histories` | Vehicle location history.
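+
+To browse these tables yourself (assuming the `movr` database has been loaded), you can list them and inspect an individual schema:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SHOW TABLES FROM movr;
+SHOW CREATE TABLE movr.rides;
+~~~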
+
+
diff --git a/src/current/_includes/v25.1/misc/movr-workflow.md b/src/current/_includes/v25.1/misc/movr-workflow.md
new file mode 100644
index 00000000000..a682c099b70
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/movr-workflow.md
@@ -0,0 +1,76 @@
+The workflow for MovR is as follows:
+
+1. A user loads the app and sees the 25 closest vehicles.
+
+ For example:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > SELECT id, city, status FROM vehicles WHERE city='amsterdam' limit 25;
+ ~~~
+
+1. The user signs up for the service.
+
+ For example:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > INSERT INTO users (id, name, address, city, credit_card)
+ VALUES ('66666666-6666-4400-8000-00000000000f', 'Mariah Lam', '88194 Angela Gardens Suite 60', 'amsterdam', '123245696');
+ ~~~
+
+    {{site.data.alerts.callout_info}}Normally, a Universally Unique Identifier (UUID) is generated automatically, but this example uses predetermined UUIDs so that they are easier to track across the statements that follow.{{site.data.alerts.end}}
+
+1. In some cases, the user adds their own vehicle to share.
+
+ For example:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+    > INSERT INTO vehicles (id, city, type, owner_id, creation_time, status, current_location, ext)
+ VALUES ('ffffffff-ffff-4400-8000-00000000000f', 'amsterdam', 'skateboard', '66666666-6666-4400-8000-00000000000f', current_timestamp(), 'available', '88194 Angela Gardens Suite 60', '{"color": "blue"}');
+ ~~~
+1. More often, the user reserves a vehicle and starts a ride, applying a promo code, if available and valid.
+
+ For example:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > SELECT code FROM user_promo_codes WHERE user_id ='66666666-6666-4400-8000-00000000000f';
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > UPDATE vehicles SET status = 'in_use' WHERE id='bbbbbbbb-bbbb-4800-8000-00000000000b';
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+    > INSERT INTO rides (id, city, vehicle_city, rider_id, vehicle_id, start_address, end_address, start_time, end_time, revenue)
+ VALUES ('cd032f56-cf1a-4800-8000-00000000066f', 'amsterdam', 'amsterdam', '66666666-6666-4400-8000-00000000000f', 'bbbbbbbb-bbbb-4800-8000-00000000000b', '70458 Mary Crest', '', TIMESTAMP '2020-10-01 10:00:00.123456', NULL, 0.0);
+ ~~~
+
+1. During the ride, MovR tracks the location of the vehicle.
+
+ For example:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > INSERT INTO vehicle_location_histories (city, ride_id, timestamp, lat, long)
+ VALUES ('amsterdam', 'cd032f56-cf1a-4800-8000-00000000066f', current_timestamp(), -101, 60);
+ ~~~
+
+1. The user ends the ride and releases the vehicle.
+
+ For example:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > UPDATE vehicles SET status = 'available' WHERE id='bbbbbbbb-bbbb-4800-8000-00000000000b';
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > UPDATE rides SET end_address ='33862 Charles Junctions Apt. 49', end_time=TIMESTAMP '2020-10-01 10:30:00.123456', revenue=88.6
+ WHERE id='cd032f56-cf1a-4800-8000-00000000066f';
+ ~~~
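+
+To check the effect of these statements, you can query the affected rows using the IDs from the examples above:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SELECT id, status FROM vehicles WHERE id='bbbbbbbb-bbbb-4800-8000-00000000000b';
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SELECT id, end_time, revenue FROM rides WHERE id='cd032f56-cf1a-4800-8000-00000000066f';
+~~~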
diff --git a/src/current/_includes/v25.1/misc/multiregion-max-offset.md b/src/current/_includes/v25.1/misc/multiregion-max-offset.md
new file mode 100644
index 00000000000..794ed12ca84
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/multiregion-max-offset.md
@@ -0,0 +1 @@
+For new clusters using the [multi-region SQL abstractions]({% link {{ page.version.version }}/multiregion-overview.md %}), Cockroach Labs recommends lowering the [`--max-offset`]({% link {{ page.version.version }}/cockroach-start.md %}#flags-max-offset) setting to `250ms`. This setting is especially helpful for lowering the write latency of [global tables]({% link {{ page.version.version }}/table-localities.md %}#global-tables). Nodes can run with different values for `--max-offset`, but only for the purpose of updating the setting across the cluster using a rolling upgrade.
diff --git a/src/current/_includes/v25.1/misc/note-egress-perimeter-cdc-backup.md b/src/current/_includes/v25.1/misc/note-egress-perimeter-cdc-backup.md
new file mode 100644
index 00000000000..9b80cec66f6
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/note-egress-perimeter-cdc-backup.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+Cockroach Labs recommends enabling Egress Perimeter Controls on CockroachDB {{ site.data.products.advanced }} clusters to mitigate the risk of data exfiltration when accessing external resources, such as cloud storage for change data capture or backup and restore operations. See [Egress Perimeter Controls]({% link cockroachcloud/egress-perimeter-controls.md %}) for detail and setup instructions.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/misc/remove-user-callout.html b/src/current/_includes/v25.1/misc/remove-user-callout.html
new file mode 100644
index 00000000000..925f83d779d
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/remove-user-callout.html
@@ -0,0 +1 @@
+Removing a user does not remove that user's privileges. Therefore, to prevent a future user with an identical username from inheriting an old user's privileges, it's important to revoke a user's privileges before or after removing the user.
diff --git a/src/current/_includes/v25.1/misc/s3-compatible-warning.md b/src/current/_includes/v25.1/misc/s3-compatible-warning.md
new file mode 100644
index 00000000000..5ad4966ad94
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/s3-compatible-warning.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_danger}}
+While Cockroach Labs actively tests Amazon S3, Google Cloud Storage, and Azure Storage, we **do not** test [S3-compatible services]({% link {{ page.version.version }}/cloud-storage-authentication.md %}) (e.g., [MinIO](https://min.io/), [Red Hat Ceph](https://docs.ceph.com/en/pacific/radosgw/s3/)).
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/misc/schema-change-stmt-note.md b/src/current/_includes/v25.1/misc/schema-change-stmt-note.md
new file mode 100644
index 00000000000..792cd2b9e51
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/schema-change-stmt-note.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+The `{{ page.title }}` statement performs a schema change. For more information about how online schema changes work in CockroachDB, see [Online Schema Changes]({% link {{ page.version.version }}/online-schema-changes.md %}).
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/misc/schema-change-view-job.md b/src/current/_includes/v25.1/misc/schema-change-view-job.md
new file mode 100644
index 00000000000..37e46feb4d8
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/schema-change-view-job.md
@@ -0,0 +1 @@
+This schema change statement is registered as a job. You can view long-running jobs with [`SHOW JOBS`]({% link {{ page.version.version }}/show-jobs.md %}).
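+
+For example, to list only schema change jobs (a sketch that uses the `SHOW JOBS` output as a data source):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SELECT job_id, description, status FROM [SHOW JOBS] WHERE job_type LIKE '%SCHEMA CHANGE%';
+~~~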
diff --git a/src/current/_includes/v25.1/misc/session-vars.md b/src/current/_includes/v25.1/misc/session-vars.md
new file mode 100644
index 00000000000..e4f9e1eee6b
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/session-vars.md
@@ -0,0 +1,109 @@
+| Variable name | Description | Initial value | Modify with [`SET`]({% link {{ page.version.version }}/set-vars.md %})? | View with [`SHOW`]({% link {{ page.version.version }}/show-vars.md %})? |
+|---|---|---|---|---|
+| `application_name` | The current application name for statistics collection. | Empty string, or `cockroach` for sessions from the [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}). | Yes | Yes |
+| `autocommit_before_ddl` | When the [`autocommit_before_ddl` session setting]({% link {{page.version.version}}/set-vars.md %}#autocommit-before-ddl) is set to `on`, any schema change statement that is sent during an [explicit transaction]({% link {{page.version.version}}/transactions.md %}) will cause the transaction to [commit]({% link {{page.version.version}}/commit-transaction.md %}) before executing the schema change. | `off` | Yes | Yes |
+| `bytea_output` | The [mode for conversions from `STRING` to `BYTES`]({% link {{ page.version.version }}/bytes.md %}#supported-conversions). | hex | Yes | Yes |
+| `client_min_messages` | The severity level of notices displayed in the [SQL shell]({% link {{ page.version.version }}/cockroach-sql.md %}). Accepted values include `debug5`, `debug4`, `debug3`, `debug2`, `debug1`, `log`, `notice`, `warning`, and `error`. | `notice` | Yes | Yes |
+| `copy_from_atomic_enabled` | If set to `on`, [`COPY FROM`]({% link {{ page.version.version }}/copy-from.md %}) statements are committed atomically, matching PostgreSQL behavior. If set to `off`, `COPY FROM` statements are segmented into batches of 100 rows unless issued within an explicit transaction, matching the CockroachDB behavior in versions prior to v22.2. | `on` | Yes | Yes |
+| `copy_transaction_quality_of_service` | The default quality of service for [`COPY`]({% link {{ page.version.version }}/copy-from.md %}) statements in the current session. The supported options are `regular`, `critical`, and `background`. See [Set quality of service level]({% link {{ page.version.version }}/admission-control.md %}#copy-qos). | `background` | Yes | Yes |
+| `cost_scans_with_default_col_size` | Whether to prevent the optimizer from considering column size when costing plans. | `false` | Yes | Yes |
+| `crdb_version` | The version of CockroachDB. | CockroachDB OSS version | No | Yes |
+| `database` | The [current database]({% link {{ page.version.version }}/sql-name-resolution.md %}#current-database). | Database in connection string, or empty if not specified. | Yes | Yes |
+| `datestyle` | The input string format for [`DATE`]({% link {{ page.version.version }}/date.md %}) and [`TIMESTAMP`]({% link {{ page.version.version }}/timestamp.md %}) values. Accepted values include `ISO,MDY`, `ISO,DMY`, and `ISO,YMD`. | The value set by the `sql.defaults.datestyle` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) (`ISO,MDY`, by default). | Yes | Yes |
+| `default_int_size` | The size, in bytes, of an [`INT`]({% link {{ page.version.version }}/int.md %}) type. | `8` | Yes | Yes |
+| `default_text_search_config` | The dictionary used to normalize tokens and eliminate stop words when calling a [full-text search function]({% link {{ page.version.version }}/functions-and-operators.md %}#full-text-search-functions) without a configuration parameter. See [Full-Text Search]({% link {{ page.version.version }}/full-text-search.md %}). | `english` | Yes | Yes |
+| `default_transaction_isolation` | The isolation level at which transactions in the session execute ([`SERIALIZABLE`]({% link {{ page.version.version }}/demo-serializable.md %}) or [`READ COMMITTED`]({% link {{ page.version.version }}/read-committed.md %})). See [Isolation levels]({% link {{ page.version.version }}/transactions.md %}#isolation-levels). | `SERIALIZABLE` | Yes | Yes |
+| `default_transaction_priority` | The default transaction priority for the current session. The supported options are `low`, `normal`, and `high`. | `normal` | Yes | Yes |
+| `default_transaction_quality_of_service` | The default transaction quality of service for the current session. The supported options are `regular`, `critical`, and `background`. See [Set quality of service level]({% link {{ page.version.version }}/admission-control.md %}#set-quality-of-service-level-for-a-session). | `regular` | Yes | Yes |
+| `default_transaction_read_only` | The default transaction access mode for the current session. If set to `on`, only read operations are allowed in transactions in the current session; if set to `off`, both read and write operations are allowed. See [`SET TRANSACTION`]({% link {{ page.version.version }}/set-transaction.md %}) for more details. | `off` | Yes | Yes |
+| `default_transaction_use_follower_reads` | If set to on, all read-only transactions use [`AS OF SYSTEM TIME follower_read_timestamp()`]({% link {{ page.version.version }}/as-of-system-time.md %}) to allow the transaction to use follower reads. If set to `off`, read-only transactions will only use follower reads if an `AS OF SYSTEM TIME` clause is specified in the statement, with an interval of at least 4.8 seconds. | `off` | Yes | Yes |
+| `disable_changefeed_replication` | When `true`, [changefeeds]({% link {{ page.version.version }}/changefeed-messages.md %}#filtering-changefeed-messages) will not emit messages for any changes (e.g., `INSERT`, `UPDATE`) issued to watched tables during that session. | `false` | Yes | Yes |
+| `disallow_full_table_scans` | If set to `on`, queries on "large" tables with a row count greater than [`large_full_scan_rows`](#large-full-scan-rows) will not use full table or index scans. If no other query plan is possible, queries will return an error message. This setting does not apply to internal queries, which may plan full table or index scans without checking the session variable. | `off` | Yes | Yes |
+| `distsql` | The query distribution mode for the session. By default, CockroachDB determines which queries are faster to execute if distributed across multiple nodes, and all other queries are run through the gateway node. | `auto` | Yes | Yes |
+| `enable_auto_rehoming` | When enabled, the [home regions]({% link {{ page.version.version }}/alter-table.md %}#crdb_region) of rows in [`REGIONAL BY ROW`]({% link {{ page.version.version }}/alter-table.md %}#set-the-table-locality-to-regional-by-row) tables are automatically set to the region of the [gateway node]({% link {{ page.version.version }}/ui-sessions-page.md %}#session-details-gateway-node) from which any [`UPDATE`]({% link {{ page.version.version }}/update.md %}) or [`UPSERT`]({% link {{ page.version.version }}/upsert.md %}) statements that operate on those rows originate. | `off` | Yes | Yes |
+| `enable_durable_locking_for_serializable` | Indicates whether CockroachDB replicates [`FOR UPDATE` and `FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}#lock-strengths) locks via [Raft]({% link {{ page.version.version }}/architecture/replication-layer.md %}#raft), allowing locks to be preserved when leases are transferred. Note that replicating `FOR UPDATE` and `FOR SHARE` locks will add latency to those statements. This setting only affects `SERIALIZABLE` transactions and matches the default `READ COMMITTED` behavior when enabled. | `off` | Yes | Yes |
+| `enable_experimental_alter_column_type_general` | If `on`, it is possible to [alter column data types]({% link {{ page.version.version }}/alter-table.md %}#alter-column-data-types). | `off` | Yes | Yes |
+| `enable_implicit_fk_locking_for_serializable` | Indicates whether CockroachDB uses [shared locks]({% link {{ page.version.version }}/select-for-update.md %}#lock-strengths) to perform [foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks. To take effect, the [`enable_shared_locking_for_serializable`](#enable-shared-locking-for-serializable) setting must also be enabled. This setting only affects `SERIALIZABLE` transactions and matches the default `READ COMMITTED` behavior when enabled. | `off` | Yes | Yes |
+| `enable_implicit_select_for_update` | Indicates whether [`UPDATE`]({% link {{ page.version.version }}/update.md %}) and [`UPSERT`]({% link {{ page.version.version }}/upsert.md %}) statements acquire locks using the `FOR UPDATE` locking mode during their initial row scan, which improves performance for contended workloads.<br><br>For more information about how `FOR UPDATE` locking works, see the documentation for [`SELECT FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}). | `on` | Yes | Yes |
+| `enable_implicit_transaction_for_batch_statements` | Indicates whether multiple statements in a single query (a "batch statement") will all run in the same implicit transaction, which matches the PostgreSQL wire protocol. | `on` | Yes | Yes |
+| `enable_insert_fast_path` | Indicates whether CockroachDB will use a specialized execution operator for inserting into a table. We recommend leaving this setting `on`. | `on` | Yes | Yes |
+| `enable_shared_locking_for_serializable` | Indicates whether [shared locks]({% link {{ page.version.version }}/select-for-update.md %}#lock-strengths) are enabled for `SERIALIZABLE` transactions. When `off`, `SELECT` statements using `FOR SHARE` are still permitted under `SERIALIZABLE` isolation, but silently do not lock. | `off` | Yes | Yes |
+| `enable_super_regions` | When enabled, you can define a super region: a set of [database regions]({% link {{ page.version.version }}/multiregion-overview.md %}#super-regions) on a multi-region cluster such that your [schema objects]({% link {{ page.version.version }}/schema-design-overview.md %}#database-schema-objects) will have all of their [replicas]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-replica) stored _only_ in regions that are members of the super region. | `off` | Yes | Yes |
+| `enable_zigzag_join` | Indicates whether the [cost-based optimizer]({% link {{ page.version.version }}/cost-based-optimizer.md %}) will plan certain queries using a [zigzag merge join algorithm]({% link {{ page.version.version }}/cost-based-optimizer.md %}#zigzag-joins), which searches for the desired intersection by jumping back and forth between the indexes based on the fact that after constraining indexes, they share an ordering. | `on` | Yes | Yes |
+| `enforce_home_region` | If set to `on`, queries return an error and in some cases a suggested resolution if they cannot run entirely in their home region. This can occur if a query has no home region (for example, if it reads from different home regions in a [regional by row table]({% link {{ page.version.version }}/table-localities.md %}#regional-by-row-tables)) or a query's home region differs from the [gateway]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) region. Note that only tables with `ZONE` [survivability]({% link {{ page.version.version }}/multiregion-survival-goals.md %}#when-to-use-zone-vs-region-survival-goals) can be scanned without error when this is enabled. For more information about home regions, see [Table localities]({% link {{ page.version.version }}/multiregion-overview.md %}#table-localities).<br><br>This feature is in preview. It is subject to change. | `off` | Yes | Yes |
+| `enforce_home_region_follower_reads_enabled` | If `on` while the [`enforce_home_region`]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-whether-queries-are-limited-to-a-single-region) setting is `on`, allows `enforce_home_region` to perform `AS OF SYSTEM TIME` [follower reads]({% link {{ page.version.version }}/follower-reads.md %}) to detect and report a query's [home region]({% link {{ page.version.version }}/multiregion-overview.md %}#table-localities), if any.<br><br>This feature is in preview. It is subject to change. | `off` | Yes | Yes |
+| `expect_and_ignore_not_visible_columns_in_copy` | If `on`, [`COPY FROM`]({% link {{ page.version.version }}/copy-from.md %}) with no column specifiers will assume that hidden columns are in the copy data, but will ignore them when applying `COPY FROM`. | `off` | Yes | Yes |
+| `extra_float_digits` | The number of digits displayed for floating-point values. Only values between `-15` and `3` are supported. | `0` | Yes | Yes |
+| `force_savepoint_restart` | When set to `true`, allows the [`SAVEPOINT`]({% link {{ page.version.version }}/savepoint.md %}) statement to accept any name for a savepoint. | `off` | Yes | Yes |
+| `foreign_key_cascades_limit` | Limits the number of [cascading operations]({% link {{ page.version.version }}/foreign-key.md %}#use-a-foreign-key-constraint-with-cascade) that run as part of a single query. | `10000` | Yes | Yes |
+| `idle_in_session_timeout` | Automatically terminates sessions that idle past the specified threshold.<br><br>When set to `0`, the session will not time out. | The value set by the `sql.defaults.idle_in_session_timeout` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) (`0s`, by default). | Yes | Yes |
+| `idle_in_transaction_session_timeout` | Automatically terminates sessions that are idle in a transaction past the specified threshold.<br><br>When set to `0`, the session will not time out. | The value set by the `sql.defaults.idle_in_transaction_session_timeout` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) (`0s`, by default). | Yes | Yes |
+| `index_recommendations_enabled` | If `true`, display recommendations to create indexes required to eliminate full table scans. For more details, see [Default statement plans]({% link {{ page.version.version }}/explain.md %}#default-statement-plans). | `true` | Yes | Yes |
+| `inject_retry_errors_enabled` | If `true`, any statement executed inside of an explicit transaction (with the exception of [`SET`]({% link {{ page.version.version }}/set-vars.md %}) statements) will return a transaction retry error. If the client retries the transaction using the special [`cockroach_restart SAVEPOINT` name]({% link {{ page.version.version }}/savepoint.md %}#savepoints-for-client-side-transaction-retries), after the 3rd retry error, the transaction will proceed as normal. Otherwise, the errors will continue until `inject_retry_errors_enabled` is set to `false`. For more details, see [Test transaction retry logic]({% link {{ page.version.version }}/transaction-retry-error-example.md %}#test-transaction-retry-logic). | `false` | Yes | Yes |
+| `intervalstyle` | The input string format for [`INTERVAL`]({% link {{ page.version.version }}/interval.md %}) values. Accepted values include `postgres`, `iso_8601`, and `sql_standard`. | The value set by the `sql.defaults.intervalstyle` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) (`postgres`, by default). | Yes | Yes |
+| `is_superuser` | If `on` or `true`, the current user is a member of the [`admin` role]({% link {{ page.version.version }}/security-reference/authorization.md %}#admin-role). | User-dependent | No | Yes |
+| `large_full_scan_rows` | Determines which tables are considered "large" such that `disallow_full_table_scans` rejects full table or index scans of "large" tables. The default value is `1000`. To reject all full table or index scans, set to `0`. | `1000` | Yes | Yes |
+| `locality` | The location of the node. For more information, see [Locality]({% link {{ page.version.version }}/cockroach-start.md %}#locality). | Node-dependent | No | Yes |
+| `lock_timeout` | The amount of time a query can spend acquiring or waiting for a single [row-level lock]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#concurrency-control). In CockroachDB, unlike in PostgreSQL, non-locking reads wait for conflicting locks to be released. As a result, the `lock_timeout` configuration applies to writes, and to locking and non-locking reads in read-write and read-only transactions. If `lock_timeout = 0`, queries do not time out due to lock acquisitions. | The value set by the `sql.defaults.lock_timeout` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) (`0`, by default) | Yes | Yes |
+| `multiple_active_portals_enabled` | Whether to enable the [multiple active portals]({% link {{ page.version.version }}/postgresql-compatibility.md %}#multiple-active-portals) pgwire feature. | `false` | Yes | Yes |
+| `node_id` | The ID of the node currently connected to. This variable is particularly useful for verifying load-balanced connections. | Node-dependent | No | Yes |
+| `null_ordered_last` | Set the default ordering of `NULL`s. The default order is `NULL`s first for ascending order and `NULL`s last for descending order. | `false` | Yes | Yes |
+| `optimizer_merge_joins_enabled` | If `on`, the optimizer will explore query plans with merge joins. | `on` | Yes | Yes |
+| `optimizer_push_offset_into_index_join` | If `on`, the optimizer will attempt to push offset expressions into index join expressions to produce more efficient query plans. | `on` | Yes | Yes |
+| `optimizer_use_forecasts` | If `on`, the optimizer uses forecasted statistics for query planning. | `on` | Yes | Yes |
+| `optimizer_use_histograms` | If `on`, the optimizer uses collected histograms for cardinality estimation. | `on` | No | Yes |
+| `optimizer_use_improved_multi_column_selectivity_estimate` | If `on`, the optimizer uses an improved selectivity estimate for multi-column predicates. | `on` | Yes | Yes |
+| `optimizer_use_improved_zigzag_join_costing` | If `on`, the cost of [zigzag joins]({% link {{ page.version.version }}/cost-based-optimizer.md %}#zigzag-joins) is updated so they will never be chosen over scans unless they produce fewer rows. To take effect, the [`enable_zigzag_join`](#enable-zigzag-join) setting must also be enabled. | `on` | Yes | Yes |
+| `optimizer_use_lock_op_for_serializable` | If `on`, the optimizer uses a `Lock` operator to construct query plans for `SELECT` statements using the [`FOR UPDATE` and `FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) clauses. This setting only affects `SERIALIZABLE` transactions. `READ COMMITTED` transactions are evaluated with the `Lock` operator regardless of the setting. | `off` | Yes | Yes |
+| `optimizer_use_multicol_stats` | If `on`, the optimizer uses collected multi-column statistics for cardinality estimation. | `on` | No | Yes |
+| `optimizer_use_not_visible_indexes` | If `on`, the optimizer uses not visible indexes for planning. | `off` | No | Yes |
+| `optimizer_use_virtual_computed_column_stats` | If `on`, the optimizer uses table statistics on [virtual computed columns]({% link {{ page.version.version }}/computed-columns.md %}#virtual-computed-columns). | `on` | Yes | Yes |
+| `plan_cache_mode` | The type of plan that is cached in the [query plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache): `auto`, `force_generic_plan`, or `force_custom_plan`. For more information, refer to [Query plan type]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type). | `force_custom_plan` | Yes | Yes |
+| `plpgsql_use_strict_into` | If `on`, PL/pgSQL [`SELECT ... INTO` and `RETURNING ... INTO` statements]({% link {{ page.version.version }}/plpgsql.md %}#assign-a-result-to-a-variable) behave as though the `STRICT` option is specified. This causes the SQL statement to error if it does not return exactly one row. | `off` | Yes | Yes |
+| `pg_trgm.similarity_threshold` | The threshold above which a [`%`]({% link {{ page.version.version }}/functions-and-operators.md %}#operators) string comparison returns `true`. The value must be between `0` and `1`. For more information, see [Trigram Indexes]({% link {{ page.version.version }}/trigram-indexes.md %}). | `0.3` | Yes | Yes |
+| `prefer_lookup_joins_for_fks` | If `on`, the optimizer prefers [`lookup joins`]({% link {{ page.version.version }}/joins.md %}#lookup-joins) to [`merge joins`]({% link {{ page.version.version }}/joins.md %}#merge-joins) when performing [`foreign key`]({% link {{ page.version.version }}/foreign-key.md %}) checks. | `off` | Yes | Yes |
+| `reorder_joins_limit` | Maximum number of joins that the optimizer will attempt to reorder when searching for an optimal query execution plan. For more information, see [Join reordering]({% link {{ page.version.version }}/cost-based-optimizer.md %}#join-reordering). | `8` | Yes | Yes |
+| `require_explicit_primary_keys` | If `on`, CockroachDB throws an error for all tables created without an explicit primary key defined. | `off` | Yes | Yes |
+| `search_path` | A list of schemas that will be searched to resolve unqualified table or function names. For more details, see [SQL name resolution]({% link {{ page.version.version }}/sql-name-resolution.md %}). | `public` | Yes | Yes |
+| `serial_normalization` | Specifies the default handling of [`SERIAL`]({% link {{ page.version.version }}/serial.md %}) in table definitions. Valid options include `'rowid'`, `'virtual_sequence'`, `sql_sequence`, `sql_sequence_cached`, and `unordered_rowid`. If set to `'virtual_sequence'`, the `SERIAL` type auto-creates a sequence for [better compatibility with Hibernate sequences](https://forum.cockroachlabs.com/t/hibernate-sequence-generator-returns-negative-number-and-ignore-unique-rowid/1885). If set to `sql_sequence_cached`, you can use the `sql.defaults.serial_sequences_cache_size` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to control the number of values to cache in a user's session, with a default of 256. If set to `unordered_rowid`, the `SERIAL` type generates a globally unique 64-bit integer (a combination of the insert timestamp and the ID of the node executing the statement) that does not have unique ordering. | `'rowid'` | Yes | Yes |
+| `server_version` | The version of PostgreSQL that CockroachDB emulates. | Version-dependent | No | Yes |
+| `server_version_num` | The version of PostgreSQL that CockroachDB emulates. | Version-dependent | Yes | Yes |
+| `session_id` | The ID of the current session. | Session-dependent | No | Yes |
+| `session_user` | The user connected for the current session. | User in connection string | No | Yes |
+| `sql_safe_updates` | If `true`, the following potentially unsafe SQL statements are disallowed: [`DROP DATABASE`]({% link {{ page.version.version }}/drop-database.md %}) of a non-empty database and all dependent objects; [`DELETE`]({% link {{ page.version.version }}/delete.md %}) and [`UPDATE`]({% link {{ page.version.version }}/update.md %}) without a `WHERE` clause, unless a [`LIMIT`]({% link {{ page.version.version }}/limit-offset.md %}) clause is included; [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) and [`SELECT ... FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) without a `WHERE` or [`LIMIT`]({% link {{ page.version.version }}/limit-offset.md %}) clause; and [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column). For more details, refer to [Allow potentially unsafe SQL statements]({% link {{ page.version.version }}/cockroach-sql.md %}#allow-potentially-unsafe-sql-statements). | `true` for interactive sessions from the [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}), `false` for sessions from other clients | Yes | Yes |
+| `statement_timeout` | The amount of time a statement can run before being stopped. This value can be an `int` (e.g., `10`) and will be interpreted as milliseconds. It can also be an interval or string argument, where the string can be parsed as a valid interval (e.g., `'4s'`). A value of `0` turns it off. | The value set by the `sql.defaults.statement_timeout` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) (`0s`, by default). | Yes | Yes |
+| `stub_catalog_tables` | If `off`, querying an unimplemented, empty [`pg_catalog`]({% link {{ page.version.version }}/pg-catalog.md %}) table will result in an error, as is the case in v20.2 and earlier. If `on`, querying an unimplemented, empty `pg_catalog` table simply returns no rows. | `on` | Yes | Yes |
+| `timezone` | The default time zone for the current session. | `UTC` | Yes | Yes |
+| `tracing` | The trace recording state. | `off` | Yes | Yes |
+| `transaction_isolation` | The isolation level at which the transaction executes ([`SERIALIZABLE`]({% link {{ page.version.version }}/demo-serializable.md %}) or [`READ COMMITTED`]({% link {{ page.version.version }}/read-committed.md %})). See [Isolation levels]({% link {{ page.version.version }}/transactions.md %}#isolation-levels). | `SERIALIZABLE` | Yes | Yes |
+| `transaction_priority` | The priority of the current transaction. See [Transactions: Transaction priorities]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities) for more details. This session variable was called `transaction priority` (with a space) in CockroachDB 1.x. It has been renamed for compatibility with PostgreSQL. | `NORMAL` | Yes | Yes |
+| `transaction_read_only` | The access mode of the current transaction. See [`SET TRANSACTION`]({% link {{ page.version.version }}/set-transaction.md %}) for more details. | `off` | Yes | Yes |
+| `transaction_rows_read_err` | The limit for the number of rows read by a SQL transaction. If this value is exceeded, the transaction will fail (or the event will be logged to `SQL_INTERNAL_PERF` for internal transactions). | `0` | Yes | Yes |
+| `transaction_rows_read_log` | The threshold for the number of rows read by a SQL transaction. If this value is exceeded, the event will be logged to `SQL_PERF` (or `SQL_INTERNAL_PERF` for internal transactions). | `0` | Yes | Yes |
+| `transaction_rows_written_err` | The limit for the number of rows written by a SQL transaction. If this value is exceeded, the transaction will fail (or the event will be logged to `SQL_INTERNAL_PERF` for internal transactions). | `0` | Yes | Yes |
+| `transaction_rows_written_log` | The threshold for the number of rows written by a SQL transaction. If this value is exceeded, the event will be logged to `SQL_PERF` (or `SQL_INTERNAL_PERF` for internal transactions). | `0` | Yes | Yes |
+| `transaction_status` | The state of the current transaction. See [Transactions]({% link {{ page.version.version }}/transactions.md %}) for more details. | `NoTxn` | No | Yes |
+| `transaction_timeout` | Aborts an explicit [transaction]({% link {{ page.version.version }}/transactions.md %}) when it runs longer than the configured duration. Stored in milliseconds; can be expressed in milliseconds or as an [`INTERVAL`]({% link {{ page.version.version }}/interval.md %}). | `0` | Yes | Yes |
+| `troubleshooting_mode_enabled` | When enabled, CockroachDB avoids performing additional work on queries, such as collecting and emitting telemetry data. This session variable is particularly useful when the cluster is experiencing issues, unavailability, or failure. | `off` | Yes | Yes |
+| `use_declarative_schema_changer` | Whether to use the declarative schema changer for supported statements. See [Declarative schema changer]({% link {{ page.version.version }}/online-schema-changes.md %}#declarative-schema-changer) for more details. | `on` | Yes | Yes |
+| `vectorize` | The vectorized execution engine mode. Options include `on` and `off`. For more details, see [Configure vectorized execution for CockroachDB]({% link {{ page.version.version }}/vectorized-execution.md %}#configure-vectorized-execution). | `on` | Yes | Yes |
+| `virtual_cluster_name` | The name of the virtual cluster that the SQL client is connected to. | Session-dependent | No | Yes |
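+
+For example, you can check a variable with [`SHOW`]({% link {{ page.version.version }}/show-vars.md %}) and override it for the current session with [`SET`]({% link {{ page.version.version }}/set-vars.md %}). The variable and value below are illustrative only:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SHOW statement_timeout;
+
+> SET statement_timeout = '10s';
+
+> RESET statement_timeout;
+~~~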
+
+The following session variables are exposed only for backwards compatibility with earlier CockroachDB releases and have no impact on how CockroachDB runs:
+
+| Variable name | Initial value | Modify with [`SET`]({% link {{ page.version.version }}/set-vars.md %})? | View with [`SHOW`]({% link {{ page.version.version }}/show-vars.md %})? |
+|---|---|---|---|
+| `backslash_quote` | `safe_encoding` | No | Yes |
+| `client_encoding` | `UTF8` | No | Yes |
+| `default_tablespace` | | No | Yes |
+| `enable_drop_enum_value` | `off` | Yes | Yes |
+| `enable_seqscan` | `on` | Yes | Yes |
+| `escape_string_warning` | `on` | No | Yes |
+| `experimental_enable_hash_sharded_indexes` | `off` | Yes | Yes |
+| `integer_datetimes` | `on` | No | Yes |
+| `max_identifier_length` | `128` | No | Yes |
+| `max_index_keys` | `32` | No | Yes |
+| `row_security` | `off` | No | Yes |
+| `standard_conforming_strings` | `on` | No | Yes |
+| `server_encoding` | `UTF8` | Yes | Yes |
+| `synchronize_seqscans` | `on` | No | Yes |
+| `synchronous_commit` | `on` | Yes | Yes |
diff --git a/src/current/_includes/v25.1/misc/sorting-delete-output.md b/src/current/_includes/v25.1/misc/sorting-delete-output.md
new file mode 100644
index 00000000000..b48a138a279
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/sorting-delete-output.md
@@ -0,0 +1,9 @@
+To sort the output of a `DELETE` statement, use:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> WITH a AS (DELETE ... RETURNING ...)
+ SELECT ... FROM a ORDER BY ...
+~~~
+
+For an example, see [Sort and return deleted rows]({% link {{ page.version.version }}/delete.md %}#sort-and-return-deleted-rows).
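+
+As a more concrete sketch of the same pattern, assuming a hypothetical `promo_codes` table with `code` and `expiration_date` columns:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> WITH deleted AS (
+    DELETE FROM promo_codes WHERE expiration_date < now() RETURNING code, expiration_date
+  )
+  SELECT * FROM deleted ORDER BY expiration_date DESC;
+~~~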
diff --git a/src/current/_includes/v25.1/misc/source-privileges.md b/src/current/_includes/v25.1/misc/source-privileges.md
new file mode 100644
index 00000000000..543801a7201
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/source-privileges.md
@@ -0,0 +1,12 @@
+The source file URL does _not_ require the [`admin` role]({% link {{ page.version.version }}/security-reference/authorization.md %}#admin-role) in the following scenarios:
+
+- S3 and GS using `SPECIFIED` (and not `IMPLICIT`) credentials. Azure is always `SPECIFIED` by default.
+- [Userfile]({% link {{ page.version.version }}/use-userfile-storage.md %})
+
+The source file URL _does_ require the [`admin` role]({% link {{ page.version.version }}/security-reference/authorization.md %}#admin-role) in the following scenarios:
+
+- S3 or GS using `IMPLICIT` credentials
+- Use of a [custom endpoint](https://docs.aws.amazon.com/sdk-for-go/api/aws/endpoints/) on S3
+- [Nodelocal]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}), [HTTP]({% link {{ page.version.version }}/use-a-local-file-server.md %}), or [HTTPS]({% link {{ page.version.version }}/use-a-local-file-server.md %})
+
+We recommend using [cloud storage]({% link {{ page.version.version }}/use-cloud-storage.md %}).
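+
+For example, a hypothetical `IMPORT INTO` from S3 with `SPECIFIED` credentials passes the access keys directly in the URL, so it does not require the `admin` role (the table, bucket, and keys below are placeholders):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> IMPORT INTO test_db.customers (id, name)
+  CSV DATA ('s3://{BUCKET NAME}/customers.csv?AWS_ACCESS_KEY_ID={ACCESS KEY}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}');
+~~~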
diff --git a/src/current/_includes/v25.1/misc/storage-class-glacier-incremental.md b/src/current/_includes/v25.1/misc/storage-class-glacier-incremental.md
new file mode 100644
index 00000000000..9daebd72c14
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/storage-class-glacier-incremental.md
@@ -0,0 +1,5 @@
+{{site.data.alerts.callout_danger}}
+[Incremental backups]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}#incremental-backups) are **not** compatible with the [S3 Glacier Flexible Retrieval or Glacier Deep Archive storage classes](https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html#sc-glacier). Incremental backups require reading previous backups on an ad-hoc basis, which is not possible with backup files already in Glacier Flexible Retrieval or Glacier Deep Archive. This is because these storage classes do not allow immediate access to an S3 object without first [restoring the archived object](https://docs.aws.amazon.com/AmazonS3/latest/userguide/restoring-objects.html) to its S3 bucket.
+
+Refer to [Incremental backups and storage classes]({% link {{ page.version.version }}/use-cloud-storage.md %}#incremental-backups-and-archive-storage-classes) for more detail.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/misc/storage-classes.md b/src/current/_includes/v25.1/misc/storage-classes.md
new file mode 100644
index 00000000000..c4dafce941e
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/storage-classes.md
@@ -0,0 +1 @@
+Use the parameter to set one of the [storage classes](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html#AmazonS3-PutObject-request-header-StorageClass) listed in Amazon's documentation. For more general usage information, see Amazon's [Using Amazon S3 storage classes](https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html) documentation.
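+
+As a sketch only, assuming the storage class is supplied through the `S3_STORAGE_CLASS` URL parameter (the bucket, path, and credentials below are placeholders):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> BACKUP INTO 's3://{BUCKET NAME}/{PATH}?AWS_ACCESS_KEY_ID={ACCESS KEY}&AWS_SECRET_ACCESS_KEY={SECRET ACCESS KEY}&S3_STORAGE_CLASS=STANDARD_IA'
+  AS OF SYSTEM TIME '-10s';
+~~~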
diff --git a/src/current/_includes/v25.1/misc/table-storage-parameters.md b/src/current/_includes/v25.1/misc/table-storage-parameters.md
new file mode 100644
index 00000000000..3ca7f601648
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/table-storage-parameters.md
@@ -0,0 +1,15 @@
+| Parameter name | Description | Data type | Default value |
+|------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------|-----------|---------------|
+| `exclude_data_from_backup` | Exclude the data in this table from any future backups. | Boolean | `false` |
+| `schema_locked` | Disallow [schema changes]({% link {{ page.version.version }}/online-schema-changes.md %}) on this table. Enabling `schema_locked` can help [improve performance of changefeeds]({% link {{ page.version.version }}/create-changefeed.md %}#disallow-schema-changes-on-tables-to-improve-changefeed-performance) running on this table. | Boolean | `false` |
+| `sql_stats_automatic_collection_enabled` | Enable [automatic statistics collection]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) for this table. | Boolean | `true` |
+| `sql_stats_automatic_collection_min_stale_rows` | Minimum number of stale rows in this table that will trigger a statistics refresh. | Integer | 500 |
+| `sql_stats_automatic_collection_fraction_stale_rows` | Fraction of stale rows in this table that will trigger a statistics refresh. | Float | 0.2 |
+| `sql_stats_forecasts_enabled` | Enable [forecasted statistics]({% link {{ page.version.version }}/show-statistics.md %}#display-forecasted-statistics) collection for this table. | Boolean | `true` |
+
+The following parameters are included for PostgreSQL compatibility and do not affect how CockroachDB runs:
+
+- `autovacuum_enabled`
+- `fillfactor`
+
+For the list of storage parameters that affect how [Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}) works, see the list of [TTL storage parameters]({% link {{ page.version.version }}/row-level-ttl.md %}#ttl-storage-parameters).
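+
+These storage parameters can be set when a table is created or changed later with `ALTER TABLE ... SET`. A minimal sketch, using an illustrative table name:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> CREATE TABLE archive_events (id UUID PRIMARY KEY) WITH (sql_stats_automatic_collection_enabled = false);
+
+> ALTER TABLE archive_events SET (exclude_data_from_backup = true);
+
+> ALTER TABLE archive_events RESET (exclude_data_from_backup);
+~~~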
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/misc/tooling.md b/src/current/_includes/v25.1/misc/tooling.md
new file mode 100644
index 00000000000..f587b47babf
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/tooling.md
@@ -0,0 +1,107 @@
+## Support levels
+
+Cockroach Labs has partnered with open-source projects, vendors, and individuals to offer the following levels of support with third-party tools:
+
+- **Full support** indicates that Cockroach Labs is committed to maintaining compatibility with the vast majority of the tool's features. CockroachDB is regularly tested against the latest version documented in the table below.
+- **Partial support** indicates that Cockroach Labs is working towards full support for the tool. The primary features of the tool are compatible with CockroachDB (e.g., connecting and basic database operations), but full integration may require additional steps, lack support for all features, or exhibit unexpected behavior.
+- **Partner supported** indicates that Cockroach Labs has a partnership with a third-party vendor that provides support for the CockroachDB integration with their tool.
+
+{{site.data.alerts.callout_danger}}
+Tools, drivers, or frameworks are considered **unsupported** if:
+
+- The tool, driver, or framework is not listed on this page.
+- The version of a supported tool, driver, or framework is not listed on this page.
+
+If you encounter issues when using unsupported tools, drivers, or frameworks, contact the maintainer directly.
+
+Cockroach Labs provides "best effort" support for tools, drivers, and frameworks that are not officially supported. This means that while we will do our best to assist you, we may not be able to fully troubleshoot errors in your deployment.
+
+Customers should contact their account team before moving production workloads to CockroachDB that use unsupported drivers.
+{{site.data.alerts.end}}
+
+{{site.data.alerts.callout_info}}
+Unless explicitly stated, support for a [driver](#drivers) or [data access framework](#data-access-frameworks-e-g-orms) does not include [automatic, client-side transaction retry handling]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#client-side-retry-handling). For client-side transaction retry handling samples, see [Example Apps]({% link {{ page.version.version }}/example-apps.md %}).
+{{site.data.alerts.end}}
+
+If you encounter problems using CockroachDB with any of the tools listed on this page, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward better support.
+
+For a list of tools supported by the CockroachDB community, see [Third-Party Tools Supported by the Community]({% link {{ page.version.version }}/community-tooling.md %}).
+
+## Drivers
+
+| Language | Driver | Latest tested version | Support level | CockroachDB adapter | Tutorial |
+|----------+--------+-----------------------+---------------------+---------------------+----------|
+| C | [libpq](http://www.postgresql.org/docs/13/static/libpq.html)| PostgreSQL 13 | Partial | N/A | N/A |
+| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/npgsql.go ||var npgsqlSupportedTag = "v||"\n\n %} | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) |
+| Go | [pgx](https://github.com/jackc/pgx/releases)<br>[pq](https://github.com/lib/pq) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgx.go ||var supportedPGXTag = "||"\n\n %} (use latest version of CockroachDB adapter)<br>{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/libpq.go ||var libPQSupportedTag = "||"\n\n %} | Full<br>Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx) (includes client-side transaction retry handling)<br>N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)<br>[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) |
+| Java | [JDBC](https://jdbc.postgresql.org/download/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgjdbc.go ||var supportedPGJDBCTag = "||"\n\n %} | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) |
+| JavaScript | [pg](https://www.npmjs.com/package/pg) | 8.2.1 | Full | N/A | [Build a Node.js App with CockroachDB (pg)](build-a-nodejs-app-with-cockroachdb.html) |
+| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/) | … | Full | N/A | … |
+
+## Data access frameworks (e.g., ORMs)
+
+| Language | Data access framework | Latest tested version | Support level | CockroachDB adapter | Tutorial |
+|----------+-----------------------+-----------------------+---------------+---------------------+----------|
+| Go | GORM<br>go-pg<br>upper/db | …<br>{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gopg.go ||var gopgSupportedTag = "||"\n\n %}<br>v4 | Full<br>Full<br>Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm) (includes client-side transaction retry handling)<br>N/A<br>N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)<br>N/A<br>[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) |
+| Java | [Hibernate](https://hibernate.org/orm/) (including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))<br>[jOOQ](https://www.jooq.org/)<br>[MyBatis](https://mybatis.org/mybatis-3/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/hibernate.go ||var supportedHibernateTag = "||"\n\n %} (must be at least 5.4.19)<br>3.13.2 (must be at least 3.13.0)<br>3.5.5 | Full<br>Full<br>Full | N/A<br>N/A<br>N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)<br>[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)<br>[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) |
+| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize) | … | … | … | … |
+| Python | Django<br>peewee<br>SQLAlchemy | … | … | N/A<br>N/A<br>[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb) (includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)<br>N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)<br>[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) |
+
+## Application frameworks
+
+| Framework | Data access | Latest tested version | Support level | Tutorial |
+|-----------+-------------+-----------------------+---------------+----------|
+| Spring | [JDBC]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-jdbc.md %})<br>[JPA (Hibernate)](build-a-spring-app-with-cockroachdb-jpa.html)<br>[MyBatis]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | See individual Java ORM or [driver](#drivers) for data access version support. | See individual Java ORM or [driver](#drivers) for data access support level. | [Build a Spring App with CockroachDB (JDBC)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-jdbc.md %})<br>[Build a Spring App with CockroachDB (JPA)](build-a-spring-app-with-cockroachdb-jpa.html)<br>[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %})
+
+## Graphical user interfaces (GUIs)
+
+| GUI | Latest tested version | Support level | Tutorial |
+|-----+-----------------------+---------------+----------|
+| [DBeaver](https://dbeaver.com/) | 5.2.3 | Full | [Visualize CockroachDB Schemas with DBeaver]({% link {{ page.version.version }}/dbeaver.md %})
+
+## Integrated development environments (IDEs)
+
+| IDE | Latest tested version | Support level | Tutorial |
+|-----+-----------------------+---------------+----------|
+| [DataGrip](https://www.jetbrains.com/datagrip/) | 2024.1 | Full | N/A
+| [IntelliJ IDEA](https://www.jetbrains.com/idea/) | 2024.1 | Full | [Use IntelliJ IDEA with CockroachDB]({% link {{ page.version.version }}/intellij-idea.md %})
+
+## Enhanced data security tools
+
+| Tool | Support level | Integration |
+|-----+---------------+----------|
+| [Satori](https://satoricyber.com/) | Partner supported | [Satori Integration]({% link {{ page.version.version }}/satori-integration.md %}) |
+| [HashiCorp Vault](https://www.vaultproject.io/) | Partner supported | [HashiCorp Vault Integration]({% link {{ page.version.version }}/hashicorp-integration.md %}) |
+
+## Schema migration tools
+
+| Tool | Latest tested version | Support level | Tutorial |
+|-----+------------------------+----------------+----------|
+| [Alembic](https://alembic.sqlalchemy.org/en/latest/) | 1.7 | Full | [Migrate CockroachDB Schemas with Alembic]({% link {{ page.version.version }}/alembic.md %})
+| [Flyway](https://flywaydb.org/documentation/commandline/#download-and-installation) | 7.1.0 | Full | [Migrate CockroachDB Schemas with Flyway]({% link {{ page.version.version }}/flyway.md %})
+| [Liquibase](https://www.liquibase.org/download) | 4.2.0 | Full | [Migrate CockroachDB Schemas with Liquibase]({% link {{ page.version.version }}/liquibase.md %})
+| [Prisma](https://prisma.io) | 3.14.0 | Full | [Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
+
+## Data migration tools
+
+| Tool | Latest tested version | Support level | Documentation |
+|-----+------------------------+----------------+----------|
+| [AWS DMS](https://aws.amazon.com/dms/) | 3.4.6 | Full | [Migrate with AWS Database Migration Service (DMS)](aws-dms.html)
+| [Qlik Replicate](https://www.qlik.com/us/products/qlik-replicate) | November 2022 | Full | [Migrate and Replicate Data with Qlik Replicate]({% link {{ page.version.version }}/qlik.md %})
+| [Striim](https://www.striim.com) | 4.1.2 | Full | [Migrate and Replicate Data with Striim]({% link {{ page.version.version }}/striim.md %})
+| [Oracle GoldenGate](https://www.oracle.com/integration/goldengate/) | 21.3 | Partial | [Migrate and Replicate Data with Oracle GoldenGate]({% link {{ page.version.version }}/goldengate.md %})
+| [Debezium](https://debezium.io/) | 2.4 | Full | [Migrate Data with Debezium]({% link {{ page.version.version }}/debezium.md %})
+
+## Provisioning tools
+
+| Tool | Latest tested version | Support level | Documentation |
+|------+-----------------------+---------------+---------------|
+| [Terraform](https://terraform.io/) | 1.3.2 | Partial | [Terraform provider for CockroachDB Cloud](https://github.com/cockroachdb/terraform-provider-cockroach#get-started) |
+
+## Other tools
+
+| Tool | Latest tested version | Support level | Tutorial |
+|-----+------------------------+---------------+----------|
+| [Flowable](https://github.com/flowable/flowable-engine) | 6.4.2 | Full | [Getting Started with Flowable and CockroachDB (external)](https://blog.flowable.org/2019/07/11/getting-started-with-flowable-and-cockroachdb/)
diff --git a/src/current/_includes/v25.1/misc/userfile.md b/src/current/_includes/v25.1/misc/userfile.md
new file mode 100644
index 00000000000..dbeb640a84b
--- /dev/null
+++ b/src/current/_includes/v25.1/misc/userfile.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+ CockroachDB now supports uploading files to a [user-scoped file storage]({% link {{ page.version.version }}/use-userfile-storage.md %}) using a SQL connection. We recommend using `userfile` instead of `nodelocal`, as it is user-scoped and more secure.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/orchestration/apply-custom-resource.md b/src/current/_includes/v25.1/orchestration/apply-custom-resource.md
new file mode 100644
index 00000000000..e7aacf41a1e
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/apply-custom-resource.md
@@ -0,0 +1,6 @@
+Apply the new settings to the cluster:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ kubectl apply -f example.yaml
+~~~
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/orchestration/apply-helm-values.md b/src/current/_includes/v25.1/orchestration/apply-helm-values.md
new file mode 100644
index 00000000000..90f9c8783f8
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/apply-helm-values.md
@@ -0,0 +1,6 @@
+Apply the custom values to override the default Helm chart [values](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/values.yaml):
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ helm upgrade {release-name} --values {custom-values}.yaml cockroachdb/cockroachdb
+~~~
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/orchestration/apply-statefulset-manifest.md b/src/current/_includes/v25.1/orchestration/apply-statefulset-manifest.md
new file mode 100644
index 00000000000..0236903c497
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/apply-statefulset-manifest.md
@@ -0,0 +1,6 @@
+Apply the new settings to the cluster:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ kubectl apply -f {statefulset-manifest}.yaml
+~~~
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/orchestration/kubernetes-basic-sql.md b/src/current/_includes/v25.1/orchestration/kubernetes-basic-sql.md
new file mode 100644
index 00000000000..341c9bca23b
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/kubernetes-basic-sql.md
@@ -0,0 +1,44 @@
+1. Run some basic [CockroachDB SQL statements]({% link {{ page.version.version }}/learn-cockroachdb-sql.md %}):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > CREATE DATABASE bank;
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > CREATE TABLE bank.accounts (id INT PRIMARY KEY, balance DECIMAL);
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > INSERT INTO bank.accounts VALUES (1, 1000.50);
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > SELECT * FROM bank.accounts;
+ ~~~
+
+ ~~~
+ id | balance
+ +----+---------+
+ 1 | 1000.50
+ (1 row)
+ ~~~
+
+1. [Create a user with a password]({% link {{ page.version.version }}/create-user.md %}#create-a-user-with-a-password):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > CREATE USER roach WITH PASSWORD 'Q7gc8rEdS';
+ ~~~
+
+ You will need this username and password to access the DB Console later.
+
+1. Exit the SQL shell and pod:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > \q
+ ~~~
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/orchestration/kubernetes-cockroach-cert.md b/src/current/_includes/v25.1/orchestration/kubernetes-cockroach-cert.md
new file mode 100644
index 00000000000..12fa4d9783f
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/kubernetes-cockroach-cert.md
@@ -0,0 +1,90 @@
+{{site.data.alerts.callout_info}}
+The below steps use [`cockroach cert` commands]({% link {{ page.version.version }}/cockroach-cert.md %}) to quickly generate and sign the CockroachDB node and client certificates. Read our [Authentication]({% link {{ page.version.version }}/authentication.md %}#using-digital-certificates-with-cockroachdb) docs to learn about other methods of signing certificates.
+{{site.data.alerts.end}}
+
+1. Create two directories:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ mkdir certs my-safe-directory
+ ~~~
+
+ Directory | Description
+ ----------|------------
+ `certs` | You'll generate your CA certificate and all node and client certificates and keys in this directory.
+ `my-safe-directory` | You'll generate your CA key in this directory and then reference the key when generating node and client certificates.
+
+1. Create the CA certificate and key pair:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach cert create-ca \
+ --certs-dir=certs \
+ --ca-key=my-safe-directory/ca.key
+ ~~~
+
+1. Create a client certificate and key pair for the root user:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach cert create-client \
+ root \
+ --certs-dir=certs \
+ --ca-key=my-safe-directory/ca.key
+ ~~~
+
+1. Upload the client certificate and key to the Kubernetes cluster as a secret:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl create secret \
+ generic cockroachdb.client.root \
+ --from-file=certs
+ ~~~
+
+ ~~~
+ secret/cockroachdb.client.root created
+ ~~~
+
+1. Create the certificate and key pair for your CockroachDB nodes:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach cert create-node \
+ localhost 127.0.0.1 \
+ cockroachdb-public \
+ cockroachdb-public.default \
+ cockroachdb-public.default.svc.cluster.local \
+ *.cockroachdb \
+ *.cockroachdb.default \
+ *.cockroachdb.default.svc.cluster.local \
+ --certs-dir=certs \
+ --ca-key=my-safe-directory/ca.key
+ ~~~
+
+1. Upload the node certificate and key to the Kubernetes cluster as a secret:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl create secret \
+ generic cockroachdb.node \
+ --from-file=certs
+ ~~~
+
+ ~~~
+ secret/cockroachdb.node created
+ ~~~
+
+1. Check that the secrets were created on the cluster:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get secrets
+ ~~~
+
+ ~~~
+ NAME TYPE DATA AGE
+ cockroachdb.client.root Opaque 3 41m
+ cockroachdb.node Opaque 5 14s
+ default-token-6qjdb kubernetes.io/service-account-token 3 4m
+ ~~~
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/orchestration/kubernetes-expand-disk-helm.md b/src/current/_includes/v25.1/orchestration/kubernetes-expand-disk-helm.md
new file mode 100644
index 00000000000..4ec3d2f171f
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/kubernetes-expand-disk-helm.md
@@ -0,0 +1,118 @@
+You can expand certain [types of persistent volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#types-of-persistent-volumes) (including GCE Persistent Disk and Amazon Elastic Block Store) by editing their persistent volume claims.
+
+{{site.data.alerts.callout_info}}
+These steps assume you followed the tutorial [Deploy CockroachDB on Kubernetes](deploy-cockroachdb-with-kubernetes.html?filters=helm).
+{{site.data.alerts.end}}
+
+1. Get the persistent volume claims for the volumes:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pvc
+ ~~~
+
+ ~~~
+ NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
+ datadir-my-release-cockroachdb-0 Bound pvc-75dadd4c-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m
+ datadir-my-release-cockroachdb-1 Bound pvc-75e143ca-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m
+ datadir-my-release-cockroachdb-2 Bound pvc-75ef409a-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m
+ ~~~
+
+1. In order to expand a persistent volume claim, `AllowVolumeExpansion` in its storage class must be `true`. Examine the storage class:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl describe storageclass standard
+ ~~~
+
+ ~~~
+ Name: standard
+ IsDefaultClass: Yes
+ Annotations: storageclass.kubernetes.io/is-default-class=true
+ Provisioner: kubernetes.io/gce-pd
+ Parameters: type=pd-standard
+ AllowVolumeExpansion: False
+ MountOptions:
+ ReclaimPolicy: Delete
+ VolumeBindingMode: Immediate
+ Events:
+ ~~~
+
+ If necessary, edit the storage class:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl patch storageclass standard -p '{"allowVolumeExpansion": true}'
+ ~~~
+
+ ~~~
+ storageclass.storage.k8s.io/standard patched
+ ~~~
+
+1. Edit one of the persistent volume claims to request more space:
+
+ {{site.data.alerts.callout_info}}
+ The requested `storage` value must be larger than the previous value. You cannot use this method to decrease the disk size.
+ {{site.data.alerts.end}}
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl patch pvc datadir-my-release-cockroachdb-0 -p '{"spec": {"resources": {"requests": {"storage": "200Gi"}}}}'
+ ~~~
+
+ ~~~
+ persistentvolumeclaim/datadir-my-release-cockroachdb-0 patched
+ ~~~
+
+1. Check the capacity of the persistent volume claim:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pvc datadir-my-release-cockroachdb-0
+ ~~~
+
+ ~~~
+ NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
+ datadir-my-release-cockroachdb-0 Bound pvc-75dadd4c-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 18m
+ ~~~
+
+ If the PVC capacity has not changed, this may be because `AllowVolumeExpansion` was initially set to `false` or because the [volume has a file system](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#resizing-an-in-use-persistentvolumeclaim) that has to be expanded. You will need to start or restart a pod in order to have it reflect the new capacity.
+
+ {{site.data.alerts.callout_success}}
+ Running `kubectl get pv` will display the persistent volumes with their *requested* capacity and not their actual capacity. This can be misleading, so it's best to use `kubectl get pvc`.
+ {{site.data.alerts.end}}
+
+1. Examine the persistent volume claim. If the volume has a file system, you will see a `FileSystemResizePending` condition with an accompanying message:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl describe pvc datadir-my-release-cockroachdb-0
+ ~~~
+
+ ~~~
+ Waiting for user to (re-)start a pod to finish file system resize of volume on node.
+ ~~~
+
+1. Delete the corresponding pod to restart it:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl delete pod my-release-cockroachdb-0
+ ~~~
+
+ The `FileSystemResizePending` condition and message will be removed.
+
+1. View the updated persistent volume claim:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pvc datadir-my-release-cockroachdb-0
+ ~~~
+
+ ~~~
+ NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
+ datadir-my-release-cockroachdb-0 Bound pvc-75dadd4c-01a1-11ea-b065-42010a8e00cb 200Gi RWO standard 20m
+ ~~~
+
+1. The CockroachDB cluster needs to be expanded one node at a time. Repeat steps 3 - 6 to increase the capacities of the remaining volumes by the same amount.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/orchestration/kubernetes-expand-disk-manual.md b/src/current/_includes/v25.1/orchestration/kubernetes-expand-disk-manual.md
new file mode 100644
index 00000000000..e6cf4bbbddb
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/kubernetes-expand-disk-manual.md
@@ -0,0 +1,118 @@
+You can expand certain [types of persistent volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#types-of-persistent-volumes) (including GCE Persistent Disk and Amazon Elastic Block Store) by editing their persistent volume claims.
+
+{{site.data.alerts.callout_info}}
+These steps assume you followed the tutorial [Deploy CockroachDB on Kubernetes](deploy-cockroachdb-with-kubernetes.html?filters=manual).
+{{site.data.alerts.end}}
+
+1. Get the persistent volume claims for the volumes:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pvc
+ ~~~
+
+ ~~~
+ NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
+ datadir-cockroachdb-0 Bound pvc-75dadd4c-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m
+ datadir-cockroachdb-1 Bound pvc-75e143ca-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m
+ datadir-cockroachdb-2 Bound pvc-75ef409a-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m
+ ~~~
+
+1. In order to expand a persistent volume claim, `AllowVolumeExpansion` in its storage class must be `true`. Examine the storage class:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl describe storageclass standard
+ ~~~
+
+ ~~~
+ Name: standard
+ IsDefaultClass: Yes
+ Annotations: storageclass.kubernetes.io/is-default-class=true
+ Provisioner: kubernetes.io/gce-pd
+ Parameters: type=pd-standard
+ AllowVolumeExpansion: False
+ MountOptions:
+ ReclaimPolicy: Delete
+ VolumeBindingMode: Immediate
+ Events:
+ ~~~
+
+ If necessary, edit the storage class:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl patch storageclass standard -p '{"allowVolumeExpansion": true}'
+ ~~~
+
+ ~~~
+ storageclass.storage.k8s.io/standard patched
+ ~~~
+
+1. Edit one of the persistent volume claims to request more space:
+
+ {{site.data.alerts.callout_info}}
+ The requested `storage` value must be larger than the previous value. You cannot use this method to decrease the disk size.
+ {{site.data.alerts.end}}
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl patch pvc datadir-cockroachdb-0 -p '{"spec": {"resources": {"requests": {"storage": "200Gi"}}}}'
+ ~~~
+
+ ~~~
+ persistentvolumeclaim/datadir-cockroachdb-0 patched
+ ~~~
+
+1. Check the capacity of the persistent volume claim:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pvc datadir-cockroachdb-0
+ ~~~
+
+ ~~~
+ NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
+ datadir-cockroachdb-0 Bound pvc-75dadd4c-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 18m
+ ~~~
+
+ If the PVC capacity has not changed, this may be because `AllowVolumeExpansion` was initially set to `false` or because the [volume has a file system](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#resizing-an-in-use-persistentvolumeclaim) that has to be expanded. You will need to start or restart a pod in order to have it reflect the new capacity.
+
+ {{site.data.alerts.callout_success}}
+ Running `kubectl get pv` will display the persistent volumes with their *requested* capacity and not their actual capacity. This can be misleading, so it's best to use `kubectl get pvc`.
+ {{site.data.alerts.end}}
+
+1. Examine the persistent volume claim. If the volume has a file system, you will see a `FileSystemResizePending` condition with an accompanying message:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl describe pvc datadir-cockroachdb-0
+ ~~~
+
+ ~~~
+ Waiting for user to (re-)start a pod to finish file system resize of volume on node.
+ ~~~
+
+1. Delete the corresponding pod to restart it:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl delete pod cockroachdb-0
+ ~~~
+
+ The `FileSystemResizePending` condition and message will be removed.
+
+1. View the updated persistent volume claim:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pvc datadir-cockroachdb-0
+ ~~~
+
+ ~~~
+ NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
+ datadir-cockroachdb-0 Bound pvc-75dadd4c-01a1-11ea-b065-42010a8e00cb 200Gi RWO standard 20m
+ ~~~
+
+1. The CockroachDB cluster needs to be expanded one node at a time. Repeat steps 3 - 6 to increase the capacities of the remaining volumes by the same amount.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/orchestration/kubernetes-limitations.md b/src/current/_includes/v25.1/orchestration/kubernetes-limitations.md
new file mode 100644
index 00000000000..5e9784c28d1
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/kubernetes-limitations.md
@@ -0,0 +1,37 @@
+#### Kubernetes version
+
+To deploy CockroachDB {{page.version.version}}, Kubernetes 1.18 or higher is required. Cockroach Labs strongly recommends that you use a Kubernetes version that is [eligible for patch support by the Kubernetes project](https://kubernetes.io/releases/).
+
+#### Kubernetes Operator
+
+- The CockroachDB Kubernetes Operator currently deploys clusters in a single region. For multi-region deployments using manual configs, see [Orchestrate CockroachDB Across Multiple Kubernetes Clusters]({% link {{ page.version.version }}/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md %}).
+
+- Using the Operator, you can give a new cluster an arbitrary number of [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/). However, a cluster's labels cannot be modified after it is deployed. To track the status of this limitation, refer to [#993](https://github.com/cockroachdb/cockroach-operator/issues/993) in the Operator project's issue tracker.
+
+{% unless page.name == "orchestrate-cockroachdb-with-kubernetes-multi-cluster.md" %}
+#### Helm version
+
+The CockroachDB Helm chart requires Helm 3.0 or higher. If you attempt to use an incompatible Helm version, an error like the following occurs:
+
+~~~ shell
+Error: UPGRADE FAILED: template: cockroachdb/templates/tests/client.yaml:6:14: executing "cockroachdb/templates/tests/client.yaml" at <.Values.networkPolicy.enabled>: nil pointer evaluating interface {}.enabled
+~~~
+
+The CockroachDB Helm chart is currently not under active development, and no new features are planned. However, Cockroach Labs remains committed to fully supporting the Helm chart by addressing defects, providing security patches, and addressing breaking changes due to deprecations in Kubernetes APIs.
+
+A deprecation notice for the Helm chart will be provided to customers a minimum of 6 months in advance of actual deprecation.
+{% endunless %}
+
+#### Network
+
+Server Name Indication (SNI) is an extension to the TLS protocol which allows a client to indicate which hostname it is attempting to connect to at the start of the TLS handshake process. The server can present multiple certificates on the same IP address and TCP port number, and one server can serve multiple secure websites or API services even if they use different certificates.
+
+Due to its order of operations, the PostgreSQL wire protocol's implementation of TLS is not compatible with SNI-based routing in the Kubernetes ingress controller. Instead, use a TCP load balancer for CockroachDB that is not shared with other services.
+
+#### Resources
+
+When starting Kubernetes, select machines with at least **4 vCPUs** and **16 GiB** of memory, and provision at least **2 vCPUs** and **8 GiB** of memory to CockroachDB per pod. These minimum settings are used by default in this deployment guide, and are appropriate for testing purposes only. On a production deployment, you should adjust the resource settings for your workload. For details, see [Resource management]({% link {{ page.version.version }}/configure-cockroachdb-kubernetes.md %}#memory-and-cpu).
+
+#### Storage
+
+Kubernetes deployments use external persistent volumes that are often replicated by the provider. CockroachDB replicates data automatically, and this redundant layer of replication can impact performance. Using [local volumes](https://kubernetes.io/docs/concepts/storage/volumes/#local) may improve performance.
diff --git a/src/current/_includes/v25.1/orchestration/kubernetes-remove-nodes-helm.md b/src/current/_includes/v25.1/orchestration/kubernetes-remove-nodes-helm.md
new file mode 100644
index 00000000000..c6b3215bb74
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/kubernetes-remove-nodes-helm.md
@@ -0,0 +1,126 @@
+Before removing a node from your cluster, you must first decommission the node. This lets a node finish in-flight requests, rejects any new requests, and transfers all range replicas and range leases off the node.
+
+{{site.data.alerts.callout_danger}}
+If you remove nodes without first telling CockroachDB to decommission them, you may cause data or even cluster unavailability. For more details about how this works and what to consider before removing nodes, see [Prepare for graceful shutdown](node-shutdown.html?filters=decommission#prepare-for-graceful-shutdown).
+{{site.data.alerts.end}}
+
+1. Use the [`cockroach node status`]({% link {{ page.version.version }}/cockroach-node.md %}) command to get the internal IDs of nodes. For example, if you followed the steps in [Deploy CockroachDB with Kubernetes]({% link {{ page.version.version }}/deploy-cockroachdb-with-kubernetes.md %}#step-3-use-the-built-in-sql-client) to launch a secure client pod, get a shell into the `cockroachdb-client-secure` pod:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl exec -it cockroachdb-client-secure \
+ -- ./cockroach node status \
+ --certs-dir=/cockroach-certs \
+ --host=my-release-cockroachdb-public
+ ~~~
+
+ ~~~
+ id | address | build | started_at | updated_at | is_available | is_live
+ +----+---------------------------------------------------------------------------------+--------+----------------------------------+----------------------------------+--------------+---------+
+ 1 | my-release-cockroachdb-0.my-release-cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:04:36.486082+00:00 | 2018-11-29 18:24:24.587454+00:00 | true | true
+ 2 | my-release-cockroachdb-2.my-release-cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:55:03.880406+00:00 | 2018-11-29 18:24:23.469302+00:00 | true | true
+ 3 | my-release-cockroachdb-1.my-release-cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:04:41.383588+00:00 | 2018-11-29 18:24:25.030175+00:00 | true | true
+ 4 | my-release-cockroachdb-3.my-release-cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 17:31:19.990784+00:00 | 2018-11-29 18:24:26.041686+00:00 | true | true
+ (4 rows)
+ ~~~
+
+ The pod uses the `root` client certificate created earlier to initialize the cluster, so there's no CSR approval required.
+
+1. Use the [`cockroach node decommission`]({% link {{ page.version.version }}/cockroach-node.md %}) command to decommission the node with the highest number in its address, specifying its ID (in this example, node ID `4` because its address is `my-release-cockroachdb-3`):
+
+ {{site.data.alerts.callout_info}}
+ You must decommission the node with the highest number in its address. Kubernetes will remove the pod for the node with the highest number in its address when you reduce the replica count.
+ {{site.data.alerts.end}}
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl exec -it cockroachdb-client-secure \
+ -- ./cockroach node decommission 4 \
+ --certs-dir=/cockroach-certs \
+ --host=my-release-cockroachdb-public
+ ~~~
+
+ You'll then see the decommissioning status print to `stderr` as it changes:
+
+ ~~~
+ id | is_live | replicas | is_decommissioning | membership | is_draining
+ -----+---------+----------+--------------------+-----------------+--------------
+ 4 | true | 73 | true | decommissioning | false
+ ~~~
+
+ Once the node has been fully decommissioned, you'll see a confirmation:
+
+ ~~~
+ id | is_live | replicas | is_decommissioning | membership | is_draining
+ -----+---------+----------+--------------------+-----------------+--------------
+ 4 | true | 0 | true | decommissioning | false
+ (1 row)
+
+ No more data reported on target nodes. Please verify cluster health before removing the nodes.
+ ~~~
+
+1. Once the node has been decommissioned, scale down your StatefulSet:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ helm upgrade \
+ my-release \
+ cockroachdb/cockroachdb \
+ --set statefulset.replicas=3 \
+ --reuse-values
+ ~~~
+
+1. Verify that the pod was successfully removed:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pods
+ ~~~
+
+ ~~~
+ NAME READY STATUS RESTARTS AGE
+ my-release-cockroachdb-0 1/1 Running 0 51m
+ my-release-cockroachdb-1 1/1 Running 0 47m
+ my-release-cockroachdb-2 1/1 Running 0 3m
+ cockroachdb-client-secure 1/1 Running 0 15m
+ ...
+ ~~~
+
+1. You should also remove the persistent volume that was mounted to the pod. Get the persistent volume claims for the volumes:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pvc
+ ~~~
+
+ ~~~
+ NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
+ datadir-my-release-cockroachdb-0 Bound pvc-75dadd4c-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m
+ datadir-my-release-cockroachdb-1 Bound pvc-75e143ca-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m
+ datadir-my-release-cockroachdb-2 Bound pvc-75ef409a-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m
+ datadir-my-release-cockroachdb-3 Bound pvc-75e561ba-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m
+ ~~~
+
+1. Verify that the PVC with the highest number in its name is no longer mounted to a pod:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl describe pvc datadir-my-release-cockroachdb-3
+ ~~~
+
+ ~~~
+ Name: datadir-my-release-cockroachdb-3
+ ...
+ Mounted By:
+ ~~~
+
+1. Remove the persistent volume by deleting the PVC:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl delete pvc datadir-my-release-cockroachdb-3
+ ~~~
+
+ ~~~
+ persistentvolumeclaim "datadir-my-release-cockroachdb-3" deleted
+ ~~~
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/orchestration/kubernetes-remove-nodes-insecure.md b/src/current/_includes/v25.1/orchestration/kubernetes-remove-nodes-insecure.md
new file mode 100644
index 00000000000..af48c9c6c30
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/kubernetes-remove-nodes-insecure.md
@@ -0,0 +1,140 @@
+To safely remove a node from your cluster, you must first decommission the node and only then adjust the `spec.replicas` value of your StatefulSet configuration to permanently remove it. This sequence is important because the decommissioning process lets a node finish in-flight requests, rejects any new requests, and transfers all range replicas and range leases off the node.
+
+{{site.data.alerts.callout_danger}}
+If you remove nodes without first telling CockroachDB to decommission them, you may cause data or even cluster unavailability. For more details about how this works and what to consider before removing nodes, see [Prepare for graceful shutdown](node-shutdown.html?filters=decommission#prepare-for-graceful-shutdown).
+{{site.data.alerts.end}}
+
+1. Launch a temporary interactive pod and use the `cockroach node status` command to get the internal IDs of nodes:
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl run cockroachdb -it \
+ --image=cockroachdb/cockroach:{{page.release_info.version}} \
+ --rm \
+ --restart=Never \
+ -- node status \
+ --insecure \
+ --host=cockroachdb-public
+ ~~~
+
+ ~~~
+ id | address | build | started_at | updated_at | is_available | is_live
+ +----+---------------------------------------------------------------------------------+--------+----------------------------------+----------------------------------+--------------+---------+
+ 1 | cockroachdb-0.cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:04:36.486082+00:00 | 2018-11-29 18:24:24.587454+00:00 | true | true
+ 2 | cockroachdb-2.cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:55:03.880406+00:00 | 2018-11-29 18:24:23.469302+00:00 | true | true
+ 3 | cockroachdb-1.cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:04:41.383588+00:00 | 2018-11-29 18:24:25.030175+00:00 | true | true
+ 4 | cockroachdb-3.cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 17:31:19.990784+00:00 | 2018-11-29 18:24:26.041686+00:00 | true | true
+ (4 rows)
+ ~~~
+
+
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl run cockroachdb -it \
+ --image=cockroachdb/cockroach:{{page.release_info.version}} \
+ --rm \
+ --restart=Never \
+ -- node status \
+ --insecure \
+ --host=my-release-cockroachdb-public
+ ~~~
+
+ ~~~
+ id | address | build | started_at | updated_at | is_available | is_live
+ +----+---------------------------------------------------------------------------------+--------+----------------------------------+----------------------------------+--------------+---------+
+ 1 | my-release-cockroachdb-0.my-release-cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:04:36.486082+00:00 | 2018-11-29 18:24:24.587454+00:00 | true | true
+ 2 | my-release-cockroachdb-2.my-release-cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:55:03.880406+00:00 | 2018-11-29 18:24:23.469302+00:00 | true | true
+ 3 | my-release-cockroachdb-1.my-release-cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:04:41.383588+00:00 | 2018-11-29 18:24:25.030175+00:00 | true | true
+ 4 | my-release-cockroachdb-3.my-release-cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 17:31:19.990784+00:00 | 2018-11-29 18:24:26.041686+00:00 | true | true
+ (4 rows)
+ ~~~
+
+
+
+1. Note the ID of the node with the highest number in its address (in this case, the address including `cockroachdb-3`) and use the [`cockroach node decommission`]({% link {{ page.version.version }}/cockroach-node.md %}) command to decommission it:
+
+ {{site.data.alerts.callout_info}}
+ It's important to decommission the node with the highest number in its address because, when you reduce the replica count, Kubernetes will remove the pod for that node.
+ {{site.data.alerts.end}}
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl run cockroachdb -it \
+ --image=cockroachdb/cockroach:{{page.release_info.version}} \
+ --rm \
+ --restart=Never \
+ -- node decommission \
+ --insecure \
+ --host=cockroachdb-public
+ ~~~
+
+
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl run cockroachdb -it \
+ --image=cockroachdb/cockroach:{{page.release_info.version}} \
+ --rm \
+ --restart=Never \
+ -- node decommission \
+ --insecure \
+ --host=my-release-cockroachdb-public
+ ~~~
+
+
+
+ You'll then see the decommissioning status print to `stderr` as it changes:
+
+ ~~~
+ id | is_live | replicas | is_decommissioning | membership | is_draining
+ -----+---------+----------+--------------------+-----------------+--------------
+ 4 | true | 73 | true | decommissioning | false
+ ~~~
+
+ Once the node has been fully decommissioned, you'll see a confirmation:
+
+ ~~~
+ id | is_live | replicas | is_decommissioning | membership | is_draining
+ -----+---------+----------+--------------------+-----------------+--------------
+ 4 | true | 0 | true | decommissioning | false
+ (1 row)
+
+ No more data reported on target nodes. Please verify cluster health before removing the nodes.
+ ~~~
+
+1. Once the node has been decommissioned, remove a pod from your StatefulSet:
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl scale statefulset cockroachdb --replicas=3
+ ~~~
+
+ ~~~
+ statefulset "cockroachdb" scaled
+ ~~~
+
+
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ helm upgrade \
+ my-release \
+ cockroachdb/cockroachdb \
+ --set statefulset.replicas=3 \
+ --reuse-values
+ ~~~
+
+
diff --git a/src/current/_includes/v25.1/orchestration/kubernetes-remove-nodes-manual.md b/src/current/_includes/v25.1/orchestration/kubernetes-remove-nodes-manual.md
new file mode 100644
index 00000000000..753c030bf70
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/kubernetes-remove-nodes-manual.md
@@ -0,0 +1,126 @@
+Before removing a node from your cluster, you must first decommission the node. Decommissioning lets the node finish in-flight requests, reject any new requests, and transfer all of its range replicas and range leases to other nodes.
+
+{{site.data.alerts.callout_danger}}
+If you remove nodes without first telling CockroachDB to decommission them, you may cause data or even cluster unavailability. For more details about how this works and what to consider before removing nodes, see [Prepare for graceful shutdown](node-shutdown.html?filters=decommission#prepare-for-graceful-shutdown).
+{{site.data.alerts.end}}
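+
+Before you begin, you can optionally check each node's replica count and decommissioning status by passing the `--decommission` flag to `cockroach node status`. This is a sketch that assumes the same secure client pod used in the steps below:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ kubectl exec -it cockroachdb-client-secure \
+-- ./cockroach node status \
+--decommission \
+--certs-dir=/cockroach-certs \
+--host=cockroachdb-public
+~~~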
+
+1. Use the [`cockroach node status`]({% link {{ page.version.version }}/cockroach-node.md %}) command to get the internal IDs of nodes. For example, if you followed the steps in [Deploy CockroachDB with Kubernetes]({% link {{ page.version.version }}/deploy-cockroachdb-with-kubernetes.md %}#step-3-use-the-built-in-sql-client) to launch a secure client pod, get a shell into the `cockroachdb-client-secure` pod:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl exec -it cockroachdb-client-secure \
+ -- ./cockroach node status \
+ --certs-dir=/cockroach-certs \
+ --host=cockroachdb-public
+ ~~~
+
+ ~~~
+ id | address | build | started_at | updated_at | is_available | is_live
+ +----+---------------------------------------------------------------------------------+--------+----------------------------------+----------------------------------+--------------+---------+
+ 1 | cockroachdb-0.cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:04:36.486082+00:00 | 2018-11-29 18:24:24.587454+00:00 | true | true
+ 2 | cockroachdb-2.cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:55:03.880406+00:00 | 2018-11-29 18:24:23.469302+00:00 | true | true
+ 3 | cockroachdb-1.cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 16:04:41.383588+00:00 | 2018-11-29 18:24:25.030175+00:00 | true | true
+ 4 | cockroachdb-3.cockroachdb.default.svc.cluster.local:26257 | {{page.release_info.version}} | 2018-11-29 17:31:19.990784+00:00 | 2018-11-29 18:24:26.041686+00:00 | true | true
+ (4 rows)
+ ~~~
+
+ The pod uses the `root` client certificate created earlier to initialize the cluster, so there's no CSR approval required.
+
+1. Use the [`cockroach node decommission`]({% link {{ page.version.version }}/cockroach-node.md %}) command to decommission the node with the highest number in its address, specifying its ID (in this example, node ID `4` because its address is `cockroachdb-3`):
+
+ {{site.data.alerts.callout_info}}
+ You must decommission the node with the highest number in its address. Kubernetes will remove the pod for the node with the highest number in its address when you reduce the replica count.
+ {{site.data.alerts.end}}
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl exec -it cockroachdb-client-secure \
+ -- ./cockroach node decommission 4 \
+ --certs-dir=/cockroach-certs \
+ --host=cockroachdb-public
+ ~~~
+
+ You'll then see the decommissioning status print to `stderr` as it changes:
+
+ ~~~
+ id | is_live | replicas | is_decommissioning | membership | is_draining
+ -----+---------+----------+--------------------+-----------------+--------------
+ 4 | true | 73 | true | decommissioning | false
+ ~~~
+
+ Once the node has been fully decommissioned, you'll see a confirmation:
+
+ ~~~
+ id | is_live | replicas | is_decommissioning | membership | is_draining
+ -----+---------+----------+--------------------+-----------------+--------------
+ 4 | true | 0 | true | decommissioning | false
+ (1 row)
+
+ No more data reported on target nodes. Please verify cluster health before removing the nodes.
+ ~~~
+
+1. Once the node has been decommissioned, scale down your StatefulSet:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl scale statefulset cockroachdb --replicas=3
+ ~~~
+
+ ~~~
+ statefulset.apps/cockroachdb scaled
+ ~~~
+
+1. Verify that the pod was successfully removed:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pods
+ ~~~
+
+ ~~~
+ NAME READY STATUS RESTARTS AGE
+ cockroachdb-0 1/1 Running 0 51m
+ cockroachdb-1 1/1 Running 0 47m
+ cockroachdb-2 1/1 Running 0 3m
+ cockroachdb-client-secure 1/1 Running 0 15m
+ ...
+ ~~~
+
+1. You should also remove the persistent volume that was mounted to the pod. Get the persistent volume claims for the volumes:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pvc
+ ~~~
+
+ ~~~
+ NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
+ datadir-cockroachdb-0 Bound pvc-75dadd4c-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m
+ datadir-cockroachdb-1 Bound pvc-75e143ca-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m
+ datadir-cockroachdb-2 Bound pvc-75ef409a-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m
+ datadir-cockroachdb-3 Bound pvc-75e561ba-01a1-11ea-b065-42010a8e00cb 100Gi RWO standard 17m
+ ~~~
+
+1. Verify that the PVC with the highest number in its name is no longer mounted to a pod:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl describe pvc datadir-cockroachdb-3
+ ~~~
+
+ ~~~
+ Name: datadir-cockroachdb-3
+ ...
+ Mounted By:
+ ~~~
+
+1. Remove the persistent volume by deleting the PVC:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl delete pvc datadir-cockroachdb-3
+ ~~~
+
+ ~~~
+ persistentvolumeclaim "datadir-cockroachdb-3" deleted
+ ~~~
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/orchestration/kubernetes-scale-cluster-helm.md b/src/current/_includes/v25.1/orchestration/kubernetes-scale-cluster-helm.md
new file mode 100644
index 00000000000..474a87c1077
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/kubernetes-scale-cluster-helm.md
@@ -0,0 +1,118 @@
+Before scaling CockroachDB, ensure that your Kubernetes cluster has enough worker nodes to host the number of pods you want to add. This ensures that no two CockroachDB pods are placed on the same worker node, as recommended in our [production guidance]({% link {{ page.version.version }}/recommended-production-settings.md %}#topology).
+
+For example, if you want to scale from 3 CockroachDB nodes to 4, your Kubernetes cluster should have at least 4 worker nodes. You can verify the size of your Kubernetes cluster by running `kubectl get nodes`.
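+
+For example, one quick way to count the worker nodes (a convenience sketch; plain `kubectl get nodes` also works):
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ kubectl get nodes --no-headers | wc -l
+~~~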
+
+1. Edit your StatefulSet configuration to add another pod for the new CockroachDB node:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ helm upgrade \
+ my-release \
+ cockroachdb/cockroachdb \
+ --set statefulset.replicas=4 \
+ --reuse-values
+ ~~~
+
+ ~~~
+ Release "my-release" has been upgraded. Happy Helming!
+ LAST DEPLOYED: Tue May 14 14:06:43 2019
+ NAMESPACE: default
+ STATUS: DEPLOYED
+
+ RESOURCES:
+ ==> v1beta1/PodDisruptionBudget
+ NAME AGE
+ my-release-cockroachdb-budget 51m
+
+ ==> v1/Pod(related)
+
+ NAME READY STATUS RESTARTS AGE
+ my-release-cockroachdb-0 1/1 Running 0 38m
+ my-release-cockroachdb-1 1/1 Running 0 39m
+ my-release-cockroachdb-2 1/1 Running 0 39m
+ my-release-cockroachdb-3 0/1 Pending 0 0s
+ my-release-cockroachdb-init-nwjkh 0/1 Completed 0 39m
+
+ ...
+ ~~~
+
+1. Get the name of the `Pending` CSR for the new pod:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get csr
+ ~~~
+
+ ~~~
+ NAME AGE REQUESTOR CONDITION
+ default.client.root 1h system:serviceaccount:default:default Approved,Issued
+ default.node.my-release-cockroachdb-0 1h system:serviceaccount:default:default Approved,Issued
+ default.node.my-release-cockroachdb-1 1h system:serviceaccount:default:default Approved,Issued
+ default.node.my-release-cockroachdb-2 1h system:serviceaccount:default:default Approved,Issued
+ default.node.my-release-cockroachdb-3 2m system:serviceaccount:default:default Pending
+ node-csr-0Xmb4UTVAWMEnUeGbW4KX1oL4XV_LADpkwjrPtQjlZ4 1h kubelet Approved,Issued
+ node-csr-NiN8oDsLhxn0uwLTWa0RWpMUgJYnwcFxB984mwjjYsY 1h kubelet Approved,Issued
+ node-csr-aU78SxyU69pDK57aj6txnevr7X-8M3XgX9mTK0Hso6o 1h kubelet Approved,Issued
+ ...
+ ~~~
+
+ If you do not see a `Pending` CSR, wait a minute and try again.
+
+1. Examine the CSR for the new pod:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl describe csr default.node.my-release-cockroachdb-3
+ ~~~
+
+ ~~~
+ Name: default.node.my-release-cockroachdb-3
+ Labels:
+ Annotations:
+ CreationTimestamp: Thu, 09 Nov 2017 13:39:37 -0500
+ Requesting User: system:serviceaccount:default:default
+ Status: Pending
+ Subject:
+ Common Name: node
+ Serial Number:
+ Organization: Cockroach
+ Subject Alternative Names:
+ DNS Names: localhost
+ my-release-cockroachdb-1.my-release-cockroachdb.default.svc.cluster.local
+ my-release-cockroachdb-1.my-release-cockroachdb
+ my-release-cockroachdb-public
+ my-release-cockroachdb-public.default.svc.cluster.local
+ IP Addresses: 127.0.0.1
+ 10.48.1.6
+ Events:
+ ~~~
+
+1. If everything looks correct, approve the CSR for the new pod:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl certificate approve default.node.my-release-cockroachdb-3
+ ~~~
+
+ ~~~
+ certificatesigningrequest.certificates.k8s.io/default.node.my-release-cockroachdb-3 approved
+ ~~~
+
+1. Verify that the new pod started successfully:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pods
+ ~~~
+
+ ~~~
+ NAME READY STATUS RESTARTS AGE
+ my-release-cockroachdb-0 1/1 Running 0 51m
+ my-release-cockroachdb-1 1/1 Running 0 47m
+ my-release-cockroachdb-2 1/1 Running 0 3m
+ my-release-cockroachdb-3 1/1 Running 0 1m
+ cockroachdb-client-secure 1/1 Running 0 15m
+ ...
+ ~~~
+
+1. You can also open the [**Node List**]({% link {{ page.version.version }}/ui-cluster-overview-page.md %}#node-list) in the DB Console to ensure that the fourth node successfully joined the cluster.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/orchestration/kubernetes-scale-cluster-manual.md b/src/current/_includes/v25.1/orchestration/kubernetes-scale-cluster-manual.md
new file mode 100644
index 00000000000..050c6a252da
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/kubernetes-scale-cluster-manual.md
@@ -0,0 +1,51 @@
+Before scaling up CockroachDB, note the following [topology recommendations]({% link {{ page.version.version }}/recommended-production-settings.md %}#topology):
+
+- Each CockroachDB node (running in its own pod) should run on a separate Kubernetes worker node.
+- Each availability zone should have the same number of CockroachDB nodes.
+
+If your cluster has 3 CockroachDB nodes distributed across 3 availability zones (as in our [deployment example](deploy-cockroachdb-with-kubernetes.html?filters=manual)), we recommend scaling up by a multiple of 3 to retain an even distribution of nodes. You should therefore scale up to a minimum of 6 CockroachDB nodes, with 2 nodes in each zone.
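+
+To check how your existing worker nodes are spread across availability zones, you can list them with the standard topology label (this assumes your provider sets the well-known `topology.kubernetes.io/zone` label, which GKE and EKS do by default):
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ kubectl get nodes --label-columns=topology.kubernetes.io/zone
+~~~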
+
+1. Run `kubectl get nodes` to list the worker nodes in your Kubernetes cluster. There should be at least as many worker nodes as the total number of CockroachDB pods you plan to run, so that no more than one pod is placed on each worker node.
+
+1. Add worker nodes if necessary:
+ - On GKE, [resize your cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/resizing-a-cluster). If you deployed a [regional cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-regional-cluster) as we recommended, you will use `--num-nodes` to specify the desired number of worker nodes in each zone. For example:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ gcloud container clusters resize {cluster-name} --region {region-name} --num-nodes 2
+ ~~~
+ - On EKS, resize your [Worker Node Group](https://eksctl.io/usage/managing-nodegroups/#scaling) (for example, with `eksctl`; see the sketch following this list).
+ - On GCE, resize your [Managed Instance Group](https://cloud.google.com/compute/docs/instance-groups/).
+ - On AWS, resize your [Auto Scaling Group](https://docs.aws.amazon.com/autoscaling/latest/userguide/as-manual-scaling.html).
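+
+ For example, a sketch of scaling an EKS-managed node group with `eksctl` (the cluster and node group names here are placeholders; substitute your own):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ eksctl scale nodegroup --cluster {cluster-name} --name {nodegroup-name} --nodes 2
+ ~~~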
+
+1. Edit your StatefulSet configuration to add pods for each new CockroachDB node:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl scale statefulset cockroachdb --replicas=6
+ ~~~
+
+ ~~~
+ statefulset.apps/cockroachdb scaled
+ ~~~
+
+1. Verify that the new pods started successfully:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pods
+ ~~~
+
+ ~~~
+ NAME READY STATUS RESTARTS AGE
+ cockroachdb-0 1/1 Running 0 51m
+ cockroachdb-1 1/1 Running 0 47m
+ cockroachdb-2 1/1 Running 0 3m
+ cockroachdb-3 1/1 Running 0 1m
+ cockroachdb-4 1/1 Running 0 1m
+ cockroachdb-5 1/1 Running 0 1m
+ cockroachdb-client-secure 1/1 Running 0 15m
+ ...
+ ~~~
+
+1. You can also open the [**Node List**]({% link {{ page.version.version }}/ui-cluster-overview-page.md %}#node-list) in the DB Console to ensure that the new nodes successfully joined the cluster.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/orchestration/kubernetes-simulate-failure.md b/src/current/_includes/v25.1/orchestration/kubernetes-simulate-failure.md
new file mode 100644
index 00000000000..6df9b394177
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/kubernetes-simulate-failure.md
@@ -0,0 +1,91 @@
+Based on the `replicas: 3` line in the StatefulSet configuration, Kubernetes ensures that three pods/nodes are running at all times. When a pod/node fails, Kubernetes automatically creates another pod/node with the same network identity and persistent storage.
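+
+If you want to confirm the StatefulSet's desired and current replica counts at any point, one way is to check the StatefulSet directly (shown here for a StatefulSet named `cockroachdb`; a Helm deployment's StatefulSet is named `my-release-cockroachdb`):
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ kubectl get statefulset cockroachdb
+~~~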
+
+To see this in action:
+
+1. Terminate one of the CockroachDB nodes:
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl delete pod cockroachdb-2
+ ~~~
+
+ ~~~
+ pod "cockroachdb-2" deleted
+ ~~~
+
+
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl delete pod cockroachdb-2
+ ~~~
+
+ ~~~
+ pod "cockroachdb-2" deleted
+ ~~~
+
+
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl delete pod my-release-cockroachdb-2
+ ~~~
+
+ ~~~
+ pod "my-release-cockroachdb-2" deleted
+ ~~~
+
+
+
+
+1. In the DB Console, the **Cluster Overview** will soon show one node as **Suspect**. As Kubernetes auto-restarts the node, watch how the node once again becomes healthy.
+
+1. Back in the terminal, verify that the pod was automatically restarted:
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pod cockroachdb-2
+ ~~~
+
+ ~~~
+ NAME READY STATUS RESTARTS AGE
+ cockroachdb-2 1/1 Running 0 12s
+ ~~~
+
+
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pod cockroachdb-2
+ ~~~
+
+ ~~~
+ NAME READY STATUS RESTARTS AGE
+ cockroachdb-2 1/1 Running 0 12s
+ ~~~
+
+
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pod my-release-cockroachdb-2
+ ~~~
+
+ ~~~
+ NAME READY STATUS RESTARTS AGE
+ my-release-cockroachdb-2 1/1 Running 0 44s
+ ~~~
+
+
diff --git a/src/current/_includes/v25.1/orchestration/kubernetes-stop-cluster.md b/src/current/_includes/v25.1/orchestration/kubernetes-stop-cluster.md
new file mode 100644
index 00000000000..58d79611e6d
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/kubernetes-stop-cluster.md
@@ -0,0 +1,141 @@
+To shut down the CockroachDB cluster:
+
+
+{% capture latest_operator_version %}{% include_cached latest_operator_version.md %}{% endcapture %}
+
+1. Delete the previously created custom resource:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ kubectl delete -f example.yaml
+ ~~~
+
+1. Remove the Operator:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ kubectl delete -f https://raw.githubusercontent.com/cockroachdb/cockroach-operator/v{{ latest_operator_version }}/install/operator.yaml
+ ~~~
+
+ This will delete the CockroachDB cluster being run by the Operator. It intentionally does **not** delete:
+ - The persistent volumes that were attached to the pods, to avoid the risk of data loss. Before deleting a cluster's persistent volumes, be sure to back them up. For more information, refer to [Delete a cluster's persistent volumes](#delete-a-clusters-persistent-volumes) below.
+ - Any secrets you may have created. For more information on managing secrets, refer to [Managing Secrets Using `kubectl`](https://kubernetes.io/docs/tasks/configmap-secret/managing-secret-using-kubectl) in the Kubernetes project's documentation.
+
+
+
+
+
+This procedure shuts down the CockroachDB cluster and deletes the resources you just created, including the logs and Prometheus and Alertmanager resources. This command intentionally does **not** delete:
+
+- The persistent volumes that were attached to the pods, to avoid the risk of data loss. Before deleting a cluster's persistent volumes, be sure to back them up. For more information, refer to [Delete a cluster's persistent volumes](#delete-a-clusters-persistent-volumes) below.
+- Any secrets you may have created. For more information on managing secrets, refer to [Managing Secrets Using `kubectl`](https://kubernetes.io/docs/tasks/configmap-secret/managing-secret-using-kubectl) in the Kubernetes project's documentation.
+
+{{site.data.alerts.callout_danger}}
+Do **not** pass the `--all` flag to `kubectl delete`, as doing so risks data loss.
+{{site.data.alerts.end}}
+
+1. Delete the resources with the `app=cockroachdb` label, including the logs and the Prometheus and Alertmanager resources. The command below is long; you may need to scroll horizontally to read all of it.
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ kubectl delete \
+ pods,statefulsets,services,poddisruptionbudget,jobs,rolebinding,clusterrolebinding,role,clusterrole,serviceaccount,alertmanager,prometheus,prometheusrule,serviceMonitor \
+ -l app=cockroachdb
+ ~~~
+
+ ~~~
+ pod "cockroachdb-0" deleted
+ pod "cockroachdb-1" deleted
+ pod "cockroachdb-2" deleted
+ statefulset.apps "alertmanager-cockroachdb" deleted
+ statefulset.apps "prometheus-cockroachdb" deleted
+ service "alertmanager-cockroachdb" deleted
+ service "cockroachdb" deleted
+ service "cockroachdb-public" deleted
+ poddisruptionbudget.policy "cockroachdb-budget" deleted
+ job.batch "cluster-init-secure" deleted
+ rolebinding.rbac.authorization.k8s.io "cockroachdb" deleted
+ clusterrolebinding.rbac.authorization.k8s.io "cockroachdb" deleted
+ clusterrolebinding.rbac.authorization.k8s.io "prometheus" deleted
+ role.rbac.authorization.k8s.io "cockroachdb" deleted
+ clusterrole.rbac.authorization.k8s.io "cockroachdb" deleted
+ clusterrole.rbac.authorization.k8s.io "prometheus" deleted
+ serviceaccount "cockroachdb" deleted
+ serviceaccount "prometheus" deleted
+ alertmanager.monitoring.coreos.com "cockroachdb" deleted
+ prometheus.monitoring.coreos.com "cockroachdb" deleted
+ prometheusrule.monitoring.coreos.com "prometheus-cockroachdb-rules" deleted
+ servicemonitor.monitoring.coreos.com "cockroachdb" deleted
+ ~~~
+
+1. Delete the pod created for `cockroach` client commands, if you didn't do so earlier:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ kubectl delete pod cockroachdb-client-secure
+ ~~~
+
+ ~~~
+ pod "cockroachdb-client-secure" deleted
+ ~~~
+
+{% capture get_issuers_command %}{% include_cached copy-clipboard.html %}
+ ~~~ shell
+ kubectl get issuer
+ ~~~
+ ~~~ shell
+ kubectl delete issuer {issuer_name}
+ ~~~
+{% endcapture %}
+
+{% capture get_csrs_command %}{% include_cached copy-clipboard.html %}
+ ~~~ shell
+ kubectl get csr
+ ~~~
+ ~~~ shell
+ kubectl delete csr default.client.root default.{node_name}
+ ~~~
+{% endcapture %}
+
+1. Delete the cluster's cryptographic resources.
+
+ If your cluster's certificates are managed using cert-manager (recommended but not default), get the names of the cluster's issuers and delete them: {{ get_issuers_command }}
+
+ If your cluster uses self-signed certificates (the default), get the names of any CSRs for the cluster, then delete them: {{ get_csrs_command }}
+
+
+
+
+1. Uninstall the release:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ helm uninstall my-release
+ ~~~
+
+ ~~~
+ release "my-release" deleted
+ ~~~
+
+1. Delete the pod created for `cockroach` client commands, if you didn't do so earlier:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ kubectl delete pod cockroachdb-client-secure
+ ~~~
+
+ ~~~
+ pod "cockroachdb-client-secure" deleted
+ ~~~
+
+1. Delete the cluster's cryptographic resources.
+
+ If your cluster's certificates are managed using cert-manager (recommended but not default), get the names of the cluster's issuers and delete them: {{ get_issuers_command }}
+
+ If your cluster uses self-signed certificates (the default), get the names of any CSRs for the cluster, then delete them: {{ get_csrs_command }}
+
+
+
+### Delete a cluster's persistent volumes
+
+If you need to free up the storage used by CockroachDB, you can optionally delete the persistent volumes that were attached to the pods, after first backing up your data.
+
+{{site.data.alerts.callout_danger}}
+Before you delete a cluster's persistent volumes, be sure you have a backup copy of your data. Data **cannot** be recovered once the persistent volumes are deleted. For more information, see the [Kubernetes documentation](https://kubernetes.io/docs/tasks/run-application/delete-stateful-set/#persistent-volumes).
+{{site.data.alerts.end}}
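+
+As a sketch, you could list the claims and then delete them by name. The claim names below are illustrative for a manual deployment (`datadir-cockroachdb-<n>`); verify the names in your own cluster first:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+kubectl get pvc
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+kubectl delete pvc datadir-cockroachdb-0 datadir-cockroachdb-1 datadir-cockroachdb-2
+~~~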
+
+Refer to the Kubernetes project's documentation for more information and recommendations.
diff --git a/src/current/_includes/v25.1/orchestration/local-start-kubernetes.md b/src/current/_includes/v25.1/orchestration/local-start-kubernetes.md
new file mode 100644
index 00000000000..7a62cd98fcc
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/local-start-kubernetes.md
@@ -0,0 +1,22 @@
+## Before you begin
+
+Before getting started, it's helpful to review some Kubernetes-specific terminology:
+
+Feature | Description
+--------|------------
+[minikube](http://kubernetes.io/docs/getting-started-guides/minikube/) | A tool commonly used to run a Kubernetes cluster on a local workstation.
+[pod](http://kubernetes.io/docs/user-guide/pods/) | A pod is a group of one or more containers managed by Kubernetes. In this tutorial, all pods run on your local workstation. Each pod contains a single container that runs a single-node CockroachDB cluster. You'll start with 3 pods and grow to 4.
+[StatefulSet](http://kubernetes.io/docs/concepts/abstractions/controllers/statefulsets/) | A StatefulSet is a group of pods treated as stateful units, where each pod has distinguishable network identity and always binds back to the same persistent storage on restart.
+[persistent volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) | A persistent volume is storage mounted in a pod and available to its containers. The lifetime of a persistent volume is decoupled from the lifetime of the pod that's using it, ensuring that each CockroachDB node binds back to the same storage on restart.<br><br>When using `minikube`, persistent volumes are external temporary directories that endure until they are manually deleted or until the entire Kubernetes cluster is deleted.
+[persistent volume claim](http://kubernetes.io/docs/user-guide/persistent-volumes/#persistentvolumeclaims) | When a pod is created, it requests a persistent volume claim to claim durable storage for its node.
+
+## Step 1. Start Kubernetes
+
+1. Follow the [Minikube documentation](https://kubernetes.io/docs/tasks/tools/install-minikube/) to install the latest version of `minikube`, a hypervisor, and the `kubectl` command-line tool.
+
+1. Start a local Kubernetes cluster:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ minikube start
+ ~~~
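+
+1. Optionally, verify that `kubectl` can reach the local cluster before continuing (a quick sanity check; the single minikube node should report a `Ready` status):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ kubectl get nodes
+ ~~~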
diff --git a/src/current/_includes/v25.1/orchestration/monitor-cluster.md b/src/current/_includes/v25.1/orchestration/monitor-cluster.md
new file mode 100644
index 00000000000..171dbb24914
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/monitor-cluster.md
@@ -0,0 +1,106 @@
+To access the cluster's [DB Console]({% link {{ page.version.version }}/ui-overview.md %}):
+
+{% if page.secure == true %}
+
+1. On secure clusters, [certain pages of the DB Console]({% link {{ page.version.version }}/ui-overview.md %}#db-console-access) can only be accessed by `admin` users.
+
+ Get a shell into the pod and start the CockroachDB [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}):
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl exec -it cockroachdb-client-secure \
+ -- ./cockroach sql \
+ --certs-dir=/cockroach/cockroach-certs \
+ --host=cockroachdb-public
+ ~~~
+
+
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl exec -it cockroachdb-client-secure \
+ -- ./cockroach sql \
+ --certs-dir=/cockroach-certs \
+ --host=cockroachdb-public
+ ~~~
+
+
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl exec -it cockroachdb-client-secure \
+ -- ./cockroach sql \
+ --certs-dir=/cockroach-certs \
+ --host=my-release-cockroachdb-public
+ ~~~
+
+
+1. Assign `roach` to the `admin` role (you only need to do this once):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > GRANT admin TO roach;
+ ~~~
+
+1. Exit the SQL shell and pod:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > \q
+ ~~~
+
+{% endif %}
+
+1. In a new terminal window, port-forward from your local machine to the `cockroachdb-public` service:
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl port-forward service/cockroachdb-public 8080
+ ~~~
+
+
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl port-forward service/cockroachdb-public 8080
+ ~~~
+
+
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl port-forward service/my-release-cockroachdb-public 8080
+ ~~~
+
+
+
+ ~~~
+ Forwarding from 127.0.0.1:8080 -> 8080
+ ~~~
+
+ {{site.data.alerts.callout_info}}The port-forward command must be run on the same machine as the web browser in which you want to view the DB Console. If you have been running these commands from a cloud instance or other non-local shell, you will not be able to view the UI without configuring kubectl locally and running the above port-forward command on your local machine.{{site.data.alerts.end}}
+
+{% if page.secure == true %}
+
+1. Go to https://localhost:8080 and log in with the username and password you created earlier.
+
+ {% include {{ page.version.version }}/misc/chrome-localhost.md %}
+
+{% else %}
+
+1. Go to http://localhost:8080.
+
+{% endif %}
+
+1. In the UI, verify that the cluster is running as expected:
+ - View the [Node List]({% link {{ page.version.version }}/ui-cluster-overview-page.md %}#node-list) to ensure that all nodes successfully joined the cluster.
+ - Click the **Databases** tab on the left to verify that `bank` is listed.
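+
+If you still have a SQL shell open from earlier in the tutorial, an optional extra check is to confirm from SQL that the `bank` database exists:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SHOW DATABASES;
+~~~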
diff --git a/src/current/_includes/v25.1/orchestration/operator-check-namespace.md b/src/current/_includes/v25.1/orchestration/operator-check-namespace.md
new file mode 100644
index 00000000000..bc37c6e1681
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/operator-check-namespace.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+All `kubectl` steps should be performed in the [namespace where you installed the Operator]({% link {{ page.version.version }}/deploy-cockroachdb-with-kubernetes.md %}#install-the-operator). By default, this is `cockroach-operator-system`.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/orchestration/start-cockroachdb-helm-insecure.md b/src/current/_includes/v25.1/orchestration/start-cockroachdb-helm-insecure.md
new file mode 100644
index 00000000000..db3916f2fa9
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/start-cockroachdb-helm-insecure.md
@@ -0,0 +1,111 @@
+1. [Install the Helm client](https://helm.sh/docs/intro/install) (version 3.0 or higher) and add the `cockroachdb` chart repository:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ helm repo add cockroachdb https://charts.cockroachdb.com/
+ ~~~
+
+ ~~~
+ "cockroachdb" has been added to your repositories
+ ~~~
+
+1. Update your Helm chart repositories to ensure that you're using the [latest CockroachDB chart](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/Chart.yaml):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ helm repo update
+ ~~~
+
+1. Modify our Helm chart's [`values.yaml`](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/values.yaml) parameters for your deployment scenario.
+
+ Create a `my-values.yaml` file to override the defaults in `values.yaml`, substituting your own values in this example based on the guidelines below.
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ yaml
+ statefulset:
+   resources:
+     limits:
+       memory: "8Gi"
+     requests:
+       memory: "8Gi"
+ conf:
+   cache: "2Gi"
+   max-sql-memory: "2Gi"
+ ~~~
+
+ 1. To avoid running out of memory when CockroachDB is not the only pod on a Kubernetes node, you *must* set memory limits explicitly. This is because CockroachDB does not detect the amount of memory allocated to its pod when run in Kubernetes. We recommend setting `conf.cache` and `conf.max-sql-memory` each to 1/4 of the `memory` allocation specified in `statefulset.resources.requests` and `statefulset.resources.limits`.
+
+ {{site.data.alerts.callout_success}}
+ For example, if you are allocating 8Gi of `memory` to each CockroachDB node, allocate 2Gi to `cache` and 2Gi to `max-sql-memory`.
+ {{site.data.alerts.end}}
+
+1. For an insecure deployment, set `tls.enabled` to `false`. For clarity, this example also includes the configuration from the previous step.
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ yaml
+ statefulset:
+   resources:
+     limits:
+       memory: "8Gi"
+     requests:
+       memory: "8Gi"
+ conf:
+   cache: "2Gi"
+   max-sql-memory: "2Gi"
+ tls:
+   enabled: false
+ ~~~
+
+ 1. You may want to modify `storage.persistentVolume.size` and `storage.persistentVolume.storageClass` for your use case. This chart defaults to 100Gi of disk space per pod. For more details on customizing disks for performance, see [these instructions]({% link {{ page.version.version }}/kubernetes-performance.md %}#disk-type).
+
+ {{site.data.alerts.callout_info}}
+ If necessary, you can [expand disk size](/docs/{{ page.version.version }}/configure-cockroachdb-kubernetes.html?filters=helm#expand-disk-size) after the cluster is live.
+ {{site.data.alerts.end}}
+
+1. Install the CockroachDB Helm chart.
+
+ Provide a "release" name to identify and track this particular deployment of the chart, and override the default values with those in `my-values.yaml`.
+
+ {{site.data.alerts.callout_info}}
+ This tutorial uses `my-release` as the release name. If you use a different value, be sure to adjust the release name in subsequent commands.
+ {{site.data.alerts.end}}
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ helm install my-release --values my-values.yaml cockroachdb/cockroachdb
+ ~~~
+
+ Behind the scenes, this command uses our `cockroachdb-statefulset.yaml` file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it, where each pod has distinguishable network identity and always binds back to the same persistent storage on restart.
+
+1. Confirm that CockroachDB cluster initialization has completed successfully, with the pods for CockroachDB showing `1/1` under `READY` and the pod for initialization showing `COMPLETED` under `STATUS`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pods
+ ~~~
+
+ ~~~
+ NAME READY STATUS RESTARTS AGE
+ my-release-cockroachdb-0 1/1 Running 0 8m
+ my-release-cockroachdb-1 1/1 Running 0 8m
+ my-release-cockroachdb-2 1/1 Running 0 8m
+ my-release-cockroachdb-init-hxzsc 0/1 Completed 0 1h
+ ~~~
+
+1. Confirm that the persistent volumes and corresponding claims were created successfully for all three pods:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pv
+ ~~~
+
+ ~~~
+ NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
+ pvc-71019b3a-fc67-11e8-a606-080027ba45e5 100Gi RWO Delete Bound default/datadir-my-release-cockroachdb-0 standard 11m
+ pvc-7108e172-fc67-11e8-a606-080027ba45e5 100Gi RWO Delete Bound default/datadir-my-release-cockroachdb-1 standard 11m
+ pvc-710dcb66-fc67-11e8-a606-080027ba45e5 100Gi RWO Delete Bound default/datadir-my-release-cockroachdb-2 standard 11m
+ ~~~
+
+{{site.data.alerts.callout_success}}
+The StatefulSet configuration sets all CockroachDB nodes to log to `stderr`, so if you ever need access to logs for a pod, use `kubectl logs <podname>` rather than checking the log on the persistent volume.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/orchestration/start-cockroachdb-helm-secure.md b/src/current/_includes/v25.1/orchestration/start-cockroachdb-helm-secure.md
new file mode 100644
index 00000000000..a2f3ebc52b8
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/start-cockroachdb-helm-secure.md
@@ -0,0 +1,110 @@
+The CockroachDB Helm chart is compatible with all Kubernetes versions that are [supported by the Kubernetes project](https://kubernetes.io/releases/) when cert-manager is used for mTLS.
+
+The CockroachDB Helm chart is currently not under active development, and no new features are planned. However, Cockroach Labs remains committed to fully supporting the Helm chart by addressing defects, providing security patches, and addressing breaking changes due to deprecations in Kubernetes APIs.
+
+A deprecation notice for the Helm chart will be provided to customers a minimum of 6 months in advance of actual deprecation.
+
+{{site.data.alerts.callout_danger}}
+If you are running a secure Helm deployment on Kubernetes 1.22 and later, you must migrate away from using the Kubernetes CA for cluster authentication. The recommended approach is to use `cert-manager` for certificate management. For details, refer to [Deploy cert-manager for mTLS](secure-cockroachdb-kubernetes.html?filters=helm#deploy-cert-manager-for-mtls).
+{{site.data.alerts.end}}
+
+1. [Install the Helm client](https://helm.sh/docs/intro/install) (version 3.0 or higher) and add the `cockroachdb` chart repository:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ helm repo add cockroachdb https://charts.cockroachdb.com/
+ ~~~
+
+ ~~~
+ "cockroachdb" has been added to your repositories
+ ~~~
+
+1. Update your Helm chart repositories to ensure that you're using the [latest CockroachDB chart](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/Chart.yaml):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ helm repo update
+ ~~~
+
+1. The cluster configuration is set in the Helm chart's [values file](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/values.yaml).
+
+ {{site.data.alerts.callout_info}}
+ By default, the Helm chart specifies CPU and memory resources that are appropriate for the virtual machines used in this deployment example. On a production cluster, you should substitute values that are appropriate for your machines and workload. For details on configuring your deployment, see [Configure the Cluster](configure-cockroachdb-kubernetes.html?filters=helm).
+ {{site.data.alerts.end}}
+
+ Before deploying, modify some parameters in our Helm chart's [values file](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/values.yaml):
+
+ 1. Create a local YAML file (e.g., `my-values.yaml`) to specify your custom values. These will be used to override the defaults in `values.yaml`.
+
+ 1. To avoid running out of memory when CockroachDB is not the only pod on a Kubernetes node, you *must* set memory limits explicitly. This is because CockroachDB does not detect the amount of memory allocated to its pod when run in Kubernetes. We recommend setting `conf.cache` and `conf.max-sql-memory` each to 1/4 of the `memory` allocation specified in `statefulset.resources.requests` and `statefulset.resources.limits`.
+
+ {{site.data.alerts.callout_success}}
+ For example, if you are allocating 8Gi of `memory` to each CockroachDB node, allocate 2Gi to `cache` and 2Gi to `max-sql-memory`.
+ {{site.data.alerts.end}}
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ yaml
+ conf:
+   cache: "2Gi"
+   max-sql-memory: "2Gi"
+ ~~~
+
+ The Helm chart defaults to a secure deployment by automatically setting `tls.enabled` to `true`.
+
+ {{site.data.alerts.callout_info}}
+ By default, the Helm chart will generate and sign 1 client and 1 node certificate to secure the cluster. To authenticate using your own CA, see [Certificate management](/docs/{{ page.version.version }}/secure-cockroachdb-kubernetes.html?filters=helm#use-a-custom-ca).
+ {{site.data.alerts.end}}
+
+ Refer to the [CockroachDB Helm chart's `values.yaml` template](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/values.yaml).
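+
+ For reference, a fuller `my-values.yaml` for this example might combine the memory settings above with explicit resource requests and limits. The amounts shown are the example values used in this guide; substitute values appropriate for your machines and workload:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ yaml
+ statefulset:
+   resources:
+     limits:
+       memory: "8Gi"
+     requests:
+       memory: "8Gi"
+ conf:
+   cache: "2Gi"
+   max-sql-memory: "2Gi"
+ ~~~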
+
+1. Install the CockroachDB Helm chart, specifying your custom values file.
+
+ Provide a "release" name to identify and track this particular deployment of the chart, and override the default values with those in `my-values.yaml`.
+
+ {{site.data.alerts.callout_info}}
+ This tutorial uses `my-release` as the release name. If you use a different value, be sure to adjust the release name in subsequent commands.
+ {{site.data.alerts.end}}
+
+ {{site.data.alerts.callout_danger}}
+ To allow the CockroachDB pods to successfully deploy, do not set the [`--wait` flag](https://helm.sh/docs/intro/using_helm/#helpful-options-for-installupgraderollback) when using Helm commands.
+ {{site.data.alerts.end}}
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ helm install my-release --values {custom-values}.yaml cockroachdb/cockroachdb
+ ~~~
+
+ Behind the scenes, this command uses our `cockroachdb-statefulset.yaml` file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it, where each pod has distinguishable network identity and always binds back to the same persistent storage on restart.
+
+1. Confirm that CockroachDB cluster initialization has completed successfully, with the pods for CockroachDB showing `1/1` under `READY` and the pod for initialization showing `COMPLETED` under `STATUS`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pods
+ ~~~
+
+ ~~~
+ NAME READY STATUS RESTARTS AGE
+ my-release-cockroachdb-0 1/1 Running 0 8m
+ my-release-cockroachdb-1 1/1 Running 0 8m
+ my-release-cockroachdb-2 1/1 Running 0 8m
+ my-release-cockroachdb-init-hxzsc 0/1 Completed 0 1h
+ ~~~
+
+1. Confirm that the persistent volumes and corresponding claims were created successfully for all three pods:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pv
+ ~~~
+
+ ~~~
+ NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
+ pvc-71019b3a-fc67-11e8-a606-080027ba45e5 100Gi RWO Delete Bound default/datadir-my-release-cockroachdb-0 standard 11m
+ pvc-7108e172-fc67-11e8-a606-080027ba45e5 100Gi RWO Delete Bound default/datadir-my-release-cockroachdb-1 standard 11m
+ pvc-710dcb66-fc67-11e8-a606-080027ba45e5 100Gi RWO Delete Bound default/datadir-my-release-cockroachdb-2 standard 11m
+ ~~~
+
+{{site.data.alerts.callout_success}}
+The StatefulSet configuration sets all CockroachDB nodes to log to `stderr`, so if you ever need access to logs for a pod, use `kubectl logs <podname>` rather than checking the log on the persistent volume.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/orchestration/start-cockroachdb-insecure.md b/src/current/_includes/v25.1/orchestration/start-cockroachdb-insecure.md
new file mode 100644
index 00000000000..3406d48edbb
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/start-cockroachdb-insecure.md
@@ -0,0 +1,114 @@
+1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it.
+
+ Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml
+ ~~~
+
+ {{site.data.alerts.callout_info}}
+ By default, this manifest specifies CPU and memory resources that are appropriate for the virtual machines used in this deployment example. On a production cluster, you should substitute values that are appropriate for your machines and workload. For details on configuring your deployment, see [Resource management](configure-cockroachdb-kubernetes.html?filters=manual).
+ {{site.data.alerts.end}}
+
+ Use the file to create the StatefulSet and start the cluster:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl create -f cockroachdb-statefulset.yaml
+ ~~~
+
+ ~~~
+ service/cockroachdb-public created
+ service/cockroachdb created
+ poddisruptionbudget.policy/cockroachdb-budget created
+ statefulset.apps/cockroachdb created
+ ~~~
+
+ Alternatively, if you'd rather start with a configuration file that has been customized for performance:
+
+ 1. Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml
+ ~~~
+
+ 1. Modify the file wherever there is a `TODO` comment.
+
+ 1. Use the file to create the StatefulSet and start the cluster:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl create -f cockroachdb-statefulset-insecure.yaml
+ ~~~
+
+1. Confirm that three pods are `Running` successfully. Note that they will not
+ be considered `Ready` until after the cluster has been initialized:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pods
+ ~~~
+
+ ~~~
+ NAME READY STATUS RESTARTS AGE
+ cockroachdb-0 0/1 Running 0 2m
+ cockroachdb-1 0/1 Running 0 2m
+ cockroachdb-2 0/1 Running 0 2m
+ ~~~
+
+1. Confirm that the persistent volumes and corresponding claims were created successfully for all three pods:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get persistentvolumes
+ ~~~
+
+ ~~~
+ NAME CAPACITY ACCESSMODES RECLAIMPOLICY STATUS CLAIM REASON AGE
+ pvc-52f51ecf-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-0 26s
+ pvc-52fd3a39-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-1 27s
+ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s
+ ~~~
+
+1. Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl create \
+ -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml
+ ~~~
+
+ ~~~
+ job.batch/cluster-init created
+ ~~~
+
+1. Confirm that cluster initialization has completed successfully. The job should be considered successful and the Kubernetes pods should soon be considered `Ready`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get job cluster-init
+ ~~~
+
+ ~~~
+ NAME COMPLETIONS DURATION AGE
+ cluster-init 1/1 7s 27s
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pods
+ ~~~
+
+ ~~~
+ NAME READY STATUS RESTARTS AGE
+ cluster-init-cqf8l 0/1 Completed 0 56s
+ cockroachdb-0 1/1 Running 0 7m51s
+ cockroachdb-1 1/1 Running 0 7m51s
+ cockroachdb-2 1/1 Running 0 7m51s
+ ~~~
+
+{{site.data.alerts.callout_success}}
+The StatefulSet configuration sets all CockroachDB nodes to log to `stderr`, so if you ever need access to a pod/node's logs to troubleshoot, use `kubectl logs <podname>` rather than checking the log on the persistent volume.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/orchestration/start-cockroachdb-local-helm-insecure.md b/src/current/_includes/v25.1/orchestration/start-cockroachdb-local-helm-insecure.md
new file mode 100644
index 00000000000..9a820070312
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/start-cockroachdb-local-helm-insecure.md
@@ -0,0 +1,126 @@
+1. [Install the Helm client](https://helm.sh/docs/intro/install) (version 3.0 or higher) and add the `cockroachdb` chart repository:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ helm repo add cockroachdb https://charts.cockroachdb.com/
+ ~~~
+
+ ~~~
+ "cockroachdb" has been added to your repositories
+ ~~~
+
+1. Update your Helm chart repositories to ensure that you're using the [latest CockroachDB chart](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/Chart.yaml):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ helm repo update
+ ~~~
+
+1. The cluster configuration is set in the Helm chart's [values file](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/values.yaml).
+
+ {{site.data.alerts.callout_info}}
+ By default, the Helm chart specifies CPU and memory resources that are appropriate for the virtual machines used in this deployment example. On a production cluster, you should substitute values that are appropriate for your machines and workload. For details on configuring your deployment, see [Configure the Cluster](configure-cockroachdb-kubernetes.html?filters=helm).
+ {{site.data.alerts.end}}
+
+ Before deploying, modify some parameters in our Helm chart's [values file](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/values.yaml):
+
+ 1. Create a local YAML file (e.g., `my-values.yaml`) to specify your custom values. These will be used to override the defaults in `values.yaml`.
+
+ 1. To avoid running out of memory when CockroachDB is not the only pod on a Kubernetes node, you *must* set memory limits explicitly. This is because CockroachDB does not detect the amount of memory allocated to its pod when run in Kubernetes. We recommend setting `conf.cache` and `conf.max-sql-memory` each to 1/4 of the `memory` allocation specified in `statefulset.resources.requests` and `statefulset.resources.limits`.
+
+ {{site.data.alerts.callout_success}}
+ For example, if you are allocating 8Gi of `memory` to each CockroachDB node, allocate 2Gi to `cache` and 2Gi to `max-sql-memory`.
+ {{site.data.alerts.end}}
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ yaml
+ conf:
+   cache: "2Gi"
+   max-sql-memory: "2Gi"
+ ~~~
+
+ The Helm chart defaults to a secure deployment by automatically setting `tls.enabled` to `true`. For an insecure deployment, set `tls.enabled` to `false`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ yaml
+ tls:
+   enabled: false
+ ~~~
+
+ Your values file should look similar to:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ yaml
+ conf:
+   cache: "2Gi"
+   max-sql-memory: "2Gi"
+ tls:
+   enabled: false
+ ~~~
+
+ Refer to the [CockroachDB Helm chart's `values.yaml` template](https://github.com/cockroachdb/helm-charts/blob/master/cockroachdb/values.yaml).
+
+1. Install the CockroachDB Helm chart, specifying your custom values file.
+
+ Provide a "release" name to identify and track this particular deployment of the chart, and override the default values with those in `my-values.yaml`.
+
+ {{site.data.alerts.callout_info}}
+ This tutorial uses `my-release` as the release name. If you use a different value, be sure to adjust the release name in subsequent commands.
+ {{site.data.alerts.end}}
+
+ {{site.data.alerts.callout_danger}}
+ To allow the CockroachDB pods to successfully deploy, do not set the [`--wait` flag](https://helm.sh/docs/intro/using_helm/#helpful-options-for-installupgraderollback) when using Helm commands.
+ {{site.data.alerts.end}}
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ helm install my-release --values {custom-values}.yaml cockroachdb/cockroachdb
+ ~~~
+
+1. Install the CockroachDB Helm chart.
+
+ Provide a "release" name to identify and track this particular deployment of the chart.
+
+ {{site.data.alerts.callout_info}}
+ This tutorial uses `my-release` as the release name. If you use a different value, be sure to adjust the release name in subsequent commands.
+ {{site.data.alerts.end}}
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ helm install my-release cockroachdb/cockroachdb
+ ~~~
+
+ Behind the scenes, this command uses our `cockroachdb-statefulset.yaml` file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it, where each pod has distinguishable network identity and always binds back to the same persistent storage on restart.
+
+1. Confirm that CockroachDB cluster initialization has completed successfully, with the pods for CockroachDB showing `1/1` under `READY` and the pod for initialization showing `COMPLETED` under `STATUS`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pods
+ ~~~
+
+ ~~~
+ NAME READY STATUS RESTARTS AGE
+ my-release-cockroachdb-0 1/1 Running 0 8m
+ my-release-cockroachdb-1 1/1 Running 0 8m
+ my-release-cockroachdb-2 1/1 Running 0 8m
+ my-release-cockroachdb-init-hxzsc 0/1 Completed 0 1h
+ ~~~
+
+1. Confirm that the persistent volumes and corresponding claims were created successfully for all three pods:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pv
+ ~~~
+
+ ~~~
+ NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
+ pvc-71019b3a-fc67-11e8-a606-080027ba45e5 100Gi RWO Delete Bound default/datadir-my-release-cockroachdb-0 standard 11m
+ pvc-7108e172-fc67-11e8-a606-080027ba45e5 100Gi RWO Delete Bound default/datadir-my-release-cockroachdb-1 standard 11m
+ pvc-710dcb66-fc67-11e8-a606-080027ba45e5 100Gi RWO Delete Bound default/datadir-my-release-cockroachdb-2 standard 11m
+ ~~~
+
+{{site.data.alerts.callout_success}}
+The StatefulSet configuration sets all CockroachDB nodes to log to `stderr`, so if you ever need access to a pod/node's logs to troubleshoot, use `kubectl logs <podname>` rather than checking the log on the persistent volume.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/orchestration/start-cockroachdb-local-insecure.md b/src/current/_includes/v25.1/orchestration/start-cockroachdb-local-insecure.md
new file mode 100644
index 00000000000..552cb3cd25f
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/start-cockroachdb-local-insecure.md
@@ -0,0 +1,83 @@
+1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml
+ ~~~
+
+ ~~~
+ service/cockroachdb-public created
+ service/cockroachdb created
+ poddisruptionbudget.policy/cockroachdb-budget created
+ statefulset.apps/cockroachdb created
+ ~~~
+
+1. Confirm that three pods are `Running` successfully. Note that they will not
+ be considered `Ready` until after the cluster has been initialized:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pods
+ ~~~
+
+ ~~~
+ NAME READY STATUS RESTARTS AGE
+ cockroachdb-0 0/1 Running 0 2m
+ cockroachdb-1 0/1 Running 0 2m
+ cockroachdb-2 0/1 Running 0 2m
+ ~~~
+
+1. Confirm that the persistent volumes and corresponding claims were created successfully for all three pods:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pv
+ ~~~
+
+ ~~~
+ NAME CAPACITY ACCESSMODES RECLAIMPOLICY STATUS CLAIM REASON AGE
+ pvc-52f51ecf-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-0 26s
+ pvc-52fd3a39-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-1 27s
+ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s
+ ~~~
+
+1. Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl create \
+ -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml
+ ~~~
+
+ ~~~
+ job.batch/cluster-init created
+ ~~~
+
+1. Confirm that cluster initialization has completed successfully. The job should be considered successful and the Kubernetes pods should soon be considered `Ready`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get job cluster-init
+ ~~~
+
+ ~~~
+ NAME COMPLETIONS DURATION AGE
+ cluster-init 1/1 7s 27s
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pods
+ ~~~
+
+ ~~~
+ NAME READY STATUS RESTARTS AGE
+ cluster-init-cqf8l 0/1 Completed 0 56s
+ cockroachdb-0 1/1 Running 0 7m51s
+ cockroachdb-1 1/1 Running 0 7m51s
+ cockroachdb-2 1/1 Running 0 7m51s
+ ~~~
+
+{{site.data.alerts.callout_success}}
+The StatefulSet configuration sets all CockroachDB nodes to log to `stderr`, so if you ever need access to a pod/node's logs to troubleshoot, use `kubectl logs <podname>` rather than checking the log on the persistent volume.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/orchestration/start-cockroachdb-operator-secure.md b/src/current/_includes/v25.1/orchestration/start-cockroachdb-operator-secure.md
new file mode 100644
index 00000000000..5cbc1c49af9
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/start-cockroachdb-operator-secure.md
@@ -0,0 +1,110 @@
+### Install the Operator
+
+{% capture latest_operator_version %}{% include_cached latest_operator_version.md %}{% endcapture %}
+{% capture apply_default_operator_manifest_command %}{% include_cached copy-clipboard.html %}
+ ~~~ shell
+ kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach-operator/v{{ latest_operator_version }}/install/operator.yaml
+ ~~~
+ ~~~
+ clusterrole.rbac.authorization.k8s.io/cockroach-database-role created
+ serviceaccount/cockroach-database-sa created
+ clusterrolebinding.rbac.authorization.k8s.io/cockroach-database-rolebinding created
+ role.rbac.authorization.k8s.io/cockroach-operator-role created
+ clusterrolebinding.rbac.authorization.k8s.io/cockroach-operator-rolebinding created
+ clusterrole.rbac.authorization.k8s.io/cockroach-operator-role created
+ serviceaccount/cockroach-operator-sa created
+ rolebinding.rbac.authorization.k8s.io/cockroach-operator-default created
+ deployment.apps/cockroach-operator created
+ ~~~
+{% endcapture %}
+{% capture download_operator_manifest_command %}{% include_cached copy-clipboard.html %}
+ ~~~ shell
+ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach-operator/v{{ latest_operator_version }}/install/operator.yaml
+ ~~~
+{% endcapture %}
+{% capture apply_local_operator_manifest_command %}{% include_cached copy-clipboard.html %}
+ ~~~ shell
+ kubectl apply -f operator.yaml
+ ~~~
+{% endcapture %}
+
+1. Apply the [custom resource definition (CRD)](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions) for the Operator:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach-operator/v{{ latest_operator_version }}/install/crds.yaml
+ ~~~
+
+ ~~~
+ customresourcedefinition.apiextensions.k8s.io/crdbclusters.crdb.cockroachlabs.com created
+ ~~~
+
+1. By default, the Operator is configured to install in the `cockroach-operator-system` namespace and to manage CockroachDB instances for all namespaces on the cluster.
+
+ To use these defaults, apply the Operator manifest without modifying it: {{ apply_default_operator_manifest_command }}
+
+ To change these defaults:
+
+ 1. Download the Operator manifest: {{ download_operator_manifest_command }}
+ 1. To use a custom namespace, replace all instances of `namespace: cockroach-operator-system` with your desired namespace.
+ 1. To limit the namespaces that will be monitored, set the `WATCH_NAMESPACE` environment variable in the `Deployment` pod spec. This can be set to a single namespace or a comma-delimited set of namespaces. When set, only those `CrdbCluster` resources in the supplied namespace(s) will be reconciled.
+ 1. Apply your local version of the Operator manifest to the cluster: {{ apply_local_operator_manifest_command }}
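+
+ For illustration only (the container name and namespace values below are placeholders, not taken from the downloaded manifest), the `WATCH_NAMESPACE` variable sits in the operator `Deployment` pod spec roughly like this:
+
+ ~~~ yaml
+ spec:
+   template:
+     spec:
+       containers:
+       - name: cockroach-operator   # placeholder container name
+         env:
+         - name: WATCH_NAMESPACE
+           value: "app-ns-1,app-ns-2"   # placeholder: namespaces to reconcile
+ ~~~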
+
+1. Set your current namespace to the one used by the Operator. For example, to use the Operator's default namespace:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl config set-context --current --namespace=cockroach-operator-system
+ ~~~
+
+1. Validate that the Operator is running:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pods
+ ~~~
+
+ ~~~
+ NAME READY STATUS RESTARTS AGE
+ cockroach-operator-6f7b86ffc4-9ppkv 1/1 Running 0 54s
+ ~~~
+
+### Initialize the cluster
+
+{{site.data.alerts.callout_info}}
+After a cluster managed by the Kubernetes operator is initialized, its Kubernetes labels cannot be modified. For more details, refer to [Best practices](#best-practices).
+{{site.data.alerts.end}}
+
+1. Download `example.yaml`, a custom resource that tells the Operator how to configure the Kubernetes cluster.
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach-operator/v{{ latest_operator_version }}/examples/example.yaml
+ ~~~
+
+ By default, this custom resource specifies CPU and memory resources that are appropriate for the virtual machines used in this deployment example. On a production cluster, you should substitute values that are appropriate for your machines and workload. For details on configuring your deployment, see [Configure the Cluster](configure-cockroachdb-kubernetes.html).
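+
+ For illustration only (confirm the exact field names against the `example.yaml` you downloaded; the numbers are placeholders), the values to adjust look roughly like this:
+
+ ~~~ yaml
+ spec:
+   resources:
+     requests:
+       cpu: "4"
+       memory: 16Gi
+     limits:
+       cpu: "4"
+       memory: 16Gi
+ ~~~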
+
+ {{site.data.alerts.callout_info}}
+ By default, the Operator will generate and sign 1 client and 1 node certificate to secure the cluster. This means that if you do not provide a CA, a `cockroach`-generated CA is used. If you want to authenticate using your own CA, [specify the generated secrets in the custom resource](secure-cockroachdb-kubernetes.html#use-a-custom-ca) **before** proceeding to the next step.
+ {{site.data.alerts.end}}
+
+1. Apply `example.yaml`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl apply -f example.yaml
+ ~~~
+
+ The Operator will create a StatefulSet and initialize the nodes as a cluster.
+
+ ~~~
+ crdbcluster.crdb.cockroachlabs.com/cockroachdb created
+ ~~~
+
+1. Check that the pods were created:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pods
+ ~~~
+
+ ~~~
+ NAME READY STATUS RESTARTS AGE
+ cockroach-operator-6f7b86ffc4-9t9zb 1/1 Running 0 3m22s
+ cockroachdb-0 1/1 Running 0 2m31s
+ cockroachdb-1 1/1 Running 0 102s
+ cockroachdb-2 1/1 Running 0 46s
+ ~~~
+
+ Each pod should have `READY` status soon after being created.
diff --git a/src/current/_includes/v25.1/orchestration/start-cockroachdb-secure.md b/src/current/_includes/v25.1/orchestration/start-cockroachdb-secure.md
new file mode 100644
index 00000000000..972cabc2d8e
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/start-cockroachdb-secure.md
@@ -0,0 +1,108 @@
+### Configure the cluster
+
+1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml
+ ~~~
+
+1. Update `secretName` with the name of the corresponding node secret.
+
+ The secret names depend on your method for generating secrets. For example, if you follow the [steps using `cockroach cert`](#create-certificates) below, use this secret name:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ yaml
+ secret:
+ secretName: cockroachdb.node
+ ~~~
+
+1. The StatefulSet configuration deploys CockroachDB into the `default` namespace. To use a different namespace, search for `kind: RoleBinding` and change its `subjects.namespace` property to the name of the namespace. Otherwise, a `failed to read secrets` error occurs when you attempt to follow the steps in [Initialize the cluster](#initialize-the-cluster).
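+
+ The section to edit looks roughly like this (a sketch; `my-namespace` is a placeholder for your namespace, and the `roleRef` portion is omitted):
+
+ ~~~ yaml
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+   name: cockroachdb
+ subjects:
+ - kind: ServiceAccount
+   name: cockroachdb
+   namespace: my-namespace   # change from "default"
+ ~~~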
+
+{{site.data.alerts.callout_info}}
+By default, this manifest specifies CPU and memory resources that are appropriate for the virtual machines used in this deployment example. On a production cluster, you should substitute values that are appropriate for your machines and workload. For details on configuring your deployment, see [Configure the Cluster](configure-cockroachdb-kubernetes.html?filters=manual).
+{{site.data.alerts.end}}
+
+### Create certificates
+
+{{site.data.alerts.callout_success}}
+The StatefulSet configuration sets all CockroachDB nodes to log to `stderr`, so if you ever need access to a pod/node's logs to troubleshoot, use `kubectl logs <podname>` rather than checking the log on the persistent volume.
+{{site.data.alerts.end}}
+
+{% include {{ page.version.version }}/orchestration/kubernetes-cockroach-cert.md %}
+
+### Initialize the cluster
+
+1. Use the config file you downloaded to create the StatefulSet that automatically creates 3 pods, each running a CockroachDB node:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl create -f cockroachdb-statefulset.yaml
+ ~~~
+
+ ~~~
+ serviceaccount/cockroachdb created
+ role.rbac.authorization.k8s.io/cockroachdb created
+ rolebinding.rbac.authorization.k8s.io/cockroachdb created
+ service/cockroachdb-public created
+ service/cockroachdb created
+ poddisruptionbudget.policy/cockroachdb-budget created
+ statefulset.apps/cockroachdb created
+ ~~~
+
+1. Initialize the CockroachDB cluster:
+
+ 1. Confirm that three pods are `Running` successfully. Note that they will not be considered `Ready` until after the cluster has been initialized:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pods
+ ~~~
+
+ ~~~
+ NAME READY STATUS RESTARTS AGE
+ cockroachdb-0 0/1 Running 0 2m
+ cockroachdb-1 0/1 Running 0 2m
+ cockroachdb-2 0/1 Running 0 2m
+ ~~~
+
+ 1. Confirm that the persistent volumes and corresponding claims were created successfully for all three pods:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pv
+ ~~~
+
+ ~~~
+ NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
+ pvc-9e435563-fb2e-11e9-a65c-42010a8e0fca 100Gi RWO Delete Bound default/datadir-cockroachdb-0 standard 51m
+ pvc-9e47d820-fb2e-11e9-a65c-42010a8e0fca 100Gi RWO Delete Bound default/datadir-cockroachdb-1 standard 51m
+ pvc-9e4f57f0-fb2e-11e9-a65c-42010a8e0fca 100Gi RWO Delete Bound default/datadir-cockroachdb-2 standard 51m
+ ~~~
+
+ 1. Run `cockroach init` on one of the pods to complete the node startup process and have them join together as a cluster:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl exec -it cockroachdb-0 \
+ -- /cockroach/cockroach init \
+ --certs-dir=/cockroach/cockroach-certs
+ ~~~
+
+ ~~~
+ Cluster successfully initialized
+ ~~~
+
+ 1. Confirm that cluster initialization has completed successfully. The job should be considered successful and the Kubernetes pods should soon be considered `Ready`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl get pods
+ ~~~
+
+ ~~~
+ NAME READY STATUS RESTARTS AGE
+ cockroachdb-0 1/1 Running 0 3m
+ cockroachdb-1 1/1 Running 0 3m
+ cockroachdb-2 1/1 Running 0 3m
+ ~~~
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/orchestration/start-kubernetes.md b/src/current/_includes/v25.1/orchestration/start-kubernetes.md
new file mode 100644
index 00000000000..a9a24eb7948
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/start-kubernetes.md
@@ -0,0 +1,98 @@
+You can use the hosted [Google Kubernetes Engine (GKE)](#hosted-gke) service or the hosted [Amazon Elastic Kubernetes Service (EKS)](#hosted-eks) to quickly start Kubernetes.
+
+{{site.data.alerts.callout_info}}
+Neither GKE nor EKS is required to run CockroachDB on Kubernetes. You can also use a manual GCE or AWS cluster that runs the [minimum recommended Kubernetes version](#kubernetes-version) and has at least 3 pods, each with [sufficient resources](#resources) to start a CockroachDB node.
+{{site.data.alerts.end}}
+
+### Hosted GKE
+
+1. Complete the **Before You Begin** steps described in the [Google Kubernetes Engine Quickstart](https://cloud.google.com/kubernetes-engine/docs/quickstart) documentation.
+
+ This includes installing `gcloud`, which is used to create and delete Kubernetes Engine clusters, and `kubectl`, which is the command-line tool used to manage Kubernetes from your workstation.
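+
+ For example, if you installed the Google Cloud SDK, one way (among others) to get `kubectl` is through the SDK's component manager:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ gcloud components install kubectl
+ ~~~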
+
+ {{site.data.alerts.callout_success}}
+ The documentation offers the choice of using Google's Cloud Shell product or using a local shell on your machine. Choose to use a local shell if you want to be able to view the DB Console using the steps in this guide.
+ {{site.data.alerts.end}}
+
+1. From your local workstation, start the Kubernetes cluster, specifying one of the available [regions](https://cloud.google.com/compute/docs/regions-zones#available) (e.g., `us-east1`):
+
+ {{site.data.alerts.callout_success}}
+ Since this region can differ from your default `gcloud` region, be sure to include the `--region` flag to run `gcloud` commands against this cluster.
+ {{site.data.alerts.end}}
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ gcloud container clusters create cockroachdb --machine-type n2-standard-4 --region {region-name} --num-nodes 1
+ ~~~
+
+ ~~~
+ Creating cluster cockroachdb...done.
+ ~~~
+
+ This creates GKE instances and joins them into a single Kubernetes cluster named `cockroachdb`. The `--region` flag specifies a [regional three-zone cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-regional-cluster), and `--num-nodes` specifies one Kubernetes worker node in each zone.
+
+ The `--machine-type` flag tells the node pool to use the [`n2-standard-4`](https://cloud.google.com/compute/docs/machine-types#standard_machine_types) machine type (4 vCPUs, 16 GB memory), which meets our [recommended CPU and memory configuration]({% link {{ page.version.version }}/recommended-production-settings.md %}#basic-hardware-recommendations).
+
+ The process can take a few minutes, so do not move on to the next step until you see a `Creating cluster cockroachdb...done` message and details about your cluster.
+
+1. Get the email address associated with your Google Cloud account:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ gcloud info | grep Account
+ ~~~
+
+ ~~~
+ Account: [your.google.cloud.email@example.org]
+ ~~~
+
+ {{site.data.alerts.callout_danger}}
+ This command returns your email address in all lowercase. However, in the next step, you must enter the address with its exact capitalization. For example, if your address is YourName@example.com, you must use YourName@example.com and not yourname@example.com.
+ {{site.data.alerts.end}}
+
+1. [Create the RBAC roles](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#prerequisites_for_using_role-based_access_control) CockroachDB needs for running on GKE, using the address from the previous step:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl create clusterrolebinding $USER-cluster-admin-binding \
+ --clusterrole=cluster-admin \
+ --user={your.google.cloud.email@example.org}
+ ~~~
+
+ ~~~
+ clusterrolebinding.rbac.authorization.k8s.io/your.username-cluster-admin-binding created
+ ~~~
+
+### Hosted EKS
+
+1. Complete the steps described in the [EKS Getting Started](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html) documentation.
+
+ This includes installing and configuring the AWS CLI and `eksctl`, which is the command-line tool used to create and delete Kubernetes clusters on EKS, and `kubectl`, which is the command-line tool used to manage Kubernetes from your workstation.
+
+ {{site.data.alerts.callout_info}}
+ If you are running [EKS-Anywhere](https://aws.amazon.com/eks/eks-anywhere/), CockroachDB requires that you [configure your default storage class](https://kubernetes.io/docs/tasks/administer-cluster/change-default-storage-class/) to auto-provision persistent volumes. Alternatively, you can define a custom storage configuration as required by your install pattern.
+ {{site.data.alerts.end}}
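+
+ For example, an existing storage class can be marked as the cluster default with a `kubectl patch` (a sketch; `{your-storage-class}` is a placeholder for the storage class name):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl patch storageclass {your-storage-class} \
+ -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'
+ ~~~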
+
+1. From your local workstation, start the Kubernetes cluster:
+
+ {{site.data.alerts.callout_success}}
+ To ensure that each of the 3 nodes can be placed in a different availability zone, you may want to first [confirm that at least 3 zones are available in the region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#availability-zones-describe) for your account.
+ {{site.data.alerts.end}}
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ eksctl create cluster \
+ --name cockroachdb \
+ --nodegroup-name standard-workers \
+ --node-type m5.xlarge \
+ --nodes 3 \
+ --nodes-min 1 \
+ --nodes-max 4 \
+ --node-ami auto
+ ~~~
+
+ This creates EKS instances and joins them into a single Kubernetes cluster named `cockroachdb`. The `--node-type` flag tells the node pool to use the [`m5.xlarge`](https://aws.amazon.com/ec2/instance-types/) instance type (4 vCPUs, 16 GB memory), which meets our [recommended CPU and memory configuration]({% link {{ page.version.version }}/recommended-production-settings.md %}#basic-hardware-recommendations).
+
+ Cluster provisioning usually takes between 10 and 15 minutes. Do not move on to the next step until you see a message like `[✔] EKS cluster "cockroachdb" in "us-east-1" region is ready` and details about your cluster.
+
+1. Open the [AWS CloudFormation console](https://console.aws.amazon.com/cloudformation/home) to verify that the stacks `eksctl-cockroachdb-cluster` and `eksctl-cockroachdb-nodegroup-standard-workers` were successfully created. Be sure that your region is selected in the console.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/orchestration/test-cluster-insecure.md b/src/current/_includes/v25.1/orchestration/test-cluster-insecure.md
new file mode 100644
index 00000000000..3c94e27b70a
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/test-cluster-insecure.md
@@ -0,0 +1,76 @@
+1. Launch a temporary interactive pod and start the [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}) inside it:
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl run cockroachdb -it \
+ --image=cockroachdb/cockroach:{{page.release_info.version}} \
+ --rm \
+ --restart=Never \
+ -- sql \
+ --insecure \
+ --host=cockroachdb-public
+ ~~~
+
+ If you deployed the cluster using Helm, connect through the release-prefixed service instead:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl run cockroachdb -it \
+ --image=cockroachdb/cockroach:{{page.release_info.version}} \
+ --rm \
+ --restart=Never \
+ -- sql \
+ --insecure \
+ --host=my-release-cockroachdb-public
+ ~~~
+
+
+
+1. Run some basic [CockroachDB SQL statements]({% link {{ page.version.version }}/learn-cockroachdb-sql.md %}):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > CREATE DATABASE bank;
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > CREATE TABLE bank.accounts (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ balance DECIMAL
+ );
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > INSERT INTO bank.accounts (balance)
+ VALUES
+ (1000.50), (20000), (380), (500), (55000);
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > SELECT * FROM bank.accounts;
+ ~~~
+
+ ~~~
+ id | balance
+ +--------------------------------------+---------+
+ 6f123370-c48c-41ff-b384-2c185590af2b | 380
+ 990c9148-1ea0-4861-9da7-fd0e65b0a7da | 1000.50
+ ac31c671-40bf-4a7b-8bee-452cff8a4026 | 500
+ d58afd93-5be9-42ba-b2e2-dc00dcedf409 | 20000
+ e6d8f696-87f5-4d3c-a377-8e152fdc27f7 | 55000
+ (5 rows)
+ ~~~
+
+1. Exit the SQL shell and delete the temporary pod:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > \q
+ ~~~
diff --git a/src/current/_includes/v25.1/orchestration/test-cluster-secure.md b/src/current/_includes/v25.1/orchestration/test-cluster-secure.md
new file mode 100644
index 00000000000..f255d8d62fc
--- /dev/null
+++ b/src/current/_includes/v25.1/orchestration/test-cluster-secure.md
@@ -0,0 +1,145 @@
+To use the CockroachDB SQL client, first launch a secure pod running the `cockroach` binary.
+
+
+
+{% capture latest_operator_version %}{% include_cached latest_operator_version.md %}{% endcapture %}
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ kubectl create \
+-f https://raw.githubusercontent.com/cockroachdb/cockroach-operator/v{{ latest_operator_version }}/examples/client-secure-operator.yaml
+~~~
+
+1. Get a shell into the pod and start the CockroachDB [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl exec -it cockroachdb-client-secure \
+ -- ./cockroach sql \
+ --certs-dir=/cockroach/cockroach-certs \
+ --host=cockroachdb-public
+ ~~~
+
+ ~~~
+ # Welcome to the CockroachDB SQL shell.
+ # All statements must be terminated by a semicolon.
+ # To exit, type: \q.
+ #
+ # Server version: CockroachDB CCL v21.1.0 (x86_64-unknown-linux-gnu, built 2021/04/23 13:54:57, go1.13.14) (same version as client)
+ # Cluster ID: a96791d9-998c-4683-a3d3-edbf425bbf11
+ #
+ # Enter \? for a brief introduction.
+ #
+ root@cockroachdb-public:26257/defaultdb>
+ ~~~
+
+{% include {{ page.version.version }}/orchestration/kubernetes-basic-sql.md %}
+
+If you deployed the cluster manually with the bring-your-own-certs StatefulSet configuration, launch the client pod from that configuration instead:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ kubectl create \
+-f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/client.yaml
+~~~
+
+~~~
+pod/cockroachdb-client-secure created
+~~~
+
+1. Get a shell into the pod and start the CockroachDB [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl exec -it cockroachdb-client-secure \
+ -- ./cockroach sql \
+ --certs-dir=/cockroach-certs \
+ --host=cockroachdb-public
+ ~~~
+
+ ~~~
+ # Welcome to the cockroach SQL interface.
+ # All statements must be terminated by a semicolon.
+ # To exit: CTRL + D.
+ #
+ # Client version: CockroachDB CCL v19.1.0 (x86_64-unknown-linux-gnu, built 2019/04/29 18:36:40, go1.11.6)
+ # Server version: CockroachDB CCL v19.1.0 (x86_64-unknown-linux-gnu, built 2019/04/29 18:36:40, go1.11.6)
+
+ # Cluster ID: 256a8705-e348-4e3a-ab12-e1aba96857e4
+ #
+ # Enter \? for a brief introduction.
+ #
+ root@cockroachdb-public:26257/defaultdb>
+ ~~~
+
+ {{site.data.alerts.callout_success}}
+ This pod will continue running indefinitely, so any time you need to reopen the built-in SQL client or run any other [`cockroach` client commands]({% link {{ page.version.version }}/cockroach-commands.md %}) (e.g., `cockroach node`), repeat step 2 using the appropriate `cockroach` command.
+
+ If you'd prefer to delete the pod and recreate it when needed, run `kubectl delete pod cockroachdb-client-secure`.
+ {{site.data.alerts.end}}
+
+{% include {{ page.version.version }}/orchestration/kubernetes-basic-sql.md %}
+
+
+
+From your local workstation, use our [`client-secure.yaml`](https://github.com/cockroachdb/helm-charts/blob/master/examples/client-secure.yaml) file to launch a pod and keep it running indefinitely.
+
+1. Download the file:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ curl -O \
+ https://raw.githubusercontent.com/cockroachdb/helm-charts/master/examples/client-secure.yaml
+ ~~~
+
+1. In the file, set the following values:
+ - `spec.serviceAccountName: my-release-cockroachdb`
+ - `spec.image: cockroachdb/cockroach:{your CockroachDB version}`
+ - `spec.volumes[0].projected.sources[0].secret.name: my-release-cockroachdb-client-secret`
+
+1. Use the file to launch a pod and keep it running indefinitely:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl create -f client-secure.yaml
+ ~~~
+
+ ~~~
+ pod "cockroachdb-client-secure" created
+ ~~~
+
+1. Get a shell into the pod and start the CockroachDB [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ kubectl exec -it cockroachdb-client-secure \
+ -- ./cockroach sql \
+ --certs-dir=./cockroach-certs \
+ --host=my-release-cockroachdb-public
+ ~~~
+
+ ~~~
+ # Welcome to the cockroach SQL interface.
+ # All statements must be terminated by a semicolon.
+ # To exit: CTRL + D.
+ #
+ # Client version: CockroachDB CCL v19.1.0 (x86_64-unknown-linux-gnu, built 2019/04/29 18:36:40, go1.11.6)
+ # Server version: CockroachDB CCL v19.1.0 (x86_64-unknown-linux-gnu, built 2019/04/29 18:36:40, go1.11.6)
+
+ # Cluster ID: 256a8705-e348-4e3a-ab12-e1aba96857e4
+ #
+ # Enter \? for a brief introduction.
+ #
+ root@my-release-cockroachdb-public:26257/defaultdb>
+ ~~~
+
+ {{site.data.alerts.callout_success}}
+ This pod will continue running indefinitely, so any time you need to reopen the built-in SQL client or run any other [`cockroach` client commands]({% link {{ page.version.version }}/cockroach-commands.md %}) (e.g., `cockroach node`), repeat step 2 using the appropriate `cockroach` command.
+
+ If you'd prefer to delete the pod and recreate it when needed, run `kubectl delete pod cockroachdb-client-secure`.
+ {{site.data.alerts.end}}
+
+{% include {{ page.version.version }}/orchestration/kubernetes-basic-sql.md %}
+
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/performance/alter-primary-key-hash-sharded.md b/src/current/_includes/v25.1/performance/alter-primary-key-hash-sharded.md
new file mode 100644
index 00000000000..7aac175286e
--- /dev/null
+++ b/src/current/_includes/v25.1/performance/alter-primary-key-hash-sharded.md
@@ -0,0 +1,66 @@
+Let's assume the `events` table already exists:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> CREATE TABLE events (
+ product_id INT8,
+ owner UUID,
+ serial_number VARCHAR,
+ event_id UUID,
+ ts TIMESTAMP,
+ data JSONB,
+ PRIMARY KEY (product_id, owner, serial_number, ts, event_id),
+ INDEX (ts) USING HASH
+);
+~~~
+
+You can change an existing primary key to use hash sharding by adding the `USING HASH` clause at the end of the key definition:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> ALTER TABLE events ALTER PRIMARY KEY USING COLUMNS (product_id, owner, serial_number, ts, event_id) USING HASH;
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SHOW INDEX FROM events;
+~~~
+
+~~~
+ table_name | index_name | non_unique | seq_in_index | column_name | direction | storing | implicit
+-------------+---------------+------------+--------------+-------------------------------------------------------------------+-----------+---------+-----------
+ events | events_pkey | false | 1 | crdb_internal_event_id_owner_product_id_serial_number_ts_shard_16 | ASC | false | true
+ events | events_pkey | false | 2 | product_id | ASC | false | false
+ events | events_pkey | false | 3 | owner | ASC | false | false
+ events | events_pkey | false | 4 | serial_number | ASC | false | false
+ events | events_pkey | false | 5 | ts | ASC | false | false
+ events | events_pkey | false | 6 | event_id | ASC | false | false
+ events | events_pkey | false | 7 | data | N/A | true | false
+ events | events_ts_idx | true | 1 | crdb_internal_ts_shard_16 | ASC | false | true
+ events | events_ts_idx | true | 2 | ts | ASC | false | false
+ events | events_ts_idx | true | 3 | crdb_internal_event_id_owner_product_id_serial_number_ts_shard_16 | ASC | false | true
+ events | events_ts_idx | true | 4 | product_id | ASC | false | true
+ events | events_ts_idx | true | 5 | owner | ASC | false | true
+ events | events_ts_idx | true | 6 | serial_number | ASC | false | true
+ events | events_ts_idx | true | 7 | event_id | ASC | false | true
+(14 rows)
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SHOW COLUMNS FROM events;
+~~~
+
+~~~
+ column_name | data_type | is_nullable | column_default | generation_expression | indices | is_hidden
+--------------------------------------------------------------------+-----------+-------------+----------------+-----------------------------------------------------------------------------------------------+-----------------------------+------------
+ product_id | INT8 | false | NULL | | {events_pkey,events_ts_idx} | false
+ owner | UUID | false | NULL | | {events_pkey,events_ts_idx} | false
+ serial_number | VARCHAR | false | NULL | | {events_pkey,events_ts_idx} | false
+ event_id | UUID | false | NULL | | {events_pkey,events_ts_idx} | false
+ ts | TIMESTAMP | false | NULL | | {events_pkey,events_ts_idx} | false
+ data | JSONB | true | NULL | | {events_pkey} | false
+ crdb_internal_ts_shard_16 | INT8 | false | NULL | mod(fnv32(crdb_internal.datums_to_bytes(ts)), 16) | {events_ts_idx} | true
+ crdb_internal_event_id_owner_product_id_serial_number_ts_shard_16 | INT8 | false | NULL | mod(fnv32(crdb_internal.datums_to_bytes(event_id, owner, product_id, serial_number, ts)), 16) | {events_pkey,events_ts_idx} | true
+(8 rows)
+~~~
diff --git a/src/current/_includes/v25.1/performance/check-rebalancing-after-partitioning.md b/src/current/_includes/v25.1/performance/check-rebalancing-after-partitioning.md
new file mode 100644
index 00000000000..c7e19142bd4
--- /dev/null
+++ b/src/current/_includes/v25.1/performance/check-rebalancing-after-partitioning.md
@@ -0,0 +1,41 @@
+Over the next minutes, CockroachDB will rebalance all partitions based on the constraints you defined.
+
+To check this at a high level, access the DB Console on any node at `<node address>:8080` and look at the **Node List**. You'll see that the range count is still close to even across all nodes but much higher than before partitioning:
+
+
+
+To check at a more granular level, SSH to one of the instances not running CockroachDB and run the `SHOW EXPERIMENTAL_RANGES` statement on the `vehicles` table:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ cockroach sql \
+{{page.certs}} \
+--host= \
+--database=movr \
+--execute="SELECT * FROM \
+[SHOW EXPERIMENTAL_RANGES FROM TABLE vehicles] \
+WHERE \"start_key\" IS NOT NULL \
+ AND \"start_key\" NOT LIKE '%Prefix%';"
+~~~
+
+~~~
+ start_key | end_key | range_id | replicas | lease_holder
++------------------+----------------------------+----------+----------+--------------+
+ /"boston" | /"boston"/PrefixEnd | 105 | {1,2,3} | 3
+ /"los angeles" | /"los angeles"/PrefixEnd | 121 | {7,8,9} | 8
+ /"new york" | /"new york"/PrefixEnd | 101 | {1,2,3} | 3
+ /"san francisco" | /"san francisco"/PrefixEnd | 117 | {7,8,9} | 8
+ /"seattle" | /"seattle"/PrefixEnd | 113 | {4,5,6} | 5
+ /"washington dc" | /"washington dc"/PrefixEnd | 109 | {1,2,3} | 1
+(6 rows)
+~~~
+
+For reference, here's how the nodes map to zones:
+
+Node IDs | Zone
+---------|-----
+1-3 | `us-east1-b` (South Carolina)
+4-6 | `us-west1-a` (Oregon)
+7-9 | `us-west2-a` (Los Angeles)
+
+We can see that, after partitioning, the replicas for New York, Boston, and Washington DC are located on nodes 1-3 in `us-east1-b`, replicas for Seattle are located on nodes 4-6 in `us-west1-a`, and replicas for San Francisco and Los Angeles are located on nodes 7-9 in `us-west2-a`.
diff --git a/src/current/_includes/v25.1/performance/check-rebalancing.md b/src/current/_includes/v25.1/performance/check-rebalancing.md
new file mode 100644
index 00000000000..32e3d98f8f1
--- /dev/null
+++ b/src/current/_includes/v25.1/performance/check-rebalancing.md
@@ -0,0 +1,33 @@
+Since you started each node with the `--locality` flag set to its GCE zone, over the next minutes, CockroachDB will rebalance data evenly across the zones.
+
+To check this, access the DB Console on any node at `<node address>:8080` and look at the **Node List**. You'll see that the range count is more or less even across all nodes:
+
+
+
+For reference, here's how the nodes map to zones:
+
+Node IDs | Zone
+---------|-----
+1-3 | `us-east1-b` (South Carolina)
+4-6 | `us-west1-a` (Oregon)
+7-9 | `us-west2-a` (Los Angeles)
+
+To verify even balancing at range level, SSH to one of the instances not running CockroachDB and run the `SHOW EXPERIMENTAL_RANGES` statement:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ cockroach sql \
+{{page.certs}} \
+--host= \
+--database=movr \
+--execute="SHOW EXPERIMENTAL_RANGES FROM TABLE vehicles;"
+~~~
+
+~~~
+ start_key | end_key | range_id | replicas | lease_holder
++-----------+---------+----------+----------+--------------+
+ NULL | NULL | 33 | {3,4,7} | 7
+(1 row)
+~~~
+
+In this case, we can see that, for the single range containing `vehicles` data, one replica is in each zone, and the leaseholder is in the `us-west2-a` zone.
diff --git a/src/current/_includes/v25.1/performance/configure-network.md b/src/current/_includes/v25.1/performance/configure-network.md
new file mode 100644
index 00000000000..e9abeb94df3
--- /dev/null
+++ b/src/current/_includes/v25.1/performance/configure-network.md
@@ -0,0 +1,18 @@
+CockroachDB requires TCP communication on two ports:
+
+- **26257** (`tcp:26257`) for inter-node communication (i.e., working as a cluster)
+- **8080** (`tcp:8080`) for accessing the DB Console
+
+Since GCE instances communicate on their internal IP addresses by default, you do not need to take any action to enable inter-node communication. However, to access the DB Console from your local network, you must [create a firewall rule for your project](https://cloud.google.com/vpc/docs/using-firewalls):
+
+Field | Recommended Value
+------|------------------
+Name | **cockroachweb**
+Source filter | IP ranges
+Source IP ranges | Your local network's IP ranges
+Allowed protocols | **tcp:8080**
+Target tags | `cockroachdb`
+
+{{site.data.alerts.callout_info}}
+The **tag** feature will let you easily apply the rule to your instances.
+{{site.data.alerts.end}}
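+
+For example, the same rule can be created from the command line with `gcloud` (a sketch; substitute your own source range):
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ gcloud compute firewall-rules create cockroachweb \
+--allow tcp:8080 \
+--source-ranges {your-ip-range} \
+--target-tags cockroachdb
+~~~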
diff --git a/src/current/_includes/v25.1/performance/create-index-hash-sharded-secondary-index.md b/src/current/_includes/v25.1/performance/create-index-hash-sharded-secondary-index.md
new file mode 100644
index 00000000000..05f66896541
--- /dev/null
+++ b/src/current/_includes/v25.1/performance/create-index-hash-sharded-secondary-index.md
@@ -0,0 +1,62 @@
+Let's assume the `events` table already exists:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> CREATE TABLE events (
+ product_id INT8,
+ owner UUID,
+ serial_number VARCHAR,
+ event_id UUID,
+ ts TIMESTAMP,
+ data JSONB,
+ PRIMARY KEY (product_id, owner, serial_number, ts, event_id)
+);
+~~~
+
+You can create a hash-sharded index on an existing table:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> CREATE INDEX ON events(ts) USING HASH;
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SHOW INDEX FROM events;
+~~~
+
+~~~
+ table_name | index_name | non_unique | seq_in_index | column_name | direction | storing | implicit
+-------------+---------------+------------+--------------+---------------------------+-----------+---------+-----------
+ events | events_pkey | false | 1 | product_id | ASC | false | false
+ events | events_pkey | false | 2 | owner | ASC | false | false
+ events | events_pkey | false | 3 | serial_number | ASC | false | false
+ events | events_pkey | false | 4 | ts | ASC | false | false
+ events | events_pkey | false | 5 | event_id | ASC | false | false
+ events | events_pkey | false | 6 | data | N/A | true | false
+ events | events_ts_idx | true | 1 | crdb_internal_ts_shard_16 | ASC | false | true
+ events | events_ts_idx | true | 2 | ts | ASC | false | false
+ events | events_ts_idx | true | 3 | product_id | ASC | false | true
+ events | events_ts_idx | true | 4 | owner | ASC | false | true
+ events | events_ts_idx | true | 5 | serial_number | ASC | false | true
+ events | events_ts_idx | true | 6 | event_id | ASC | false | true
+(12 rows)
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SHOW COLUMNS FROM events;
+~~~
+
+~~~
+ column_name | data_type | is_nullable | column_default | generation_expression | indices | is_hidden
+----------------------------+-----------+-------------+----------------+---------------------------------------------------+-----------------------------+------------
+ product_id | INT8 | false | NULL | | {events_pkey,events_ts_idx} | false
+ owner | UUID | false | NULL | | {events_pkey,events_ts_idx} | false
+ serial_number | VARCHAR | false | NULL | | {events_pkey,events_ts_idx} | false
+ event_id | UUID | false | NULL | | {events_pkey,events_ts_idx} | false
+ ts | TIMESTAMP | false | NULL | | {events_pkey,events_ts_idx} | false
+ data | JSONB | true | NULL | | {events_pkey} | false
+ crdb_internal_ts_shard_16 | INT8 | false | NULL | mod(fnv32(crdb_internal.datums_to_bytes(ts)), 16) | {events_ts_idx} | true
+(7 rows)
+~~~
diff --git a/src/current/_includes/v25.1/performance/create-table-hash-sharded-primary-index.md b/src/current/_includes/v25.1/performance/create-table-hash-sharded-primary-index.md
new file mode 100644
index 00000000000..40ba79a096a
--- /dev/null
+++ b/src/current/_includes/v25.1/performance/create-table-hash-sharded-primary-index.md
@@ -0,0 +1,37 @@
+Let's create the `products` table and add a hash-sharded primary key on the `ts` column:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> CREATE TABLE products (
+ ts DECIMAL PRIMARY KEY USING HASH,
+ product_id INT8
+ );
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SHOW INDEX FROM products;
+~~~
+
+~~~
+ table_name | index_name | non_unique | seq_in_index | column_name | direction | storing | implicit
+-------------+---------------+------------+--------------+---------------------------+-----------+---------+-----------
+ products | products_pkey | false | 1 | crdb_internal_ts_shard_16 | ASC | false | true
+ products | products_pkey | false | 2 | ts | ASC | false | false
+ products | products_pkey | false | 3 | product_id | N/A | true | false
+(3 rows)
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SHOW COLUMNS FROM products;
+~~~
+
+~~~
+ column_name | data_type | is_nullable | column_default | generation_expression | indices | is_hidden
+----------------------------+-----------+-------------+----------------+---------------------------------------------------+-----------------+------------
+ crdb_internal_ts_shard_16 | INT8 | false | NULL | mod(fnv32(crdb_internal.datums_to_bytes(ts)), 16) | {products_pkey} | true
+ ts | DECIMAL | false | NULL | | {products_pkey} | false
+ product_id | INT8 | true | NULL | | {products_pkey} | false
+(3 rows)
+~~~
diff --git a/src/current/_includes/v25.1/performance/create-table-hash-sharded-secondary-index.md b/src/current/_includes/v25.1/performance/create-table-hash-sharded-secondary-index.md
new file mode 100644
index 00000000000..dc0e164a0fb
--- /dev/null
+++ b/src/current/_includes/v25.1/performance/create-table-hash-sharded-secondary-index.md
@@ -0,0 +1,56 @@
+Let's now create the `events` table and add a secondary index on the `ts` column in a single statement:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> CREATE TABLE events (
+ product_id INT8,
+ owner UUID,
+ serial_number VARCHAR,
+ event_id UUID,
+ ts TIMESTAMP,
+ data JSONB,
+ PRIMARY KEY (product_id, owner, serial_number, ts, event_id),
+ INDEX (ts) USING HASH
+);
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SHOW INDEX FROM events;
+~~~
+
+~~~
+ table_name | index_name | non_unique | seq_in_index | column_name | direction | storing | implicit
+-------------+---------------+------------+--------------+---------------------------+-----------+---------+-----------
+ events | events_pkey | false | 1 | product_id | ASC | false | false
+ events | events_pkey | false | 2 | owner | ASC | false | false
+ events | events_pkey | false | 3 | serial_number | ASC | false | false
+ events | events_pkey | false | 4 | ts | ASC | false | false
+ events | events_pkey | false | 5 | event_id | ASC | false | false
+ events | events_pkey | false | 6 | data | N/A | true | false
+ events | events_ts_idx | true | 1 | crdb_internal_ts_shard_16 | ASC | false | true
+ events | events_ts_idx | true | 2 | ts | ASC | false | false
+ events | events_ts_idx | true | 3 | product_id | ASC | false | true
+ events | events_ts_idx | true | 4 | owner | ASC | false | true
+ events | events_ts_idx | true | 5 | serial_number | ASC | false | true
+ events | events_ts_idx | true | 6 | event_id | ASC | false | true
+(12 rows)
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SHOW COLUMNS FROM events;
+~~~
+
+~~~
+ column_name | data_type | is_nullable | column_default | generation_expression | indices | is_hidden
+----------------------------+-----------+-------------+----------------+---------------------------------------------------+-----------------------------+------------
+ product_id | INT8 | false | NULL | | {events_pkey,events_ts_idx} | false
+ owner | UUID | false | NULL | | {events_pkey,events_ts_idx} | false
+ serial_number | VARCHAR | false | NULL | | {events_pkey,events_ts_idx} | false
+ event_id | UUID | false | NULL | | {events_pkey,events_ts_idx} | false
+ ts | TIMESTAMP | false | NULL | | {events_pkey,events_ts_idx} | false
+ data | JSONB | true | NULL | | {events_pkey} | false
+ crdb_internal_ts_shard_16 | INT8 | false | NULL | mod(fnv32(crdb_internal.datums_to_bytes(ts)), 16) | {events_ts_idx} | true
+(7 rows)
+~~~
diff --git a/src/current/_includes/v25.1/performance/increase-server-side-retries.md b/src/current/_includes/v25.1/performance/increase-server-side-retries.md
new file mode 100644
index 00000000000..95f2c2a9647
--- /dev/null
+++ b/src/current/_includes/v25.1/performance/increase-server-side-retries.md
@@ -0,0 +1,5 @@
+- [Send statements in transactions as a single batch]({% link {{ page.version.version }}/transactions.md %}#batched-statements). Batching allows CockroachDB to [automatically retry]({% link {{ page.version.version }}/transactions.md %}#automatic-retries) a transaction when [previous reads are invalidated]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#read-refreshing) at a [pushed timestamp]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#timestamp-cache). When a multi-statement transaction is not batched, and takes more than a single round trip, CockroachDB cannot automatically retry the transaction. For an example showing how to break up large transactions in an application, see [Break up large transactions into smaller units of work](build-a-python-app-with-cockroachdb-sqlalchemy.html#break-up-large-transactions-into-smaller-units-of-work).
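+
+ As an illustration only (the `accounts` table and values here are hypothetical), a batched transaction sends every statement, including `BEGIN` and `COMMIT`, to the server as one string in a single round trip:
+
+ ~~~ sql
+ BEGIN;
+ UPDATE accounts SET balance = balance - 100 WHERE id = 1;
+ UPDATE accounts SET balance = balance + 100 WHERE id = 2;
+ COMMIT;
+ ~~~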
+
+
+
+- Limit the size of the result sets of your transactions to under 16KB, so that CockroachDB is more likely to [automatically retry]({% link {{ page.version.version }}/transactions.md %}#automatic-retries) when [previous reads are invalidated]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#read-refreshing) at a [pushed timestamp]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#timestamp-cache). When a transaction returns a result set over 16KB, even if that transaction has been sent as a single batch, CockroachDB cannot automatically retry the transaction. You can change the results buffer size for all new sessions using the `sql.defaults.results_buffer.size` [cluster setting](cluster-settings.html), or for a specific session using the `results_buffer_size` [connection parameter]({% link {{page.version.version}}/connection-parameters.md %}#additional-connection-parameters).
diff --git a/src/current/_includes/v25.1/performance/lease-preference-system-database.md b/src/current/_includes/v25.1/performance/lease-preference-system-database.md
new file mode 100644
index 00000000000..9661aef0e2d
--- /dev/null
+++ b/src/current/_includes/v25.1/performance/lease-preference-system-database.md
@@ -0,0 +1,10 @@
+To reduce latency while making {% if page.name == "online-schema-changes.md" %}online schema changes{% else %}[online schema changes]({% link {{ page.version.version }}/online-schema-changes.md %}){% endif %}, we recommend setting a `lease_preferences` [zone configuration]({% link {{ page.version.version }}/configure-replication-zones.md %}) on the `system` database that pins its leases to a single region, and then running all subsequent schema changes from a node within that region. For example, if the majority of online schema changes come from machines that are geographically close to `us-east1`, run the following:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+ALTER DATABASE system CONFIGURE ZONE USING constraints = '{"+region=us-east1": 1}', lease_preferences = '[[+region=us-east1]]';
+~~~
+
+Run all subsequent schema changes from a node in the specified region.
+
+If you do not intend to run more schema changes from that region, you can safely [remove the lease preference from the zone configuration]({% link {{ page.version.version }}/alter-database.md %}#remove-a-replication-zone) for the system database.
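+
+One way to do this is to reset the preference to its empty default (a sketch; the `constraints` field set earlier can be cleared the same way if it is no longer needed):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+ALTER DATABASE system CONFIGURE ZONE USING lease_preferences = '[]';
+~~~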
diff --git a/src/current/_includes/v25.1/performance/partition-by-city.md b/src/current/_includes/v25.1/performance/partition-by-city.md
new file mode 100644
index 00000000000..226c2d1d5f3
--- /dev/null
+++ b/src/current/_includes/v25.1/performance/partition-by-city.md
@@ -0,0 +1,419 @@
+For this service, the most effective technique for improving read and write latency is to [geo-partition]({% link {{ page.version.version }}/partitioning.md %}) the data by city. In essence, this means changing the way data is mapped to ranges. Instead of an entire table and its indexes mapping to a specific range or set of ranges, all rows in the table and its indexes with a given city will map to a range or set of ranges. Once ranges are defined in this way, we can then use the [replication zone]({% link {{ page.version.version }}/configure-replication-zones.md %}) feature to pin partitions to specific locations, ensuring that read and write requests from users in a specific city do not have to leave that region.
+
+1. Partitioning is an enterprise feature, so start off by [registering for a 30-day trial license](https://www.cockroachlabs.com/get-cockroachdb/enterprise/).
+
+1. Once you've received the trial license, SSH to any node in your cluster and [apply the license]({% link {{ page.version.version }}/licensing-faqs.md %}#set-a-license):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql \
+ {{page.certs}} \
+ --host= \
+ --execute="SET CLUSTER SETTING cluster.organization = '';"
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql \
+ {{page.certs}} \
+ --host= \
+ --execute="SET CLUSTER SETTING enterprise.license = '';"
+ ~~~
+
+1. Define partitions for all tables and their secondary indexes.
+
+ Start with the `users` table:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql \
+ {{page.certs}} \
+ --database=movr \
+ --host= \
+ --execute="ALTER TABLE users \
+ PARTITION BY LIST (city) ( \
+ PARTITION new_york VALUES IN ('new york'), \
+ PARTITION boston VALUES IN ('boston'), \
+ PARTITION washington_dc VALUES IN ('washington dc'), \
+ PARTITION seattle VALUES IN ('seattle'), \
+ PARTITION san_francisco VALUES IN ('san francisco'), \
+ PARTITION los_angeles VALUES IN ('los angeles') \
+ );"
+ ~~~
+
+ Now define partitions for the `vehicles` table and its secondary indexes:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql \
+ {{page.certs}} \
+ --database=movr \
+ --host= \
+ --execute="ALTER TABLE vehicles \
+ PARTITION BY LIST (city) ( \
+ PARTITION new_york VALUES IN ('new york'), \
+ PARTITION boston VALUES IN ('boston'), \
+ PARTITION washington_dc VALUES IN ('washington dc'), \
+ PARTITION seattle VALUES IN ('seattle'), \
+ PARTITION san_francisco VALUES IN ('san francisco'), \
+ PARTITION los_angeles VALUES IN ('los angeles') \
+ );"
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql \
+ {{page.certs}} \
+ --database=movr \
+ --host= \
+ --execute="ALTER INDEX vehicles_auto_index_fk_city_ref_users \
+ PARTITION BY LIST (city) ( \
+ PARTITION new_york VALUES IN ('new york'), \
+ PARTITION boston VALUES IN ('boston'), \
+ PARTITION washington_dc VALUES IN ('washington dc'), \
+ PARTITION seattle VALUES IN ('seattle'), \
+ PARTITION san_francisco VALUES IN ('san francisco'), \
+ PARTITION los_angeles VALUES IN ('los angeles') \
+ );"
+ ~~~
+
+ Next, define partitions for the `rides` table and its secondary indexes:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql \
+ {{page.certs}} \
+ --database=movr \
+ --host= \
+ --execute="ALTER TABLE rides \
+ PARTITION BY LIST (city) ( \
+ PARTITION new_york VALUES IN ('new york'), \
+ PARTITION boston VALUES IN ('boston'), \
+ PARTITION washington_dc VALUES IN ('washington dc'), \
+ PARTITION seattle VALUES IN ('seattle'), \
+ PARTITION san_francisco VALUES IN ('san francisco'), \
+ PARTITION los_angeles VALUES IN ('los angeles') \
+ );"
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql \
+ {{page.certs}} \
+ --database=movr \
+ --host= \
+ --execute="ALTER INDEX rides_auto_index_fk_city_ref_users \
+ PARTITION BY LIST (city) ( \
+ PARTITION new_york VALUES IN ('new york'), \
+ PARTITION boston VALUES IN ('boston'), \
+ PARTITION washington_dc VALUES IN ('washington dc'), \
+ PARTITION seattle VALUES IN ('seattle'), \
+ PARTITION san_francisco VALUES IN ('san francisco'), \
+ PARTITION los_angeles VALUES IN ('los angeles') \
+ );"
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql \
+ {{page.certs}} \
+ --database=movr \
+ --host= \
+ --execute="ALTER INDEX rides_auto_index_fk_vehicle_city_ref_vehicles \
+ PARTITION BY LIST (vehicle_city) ( \
+ PARTITION new_york VALUES IN ('new york'), \
+ PARTITION boston VALUES IN ('boston'), \
+ PARTITION washington_dc VALUES IN ('washington dc'), \
+ PARTITION seattle VALUES IN ('seattle'), \
+ PARTITION san_francisco VALUES IN ('san francisco'), \
+ PARTITION los_angeles VALUES IN ('los angeles') \
+ );"
+ ~~~
+
+ Finally, drop an unused index on `rides` rather than partition it:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql \
+ {{page.certs}} \
+ --database=movr \
+ --host= \
+ --execute="DROP INDEX rides_start_time_idx;"
+ ~~~
+
+ {{site.data.alerts.callout_info}}
+ The `rides` table contains 1 million rows, so dropping this index will take a few minutes.
+ {{site.data.alerts.end}}
+
+1. Now [create replication zones]({% link {{ page.version.version }}/configure-replication-zones.md %}#create-a-replication-zone-for-a-partition) to require city data to be stored on specific nodes based on node locality.
+
+ City | Locality
+ -----|---------
+ New York | `zone=us-east1-b`
+ Boston | `zone=us-east1-b`
+ Washington DC | `zone=us-east1-b`
+ Seattle | `zone=us-west1-a`
+ San Francisco | `zone=us-west2-a`
+ Los Angeles | `zone=us-west2-a`
+
+ {{site.data.alerts.callout_info}}
+ Since our nodes are located in 3 specific GCE zones, we're only going to use the `zone=` portion of node locality. If we were using multiple zones per region, we would likely use the `region=` portion of the node locality instead.
+ {{site.data.alerts.end}}
+
+ Start with the `users` table partitions:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION new_york OF TABLE movr.users CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION boston OF TABLE movr.users CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION washington_dc OF TABLE movr.users CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION seattle OF TABLE movr.users CONFIGURE ZONE USING constraints='[+zone=us-west1-a]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION san_francisco OF TABLE movr.users CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION los_angeles OF TABLE movr.users CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ Move on to the `vehicles` table and secondary index partitions:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION new_york OF TABLE movr.vehicles CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION new_york OF INDEX vehicles_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION boston OF TABLE movr.vehicles CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION boston OF INDEX vehicles_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION washington_dc OF TABLE movr.vehicles CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION washington_dc OF INDEX vehicles_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION seattle OF TABLE movr.vehicles CONFIGURE ZONE USING constraints='[+zone=us-west1-a]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION seattle OF INDEX vehicles_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-west1-a]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION san_francisco OF TABLE movr.vehicles CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION san_francisco OF INDEX vehicles_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION los_angeles OF TABLE movr.vehicles CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION los_angeles OF INDEX vehicles_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ Finish with the `rides` table and secondary index partitions:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION new_york OF TABLE movr.rides CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION new_york OF INDEX rides_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION new_york OF INDEX rides_auto_index_fk_vehicle_city_ref_vehicles CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION boston OF TABLE movr.rides CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION boston OF INDEX rides_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION boston OF INDEX rides_auto_index_fk_vehicle_city_ref_vehicles CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION washington_dc OF TABLE movr.rides CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION washington_dc OF INDEX rides_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION washington_dc OF INDEX rides_auto_index_fk_vehicle_city_ref_vehicles CONFIGURE ZONE USING constraints='[+zone=us-east1-b]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION seattle OF TABLE movr.rides CONFIGURE ZONE USING constraints='[+zone=us-west1-a]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION seattle OF INDEX rides_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-west1-a]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION seattle OF INDEX rides_auto_index_fk_vehicle_city_ref_vehicles CONFIGURE ZONE USING constraints='[+zone=us-west1-a]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION san_francisco OF TABLE movr.rides CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION san_francisco OF INDEX rides_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION san_francisco OF INDEX rides_auto_index_fk_vehicle_city_ref_vehicles CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION los_angeles OF TABLE movr.rides CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION los_angeles OF INDEX rides_auto_index_fk_city_ref_users CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --execute="ALTER PARTITION los_angeles OF INDEX rides_auto_index_fk_vehicle_city_ref_vehicles CONFIGURE ZONE USING constraints='[+zone=us-west2-a]';" \
+ {{page.certs}} \
+ --host=
+ ~~~
diff --git a/src/current/_includes/v25.1/performance/reduce-contention.md b/src/current/_includes/v25.1/performance/reduce-contention.md
new file mode 100644
index 00000000000..0f52e1f212a
--- /dev/null
+++ b/src/current/_includes/v25.1/performance/reduce-contention.md
@@ -0,0 +1,17 @@
+- Limit the number of affected rows by following the guidance in [optimizing queries]({% link {{ page.version.version }}/apply-statement-performance-rules.md %}) (e.g., avoid full scans, create secondary indexes). Not only will transactions run faster, lock fewer rows, and hold locks for a shorter duration, but a smaller read set (i.e., fewer rows read) also decreases the chance of [read invalidation]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#read-refreshing) when the transaction's [timestamp is pushed]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#timestamp-cache) due to a conflicting write.
+
+- Break down larger transactions (e.g., [bulk deletes]({% link {{ page.version.version }}/bulk-delete-data.md %})) into smaller ones so that transactions hold locks for a shorter duration. For example, use [common table expressions]({% link {{ page.version.version }}/common-table-expressions.md %}) to group multiple clauses together in a single SQL statement. This will also decrease the likelihood of [pushed timestamps]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#timestamp-cache): as the size of writes (number of rows written) decreases, the chances of the transaction's timestamp getting bumped by concurrent reads decrease.
+
+- Use [`SELECT FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) to aggressively lock rows that will later be updated in the transaction. Updates must operate on the most recent version of a row, so a concurrent write to the row will cause a retry error ([`RETRY_WRITE_TOO_OLD`]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#retry_write_too_old)). Locking early in the transaction forces concurrent writers to block until the transaction is finished, which prevents the retry error. Note that this locks the rows for the duration of the transaction; whether this is tenable will depend on your workload. For more information, see [When and why to use `SELECT FOR UPDATE` in CockroachDB](https://www.cockroachlabs.com/blog/when-and-why-to-use-select-for-update-in-cockroachdb/). A minimal example appears at the end of this list.
+
+- Use historical reads ([`SELECT ... AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %})), preferably [bounded staleness reads]({% link {{ page.version.version }}/follower-reads.md %}#when-to-use-bounded-staleness-reads) or [exact staleness with follower reads]({% link {{ page.version.version }}/follower-reads.md %}#run-queries-that-use-exact-staleness-follower-reads) when possible to reduce conflicts with other writes. This reduces the likelihood of [`RETRY_SERIALIZABLE`]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#retry_serializable) errors as fewer writes will happen at the historical timestamp. More specifically, writes' timestamps are less likely to be pushed by historical reads than they would be [when the read has a higher priority level]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#transaction-conflicts). Note that if the `AS OF SYSTEM TIME` value is below the closed timestamp, the read cannot be invalidated.
+
+- When replacing values in a row, use [`UPSERT`]({% link {{ page.version.version }}/upsert.md %}) and specify values for all columns in the inserted rows. This will usually have the best performance under contention, compared to combinations of [`SELECT`]({% link {{ page.version.version }}/select-clause.md %}), [`INSERT`]({% link {{ page.version.version }}/insert.md %}), and [`UPDATE`]({% link {{ page.version.version }}/update.md %}).
+
+- If applicable to your workload, assign [column families]({% link {{ page.version.version }}/column-families.md %}#default-behavior) so that columns that are frequently read and written are placed in separate column families. Transactions that operate on disjoint column families are less likely to conflict.
+
+- As a last resort, consider adjusting the [closed timestamp interval]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#closed-timestamps) using the `kv.closed_timestamp.target_duration` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to reduce the likelihood of long-running write transactions having their [timestamps pushed]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#timestamp-cache). Adjust this setting carefully, and **only if no other mitigations are available**, because it has downstream implications (e.g., historical reads, change data capture feeds, statistics collection, and zone configuration handling). For example, a transaction _A_ may be forced to refresh (i.e., change its timestamp) due to hitting the maximum [_closed timestamp_]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#closed-timestamps) interval (closed timestamps enable [Follower Reads]({% link {{ page.version.version }}/follower-reads.md %}#how-stale-follower-reads-work) and [Change Data Capture (CDC)]({% link {{ page.version.version }}/change-data-capture-overview.md %})). This can happen when transaction _A_ is long-running and another transaction writes to data that _A_ has already read.
+
+{{site.data.alerts.callout_info}}
+If you increase the `kv.closed_timestamp.target_duration` setting, it means that you are increasing the amount of time by which the data available in [Follower Reads]({% link {{ page.version.version }}/follower-reads.md %}) and [CDC changefeeds]({% link {{ page.version.version }}/change-data-capture-overview.md %}) lags behind the current state of the cluster. In other words, there is a trade-off here: if you absolutely must execute long-running transactions that execute concurrently with other transactions that are writing to the same data, you may have to settle for longer delays on Follower Reads and/or CDC to avoid frequent serialization errors. The anomaly that would be exhibited if these transactions were not retried is called [write skew](https://www.cockroachlabs.com/blog/what-write-skew-looks-like/).
+{{site.data.alerts.end}}
\ No newline at end of file
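+
+For example, here is a minimal sketch of the `SELECT FOR UPDATE` approach against the MovR `users` table (the filter and new address values are illustrative):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+BEGIN;
+-- Lock the target row up front so concurrent writers block instead of
+-- invalidating this transaction's reads.
+SELECT * FROM users WHERE city = 'seattle' AND name = 'Seatller' FOR UPDATE;
+UPDATE users SET address = '222 East Street' WHERE city = 'seattle' AND name = 'Seatller';
+COMMIT;
+~~~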
diff --git a/src/current/_includes/v25.1/performance/reduce-hot-spots.md b/src/current/_includes/v25.1/performance/reduce-hot-spots.md
new file mode 100644
index 00000000000..4d7b601e33d
--- /dev/null
+++ b/src/current/_includes/v25.1/performance/reduce-hot-spots.md
@@ -0,0 +1,37 @@
+- Use index keys with a random distribution of values, so that transactions over different rows are more likely to operate on separate data ranges. See the [SQL FAQs]({% link {{ page.version.version }}/sql-faqs.md %}#how-do-i-auto-generate-unique-row-ids-in-cockroachdb) on row IDs for suggestions.
+
+- Place parts of the records that are modified by different transactions in different tables. That is, increase [normalization](https://wikipedia.org/wiki/Database_normalization). However, there are benefits and drawbacks to increasing normalization.
+
+ - Benefits of increasing normalization:
+
+ - Can improve performance for write-heavy workloads. This is because, with increased normalization, a given business fact must be written to one place rather than to multiple places.
+ - Allows separate transactions to modify related underlying data without causing [contention](#transaction-contention).
+ - Reduces the chance of data inconsistency, since a given business fact must be written only to one place.
+ - Reduces or eliminates data redundancy.
+ - Uses less disk space.
+
+ - Drawbacks of increasing normalization:
+
+ - Can reduce performance for read-heavy workloads. This is because increasing normalization results in more joins, and can make the SQL more complicated in other ways.
+ - More complex data model.
+
+ - In general:
+
+ - Increase normalization for write-intensive and read/write-intensive transactional workloads.
+ - Do not increase normalization for read-intensive reporting workloads.
+
+- If the application strictly requires operating on very few different index keys, consider using [`ALTER ... SPLIT AT`]({% link {{ page.version.version }}/alter-table.md %}#split-at) so that each index key can be served by a separate group of nodes in the cluster. A sketch follows at the end of this list.
+
+- If you are working with a table that **must** be indexed on sequential keys, consider using [hash-sharded indexes]({% link {{ page.version.version }}/hash-sharded-indexes.md %}). For details about the mechanics and performance improvements of hash-sharded indexes in CockroachDB, see the blog post [Hash Sharded Indexes Unlock Linear Scaling for Sequential Workloads](https://www.cockroachlabs.com/blog/hash-sharded-indexes-unlock-linear-scaling-for-sequential-workloads/). As part of this, we recommend doing thorough performance testing with and without hash-sharded indexes to see which works best for your application.
+
+- To avoid read hot spots:
+
+ - Increase data distribution, which will allow for more ranges. The hot spot exists because the data being accessed is all co-located in one range.
+ - Increase [load balancing]({% link {{ page.version.version }}/recommended-production-settings.md %}#load-balancing) across more nodes in the same range. Most transactional reads must go to the leaseholder in CockroachDB, which means that opportunities for load balancing over replicas are minimal.
+
+ However, the following features do permit load balancing over replicas:
+
+ - [Global tables]({% link {{ page.version.version }}/global-tables.md %}).
+ - [Follower reads]({% link {{ page.version.version }}/follower-reads.md %}) (both the bounded staleness and the exact staleness kinds).
+
+ In these cases, more replicas will help, up to the number of nodes in the cluster.
\ No newline at end of file
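+
+For example, here is a minimal sketch of a manual range split (the table and split values are illustrative):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+-- Split the vehicles table into separate ranges at two city boundaries so
+-- that traffic for each key span can be served by a different set of nodes.
+ALTER TABLE vehicles SPLIT AT VALUES ('new york'), ('seattle');
+~~~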
diff --git a/src/current/_includes/v25.1/performance/scale-cluster.md b/src/current/_includes/v25.1/performance/scale-cluster.md
new file mode 100644
index 00000000000..3575d31e374
--- /dev/null
+++ b/src/current/_includes/v25.1/performance/scale-cluster.md
@@ -0,0 +1,61 @@
+1. SSH to one of the `n2-standard-4` instances in the `us-west1-a` zone.
+
+1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, extract the binary, and copy it into the `PATH`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz \
+ | tar -xz
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ sudo cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/
+ ~~~
+
+1. Run the [`cockroach start`]({% link {{ page.version.version }}/cockroach-start.md %}) command:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach start \
+ {{page.certs}} \
+ --advertise-addr= \
+ --join= \
+ --locality=cloud=gce,region=us-west1,zone=us-west1-a \
+ --cache=.25 \
+ --max-sql-memory=.25 \
+ --background
+ ~~~
+
+1. Repeat steps 1 - 3 for the other two `n2-standard-4` instances in the `us-west1-a` zone.
+
+1. SSH to one of the `n2-standard-4` instances in the `us-west2-a` zone.
+
+1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, extract the binary, and copy it into the `PATH`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz \
+ | tar -xz
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ sudo cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/
+ ~~~
+
+1. Run the [`cockroach start`]({% link {{ page.version.version }}/cockroach-start.md %}) command:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach start \
+ {{page.certs}} \
+ --advertise-addr= \
+ --join= \
+ --locality=cloud=gce,region=us-west2,zone=us-west2-a \
+ --cache=.25 \
+ --max-sql-memory=.25 \
+ --background
+ ~~~
+
+1. Repeat steps 5 - 7 for the other two `n2-standard-4` instances in the `us-west2-a` zone.
diff --git a/src/current/_includes/v25.1/performance/sql-trace-txn-enable-threshold.md b/src/current/_includes/v25.1/performance/sql-trace-txn-enable-threshold.md
new file mode 100644
index 00000000000..9ec50e30bdb
--- /dev/null
+++ b/src/current/_includes/v25.1/performance/sql-trace-txn-enable-threshold.md
@@ -0,0 +1,6 @@
+{%- if include.version_prefix != nil -%}
+ {%- assign url = include.version_prefix | append: "cluster-settings.html#setting-sql-trace-txn-enable-threshold" | absolute_url -%}
+{%- else -%}
+ {%- assign url = "cluster-settings.html#setting-sql-trace-txn-enable-threshold" -%}
+{%- endif -%}
+The default tracing behavior captures a small percentage of transactions, so not all contention events will be recorded. When investigating transaction contention, you can set the [`sql.trace.txn.enable_threshold` cluster setting]({{ url }}) to always capture contention events.
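+
+For example, the following sketch lowers the threshold so that any transaction running longer than the given duration is traced (the `1ms` value is illustrative; very low thresholds add tracing overhead):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SET CLUSTER SETTING sql.trace.txn.enable_threshold = '1ms';
+~~~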
diff --git a/src/current/_includes/v25.1/performance/start-cluster.md b/src/current/_includes/v25.1/performance/start-cluster.md
new file mode 100644
index 00000000000..516aca418f1
--- /dev/null
+++ b/src/current/_includes/v25.1/performance/start-cluster.md
@@ -0,0 +1,60 @@
+#### Start the nodes
+
+1. SSH to the first `n2-standard-4` instance.
+
+1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, extract the binary, and copy it into the `PATH`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz \
+ | tar -xz
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ sudo cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/
+ ~~~
+
+1. Run the [`cockroach start`]({% link {{ page.version.version }}/cockroach-start.md %}) command:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach start \
+ {{page.certs}} \
+ --advertise-addr= \
+ --join=:26257,:26257,:26257 \
+ --locality=cloud=gce,region=us-east1,zone=us-east1-b \
+ --cache=.25 \
+ --max-sql-memory=.25 \
+ --background
+ ~~~
+
+1. Repeat steps 1 - 3 for the other two `n2-standard-4` instances. Be sure to adjust the `--advertise-addr` flag each time.
+
+#### Initialize the cluster
+
+1. SSH to the fourth instance, the one not running a CockroachDB node.
+
+1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, and extract the binary:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz \
+ | tar -xz
+ ~~~
+
+1. Copy the binary into the `PATH`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ sudo cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/
+ ~~~
+
+1. Run the [`cockroach init`]({% link {{ page.version.version }}/cockroach-init.md %}) command:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach init {{page.certs}} --host=
+ ~~~
+
+ Each node then prints helpful details to the [standard output]({% link {{ page.version.version }}/cockroach-start.md %}#standard-output), such as the CockroachDB version, the URL for the DB Console, and the SQL URL for clients.
diff --git a/src/current/_includes/v25.1/performance/test-performance-after-partitioning.md b/src/current/_includes/v25.1/performance/test-performance-after-partitioning.md
new file mode 100644
index 00000000000..dcd388bfb43
--- /dev/null
+++ b/src/current/_includes/v25.1/performance/test-performance-after-partitioning.md
@@ -0,0 +1,93 @@
+After partitioning, reads and writes for a specific city will be much faster because all replicas for that city are now located on the nodes closest to the city.
+
+To check this, let's repeat a few of the read and write queries that we executed before partitioning in [step 12](#step-12-test-performance).
+
+#### Reads
+
+Again imagine we are a Movr administrator in New York, and we want to get the IDs and descriptions of all New York-based bikes that are currently in use:
+
+1. SSH to the instance in `us-east1-b` with the Python client.
+
+1. Query for the data:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ {{page.app}} \
+ --host= \
+ --statement="SELECT id, ext FROM vehicles \
+ WHERE city = 'new york' \
+ AND type = 'bike' \
+ AND status = 'in_use'" \
+ --repeat=50 \
+ --times
+ ~~~
+
+ ~~~
+ Result:
+ ['id', 'ext']
+ ['0068ee24-2dfb-437d-9a5d-22bb742d519e', "{u'color': u'green', u'brand': u'Kona'}"]
+ ['01b80764-283b-4232-8961-a8d6a4121a08', "{u'color': u'green', u'brand': u'Pinarello'}"]
+ ['02a39628-a911-4450-b8c0-237865546f7f', "{u'color': u'black', u'brand': u'Schwinn'}"]
+ ['02eb2a12-f465-4575-85f8-a4b77be14c54', "{u'color': u'black', u'brand': u'Pinarello'}"]
+ ['02f2fcc3-fea6-4849-a3a0-dc60480fa6c2', "{u'color': u'red', u'brand': u'FujiCervelo'}"]
+ ['034d42cf-741f-428c-bbbb-e31820c68588', "{u'color': u'yellow', u'brand': u'Santa Cruz'}"]
+ ...
+
+ Times (milliseconds):
+ [20.065784454345703, 7.866144180297852, 8.362054824829102, 9.08803939819336, 7.925987243652344, 7.543087005615234, 7.786035537719727, 8.227825164794922, 7.907867431640625, 7.654905319213867, 7.793903350830078, 7.627964019775391, 7.833957672119141, 7.858037948608398, 7.474184036254883, 9.459972381591797, 7.726192474365234, 7.194995880126953, 7.364034652709961, 7.25102424621582, 7.650852203369141, 7.663965225219727, 9.334087371826172, 7.810115814208984, 7.543087005615234, 7.134914398193359, 7.922887802124023, 7.220029830932617, 7.606029510498047, 7.208108901977539, 7.333993911743164, 7.464170455932617, 7.679939270019531, 7.436990737915039, 7.62486457824707, 7.235050201416016, 7.420063018798828, 7.795095443725586, 7.39598274230957, 7.546901702880859, 7.582187652587891, 7.9669952392578125, 7.418155670166016, 7.539033889770508, 7.805109024047852, 7.086992263793945, 7.069826126098633, 7.833957672119141, 7.43412971496582, 7.035017013549805]
+
+ Median time (milliseconds):
+ 7.62641429901
+ ~~~
+
+Before partitioning, this query took a median time of 72.02ms. After partitioning, the query took a median time of only 7.62ms.
+
+#### Writes
+
+Now let's again imagine 100 people in Seattle and 100 people in New York want to create new Movr accounts:
+
+1. SSH to the instance in `us-west1-a` with the Python client.
+
+1. Create 100 Seattle-based users:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ {{page.app}} \
+ --host= \
+ --statement="INSERT INTO users VALUES (gen_random_uuid(), 'seattle', 'Seatller', '111 East Street', '1736352379937347')" \
+ --repeat=100 \
+ --times
+ ~~~
+
+ ~~~
+ Times (milliseconds):
+ [41.8248176574707, 9.701967239379883, 8.725166320800781, 9.058952331542969, 7.819175720214844, 6.247997283935547, 10.265827178955078, 7.627964019775391, 9.120941162109375, 7.977008819580078, 9.247064590454102, 8.929967880249023, 9.610176086425781, 14.40286636352539, 8.588075637817383, 8.67319107055664, 9.417057037353516, 7.652044296264648, 8.917093276977539, 9.135961532592773, 8.604049682617188, 9.220123291015625, 7.578134536743164, 9.096860885620117, 8.942842483520508, 8.63790512084961, 7.722139358520508, 13.59701156616211, 9.176015853881836, 11.484146118164062, 9.212017059326172, 7.563114166259766, 8.793115615844727, 8.80289077758789, 7.827043533325195, 7.6389312744140625, 17.47584342956543, 9.436845779418945, 7.63392448425293, 8.594989776611328, 9.002208709716797, 8.93402099609375, 8.71896743774414, 8.76307487487793, 8.156061172485352, 8.729934692382812, 8.738040924072266, 8.25190544128418, 8.971929550170898, 7.460832595825195, 8.889198303222656, 8.45789909362793, 8.761167526245117, 10.223865509033203, 8.892059326171875, 8.961915969848633, 8.968114852905273, 7.750988006591797, 7.761955261230469, 9.199142456054688, 9.02700424194336, 9.509086608886719, 9.428977966308594, 7.902860641479492, 8.940935134887695, 8.615970611572266, 8.75401496887207, 7.906913757324219, 8.179187774658203, 11.447906494140625, 8.71419906616211, 9.202003479003906, 9.263038635253906, 9.089946746826172, 8.92496109008789, 10.32114028930664, 7.913827896118164, 9.464025497436523, 10.612010955810547, 8.78596305847168, 8.878946304321289, 7.575035095214844, 10.657072067260742, 8.777856826782227, 8.649110794067383, 9.012937545776367, 8.931875228881836, 9.31406021118164, 9.396076202392578, 8.908987045288086, 8.002996444702148, 9.089946746826172, 7.5588226318359375, 8.918046951293945, 12.117862701416016, 7.266998291015625, 8.074045181274414, 8.955001831054688, 8.868932723999023, 8.755922317504883]
+
+ Median time (milliseconds):
+ 8.90052318573
+ ~~~
+
+ Before partitioning, this query took a median time of 48.40ms. After partitioning, the query took a median time of only 8.90ms.
+
+1. SSH to the instance in `us-east1-b` with the Python client.
+
+1. Create 100 new NY-based users:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ {{page.app}} \
+ --host= \
+ --statement="INSERT INTO users VALUES (gen_random_uuid(), 'new york', 'New Yorker', '111 West Street', '9822222379937347')" \
+ --repeat=100 \
+ --times
+ ~~~
+
+ ~~~
+ Times (milliseconds):
+ [276.3068675994873, 9.830951690673828, 8.772134780883789, 9.304046630859375, 8.24880599975586, 7.959842681884766, 7.848978042602539, 7.879018783569336, 7.754087448120117, 10.724067687988281, 13.960123062133789, 9.825944900512695, 9.60993766784668, 9.273052215576172, 9.41920280456543, 8.040904998779297, 16.484975814819336, 10.178089141845703, 8.322000503540039, 9.468793869018555, 8.002042770385742, 9.185075759887695, 9.54294204711914, 9.387016296386719, 9.676933288574219, 13.051986694335938, 9.506940841674805, 12.327909469604492, 10.377168655395508, 15.023946762084961, 9.985923767089844, 7.853031158447266, 9.43303108215332, 9.164094924926758, 10.941028594970703, 9.37199592590332, 12.359857559204102, 8.975028991699219, 7.728099822998047, 8.310079574584961, 9.792089462280273, 9.448051452636719, 8.057117462158203, 9.37795639038086, 9.753942489624023, 9.576082229614258, 8.192062377929688, 9.392023086547852, 7.97581672668457, 8.165121078491211, 9.660959243774414, 8.270978927612305, 9.901046752929688, 8.085966110229492, 10.581016540527344, 9.831905364990234, 7.883787155151367, 8.077859878540039, 8.161067962646484, 10.02812385559082, 7.9898834228515625, 9.840965270996094, 9.452104568481445, 9.747028350830078, 9.003162384033203, 9.206056594848633, 9.274005889892578, 7.8449249267578125, 8.827924728393555, 9.322881698608398, 12.08186149597168, 8.76307487487793, 8.353948593139648, 8.182048797607422, 7.736921310424805, 9.31406021118164, 9.263992309570312, 9.282112121582031, 7.823944091796875, 9.11712646484375, 8.099079132080078, 9.156942367553711, 8.363962173461914, 10.974884033203125, 8.729934692382812, 9.2620849609375, 9.27591323852539, 8.272886276245117, 8.25190544128418, 8.093118667602539, 9.259939193725586, 8.413076400756836, 8.198976516723633, 9.95182991027832, 8.024930953979492, 8.895158767700195, 8.243083953857422, 9.076833724975586, 9.994029998779297, 10.149955749511719]
+
+ Median time (milliseconds):
+ 9.26303863525
+ ~~~
+
+ Before partitioning, this query took a median time of 116.86ms. After partitioning, the query took a median time of only 9.26ms.
diff --git a/src/current/_includes/v25.1/performance/test-performance.md b/src/current/_includes/v25.1/performance/test-performance.md
new file mode 100644
index 00000000000..b9a8ca0e6c8
--- /dev/null
+++ b/src/current/_includes/v25.1/performance/test-performance.md
@@ -0,0 +1,146 @@
+In general, all of the tuning techniques featured in the single-region scenario above still apply in a multi-region deployment. However, the fact that data and leaseholders are spread across the US means greater latencies in many cases.
+
+#### Reads
+
+For example, imagine we are a Movr administrator in New York, and we want to get the IDs and descriptions of all New York-based bikes that are currently in use:
+
+1. SSH to the instance in `us-east1-b` with the Python client.
+
+1. Query for the data:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ {{page.app}} \
+ --host= \
+ --statement="SELECT id, ext FROM vehicles \
+ WHERE city = 'new york' \
+ AND type = 'bike' \
+ AND status = 'in_use'" \
+ --repeat=50 \
+ --times
+ ~~~
+
+ ~~~
+ Result:
+ ['id', 'ext']
+ ['0068ee24-2dfb-437d-9a5d-22bb742d519e', "{u'color': u'green', u'brand': u'Kona'}"]
+ ['01b80764-283b-4232-8961-a8d6a4121a08', "{u'color': u'green', u'brand': u'Pinarello'}"]
+ ['02a39628-a911-4450-b8c0-237865546f7f', "{u'color': u'black', u'brand': u'Schwinn'}"]
+ ['02eb2a12-f465-4575-85f8-a4b77be14c54', "{u'color': u'black', u'brand': u'Pinarello'}"]
+ ['02f2fcc3-fea6-4849-a3a0-dc60480fa6c2', "{u'color': u'red', u'brand': u'FujiCervelo'}"]
+ ['034d42cf-741f-428c-bbbb-e31820c68588', "{u'color': u'yellow', u'brand': u'Santa Cruz'}"]
+ ...
+
+ Times (milliseconds):
+ [933.8209629058838, 72.02410697937012, 72.45206832885742, 72.39294052124023, 72.8158950805664, 72.07584381103516, 72.21412658691406, 71.96712493896484, 71.75517082214355, 72.16811180114746, 71.78592681884766, 72.91603088378906, 71.91109657287598, 71.4719295501709, 72.40676879882812, 71.8080997467041, 71.84004783630371, 71.98500633239746, 72.40891456604004, 73.75001907348633, 71.45905494689941, 71.53081893920898, 71.46596908569336, 72.07608222961426, 71.94995880126953, 71.41804695129395, 71.29096984863281, 72.11899757385254, 71.63381576538086, 71.3050365447998, 71.83194160461426, 71.20394706726074, 70.9981918334961, 72.79205322265625, 72.63493537902832, 72.15285301208496, 71.8698501586914, 72.30591773986816, 71.53582572937012, 72.69001007080078, 72.03006744384766, 72.56317138671875, 71.61688804626465, 72.17121124267578, 70.20092010498047, 72.12018966674805, 73.34589958190918, 73.01592826843262, 71.49410247802734, 72.19099998474121]
+
+ Median time (milliseconds):
+ 72.0270872116
+ ~~~
+
+As we saw earlier, the leaseholder for the `vehicles` table is in `us-west2-a` (Los Angeles), so our query had to go from the gateway node in `us-east1-b` all the way to the west coast and then back again before returning data to the client.
+
+For contrast, imagine we are now a Movr administrator in Los Angeles, and we want to get the IDs and descriptions of all Los Angeles-based bikes that are currently in use:
+
+1. SSH to the instance in `us-west2-a` with the Python client.
+
+1. Query for the data:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ {{page.app}} \
+ --host= \
+ --statement="SELECT id, ext FROM vehicles \
+ WHERE city = 'los angeles' \
+ AND type = 'bike' \
+ AND status = 'in_use'" \
+ --repeat=50 \
+ --times
+ ~~~
+
+ ~~~
+ Result:
+ ['id', 'ext']
+ ['00078349-94d4-43e6-92be-8b0d1ac7ee9f', "{u'color': u'blue', u'brand': u'Merida'}"]
+ ['003f84c4-fa14-47b2-92d4-35a3dddd2d75', "{u'color': u'red', u'brand': u'Kona'}"]
+ ['0107a133-7762-4392-b1d9-496eb30ee5f9', "{u'color': u'yellow', u'brand': u'Kona'}"]
+ ['0144498b-4c4f-4036-8465-93a6bea502a3', "{u'color': u'blue', u'brand': u'Pinarello'}"]
+ ['01476004-fb10-4201-9e56-aadeb427f98a', "{u'color': u'black', u'brand': u'Merida'}"]
+
+ Times (milliseconds):
+ [782.6759815216064, 8.564949035644531, 8.226156234741211, 7.949113845825195, 7.86590576171875, 7.842063903808594, 7.674932479858398, 7.555961608886719, 7.642984390258789, 8.024930953979492, 7.717132568359375, 8.46409797668457, 7.520914077758789, 7.6541900634765625, 7.458925247192383, 7.671833038330078, 7.740020751953125, 7.771015167236328, 7.598161697387695, 8.411169052124023, 7.408857345581055, 7.469892501831055, 7.524967193603516, 7.764101028442383, 7.750988006591797, 7.2460174560546875, 6.927967071533203, 7.822990417480469, 7.27391242980957, 7.730960845947266, 7.4710845947265625, 7.4310302734375, 7.33494758605957, 7.455110549926758, 7.021188735961914, 7.083892822265625, 7.812976837158203, 7.625102996826172, 7.447957992553711, 7.179021835327148, 7.504940032958984, 7.224082946777344, 7.257938385009766, 7.714986801147461, 7.4939727783203125, 7.6160430908203125, 7.578849792480469, 7.890939712524414, 7.546901702880859, 7.411956787109375]
+
+ Median time (milliseconds):
+ 7.6071023941
+ ~~~
+
+Because the leaseholder for `vehicles` is in the same zone as the client request, this query took just 7.60ms compared to the similar query in New York that took 72.02ms.
+
+#### Writes
+
+The geographic distribution of data impacts write performance as well. For example, imagine 100 people in Seattle and 100 people in New York want to create new Movr accounts:
+
+1. SSH to the instance in `us-west1-a` with the Python client.
+
+1. Create 100 Seattle-based users:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ {{page.app}} \
+ --host= \
+ --statement="INSERT INTO users VALUES (gen_random_uuid(), 'seattle', 'Seatller', '111 East Street', '1736352379937347')" \
+ --repeat=100 \
+ --times
+ ~~~
+
+ ~~~
+ Times (milliseconds):
+ [277.4538993835449, 50.12702941894531, 47.75214195251465, 48.13408851623535, 47.872066497802734, 48.65407943725586, 47.78695106506348, 49.14689064025879, 52.770137786865234, 49.00097846984863, 48.68602752685547, 47.387123107910156, 47.36208915710449, 47.6841926574707, 46.49209976196289, 47.06096649169922, 46.753883361816406, 46.304941177368164, 48.90894889831543, 48.63715171813965, 48.37393760681152, 49.23295974731445, 50.13418197631836, 48.310041427612305, 48.57516288757324, 47.62911796569824, 47.77693748474121, 47.505855560302734, 47.89996147155762, 49.79205131530762, 50.76479911804199, 50.21500587463379, 48.73299598693848, 47.55592346191406, 47.35088348388672, 46.7071533203125, 43.00808906555176, 43.1060791015625, 46.02813720703125, 47.91092872619629, 68.71294975280762, 49.241065979003906, 48.9039421081543, 47.82295227050781, 48.26998710632324, 47.631025314331055, 64.51892852783203, 48.12812805175781, 67.33417510986328, 48.603057861328125, 50.31013488769531, 51.02396011352539, 51.45716667175293, 50.85396766662598, 49.07512664794922, 47.49894142150879, 44.67201232910156, 43.827056884765625, 44.412851333618164, 46.69189453125, 49.55601692199707, 49.16882514953613, 49.88598823547363, 49.31306838989258, 46.875, 46.69594764709473, 48.31886291503906, 48.378944396972656, 49.0570068359375, 49.417972564697266, 48.22111129760742, 50.662994384765625, 50.58097839355469, 75.44088363647461, 51.05400085449219, 50.85110664367676, 48.187971115112305, 56.7781925201416, 42.47403144836426, 46.2191104888916, 53.96890640258789, 46.697139739990234, 48.99096488952637, 49.1330623626709, 46.34690284729004, 47.09315299987793, 46.39410972595215, 46.51689529418945, 47.58000373840332, 47.924041748046875, 48.426151275634766, 50.22597312927246, 50.1859188079834, 50.37498474121094, 49.861907958984375, 51.477909088134766, 73.09293746948242, 48.779964447021484, 45.13692855834961, 42.2968864440918]
+
+ Median time (milliseconds):
+ 48.4025478363
+ ~~~
+
+1. SSH to the instance in `us-east1-b` with the Python client.
+
+1. Create 100 new NY-based users:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ {{page.app}} \
+ --host= \
+ --statement="INSERT INTO users VALUES (gen_random_uuid(), 'new york', 'New Yorker', '111 West Street', '9822222379937347')" \
+ --repeat=100 \
+ --times
+ ~~~
+
+ ~~~
+ Times (milliseconds):
+ [131.05082511901855, 116.88899993896484, 115.15498161315918, 117.095947265625, 121.04082107543945, 115.8750057220459, 113.80696296691895, 113.05880546569824, 118.41201782226562, 125.30899047851562, 117.5389289855957, 115.23890495300293, 116.84799194335938, 120.0411319732666, 115.62800407409668, 115.08989334106445, 113.37089538574219, 115.15498161315918, 115.96989631652832, 133.1961154937744, 114.25995826721191, 118.09396743774414, 122.24102020263672, 116.14608764648438, 114.80998992919922, 131.9139003753662, 114.54391479492188, 115.15307426452637, 116.7759895324707, 135.10799407958984, 117.18511581420898, 120.15485763549805, 118.0570125579834, 114.52388763427734, 115.28396606445312, 130.00011444091797, 126.45292282104492, 142.69423484802246, 117.60401725769043, 134.08493995666504, 117.47002601623535, 115.75007438659668, 117.98381805419922, 115.83089828491211, 114.88890647888184, 113.23404312133789, 121.1700439453125, 117.84791946411133, 115.35286903381348, 115.0820255279541, 116.99700355529785, 116.67394638061523, 116.1041259765625, 114.67289924621582, 112.98894882202148, 117.1119213104248, 119.78602409362793, 114.57300186157227, 129.58717346191406, 118.37983131408691, 126.68204307556152, 118.30306053161621, 113.27195167541504, 114.22920227050781, 115.80777168273926, 116.81294441223145, 114.76683616638184, 115.1430606842041, 117.29192733764648, 118.24417114257812, 116.56999588012695, 113.8620376586914, 114.88819122314453, 120.80597877502441, 132.39002227783203, 131.00910186767578, 114.56179618835449, 117.03896522521973, 117.72680282592773, 115.6010627746582, 115.27681350708008, 114.52317237854004, 114.87483978271484, 117.78903007507324, 116.65701866149902, 122.6949691772461, 117.65193939208984, 120.5449104309082, 115.61179161071777, 117.54202842712402, 114.70890045166016, 113.58809471130371, 129.7171115875244, 117.57993698120117, 117.1119213104248, 117.64001846313477, 140.66505432128906, 136.41691207885742, 116.24789237976074, 115.19908905029297]
+
+ Median time (milliseconds):
+ 116.868495941
+ ~~~
+
+It took 48.40ms to create a user in Seattle and 116.86ms to create a user in New York. To better understand this discrepancy, let's look at the distribution of data for the `users` table:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ cockroach sql \
+{{page.certs}} \
+--host= \
+--database=movr \
+--execute="SHOW EXPERIMENTAL_RANGES FROM TABLE users;"
+~~~
+
+~~~
+ start_key | end_key | range_id | replicas | lease_holder
++-----------+---------+----------+----------+--------------+
+ NULL | NULL | 49 | {2,6,8} | 6
+(1 row)
+~~~
+
+For the single range containing `users` data, one replica is in each zone, with the leaseholder in the `us-west1-a` zone. This means that:
+
+- When creating a user in Seattle, the request doesn't have to leave the zone to reach the leaseholder. However, since a write requires consensus from its replica group, the write has to wait for confirmation from either the replica in `us-west2-a` (Los Angeles) or `us-east1-b` (New York) before committing and then returning confirmation to the client.
+- When creating a user in New York, there are more network hops and, thus, increased latency. The request first needs to travel across the continent to the leaseholder in `us-west1-a`. It then has to wait for confirmation from either the replica in `us-west2-a` (Los Angeles) or `us-east1-b` (New York) before committing and then returning confirmation to the client back in the east.
diff --git a/src/current/_includes/v25.1/performance/transaction-retry-error-actions.md b/src/current/_includes/v25.1/performance/transaction-retry-error-actions.md
new file mode 100644
index 00000000000..b528f7b4f84
--- /dev/null
+++ b/src/current/_includes/v25.1/performance/transaction-retry-error-actions.md
@@ -0,0 +1,5 @@
+In most cases, the correct actions to take when encountering transaction retry errors are:
+
+1. Under `SERIALIZABLE` isolation, update your application to support [client-side retry handling]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#client-side-retry-handling) when transaction retry errors are encountered. Follow the guidance for the [specific error type]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#transaction-retry-error-reference). A minimal sketch of one retry pattern follows this list.
+
+1. Take steps to [minimize transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#minimize-transaction-retry-errors) in the first place. This means reducing transaction contention overall, and increasing the likelihood that CockroachDB can [automatically retry]({% link {{ page.version.version }}/transactions.md %}#automatic-retries) a failed transaction.
\ No newline at end of file
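+
+As a rough sketch, one client-side pattern uses the `cockroach_restart` savepoint protocol (your driver or framework may provide its own retry loop instead):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+BEGIN;
+SAVEPOINT cockroach_restart;
+-- Application statements go here. If any statement returns a retry error
+-- (SQLSTATE 40001), issue ROLLBACK TO SAVEPOINT cockroach_restart and
+-- re-run the statements rather than abandoning the transaction.
+RELEASE SAVEPOINT cockroach_restart;
+COMMIT;
+~~~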
diff --git a/src/current/_includes/v25.1/performance/tuning-secure.py b/src/current/_includes/v25.1/performance/tuning-secure.py
new file mode 100644
index 00000000000..a644dbb1c87
--- /dev/null
+++ b/src/current/_includes/v25.1/performance/tuning-secure.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+
+import argparse
+import psycopg2
+import time
+
+parser = argparse.ArgumentParser(
+ description="test performance of statements against movr database")
+parser.add_argument("--host", required=True,
+ help="ip address of one of the CockroachDB nodes")
+parser.add_argument("--statement", required=True,
+ help="statement to execute")
+parser.add_argument("--repeat", type=int,
+ help="number of times to repeat the statement", default = 20)
+parser.add_argument("--times",
+ help="print time for each repetition of the statement", action="store_true")
+parser.add_argument("--cumulative",
+ help="print cumulative time for all repetitions of the statement", action="store_true")
+args = parser.parse_args()
+
+conn = psycopg2.connect(
+ database='movr',
+ user='root',
+ host=args.host,
+ port=26257,
+ sslmode='require',
+ sslrootcert='certs/ca.crt',
+ sslkey='certs/client.root.key',
+ sslcert='certs/client.root.crt'
+)
+conn.set_session(autocommit=True)
+cur = conn.cursor()
+
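+# Return the median of a list of numbers (None if the list is empty).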
+def median(lst):
+ n = len(lst)
+ if n < 1:
+ return None
+ if n % 2 == 1:
+ return sorted(lst)[n//2]
+ else:
+ return sum(sorted(lst)[n//2-1:n//2+1])/2.0
+
+times = list()
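+# Run the statement the requested number of times; print the result rows only
+# on the first run, and record each run's elapsed time in milliseconds.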
+for n in range(args.repeat):
+ start = time.time()
+ statement = args.statement
+ cur.execute(statement)
+ if n < 1:
+ if cur.description is not None:
+ colnames = [desc[0] for desc in cur.description]
+ print("")
+ print("Result:")
+ print(colnames)
+ rows = cur.fetchall()
+ for row in rows:
+ print([str(cell) for cell in row])
+ end = time.time()
+ times.append((end - start)* 1000)
+
+cur.close()
+conn.close()
+
+print("")
+if args.times:
+ print("Times (milliseconds):")
+ print(times)
+ print("")
+# print("Average time (milliseconds):")
+# print(float(sum(times))/len(times))
+# print("")
+print("Median time (milliseconds):")
+print(median(times))
+print("")
+if args.cumulative:
+ print("Cumulative time (milliseconds):")
+ print(sum(times))
+ print("")
diff --git a/src/current/_includes/v25.1/performance/tuning.py b/src/current/_includes/v25.1/performance/tuning.py
new file mode 100644
index 00000000000..dcb567dad91
--- /dev/null
+++ b/src/current/_includes/v25.1/performance/tuning.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+
+import argparse
+import psycopg2
+import time
+
+parser = argparse.ArgumentParser(
+ description="test performance of statements against movr database")
+parser.add_argument("--host", required=True,
+ help="ip address of one of the CockroachDB nodes")
+parser.add_argument("--statement", required=True,
+ help="statement to execute")
+parser.add_argument("--repeat", type=int,
+ help="number of times to repeat the statement", default = 20)
+parser.add_argument("--times",
+ help="print time for each repetition of the statement", action="store_true")
+parser.add_argument("--cumulative",
+ help="print cumulative time for all repetitions of the statement", action="store_true")
+args = parser.parse_args()
+
+conn = psycopg2.connect(
+ database='movr',
+ user='root',
+ host=args.host,
+ port=26257
+)
+conn.set_session(autocommit=True)
+cur = conn.cursor()
+
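+# Return the median of a list of numbers (None if the list is empty).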
+def median(lst):
+ n = len(lst)
+ if n < 1:
+ return None
+ if n % 2 == 1:
+ return sorted(lst)[n//2]
+ else:
+ return sum(sorted(lst)[n//2-1:n//2+1])/2.0
+
+times = list()
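+# Run the statement the requested number of times; print the result rows only
+# on the first run, and record each run's elapsed time in milliseconds.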
+for n in range(args.repeat):
+ start = time.time()
+ statement = args.statement
+ cur.execute(statement)
+ if n < 1:
+ if cur.description is not None:
+ colnames = [desc[0] for desc in cur.description]
+ print("")
+ print("Result:")
+ print(colnames)
+ rows = cur.fetchall()
+ for row in rows:
+ print([str(cell) for cell in row])
+ end = time.time()
+ times.append((end - start)* 1000)
+
+cur.close()
+conn.close()
+
+print("")
+if args.times:
+ print("Times (milliseconds):")
+ print(times)
+ print("")
+# print("Average time (milliseconds):")
+# print(float(sum(times))/len(times))
+# print("")
+print("Median time (milliseconds):")
+print(median(times))
+print("")
+if args.cumulative:
+ print("Cumulative time (milliseconds):")
+ print(sum(times))
+ print("")
diff --git a/src/current/_includes/v25.1/performance/use-hash-sharded-indexes.md b/src/current/_includes/v25.1/performance/use-hash-sharded-indexes.md
new file mode 100644
index 00000000000..ca6132d8de6
--- /dev/null
+++ b/src/current/_includes/v25.1/performance/use-hash-sharded-indexes.md
@@ -0,0 +1 @@
+We [discourage indexing on sequential keys]({% link {{ page.version.version }}/schema-design-indexes.md %}#best-practices). If a table **must** be indexed on sequential keys, use [hash-sharded indexes]({% link {{ page.version.version }}/hash-sharded-indexes.md %}). Hash-sharded indexes distribute sequential traffic uniformly across ranges, eliminating single-range [hot spots]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#hot-spots) and improving write performance on sequentially-keyed indexes at a small cost to read performance.
\ No newline at end of file
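+
+For example, here is a minimal sketch of a hash-sharded secondary index (the table and column names are illustrative):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+-- Distribute writes on the sequential ts column across multiple ranges.
+CREATE INDEX ON events (ts) USING HASH;
+~~~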
diff --git a/src/current/_includes/v25.1/physical-replication/alter-virtual-cluster-diagram.html b/src/current/_includes/v25.1/physical-replication/alter-virtual-cluster-diagram.html
new file mode 100644
index 00000000000..c5400f6e9ed
--- /dev/null
+++ b/src/current/_includes/v25.1/physical-replication/alter-virtual-cluster-diagram.html
@@ -0,0 +1,431 @@
+
+
+
+
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/physical-replication/create-virtual-cluster-diagram.html b/src/current/_includes/v25.1/physical-replication/create-virtual-cluster-diagram.html
new file mode 100644
index 00000000000..2f0eff15eb9
--- /dev/null
+++ b/src/current/_includes/v25.1/physical-replication/create-virtual-cluster-diagram.html
@@ -0,0 +1,171 @@
+
+
+
+
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/physical-replication/drop-virtual-cluster-diagram.html b/src/current/_includes/v25.1/physical-replication/drop-virtual-cluster-diagram.html
new file mode 100644
index 00000000000..df3fc0eb983
--- /dev/null
+++ b/src/current/_includes/v25.1/physical-replication/drop-virtual-cluster-diagram.html
@@ -0,0 +1,65 @@
+
+
+
+
diff --git a/src/current/_includes/v25.1/physical-replication/failover-read-virtual-cluster.md b/src/current/_includes/v25.1/physical-replication/failover-read-virtual-cluster.md
new file mode 100644
index 00000000000..7430d38f9a9
--- /dev/null
+++ b/src/current/_includes/v25.1/physical-replication/failover-read-virtual-cluster.md
@@ -0,0 +1 @@
+If you started the PCR stream with the `READ VIRTUAL CLUSTER` option, failing over with `SYSTEM TIME` will destroy the `readonly` virtual cluster. If you fail over with `LATEST`, the `readonly` virtual cluster will remain on the original standby cluster, but will **not** update with new writes.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/physical-replication/fast-failback-latest-timestamp.md b/src/current/_includes/v25.1/physical-replication/fast-failback-latest-timestamp.md
new file mode 100644
index 00000000000..b5c5234fbb4
--- /dev/null
+++ b/src/current/_includes/v25.1/physical-replication/fast-failback-latest-timestamp.md
@@ -0,0 +1 @@
+When you [fail back]({% link {{ page.version.version }}/failover-replication.md %}#fail-back-to-the-primary-cluster) to a cluster that was previously the primary cluster, you should fail over to the `LATEST` timestamp. Using a [historical timestamp]({% link {{ page.version.version }}/as-of-system-time.md %}) may cause the failback to fail. {% if page.name == "failover-replication.md" %} Refer to the [PCR known limitations]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}#known-limitations).{% endif %}
diff --git a/src/current/_includes/v25.1/physical-replication/fast-failback-syntax.md b/src/current/_includes/v25.1/physical-replication/fast-failback-syntax.md
new file mode 100644
index 00000000000..6bf2a8f03bc
--- /dev/null
+++ b/src/current/_includes/v25.1/physical-replication/fast-failback-syntax.md
@@ -0,0 +1,8 @@
+To {% if page.name == "alter-virtual-cluster.md" %} [fail back]({% link {{ page.version.version }}/failover-replication.md %}#fail-back-to-the-primary-cluster) {% else %} fail back {% endif %} to a cluster that was previously the primary cluster, use the {% if page.name == "alter-virtual-cluster.md" %} `ALTER VIRTUAL CLUSTER` {% else %} [`ALTER VIRTUAL CLUSTER`]({% link {{ page.version.version }}/alter-virtual-cluster.md %}) {% endif %} syntax:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+ALTER VIRTUAL CLUSTER {original_primary_vc} START REPLICATION OF {promoted_standby_vc} ON {connection_string_standby};
+~~~
+
+The original primary virtual cluster may be almost up to date with the promoted standby's virtual cluster. The difference in data between the two virtual clusters will include only the writes that have been applied to the promoted standby after failover from the primary cluster.
diff --git a/src/current/_includes/v25.1/physical-replication/interface-virtual-cluster.md b/src/current/_includes/v25.1/physical-replication/interface-virtual-cluster.md
new file mode 100644
index 00000000000..02890c3fc83
--- /dev/null
+++ b/src/current/_includes/v25.1/physical-replication/interface-virtual-cluster.md
@@ -0,0 +1,2 @@
+- The system virtual cluster manages the cluster's control plane and the replication of the cluster's data. Admins connect to the system virtual cluster to configure and manage the underlying CockroachDB cluster, set up PCR, create and manage a virtual cluster, and observe metrics and logs for the CockroachDB cluster and each virtual cluster.
+- Each non-system virtual cluster manages its own data plane. Users connect to a virtual cluster by default, rather than to the system virtual cluster. To connect to the system virtual cluster, the connection string must be modified. Virtual clusters contain user data and run application workloads. When PCR is enabled, the non-system virtual cluster on both the primary and standby clusters is named `main`.
diff --git a/src/current/_includes/v25.1/physical-replication/like-description.md b/src/current/_includes/v25.1/physical-replication/like-description.md
new file mode 100644
index 00000000000..5922c4a6463
--- /dev/null
+++ b/src/current/_includes/v25.1/physical-replication/like-description.md
@@ -0,0 +1 @@
+Including the `LIKE template` parameter ensures that the virtual cluster on the standby is created with the correct capabilities, which manage what the virtual cluster can do. `LIKE` will refer to a virtual cluster on the CockroachDB cluster you're running the statement from.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/physical-replication/phys-rep-sql-pages.md b/src/current/_includes/v25.1/physical-replication/phys-rep-sql-pages.md
new file mode 100644
index 00000000000..562905ff97e
--- /dev/null
+++ b/src/current/_includes/v25.1/physical-replication/phys-rep-sql-pages.md
@@ -0,0 +1,5 @@
+PCR happens between an _active_ primary cluster and a _passive_ standby cluster that accepts updates from the primary cluster. The unit of replication is a _virtual cluster_, which is part of the underlying infrastructure in the primary and standby clusters. The CockroachDB cluster has:
+
+{% include {{ page.version.version }}/physical-replication/interface-virtual-cluster.md %}
+
+For more detail, refer to the [Physical Cluster Replication Overview]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}).
diff --git a/src/current/_includes/v25.1/physical-replication/reference-links-replication.md b/src/current/_includes/v25.1/physical-replication/reference-links-replication.md
new file mode 100644
index 00000000000..5d7b017b9fb
--- /dev/null
+++ b/src/current/_includes/v25.1/physical-replication/reference-links-replication.md
@@ -0,0 +1,4 @@
+{% comment %}
+- Cluster virtualization: The primary and standby clusters are started as virtualized clusters.
+- [Physical Cluster Replication Technical Overview]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}) page.
+{% endcomment %}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/physical-replication/retention.md b/src/current/_includes/v25.1/physical-replication/retention.md
new file mode 100644
index 00000000000..303fe6ebc79
--- /dev/null
+++ b/src/current/_includes/v25.1/physical-replication/retention.md
@@ -0,0 +1 @@
+We do not recommend setting `RETENTION` much higher than the 24-hour default on the standby cluster. Accumulated data from an excessive [retention (failover) window]({% link {{ page.version.version }}/physical-cluster-replication-technical-overview.md %}#failover-and-promotion-process) could affect queries running on the standby cluster that is active following a [failover]({% link {{ page.version.version }}/failover-replication.md %}).
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/physical-replication/show-virtual-cluster-data-state.md b/src/current/_includes/v25.1/physical-replication/show-virtual-cluster-data-state.md
new file mode 100644
index 00000000000..16f858ef621
--- /dev/null
+++ b/src/current/_includes/v25.1/physical-replication/show-virtual-cluster-data-state.md
@@ -0,0 +1,10 @@
+State | Description
+-----------+----------------
+`add` | ([**Preview**]({% link {{ page.version.version }}/cockroachdb-feature-availability.md %}#features-in-preview)) The [`readonly` virtual cluster]({% link {{ page.version.version }}/create-virtual-cluster.md %}#start-a-pcr-stream-with-read-from-standby) is waiting for the PCR job's initial scan to complete; once the scan finishes, `readonly` will be available for read queries.
+`initializing replication` | The replication job is completing the initial scan of data from the primary cluster before it starts replicating data in real time.
+`ready` | A virtual cluster's data is ready for use. The `readonly` virtual cluster is ready to serve read queries.
+`replicating` | The replication job has started and is replicating data.
+`replication paused` | The replication job is paused due to an error or a manual request with [`ALTER VIRTUAL CLUSTER ... PAUSE REPLICATION`]({% link {{ page.version.version }}/alter-virtual-cluster.md %}).
+`replication pending failover` | The replication job is running and the failover time has been set. Once the replication reaches the failover time, the failover will begin automatically.
+`replication failing over` | The job has started failing over. The failover time can no longer be changed. Once failover is complete, a virtual cluster will be available for use with [`ALTER VIRTUAL CLUSTER ... START SERVICE SHARED`]({% link {{ page.version.version }}/alter-virtual-cluster.md %}).
+`replication error` | An error has occurred. You can find more detail in the error message and the [logs]({% link {{ page.version.version }}/configure-logs.md %}). **Note:** A PCR job will retry for 3 minutes before failing.
diff --git a/src/current/_includes/v25.1/physical-replication/show-virtual-cluster-diagram.html b/src/current/_includes/v25.1/physical-replication/show-virtual-cluster-diagram.html
new file mode 100644
index 00000000000..f9d31ede888
--- /dev/null
+++ b/src/current/_includes/v25.1/physical-replication/show-virtual-cluster-diagram.html
@@ -0,0 +1,89 @@
+
+
+
+
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/physical-replication/show-virtual-cluster-responses.md b/src/current/_includes/v25.1/physical-replication/show-virtual-cluster-responses.md
new file mode 100644
index 00000000000..97c962a2547
--- /dev/null
+++ b/src/current/_includes/v25.1/physical-replication/show-virtual-cluster-responses.md
@@ -0,0 +1,15 @@
+Field | Response
+---------+----------
+`id` | The ID of a virtual cluster.
+`name` | The name of the standby (destination) virtual cluster.
+`data_state` | The state of the data on a virtual cluster. This can show one of the following: `initializing replication`, `ready`, `replicating`, `replication paused`, `replication pending failover`, `replication failing over`, `replication error`. Refer to [Data state](#data-state) for more detail on each response.
+`service_mode` | The service mode shows whether a virtual cluster is ready to accept SQL requests. This can show `none` or `shared`. When `shared`, a virtual cluster's SQL connections will be served by the same nodes that are serving the system virtual cluster.
+`source_tenant_name` | The name of the primary (source) virtual cluster.
+`source_cluster_uri` | The URI of the primary (source) cluster. The standby cluster connects to the primary cluster using this URI when [starting a replication stream]({% link {{ page.version.version }}/set-up-physical-cluster-replication.md %}#step-4-start-replication).
+`replicated_time` | The latest timestamp at which the standby cluster has consistent data — that is, the latest time you can fail over to. This time advances automatically as long as the replication proceeds without error. `replicated_time` is updated periodically (every `30s`).
+`retained_time` | The earliest timestamp at which the standby cluster has consistent data — that is, the earliest time you can fail over to.
+`replication_lag` | The time between the most up-to-date replicated time and the actual time. Refer to the [Technical Overview]({% link {{ page.version.version }}/physical-cluster-replication-technical-overview.md %}) for more detail.
+`failover_time` | The time at which the failover will begin. This can be in the past or the future. Refer to [Fail over to a point in time]({% link {{ page.version.version }}/failover-replication.md %}#fail-over-to-a-point-in-time).
+`status` | The status of the replication stream. This can show one of the following: `initializing replication`, `ready`, `replicating`, `replication paused`, `replication pending failover`, `replication failing over`, `replication error`. Refer to [Data state](#data-state) for more detail on each response.
+`capability_name` | The [capability]({% link {{ page.version.version }}/create-virtual-cluster.md %}#capabilities) name.
+`capability_value` | Whether the [capability]({% link {{ page.version.version }}/create-virtual-cluster.md %}#capabilities) is enabled for a virtual cluster.
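+
+For example, to view these fields for a standby virtual cluster, run a statement like the following from the standby cluster's system virtual cluster (the virtual cluster name `main` is a placeholder for your own):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SHOW VIRTUAL CLUSTER main WITH REPLICATION STATUS;
+~~~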
diff --git a/src/current/_includes/v25.1/physical-replication/template-description.md b/src/current/_includes/v25.1/physical-replication/template-description.md
new file mode 100644
index 00000000000..233b31f99b1
--- /dev/null
+++ b/src/current/_includes/v25.1/physical-replication/template-description.md
@@ -0,0 +1 @@
+The [configuration profile](#start-the-standby-cluster) included at startup creates the `template` virtual cluster with the same set of _capabilities_ per CockroachDB version. When you start a replication stream, you can specify the `template` VC with `LIKE` to ensure other virtual clusters on the standby cluster will work in the same way. Refer to [Step 4: Start replication](#step-4-start-replication) for syntax details.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/advertise-addr-join.md b/src/current/_includes/v25.1/prod-deployment/advertise-addr-join.md
new file mode 100644
index 00000000000..2c8d39660fb
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/advertise-addr-join.md
@@ -0,0 +1,4 @@
+Flag | Description
+-----|------------
+`--advertise-addr` | Specifies the IP address/hostname and port to tell other nodes to use. The port number can be omitted, in which case it defaults to `26257`.<br><br>This value must route to an IP address the node is listening on (with `--listen-addr` unspecified, the node listens on all IP addresses).<br><br>In some networking scenarios, you may need to use `--advertise-addr` and/or `--listen-addr` differently. For more details, see [Networking]({% link {{ page.version.version }}/recommended-production-settings.md %}#networking).
+`--join` | Identifies the address of 3-5 of the initial nodes of the cluster. These addresses should match the addresses that the target nodes are advertising.
diff --git a/src/current/_includes/v25.1/prod-deployment/aws-inbound-rules.md b/src/current/_includes/v25.1/prod-deployment/aws-inbound-rules.md
new file mode 100644
index 00000000000..8be748205a6
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/aws-inbound-rules.md
@@ -0,0 +1,31 @@
+#### Inter-node and load balancer-node communication
+
+ Field | Value
+-------|-------------------
+ Port Range | **26257**
+ Source | The ID of your security group (e.g., *sg-07ab277a*)
+
+#### Application data
+
+ Field | Value
+-------|-------------------
+ Port Range | **26257**
+ Source | Your application's IP ranges
+
+#### DB Console
+
+ Field | Value
+-------|-------------------
+ Port Range | **8080**
+ Source | Your network's IP ranges
+
+You can set your network IP by selecting "My IP" in the Source field.
+
+#### Load balancer-health check communication
+
+ Field | Value
+-------|-------------------
+ Port Range | **8080**
+ Source | The IP range of your VPC in CIDR notation (e.g., 10.12.0.0/16)
+
+ To get the IP range of a VPC, open the [Amazon VPC console](https://console.aws.amazon.com/vpc/) and find the VPC listed in the section called Your VPCs.
\ No newline at end of file
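+
+If you prefer to script these rules, the equivalent [AWS CLI](https://docs.aws.amazon.com/cli/) calls look roughly like the following. The security group ID and CIDR range reuse the example values above; substitute your own:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+# Inter-node and load balancer-node communication on 26257, scoped to the security group itself.
+aws ec2 authorize-security-group-ingress --group-id sg-07ab277a --protocol tcp --port 26257 --source-group sg-07ab277a
+# Load balancer health checks on 8080 from the VPC's CIDR range.
+aws ec2 authorize-security-group-ingress --group-id sg-07ab277a --protocol tcp --port 8080 --cidr 10.12.0.0/16
+~~~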
diff --git a/src/current/_includes/v25.1/prod-deployment/backup.sh b/src/current/_includes/v25.1/prod-deployment/backup.sh
new file mode 100644
index 00000000000..efcbd4c7041
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/backup.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# This script creates a full backup the first time it runs in a given week and
+# incremental backups on subsequent runs in that week, because the backup
+# destination includes the current year and week number.
+
+what="" # Leave empty for cluster backup, or add "DATABASE database_name" to backup a database.
+base="/backups" # The URL where you want to store the backup.
+extra="" # Any additional parameters that need to be appended to the BACKUP URI e.g., AWS key params.
+recent=recent_backups.txt # File in which recent backups are tracked.
+backup_parameters= # e.g., "WITH revision_history"
+
+# Customize the `cockroach sql` command with `--host`, `--certs-dir` or `--insecure`, `--port`, and additional flags as needed to connect to the SQL client.
+runsql() { cockroach sql --insecure -e "$1"; }
+
+destination="${base}/$(date +"%Y-%V")${extra}" # %V is the week number of the year, with Monday as the first day of the week.
+
+runsql "BACKUP $what TO '$destination' AS OF SYSTEM TIME '-1m' $backup_parameters"
+echo "backed up to ${destination}"
diff --git a/src/current/_includes/v25.1/prod-deployment/check-sql-query-performance.md b/src/current/_includes/v25.1/prod-deployment/check-sql-query-performance.md
new file mode 100644
index 00000000000..5fffceed436
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/check-sql-query-performance.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+If you aren't sure whether SQL query performance needs to be improved on your cluster, see [Identify slow statements]({% link {{ page.version.version }}/query-behavior-troubleshooting.md %}#identify-slow-queries).
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/prod-deployment/cloud-report.md b/src/current/_includes/v25.1/prod-deployment/cloud-report.md
new file mode 100644
index 00000000000..aa2a765af6e
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/cloud-report.md
@@ -0,0 +1 @@
+Cockroach Labs creates a yearly cloud report focused on evaluating hardware performance. For more information, see the [2022 Cloud Report](https://www.cockroachlabs.com/guides/2022-cloud-report/).
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/cluster-unavailable-monitoring.md b/src/current/_includes/v25.1/prod-deployment/cluster-unavailable-monitoring.md
new file mode 100644
index 00000000000..70f7e08e47f
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/cluster-unavailable-monitoring.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+If the cluster becomes unavailable, the DB Console and Cluster API will also become unavailable. You can continue to monitor the cluster via the [Prometheus endpoint]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#prometheus-endpoint) and [logs]({% link {{ page.version.version }}/logging-overview.md %}).
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/decommission-pre-flight-checks.md b/src/current/_includes/v25.1/prod-deployment/decommission-pre-flight-checks.md
new file mode 100644
index 00000000000..b267379384b
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/decommission-pre-flight-checks.md
@@ -0,0 +1,18 @@
+By default, CockroachDB performs a set of "decommissioning pre-flight checks": it examines the ranges with replicas on the to-be-decommissioned node and verifies that each replica can be moved to some other node in the cluster. If errors are detected that would prevent the decommission from completing, they are printed to `STDERR` and the command exits *without attempting to perform node decommissioning*. For example, a range that requires a certain number of voting replicas in a region, but has no available node in that region that does not already hold a replica, will block the decommissioning process.
+
+The error format is shown below:
+
+~~~
+ranges blocking decommission detected
+n1 has 44 replicas blocked with error: "0 of 1 live stores are able to take a new replica for the range (2 already have a voter, 0 already have a non-voter); likely not enough nodes in cluster"
+n2 has 27 replicas blocked with error: "0 of 1 live stores are able to take a new replica for the range (2 already have a voter, 0 already have a non-voter); likely not enough nodes in cluster"
+
+ERROR: Cannot decommission nodes.
+Failed running "node decommission"
+~~~
+
+These checks can be skipped by [passing the flag `--checks=skip` to `cockroach node decommission`]({% link {{ page.version.version }}/cockroach-node.md %}#decommission-checks).
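+
+For example, to decommission node `4` and skip the pre-flight checks (the node ID and `--host` value are placeholders; add `--certs-dir` or `--insecure` as appropriate for your cluster):
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+cockroach node decommission 4 --checks=skip --host=<address of any live node>
+~~~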
+
+{{site.data.alerts.callout_info}}
+The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. For more information, see [cockroachdb/cockroach#71757](https://github.com/cockroachdb/cockroach/issues/71757).
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/prod-deployment/healthy-command-commit-latency.md b/src/current/_includes/v25.1/prod-deployment/healthy-command-commit-latency.md
new file mode 100644
index 00000000000..63fd751610c
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/healthy-command-commit-latency.md
@@ -0,0 +1 @@
+**Expected values for a healthy cluster**: On SSDs ([strongly recommended]({% link {{ page.version.version }}/recommended-production-settings.md %}#storage)), this should be between 1 and 100 milliseconds. On HDDs, this should be no more than 1 second.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/healthy-cpu-percent.md b/src/current/_includes/v25.1/prod-deployment/healthy-cpu-percent.md
new file mode 100644
index 00000000000..a58b0b87973
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/healthy-cpu-percent.md
@@ -0,0 +1 @@
+**Expected values for a healthy cluster**: CPU utilized by CockroachDB should not persistently exceed 80%. Because this metric does not reflect system CPU usage, values above 80% suggest that actual CPU utilization is nearing 100%.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/healthy-crdb-memory.md b/src/current/_includes/v25.1/prod-deployment/healthy-crdb-memory.md
new file mode 100644
index 00000000000..9d682d3cfb0
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/healthy-crdb-memory.md
@@ -0,0 +1 @@
+**Expected values for a healthy cluster**: Go Allocated will depend on workload but should not exceed [`--max-sql-memory`]({% link {{ page.version.version }}/cockroach-start.md %}#flags) by more than 100%. CGo Allocated should not exceed the [`--cache`]({% link {{ page.version.version }}/cockroach-start.md %}#flags) size and CGo Total should not exceed the [`--cache`]({% link {{ page.version.version }}/cockroach-start.md %}#flags) size by more than 15%.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/healthy-disk-ops-in-progress.md b/src/current/_includes/v25.1/prod-deployment/healthy-disk-ops-in-progress.md
new file mode 100644
index 00000000000..e80714df120
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/healthy-disk-ops-in-progress.md
@@ -0,0 +1 @@
+**Expected values for a healthy cluster**: This value should be `0`, or in the single digits for short periods of time. If it persists in the double digits, you may have an I/O bottleneck.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/healthy-lsm.md b/src/current/_includes/v25.1/prod-deployment/healthy-lsm.md
new file mode 100644
index 00000000000..67ca6f36420
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/healthy-lsm.md
@@ -0,0 +1 @@
+**Expected values for a healthy cluster**: An IO Overload value greater than 1.0 generally indicates an overload in the Pebble LSM tree. High values indicate heavy write load that is causing accumulation of files in level 0. These files are not being compacted quickly enough to lower levels, resulting in a [misshapen LSM]({% link {{ page.version.version }}/architecture/storage-layer.md %}#inverted-lsms).
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/healthy-node-heartbeat-latency.md b/src/current/_includes/v25.1/prod-deployment/healthy-node-heartbeat-latency.md
new file mode 100644
index 00000000000..982514be9c9
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/healthy-node-heartbeat-latency.md
@@ -0,0 +1 @@
+**Expected values for a healthy cluster**: Less than 100ms in addition to the [network latency]({% link {{ page.version.version }}/ui-network-latency-page.md %}) between nodes in the cluster.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/healthy-read-amplification.md b/src/current/_includes/v25.1/prod-deployment/healthy-read-amplification.md
new file mode 100644
index 00000000000..c7ffe9c6d17
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/healthy-read-amplification.md
@@ -0,0 +1 @@
+**Expected values for a healthy cluster**: Read amplification factor should be in the single digits. A value exceeding 50 for 1 hour strongly suggests that the LSM tree has an unhealthy shape.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/healthy-sql-memory.md b/src/current/_includes/v25.1/prod-deployment/healthy-sql-memory.md
new file mode 100644
index 00000000000..968b79b0b61
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/healthy-sql-memory.md
@@ -0,0 +1 @@
+**Expected values for a healthy cluster**: This value should not exceed the [`--max-sql-memory`]({% link {{ page.version.version }}/recommended-production-settings.md %}#cache-and-sql-memory-size) size. A healthy threshold is 75% of allocated `--max-sql-memory`.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/healthy-storage-capacity.md b/src/current/_includes/v25.1/prod-deployment/healthy-storage-capacity.md
new file mode 100644
index 00000000000..bd8c44e1a31
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/healthy-storage-capacity.md
@@ -0,0 +1 @@
+**Expected values for a healthy cluster**: Used capacity should not persistently exceed 80% of the total capacity.
diff --git a/src/current/_includes/v25.1/prod-deployment/healthy-workload-concurrency.md b/src/current/_includes/v25.1/prod-deployment/healthy-workload-concurrency.md
new file mode 100644
index 00000000000..8c0c8e1ffc8
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/healthy-workload-concurrency.md
@@ -0,0 +1 @@
+**Expected values for a healthy cluster**: At any time, the total number of connections actively executing SQL statements should not exceed 4 times the number of vCPUs in the cluster. You can find the number of connections actively executing statements in the Active Executions view in the [DB Console]({% link {{ page.version.version }}/ui-statements-page.md %}) or [Cloud Console]({% link cockroachcloud/statements-page.md %}). You can find the number of open connections in the [DB Console]({% link {{ page.version.version }}/ui-sql-dashboard.md %}#open-sql-sessions) or [Cloud Console]({% link cockroachcloud/metrics-sql.md %}#open-sql-sessions). For more details on configuring connection pools, see [Size connection pools](connection-pooling.html#size-connection-pools).
\ No newline at end of file
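+
+For example, in a hypothetical cluster of 3 nodes with 8 vCPUs each (24 vCPUs total), the number of connections actively executing SQL statements at any point in time should stay below roughly 4 × 24 = 96.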
diff --git a/src/current/_includes/v25.1/prod-deployment/insecure-flag.md b/src/current/_includes/v25.1/prod-deployment/insecure-flag.md
new file mode 100644
index 00000000000..a13951ba4bc
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/insecure-flag.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_danger}}
+The `--insecure` flag used in this tutorial is intended for non-production testing only. To run CockroachDB in production, use a secure cluster instead.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/prod-deployment/insecure-initialize-cluster.md b/src/current/_includes/v25.1/prod-deployment/insecure-initialize-cluster.md
new file mode 100644
index 00000000000..01cbad5a6ac
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/insecure-initialize-cluster.md
@@ -0,0 +1,12 @@
+On your local machine, complete the node startup process and have them join together as a cluster:
+
+1. [Install CockroachDB]({% link {{ page.version.version }}/install-cockroachdb.md %}) on your local machine, if you haven't already.
+
+1. Run the [`cockroach init`]({% link {{ page.version.version }}/cockroach-init.md %}) command, with the `--host` flag set to the address of any node:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach init --insecure --host=<address of any node>
+ ~~~
+
+ Each node then prints helpful details to the [standard output]({% link {{ page.version.version }}/cockroach-start.md %}#standard-output), such as the CockroachDB version, the URL for the DB Console, and the SQL URL for clients.
diff --git a/src/current/_includes/v25.1/prod-deployment/insecure-recommendations.md b/src/current/_includes/v25.1/prod-deployment/insecure-recommendations.md
new file mode 100644
index 00000000000..5ca9e9d4175
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/insecure-recommendations.md
@@ -0,0 +1,13 @@
+- Consider using a [secure cluster]({% link {{ page.version.version }}/manual-deployment.md %}) instead. Using an insecure cluster comes with risks:
+ - Your cluster is open to any client that can access any node's IP addresses.
+ - Any user, even `root`, can log in without providing a password.
+ - Any user, connecting as `root`, can read or write any data in your cluster.
+ - There is no network encryption or authentication, and thus no confidentiality.
+
+- Decide how you want to access your DB Console:
+
+ Access Level | Description
+ -------------|------------
+ Partially open | Set a firewall rule to allow only specific IP addresses to communicate on port `8080`.
+ Completely open | Set a firewall rule to allow all IP addresses to communicate on port `8080`.
+ Completely closed | Set a firewall rule to disallow all communication on port `8080`. In this case, a machine with SSH access to a node could use an SSH tunnel to access the DB Console.
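+
+ For example, for the "Partially open" option on a Linux host that uses `ufw`, a rule might look like the following (the IP range is a placeholder; on cloud deployments, you would typically configure the provider's firewall rules instead):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ sudo ufw allow from 203.0.113.0/24 to any port 8080 proto tcp
+ ~~~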
diff --git a/src/current/_includes/v25.1/prod-deployment/insecure-requirements.md b/src/current/_includes/v25.1/prod-deployment/insecure-requirements.md
new file mode 100644
index 00000000000..3334d2955b9
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/insecure-requirements.md
@@ -0,0 +1,9 @@
+- You must have [SSH access]({{page.ssh-link}}) to each machine. This is necessary for distributing and starting CockroachDB binaries.
+
+- Your network configuration must allow TCP communication on the following ports:
+ - `26257` for intra-cluster and client-cluster communication
+ - `8080` to expose your DB Console
+
+- Carefully review the [Production Checklist]({% link {{ page.version.version }}/recommended-production-settings.md %}) and recommended [Topology Patterns]({% link {{ page.version.version }}/topology-patterns.md %}).
+
+{% include {{ page.version.version }}/prod-deployment/topology-recommendations.md %}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/insecure-scale-cluster.md b/src/current/_includes/v25.1/prod-deployment/insecure-scale-cluster.md
new file mode 100644
index 00000000000..aaf0e5f7688
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/insecure-scale-cluster.md
@@ -0,0 +1,120 @@
+You can start the nodes manually or automate the process using [systemd](https://www.freedesktop.org/wiki/Software/systemd/).
+
+
+
+
+
+
+
+
+
+For each additional node you want to add to the cluster, complete the following steps:
+
+1. SSH to the machine where you want the node to run.
+
+1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, and extract the binary:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz \
+ | tar -xz
+ ~~~
+
+1. Copy the binary into the `PATH`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/
+ ~~~
+
+ If you get a permissions error, prefix the command with `sudo`.
+
+1. Run the [`cockroach start`]({% link {{ page.version.version }}/cockroach-start.md %}) command, passing the new node's address as the `--advertise-addr` flag and pointing `--join` to the three existing nodes (also include `--locality` if you set it earlier).
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach start \
+ --insecure \
+ --advertise-addr=<node4 address> \
+ --join=<node1 address>,<node2 address>,<node3 address> \
+ --cache=.25 \
+ --max-sql-memory=.25 \
+ --background
+ ~~~
+
+1. Update your load balancer to recognize the new node.
+
+
+
+
+
+For each additional node you want to add to the cluster, complete the following steps:
+
+1. SSH to the machine where you want the node to run. Ensure you are logged in as the `root` user.
+
+1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, and extract the binary:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ curl -o cockroach-{{ page.release_info.version }}.linux-amd64.tgz https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz; tar xzvf cockroach-{{ page.release_info.version }}.linux-amd64.tgz
+ ~~~
+
+1. Copy the binary into the `PATH`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/
+ ~~~
+
+ If you get a permissions error, prefix the command with `sudo`.
+
+1. Create the Cockroach directory:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ mkdir /var/lib/cockroach
+ ~~~
+
+1. Create a Unix user named `cockroach`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ useradd cockroach
+ ~~~
+
+1. Change the ownership of the `cockroach` directory to the user `cockroach`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ chown cockroach /var/lib/cockroach
+ ~~~
+
+1. Download the [sample configuration template](https://raw.githubusercontent.com/cockroachdb/docs/master/_includes/{{ page.version.version }}/prod-deployment/insecurecockroachdb.service):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ curl -o insecurecockroachdb.service https://raw.githubusercontent.com/cockroachdb/docs/master/_includes/{{ page.version.version }}/prod-deployment/insecurecockroachdb.service
+ ~~~
+
+ Alternatively, you can create the file yourself and copy the script into it:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ {% include {{ page.version.version }}/prod-deployment/insecurecockroachdb.service %}
+ ~~~
+
+ {{site.data.alerts.callout_info}}
+ Previously, the sample configuration file set `TimeoutStopSec` to 60 seconds. The recommended value has been increased to 300 seconds to give the `cockroach` process more time to stop gracefully.
+ {{site.data.alerts.end}}
+
+ Save the file in the `/etc/systemd/system/` directory.
+
+1. Customize the sample configuration template for your deployment:
+
+ Specify values for the following flags:
+
+ {% include {{ page.version.version }}/prod-deployment/advertise-addr-join.md %}
+
+1. Repeat these steps for each additional node that you want in your cluster.
+
+
diff --git a/src/current/_includes/v25.1/prod-deployment/insecure-start-nodes.md b/src/current/_includes/v25.1/prod-deployment/insecure-start-nodes.md
new file mode 100644
index 00000000000..75d0de816b5
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/insecure-start-nodes.md
@@ -0,0 +1,165 @@
+You can start the nodes manually or automate the process using [systemd](https://www.freedesktop.org/wiki/Software/systemd/).
+
+
+
+
+
+
+
+
+
+For each initial node of your cluster, complete the following steps:
+
+{{site.data.alerts.callout_info}}
+After completing these steps, nodes will not yet be live. They will complete the startup process and join together to form a cluster as soon as the cluster is initialized in the next step.
+{{site.data.alerts.end}}
+
+1. Visit [Releases]({% link releases/index.md %}) and download the full binary of CockroachDB to the node.
+
+1. On the node, follow the instructions to [install CockroachDB]({% link {{ page.version.version }}/install-cockroachdb.md %}).
+
+1. Run the [`cockroach start`]({% link {{ page.version.version }}/cockroach-start.md %}) command:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach start \
+ --insecure \
+ --advertise-addr=<node1 address> \
+ --join=<node1 address>,<node2 address>,<node3 address> \
+ --cache=.25 \
+ --max-sql-memory=.25 \
+ --background
+ ~~~
+
+ This command primes the node to start, using the following flags:
+
+ Flag | Description
+ -----|------------
+ `--insecure` | Indicates that the cluster is insecure, with no network encryption or authentication.
+ `--advertise-addr` | Specifies the IP address/hostname and port to tell other nodes to use. The port number can be omitted, in which case it defaults to `26257`.<br><br>This value must route to an IP address the node is listening on (with `--listen-addr` unspecified, the node listens on all IP addresses).<br><br>In some networking scenarios, you may need to use `--advertise-addr` and/or `--listen-addr` differently. For more details, see [Networking]({% link {{ page.version.version }}/recommended-production-settings.md %}#networking).
+ `--join` | Identifies the address of 3-5 of the initial nodes of the cluster. These addresses should match the addresses that the target nodes are advertising.
+ `--cache` `--max-sql-memory` | Increases the node's cache size to 25% of available system memory to improve read performance. The capacity for in-memory SQL processing defaults to 25% of system memory but can be raised, if necessary, to increase the number of simultaneous client connections allowed by the node as well as the node's capacity for in-memory processing of rows when using `ORDER BY`, `GROUP BY`, `DISTINCT`, joins, and window functions. For more details, see [Cache and SQL Memory Size]({% link {{ page.version.version }}/recommended-production-settings.md %}#cache-and-sql-memory-size).
+ `--background` | Starts the node in the background so you gain control of the terminal to issue more commands.
+
+ When deploying across multiple datacenters, or when there is otherwise high latency between nodes, it is recommended to set `--locality` as well. Setting `--locality` is also required in order to use certain enterprise features. For more details, see [Locality]({% link {{ page.version.version }}/cockroach-start.md %}#locality).
+
+ For other flags not explicitly set, the command uses default values. For example, the node stores data in `--store=cockroach-data` and binds DB Console HTTP requests to `--http-addr=localhost:8080`. To set these options manually, see [Start a Node]({% link {{ page.version.version }}/cockroach-start.md %}).
+
+1. Repeat these steps for each additional node that you want in your cluster.
+
+
+
+
+
+For each initial node of your cluster, complete the following steps:
+
+{{site.data.alerts.callout_info}}
+After completing these steps, nodes will not yet be live. They will complete the startup process and join together to form a cluster as soon as the cluster is initialized in the next step.
+{{site.data.alerts.end}}
+
+1. SSH to the machine where you want the node to run. Ensure you are logged in as the `root` user.
+
+1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, and extract the binary:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz \
+ | tar -xz
+ ~~~
+
+1. Copy the binary into the `PATH`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/
+ ~~~
+
+ If you get a permissions error, prefix the command with `sudo`.
+
+1. CockroachDB uses custom-built versions of the [GEOS]({% link {{ page.version.version }}/architecture/glossary.md %}#geos) libraries. Copy these libraries to the location where CockroachDB expects to find them:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ mkdir -p /usr/local/lib/cockroach
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/lib/libgeos.so /usr/local/lib/cockroach/
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/lib/libgeos_c.so /usr/local/lib/cockroach/
+ ~~~
+
+ If you get a permissions error, prefix the command with `sudo`.
+
+1. Create the Cockroach directory:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ mkdir /var/lib/cockroach
+ ~~~
+
+1. Create a Unix user named `cockroach`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ useradd cockroach
+ ~~~
+
+1. Change the ownership of the `cockroach` directory to the user `cockroach`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ chown cockroach /var/lib/cockroach
+ ~~~
+
+1. Download the [sample configuration template](https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/_includes/{{ page.version.version }}/prod-deployment/insecurecockroachdb.service) and save the file in the `/etc/systemd/system/` directory:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ curl -o insecurecockroachdb.service https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/_includes/{{ page.version.version }}/prod-deployment/insecurecockroachdb.service
+ ~~~
+
+ Alternatively, you can create the file yourself and copy the script into it:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ {% include {{ page.version.version }}/prod-deployment/insecurecockroachdb.service %}
+ ~~~
+
+ {{site.data.alerts.callout_info}}
+ Previously, the sample configuration file set `TimeoutStopSec` to 60 seconds. The recommended value has been increased to 300 seconds to give the `cockroach` process more time to stop gracefully.
+ {{site.data.alerts.end}}
+
+1. In the sample configuration template, specify values for the following flags:
+
+ {% include {{ page.version.version }}/prod-deployment/advertise-addr-join.md %}
+
+ When deploying across multiple datacenters, or when there is otherwise high latency between nodes, it is recommended to set `--locality` as well. Setting `--locality` is also required in order to use certain enterprise features. For more details, see [Locality]({% link {{ page.version.version }}/cockroach-start.md %}#locality).
+
+ For other flags not explicitly set, the command uses default values. For example, the node stores data in `--store=cockroach-data` and binds DB Console HTTP requests to `--http-addr=localhost:8080`. To set these options manually, see [Start a Node]({% link {{ page.version.version }}/cockroach-start.md %}).
+
+1. Start the CockroachDB cluster:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ systemctl start insecurecockroachdb
+ ~~~
+
+1. Configure `systemd` to start CockroachDB automatically after a reboot:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ systemctl enable insecurecockroachdb
+ ~~~
+
+1. Repeat these steps for each additional node that you want in your cluster.
+
+{{site.data.alerts.callout_info}}
+`systemd` handles node restarts in case of node failure. To stop a node without `systemd` restarting it, run `systemctl stop insecurecockroachdb`.
+{{site.data.alerts.end}}
+
+
diff --git a/src/current/_includes/v25.1/prod-deployment/insecure-test-cluster.md b/src/current/_includes/v25.1/prod-deployment/insecure-test-cluster.md
new file mode 100644
index 00000000000..b67b97cde01
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/insecure-test-cluster.md
@@ -0,0 +1,41 @@
+CockroachDB replicates and distributes data behind the scenes and uses a [Gossip protocol](https://wikipedia.org/wiki/Gossip_protocol) to enable each node to locate data across the cluster. Once a cluster is live, any node can be used as a SQL gateway.
+
+When using a load balancer, you should issue commands directly to the load balancer, which then routes traffic to the nodes.
+
+Use the [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}) locally as follows:
+
+1. On your local machine, launch the built-in SQL client, with the `--host` flag set to the address of the load balancer:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --insecure --host=<address of load balancer>
+ ~~~
+
+1. Create an `insecurenodetest` database:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > CREATE DATABASE insecurenodetest;
+ ~~~
+
+1. View the cluster's databases, which will include `insecurenodetest`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > SHOW DATABASES;
+ ~~~
+
+ ~~~
+ +--------------------+
+ | Database |
+ +--------------------+
+ | crdb_internal |
+ | information_schema |
+ | insecurenodetest |
+ | pg_catalog |
+ | system |
+ +--------------------+
+ (5 rows)
+ ~~~
+
+1. Use `\q` to exit the SQL shell.
diff --git a/src/current/_includes/v25.1/prod-deployment/insecure-test-load-balancing.md b/src/current/_includes/v25.1/prod-deployment/insecure-test-load-balancing.md
new file mode 100644
index 00000000000..aaa2873a2ba
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/insecure-test-load-balancing.md
@@ -0,0 +1,79 @@
+CockroachDB comes with a number of [built-in workloads]({% link {{ page.version.version }}/cockroach-workload.md %}) for simulating client traffic. This step features CockroachDB's version of the [TPC-C](http://www.tpc.org/tpcc/) workload.
+
+{{site.data.alerts.callout_info}}
+Be sure that you have configured your network to allow traffic from the application to the load balancer. In this case, you will run the sample workload on one of your machines. The traffic source should therefore be the **internal (private)** IP address of that machine.
+{{site.data.alerts.end}}
+
+{{site.data.alerts.callout_success}}
+For comprehensive guidance on benchmarking CockroachDB with TPC-C, see [Performance Benchmarking]({% link {{ page.version.version }}/performance-benchmarking-with-tpcc-local.md %}).
+{{site.data.alerts.end}}
+
+1. SSH to the machine where you want to run the sample TPC-C workload.
+
+ This should be a machine that is not running a CockroachDB node.
+
+1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, and extract the binary:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz \
+ | tar -xz
+ ~~~
+
+1. Copy the binary into the `PATH`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/
+ ~~~
+
+ If you get a permissions error, prefix the command with `sudo`.
+
+1. Use the [`cockroach workload`]({% link {{ page.version.version }}/cockroach-workload.md %}) command to load the initial schema and data, pointing it at the IP address of the load balancer:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach workload init tpcc \
+ 'postgresql://root@<IP ADDRESS OF LOAD BALANCER>:26257/tpcc?sslmode=disable'
+ ~~~
+
+1. Use the `cockroach workload` command to run the workload for 10 minutes:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach workload run tpcc \
+ --duration=10m \
+ 'postgresql://root@<IP ADDRESS OF LOAD BALANCER>:26257/tpcc?sslmode=disable'
+ ~~~
+
+ You'll see per-operation statistics print to standard output every second:
+
+ ~~~
+ _elapsed___errors__ops/sec(inst)___ops/sec(cum)__p50(ms)__p95(ms)__p99(ms)_pMax(ms)
+ 1s 0 1443.4 1494.8 4.7 9.4 27.3 67.1 transfer
+ 2s 0 1686.5 1590.9 4.7 8.1 15.2 28.3 transfer
+ 3s 0 1735.7 1639.0 4.7 7.3 11.5 28.3 transfer
+ 4s 0 1542.6 1614.9 5.0 8.9 12.1 21.0 transfer
+ 5s 0 1695.9 1631.1 4.7 7.3 11.5 22.0 transfer
+ 6s 0 1569.2 1620.8 5.0 8.4 11.5 15.7 transfer
+ 7s 0 1614.6 1619.9 4.7 8.1 12.1 16.8 transfer
+ 8s 0 1344.4 1585.6 5.8 10.0 15.2 31.5 transfer
+ 9s 0 1351.9 1559.5 5.8 10.0 16.8 54.5 transfer
+ 10s 0 1514.8 1555.0 5.2 8.1 12.1 16.8 transfer
+ ...
+ ~~~
+
+ After the specified duration (10 minutes in this case), the workload will stop and you'll see totals printed to standard output:
+
+ ~~~
+ _elapsed___errors_____ops(total)___ops/sec(cum)__avg(ms)__p50(ms)__p95(ms)__p99(ms)_pMax(ms)__result
+ 600.0s 0 823902 1373.2 5.8 5.5 10.0 15.2 209.7
+ ~~~
+
+ {{site.data.alerts.callout_success}}
+ For more `tpcc` options, use `cockroach workload run tpcc --help`. For details about other workloads built into the `cockroach` binary, use `cockroach workload --help`.
+ {{site.data.alerts.end}}
+
+1. To monitor the load generator's progress, open the [DB Console]({% link {{ page.version.version }}/ui-overview.md %}) by pointing a browser to the address in the `admin` field in the standard output of any node on startup.
+
+ Since the load generator is pointed at the load balancer, the connections will be evenly distributed across nodes. To verify this, click **Metrics** on the left, select the **SQL** dashboard, and then check the **SQL Connections** graph. You can use the **Graph** menu to filter the graph for specific nodes.
diff --git a/src/current/_includes/v25.1/prod-deployment/insecurecockroachdb.service b/src/current/_includes/v25.1/prod-deployment/insecurecockroachdb.service
new file mode 100644
index 00000000000..54d5ea2047a
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/insecurecockroachdb.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=Cockroach Database cluster node
+Requires=network.target
+[Service]
+Type=notify
+WorkingDirectory=/var/lib/cockroach
+ExecStart=/usr/local/bin/cockroach start --insecure --advertise-addr=<node1 address> --join=<node1 address>,<node2 address>,<node3 address> --cache=.25 --max-sql-memory=.25
+TimeoutStopSec=300
+Restart=always
+RestartSec=10
+StandardOutput=syslog
+StandardError=syslog
+SyslogIdentifier=cockroach
+User=cockroach
+[Install]
+WantedBy=default.target
diff --git a/src/current/_includes/v25.1/prod-deployment/join-flag-multi-region.md b/src/current/_includes/v25.1/prod-deployment/join-flag-multi-region.md
new file mode 100644
index 00000000000..6c07cf7abe0
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/join-flag-multi-region.md
@@ -0,0 +1 @@
+When starting a multi-region cluster, set more than one `--join` address per region, and select nodes that are spread across failure domains. This ensures [high availability]({% link {{ page.version.version }}/architecture/replication-layer.md %}#overview).
\ No newline at end of file
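+
+For example, a node in a hypothetical three-region deployment might be started with `--join` addresses drawn from each region (the hostnames below are placeholders):
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+cockroach start \
+--join=node1.us-east1.example.com,node1.us-west1.example.com,node1.europe-west1.example.com \
+--locality=region=us-east1 \
+<other required flags>
+~~~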
diff --git a/src/current/_includes/v25.1/prod-deployment/join-flag-single-region.md b/src/current/_includes/v25.1/prod-deployment/join-flag-single-region.md
new file mode 100644
index 00000000000..99250cdfee9
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/join-flag-single-region.md
@@ -0,0 +1 @@
+For a cluster in a single region, set 3-5 `--join` addresses. Each starting node will attempt to contact one of the join hosts. In case a join host cannot be reached, the node will try another address on the list until it can join the gossip network.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/monitor-cluster.md b/src/current/_includes/v25.1/prod-deployment/monitor-cluster.md
new file mode 100644
index 00000000000..b6c1fcbf609
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/monitor-cluster.md
@@ -0,0 +1,3 @@
+Despite CockroachDB's various [built-in safeguards against failure]({% link {{ page.version.version }}/frequently-asked-questions.md %}#how-does-cockroachdb-survive-failures), it is critical to actively monitor the overall health and performance of a cluster running in production and to create alerting rules that promptly send notifications when there are events that require investigation or intervention.
+
+For details about available monitoring options and the most important events and metrics to alert on, see [Monitoring and Alerting]({% link {{ page.version.version }}/monitoring-and-alerting.md %}).
diff --git a/src/current/_includes/v25.1/prod-deployment/process-termination.md b/src/current/_includes/v25.1/prod-deployment/process-termination.md
new file mode 100644
index 00000000000..25dcba0fe50
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/process-termination.md
@@ -0,0 +1,11 @@
+{{site.data.alerts.callout_danger}}
+Cockroach Labs does not recommend terminating the `cockroach` process by sending a `SIGKILL` signal, because it bypasses CockroachDB's [node shutdown logic](#node-shutdown-sequence) and degrades the cluster's health. From the point of view of other cluster nodes, the node will be suddenly unavailable.
+
+- If a decommissioning node is forcibly terminated before decommission completes, [ranges will be under-replicated]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#critical-nodes-endpoint) and the cluster is at risk of [loss of quorum]({% link {{ page.version.version }}/architecture/replication-layer.md %}#overview) if an additional node experiences an outage in the window before up-replication completes.
+- If a draining or decommissioning node is forcibly terminated before the operation completes, it can corrupt log files and, in certain edge cases, can result in temporary data unavailability, latency spikes, [uncertainty errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#readwithinuncertaintyintervalerror), [ambiguous commit errors]({% link {{ page.version.version }}/common-errors.md %}#result-is-ambiguous), or query timeouts.
+
+{{site.data.alerts.end}}
+
+- On production deployments, use the process manager, orchestration system, or other deployment tooling to send `SIGTERM` to the process. For example, with [`systemd`](https://www.freedesktop.org/wiki/Software/systemd/), run `systemctl stop {systemd config filename}`.
+
+- If you run CockroachDB in the foreground for local testing, you can use `ctrl-c` in the terminal to terminate the process.
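+
+- If the `cockroach` process runs in the background without a process manager, you can send `SIGTERM` to it directly. A minimal sketch, assuming a single `cockroach` process on the machine:
+
+  {% include_cached copy-clipboard.html %}
+  ~~~ shell
+  kill -TERM $(pgrep -x cockroach)
+  ~~~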
diff --git a/src/current/_includes/v25.1/prod-deployment/prod-guidance-cache-max-sql-memory.md b/src/current/_includes/v25.1/prod-deployment/prod-guidance-cache-max-sql-memory.md
new file mode 100644
index 00000000000..4cc0a947a21
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/prod-guidance-cache-max-sql-memory.md
@@ -0,0 +1,12 @@
+{% capture formula %}{% include_cached copy-clipboard.html %}
+~~~
+(2 * --max-sql-memory) + --cache <= 80% of system RAM
+~~~
+{% endcapture %}
+The default value for `--cache` is 128 MiB. For production deployments, set `--cache` to `25%` or higher. To determine appropriate settings for `--cache` and `--max-sql-memory`, use the following formula: {{ formula }}
+
+To help guard against [OOM events]({% link {{ page.version.version }}/cluster-setup-troubleshooting.md %}#out-of-memory-oom-crash), CockroachDB sets a soft memory limit using mechanisms in Go. Depending on your hardware and workload, you may not need to manually tune `--max-sql-memory`.
+
+Test the configuration with a reasonable workload before deploying it to production.
+
+{{site.data.alerts.callout_info}}
+On startup, if CockroachDB detects that `--max-sql-memory` or `--cache` are set too aggressively, a warning is logged.
+{{site.data.alerts.end}}
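+
+For example, on a hypothetical node with 32 GiB of RAM, starting with `--cache=.25 --max-sql-memory=.25` allocates 8 GiB to each, and (2 × 8 GiB) + 8 GiB = 24 GiB, which stays under 80% of system RAM (25.6 GiB):
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+cockroach start --cache=.25 --max-sql-memory=.25 <other required flags>
+~~~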
diff --git a/src/current/_includes/v25.1/prod-deployment/prod-guidance-connection-pooling.md b/src/current/_includes/v25.1/prod-deployment/prod-guidance-connection-pooling.md
new file mode 100644
index 00000000000..17b87a9988b
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/prod-guidance-connection-pooling.md
@@ -0,0 +1 @@
+The total number of workload connections across all connection pools **should not significantly exceed 4 times the number of vCPUs** in the cluster.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/prod-guidance-disable-swap.md b/src/current/_includes/v25.1/prod-deployment/prod-guidance-disable-swap.md
new file mode 100644
index 00000000000..f988eb016d4
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/prod-guidance-disable-swap.md
@@ -0,0 +1 @@
+Disable Linux memory swapping. Over-allocating memory on production machines can lead to unexpected performance issues when pages have to be read back into memory.
\ No newline at end of file
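+
+For example, on a Linux host you might disable swap immediately and keep it disabled across reboots (a sketch; exact steps depend on your distribution):
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+# Turn off all active swap devices immediately.
+sudo swapoff -a
+# To keep swap disabled after a reboot, also remove or comment out any swap entries in /etc/fstab.
+~~~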
diff --git a/src/current/_includes/v25.1/prod-deployment/prod-guidance-larger-nodes.md b/src/current/_includes/v25.1/prod-deployment/prod-guidance-larger-nodes.md
new file mode 100644
index 00000000000..c165a0130b7
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/prod-guidance-larger-nodes.md
@@ -0,0 +1 @@
+To optimize for throughput, use larger nodes with up to 32 vCPUs. To further increase throughput, add more nodes to the cluster instead of increasing node size.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/prod-guidance-log-volume.md b/src/current/_includes/v25.1/prod-deployment/prod-guidance-log-volume.md
new file mode 100644
index 00000000000..d4699e861f7
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/prod-guidance-log-volume.md
@@ -0,0 +1 @@
+Store CockroachDB log files in a separate volume from the main data store so that logging is not impacted by I/O throttling.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/prod-guidance-lvm.md b/src/current/_includes/v25.1/prod-deployment/prod-guidance-lvm.md
new file mode 100644
index 00000000000..383f2a5d536
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/prod-guidance-lvm.md
@@ -0,0 +1 @@
+Do not use LVM in the I/O path. Dynamically resizing CockroachDB store volumes can result in significant performance degradation. Using LVM snapshots in lieu of CockroachDB backup and restore is also not supported.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/prod-guidance-store-volume.md b/src/current/_includes/v25.1/prod-deployment/prod-guidance-store-volume.md
new file mode 100644
index 00000000000..2f1bcd9cf5a
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/prod-guidance-store-volume.md
@@ -0,0 +1 @@
+Use dedicated volumes for the CockroachDB store. Do not share the store volume with any other I/O activity.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/prod-see-also.md b/src/current/_includes/v25.1/prod-deployment/prod-see-also.md
new file mode 100644
index 00000000000..88d81e565c9
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/prod-see-also.md
@@ -0,0 +1,7 @@
+- [Production Checklist]({% link {{ page.version.version }}/recommended-production-settings.md %})
+- [Manual Deployment]({% link {{ page.version.version }}/manual-deployment.md %})
+- [Orchestrated Deployment]({% link {{ page.version.version }}/kubernetes-overview.md %})
+- [Monitoring and Alerting]({% link {{ page.version.version }}/monitoring-and-alerting.md %})
+- [Performance Benchmarking]({% link {{ page.version.version }}/performance-benchmarking-with-tpcc-small.md %})
+- [Performance Tuning]({% link {{ page.version.version }}/performance-best-practices-overview.md %})
+- [Local Deployment]({% link {{ page.version.version }}/start-a-local-cluster.md %})
diff --git a/src/current/_includes/v25.1/prod-deployment/provision-cpu.md b/src/current/_includes/v25.1/prod-deployment/provision-cpu.md
new file mode 100644
index 00000000000..48896a432cd
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/provision-cpu.md
@@ -0,0 +1 @@
+{% if include.threshold == "absolute_minimum" %}**4 vCPUs**{% elsif include.threshold == "minimum" %}**8 vCPUs**{% elsif include.threshold == "maximum" %}**32 vCPUs**{% endif %}
diff --git a/src/current/_includes/v25.1/prod-deployment/provision-disk-io.md b/src/current/_includes/v25.1/prod-deployment/provision-disk-io.md
new file mode 100644
index 00000000000..dadd7113e01
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/provision-disk-io.md
@@ -0,0 +1 @@
+500 IOPS and 30 MB/s per vCPU
diff --git a/src/current/_includes/v25.1/prod-deployment/provision-memory.md b/src/current/_includes/v25.1/prod-deployment/provision-memory.md
new file mode 100644
index 00000000000..98136337374
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/provision-memory.md
@@ -0,0 +1 @@
+**4 GiB of RAM per vCPU**
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/provision-storage.md b/src/current/_includes/v25.1/prod-deployment/provision-storage.md
new file mode 100644
index 00000000000..745ebc8dace
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/provision-storage.md
@@ -0,0 +1 @@
+320 GiB per vCPU
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/recommended-instances-aws.md b/src/current/_includes/v25.1/prod-deployment/recommended-instances-aws.md
new file mode 100644
index 00000000000..87d0f53e95c
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/recommended-instances-aws.md
@@ -0,0 +1,7 @@
+- Use general-purpose [`m6i` or `m6a`](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/general-purpose-instances.html) VMs with SSD-backed [EBS volumes](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html). For example, Cockroach Labs has used `m6i.2xlarge` for performance benchmarking. If your workload requires high throughput, use network-optimized `m5n` instances. To simulate bare-metal deployments, use `m5d` with [SSD Instance Store volumes](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ssd-instance-store.html).
+
+ - `m5` and `m5a` instances, and [compute-optimized `c5`, `c5a`, and `c5n`](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/compute-optimized-instances.html) instances, are also acceptable.
+
+ {{site.data.alerts.callout_danger}}
+ **Do not** use [burstable performance instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html), which limit the load on a single core.
+ {{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/prod-deployment/recommended-instances-azure.md b/src/current/_includes/v25.1/prod-deployment/recommended-instances-azure.md
new file mode 100644
index 00000000000..2712b3542ef
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/recommended-instances-azure.md
@@ -0,0 +1,7 @@
+- Use general-purpose [Dsv5-series](https://docs.microsoft.com/azure/virtual-machines/dv5-dsv5-series) and [Dasv5-series](https://docs.microsoft.com/azure/virtual-machines/dasv5-dadsv5-series) or memory-optimized [Ev5-series](https://docs.microsoft.com/azure/virtual-machines/ev5-esv5-series) and [Easv5-series](https://docs.microsoft.com/azure/virtual-machines/easv5-eadsv5-series#easv5-series) VMs. For example, Cockroach Labs has used `Standard_D8s_v5`, `Standard_D8as_v5`, `Standard_E8s_v5`, and `Standard_E8as_v5` for performance benchmarking.
+
+ - Compute-optimized [F-series](https://docs.microsoft.com/azure/virtual-machines/fsv2-series) VMs are also acceptable.
+
+ {{site.data.alerts.callout_danger}}
+ Do not use ["burstable" B-series](https://docs.microsoft.com/azure/virtual-machines/linux/b-series-burstable) VMs, which limit the load on CPU resources. Also, Cockroach Labs has experienced data corruption issues on A-series VMs, so we recommend avoiding those as well.
+ {{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/prod-deployment/recommended-instances-gcp.md b/src/current/_includes/v25.1/prod-deployment/recommended-instances-gcp.md
new file mode 100644
index 00000000000..6dbe048cd16
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/recommended-instances-gcp.md
@@ -0,0 +1,5 @@
+- Use general-purpose [`t2d-standard`, `n2-standard`, or `n2d-standard`](https://cloud.google.com/compute/pricing#predefined_machine_types) VMs, or use [custom VMs](https://cloud.google.com/compute/docs/instances/creating-instance-with-custom-machine-type). For example, Cockroach Labs has used `t2d-standard-8`, `n2-standard-8`, and `n2d-standard-8` for performance benchmarking.
+
+ {{site.data.alerts.callout_danger}}
+ Do not use `f1` or `g1` [shared-core machines](https://cloud.google.com/compute/docs/machine-types#sharedcore), which limit the load on CPU resources.
+ {{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/resolution-excessive-concurrency.md b/src/current/_includes/v25.1/prod-deployment/resolution-excessive-concurrency.md
new file mode 100644
index 00000000000..01d54228e53
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/resolution-excessive-concurrency.md
@@ -0,0 +1 @@
+To prevent issues with workload concurrency, [provision sufficient CPU]({% link {{ page.version.version }}/recommended-production-settings.md %}#sizing) and use [connection pooling]({% link {{ page.version.version }}/recommended-production-settings.md %}#connection-pooling) for the workload.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/resolution-inverted-lsm.md b/src/current/_includes/v25.1/prod-deployment/resolution-inverted-lsm.md
new file mode 100644
index 00000000000..75693d3bb35
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/resolution-inverted-lsm.md
@@ -0,0 +1 @@
+If compaction has fallen behind and caused an [inverted LSM]({% link {{ page.version.version }}/architecture/storage-layer.md %}#inverted-lsms), throttle your workload concurrency to allow [compaction]({% link {{ page.version.version }}/architecture/storage-layer.md %}#compaction) to catch up and restore a healthy LSM shape. {% include {{ page.version.version }}/prod-deployment/prod-guidance-connection-pooling.md %} If a node is severely impacted, you can [start a new node]({% link {{ page.version.version }}/cockroach-start.md %}) and then [decommission the problematic node](node-shutdown.html?filters=decommission#remove-nodes). {% include {{page.version.version}}/storage/compaction-concurrency.md %}
diff --git a/src/current/_includes/v25.1/prod-deployment/resolution-oom-crash.md b/src/current/_includes/v25.1/prod-deployment/resolution-oom-crash.md
new file mode 100644
index 00000000000..e407039f21c
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/resolution-oom-crash.md
@@ -0,0 +1 @@
+To prevent OOM crashes, [provision sufficient memory]({% link {{ page.version.version }}/recommended-production-settings.md %}#memory). If all CockroachDB machines are provisioned and configured correctly, either run the CockroachDB process on another node with sufficient memory, or [reduce the memory allocated to CockroachDB]({% link {{ page.version.version }}/recommended-production-settings.md %}#cache-and-sql-memory-size).
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/resolution-untuned-query.md b/src/current/_includes/v25.1/prod-deployment/resolution-untuned-query.md
new file mode 100644
index 00000000000..3db116e41c1
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/resolution-untuned-query.md
@@ -0,0 +1 @@
+If you find queries that are consuming too much memory, [cancel the queries]({% link {{ page.version.version }}/manage-long-running-queries.md %}#cancel-long-running-queries) to free up memory usage. For information on optimizing query performance, see [SQL Performance Best Practices]({% link {{ page.version.version }}/performance-best-practices-overview.md %}).
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/secure-generate-certificates.md b/src/current/_includes/v25.1/prod-deployment/secure-generate-certificates.md
new file mode 100644
index 00000000000..9909968bf8e
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/secure-generate-certificates.md
@@ -0,0 +1,201 @@
+You can use `cockroach cert` commands, [`openssl` commands]({% link {{ page.version.version }}/create-security-certificates-openssl.md %}), or [Auto TLS cert generation]({% link {{ page.version.version }}/auto-tls.md %}) (alpha) to generate security certificates. This section features the `cockroach cert` commands.
+
+Locally, you'll need to [create the following certificates and keys]({% link {{ page.version.version }}/cockroach-cert.md %}):
+
+- A certificate authority (CA) key pair (`ca.crt` and `ca.key`).
+- A node key pair for each node, issued to its IP addresses and any common names the machine uses, as well as to the IP addresses and common names for machines running load balancers.
+- A client key pair for the `root` user. You'll use this to run a sample workload against the cluster as well as some `cockroach` client commands from your local machine.
+
+{{site.data.alerts.callout_success}}Before beginning, it's useful to collect each of your machine's internal and external IP addresses, as well as any server names you want to issue certificates for.{{site.data.alerts.end}}
+
+1. [Install CockroachDB]({% link {{ page.version.version }}/install-cockroachdb.md %}) on your local machine, if you haven't already.
+
+1. Create two directories:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ mkdir certs
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ mkdir my-safe-directory
+ ~~~
+ - `certs`: You'll generate your CA certificate and all node and client certificates and keys in this directory and then upload some of the files to your nodes.
+ - `my-safe-directory`: You'll generate your CA key in this directory and then reference the key when generating node and client certificates. After that, you'll keep the key safe and secret; you will not upload it to your nodes.
+
+1. Create the CA certificate and key:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach cert create-ca \
+ --certs-dir=certs \
+ --ca-key=my-safe-directory/ca.key
+ ~~~
+
+1. Create the certificate and key for the first node, issued to all common names you might use to refer to the node as well as to the load balancer instances:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+    $ cockroach cert create-node \
+    <node1 internal IP address> \
+    <node1 external IP address> \
+    <node1 hostname> \
+    <other common names for node1> \
+    localhost \
+    127.0.0.1 \
+    <load balancer IP address> \
+    <load balancer hostname> \
+    <other common names for load balancer instances> \
+ --certs-dir=certs \
+ --ca-key=my-safe-directory/ca.key
+ ~~~
+
+1. Upload the CA certificate and node certificate and key to the first node:
+
+ {% if page.title contains "Google" %}
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+    $ gcloud compute ssh <instance name> \
+    --project <project name> \
+    --command "mkdir certs"
+ ~~~
+
+ {{site.data.alerts.callout_info}}
+ `gcloud compute ssh` associates your public SSH key with the GCP project and is only needed when connecting to the first node. See the [GCP docs](https://cloud.google.com/sdk/gcloud/reference/compute/ssh) for more details.
+ {{site.data.alerts.end}}
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ scp certs/ca.crt \
+ certs/node.crt \
+ certs/node.key \
+    <username>@<node1 address>:~/certs
+ ~~~
+
+ {% elsif page.title contains "AWS" %}
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+    $ ssh-add /path/<key file>.pem
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+    $ ssh <username>@<node1 address> "mkdir certs"
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ scp certs/ca.crt \
+ certs/node.crt \
+ certs/node.key \
+    <username>@<node1 address>:~/certs
+ ~~~
+
+ {% else %}
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+    $ ssh <username>@<node1 address> "mkdir certs"
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ scp certs/ca.crt \
+ certs/node.crt \
+ certs/node.key \
+    <username>@<node1 address>:~/certs
+ ~~~
+ {% endif %}
+
+1. Delete the local copy of the node certificate and key:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ rm certs/node.crt certs/node.key
+ ~~~
+
+ {{site.data.alerts.callout_info}}
+ This is necessary because the certificates and keys for additional nodes will also be named `node.crt` and `node.key`. As an alternative to deleting these files, you can run the next `cockroach cert create-node` commands with the `--overwrite` flag.
+ {{site.data.alerts.end}}
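+
+    For example, a minimal sketch of a subsequent node certificate command using `--overwrite` instead of deleting the files first (the placeholder values are illustrative and follow the pattern used in the next step):
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cockroach cert create-node \
+    <node2 internal IP address> \
+    <node2 hostname> \
+    localhost \
+    127.0.0.1 \
+    --certs-dir=certs \
+    --ca-key=my-safe-directory/ca.key \
+    --overwrite
+    ~~~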
+
+1. Create the certificate and key for the second node, issued to all common names you might use to refer to the node as well as to the load balancer instances:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+    $ cockroach cert create-node \
+    <node2 internal IP address> \
+    <node2 external IP address> \
+    <node2 hostname> \
+    <other common names for node2> \
+    localhost \
+    127.0.0.1 \
+    <load balancer IP address> \
+    <load balancer hostname> \
+    <other common names for load balancer instances> \
+ --certs-dir=certs \
+ --ca-key=my-safe-directory/ca.key
+ ~~~
+
+1. Upload the CA certificate and node certificate and key to the second node:
+
+ {% if page.title contains "AWS" %}
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+    $ ssh <username>@<node2 address> "mkdir certs"
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ scp certs/ca.crt \
+ certs/node.crt \
+ certs/node.key \
+    <username>@<node2 address>:~/certs
+ ~~~
+
+ {% else %}
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+    $ ssh <username>@<node2 address> "mkdir certs"
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ scp certs/ca.crt \
+ certs/node.crt \
+ certs/node.key \
+    <username>@<node2 address>:~/certs
+ ~~~
+ {% endif %}
+
+1. Repeat steps 6 - 8 for each additional node.
+
+1. Create a client certificate and key for the `root` user:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach cert create-client \
+ root \
+ --certs-dir=certs \
+ --ca-key=my-safe-directory/ca.key
+ ~~~
+
+1. Upload the CA certificate and client certificate and key to the machine where you will run a sample workload:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+    $ ssh <username>@<workload address> "mkdir certs"
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ scp certs/ca.crt \
+ certs/client.root.crt \
+ certs/client.root.key \
+    <username>@<workload address>:~/certs
+ ~~~
+
+ In later steps, you'll also use the `root` user's certificate to run [`cockroach`]({% link {{ page.version.version }}/cockroach-commands.md %}) client commands from your local machine. If you might also want to run `cockroach` client commands directly on a node (e.g., for local debugging), you'll need to copy the `root` user's certificate and key to that node as well.
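+
+    For example, you could copy the `root` client certificate and key to the first node with a command along these lines (the username and address are placeholders):
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ scp certs/client.root.crt \
+    certs/client.root.key \
+    <username>@<node1 address>:~/certs
+    ~~~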
+
+{{site.data.alerts.callout_info}}
+On accessing the DB Console in a later step, your browser will consider the CockroachDB-created certificate invalid and you’ll need to click through a warning message to get to the UI. You can avoid this issue by [using a certificate issued by a public CA]({% link {{ page.version.version }}/create-security-certificates-custom-ca.md %}#accessing-the-db-console-for-a-secure-cluster).
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/prod-deployment/secure-initialize-cluster.md b/src/current/_includes/v25.1/prod-deployment/secure-initialize-cluster.md
new file mode 100644
index 00000000000..5efa831e6f7
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/secure-initialize-cluster.md
@@ -0,0 +1,8 @@
+On your local machine, run the [`cockroach init`]({% link {{ page.version.version }}/cockroach-init.md %}) command to complete the node startup process and have them join together as a cluster:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ cockroach init --certs-dir=certs --host=<address of any node on --join list>
+~~~
+
+After running this command, each node prints helpful details to the [standard output]({% link {{ page.version.version }}/cockroach-start.md %}#standard-output), such as the CockroachDB version, the URL for the DB Console, and the SQL URL for clients.
diff --git a/src/current/_includes/v25.1/prod-deployment/secure-recommendations.md b/src/current/_includes/v25.1/prod-deployment/secure-recommendations.md
new file mode 100644
index 00000000000..528850dbbb0
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/secure-recommendations.md
@@ -0,0 +1,7 @@
+- Decide how you want to access your DB Console:
+
+ Access Level | Description
+ -------------|------------
+ Partially open | Set a firewall rule to allow only specific IP addresses to communicate on port `8080`.
+ Completely open | Set a firewall rule to allow all IP addresses to communicate on port `8080`.
+ Completely closed | Set a firewall rule to disallow all communication on port `8080`. In this case, a machine with SSH access to a node could use an SSH tunnel to access the DB Console.
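+
+    For the completely closed option, an SSH tunnel along these lines (the username and node address are placeholders) lets you reach the node's DB Console port `8080` via `localhost:8080` on your local machine:
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ ssh -L 8080:localhost:8080 <username>@<node address>
+    ~~~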
diff --git a/src/current/_includes/v25.1/prod-deployment/secure-requirements.md b/src/current/_includes/v25.1/prod-deployment/secure-requirements.md
new file mode 100644
index 00000000000..f27496dd612
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/secure-requirements.md
@@ -0,0 +1,11 @@
+- You must have [CockroachDB installed]({% link {{ page.version.version }}/install-cockroachdb.md %}) locally. This is necessary for generating and managing your deployment's certificates.
+
+- You must have [SSH access]({{page.ssh-link}}) to each machine. This is necessary for distributing and starting CockroachDB binaries.
+
+- Your network configuration must allow TCP communication on the following ports:
+ - `26257` for intra-cluster and client-cluster communication
+ - `8080` to expose your DB Console
+
+- Carefully review the [Production Checklist]({% link {{ page.version.version }}/recommended-production-settings.md %}), including supported hardware and software, and the recommended [Topology Patterns]({% link {{ page.version.version }}/topology-patterns.md %}).
+
+{% include {{ page.version.version }}/prod-deployment/topology-recommendations.md %}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/secure-scale-cluster.md b/src/current/_includes/v25.1/prod-deployment/secure-scale-cluster.md
new file mode 100644
index 00000000000..8c980b018a3
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/secure-scale-cluster.md
@@ -0,0 +1,123 @@
+You can start the nodes manually or automate the process using [systemd](https://www.freedesktop.org/wiki/Software/systemd/).
+
+
+
+
+
+
+
+
+
+For each additional node you want to add to the cluster, complete the following steps:
+
+1. SSH to the machine where you want the node to run.
+
+1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, and extract the binary:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz \
+ | tar -xz
+ ~~~
+
+1. Copy the binary into the `PATH`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/
+ ~~~
+
+ If you get a permissions error, prefix the command with `sudo`.
+
+1. Run the [`cockroach start`]({% link {{ page.version.version }}/cockroach-start.md %}) command, passing the new node's address as the `--advertise-addr` flag and pointing `--join` to the three existing nodes (also include `--locality` if you set it earlier).
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach start \
+ --certs-dir=certs \
+    --advertise-addr=<node4 address> \
+    --join=<node1 address>,<node2 address>,<node3 address> \
+ --cache=.25 \
+ --max-sql-memory=.25 \
+ --background
+ ~~~
+
+1. Update your load balancer to recognize the new node.
+
+
+
+
+
+For each additional node you want to add to the cluster, complete the following steps:
+
+1. SSH to the machine where you want the node to run. Ensure you are logged in as the `root` user.
+
+1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, and extract the binary:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ curl -o cockroach-{{ page.release_info.version }}.linux-amd64.tgz https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz; tar xzvf cockroach-{{ page.release_info.version }}.linux-amd64.tgz
+ ~~~
+
+1. Copy the binary into the `PATH`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/
+ ~~~
+
+ If you get a permissions error, prefix the command with `sudo`.
+
+1. Create the Cockroach directory:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ mkdir /var/lib/cockroach
+ ~~~
+
+1. Create a Unix user named `cockroach`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ useradd cockroach
+ ~~~
+
+1. Move the `certs` directory to the `cockroach` directory.
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ mv certs /var/lib/cockroach/
+ ~~~
+
+1. Change the ownership of the `cockroach` directory to the user `cockroach`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ chown -R cockroach /var/lib/cockroach
+ ~~~
+
+1. Download the [sample configuration template](https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/_includes/{{ page.version.version }}/prod-deployment/securecockroachdb.service):
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+    curl -o securecockroachdb.service https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/_includes/{{ page.version.version }}/prod-deployment/securecockroachdb.service
+ ~~~
+
+ Alternatively, you can create the file yourself and copy the script into it:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ {% include {{ page.version.version }}/prod-deployment/securecockroachdb.service %}
+ ~~~
+
+ Save the file in the `/etc/systemd/system/` directory.
+
+1. Customize the sample configuration template for your deployment:
+
+ Specify values for the following flags in the sample configuration template:
+
+ {% include {{ page.version.version }}/prod-deployment/advertise-addr-join.md %}
+
+1. Repeat these steps for each additional node that you want in your cluster.
+
+
diff --git a/src/current/_includes/v25.1/prod-deployment/secure-start-nodes.md b/src/current/_includes/v25.1/prod-deployment/secure-start-nodes.md
new file mode 100644
index 00000000000..f3c554f4f19
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/secure-start-nodes.md
@@ -0,0 +1,168 @@
+You can start the nodes manually or automate the process using [systemd](https://www.freedesktop.org/wiki/Software/systemd/).
+
+
+
+
+
+
+
+
+
+For each initial node of your cluster, complete the following steps:
+
+{{site.data.alerts.callout_info}}
+After completing these steps, nodes will not yet be live. They will complete the startup process and join together to form a cluster as soon as the cluster is initialized in the next step.
+{{site.data.alerts.end}}
+
+1. Visit [Releases]({% link releases/index.md %}) and download the full binary of CockroachDB to the node.
+
+1. On the node, follow the instructions to [install CockroachDB]({% link {{ page.version.version }}/install-cockroachdb.md %}).
+
+1. Run the [`cockroach start`]({% link {{ page.version.version }}/cockroach-start.md %}) command:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach start \
+ --certs-dir=certs \
+    --advertise-addr=<node1 address> \
+    --join=<node1 address>,<node2 address>,<node3 address> \
+ --cache=.25 \
+ --max-sql-memory=.25 \
+ --background
+ ~~~
+
+ This command primes the node to start, using the following flags:
+
+ Flag | Description
+ -----|------------
+ `--certs-dir` | Specifies the directory where you placed the `ca.crt` file and the `node.crt` and `node.key` files for the node.
+    `--advertise-addr` | Specifies the IP address/hostname and port to tell other nodes to use. The port number can be omitted, in which case it defaults to `26257`.<br><br>This value must route to an IP address the node is listening on (with `--listen-addr` unspecified, the node listens on all IP addresses).<br><br>In some networking scenarios, you may need to use `--advertise-addr` and/or `--listen-addr` differently. For more details, see [Networking]({% link {{ page.version.version }}/recommended-production-settings.md %}#networking).
+    `--join` | Identifies the address of 3-5 of the initial nodes of the cluster. These addresses should match the addresses that the target nodes are advertising.
+    `--cache`<br>`--max-sql-memory` | Increases the node's cache size to 25% of available system memory to improve read performance. The capacity for in-memory SQL processing defaults to 25% of system memory but can be raised, if necessary, to increase the number of simultaneous client connections allowed by the node as well as the node's capacity for in-memory processing of rows when using `ORDER BY`, `GROUP BY`, `DISTINCT`, joins, and window functions. For more details, see [Cache and SQL Memory Size]({% link {{ page.version.version }}/recommended-production-settings.md %}#cache-and-sql-memory-size).
+ `--background` | Starts the node in the background so you gain control of the terminal to issue more commands.
+
+ When deploying across multiple datacenters, or when there is otherwise high latency between nodes, it is recommended to set `--locality` as well. For more details, see [Locality]({% link {{ page.version.version }}/cockroach-start.md %}#locality).
+
+ For other flags not explicitly set, the command uses default values. For example, the node stores data in `--store=cockroach-data` and binds DB Console HTTP requests to `--http-addr=:8080`. To set these options manually, see [Start a Node]({% link {{ page.version.version }}/cockroach-start.md %}).
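+
+    For example, in a multi-region deployment, the same command with `--locality` added might look like this (the addresses and locality tiers are placeholders for your own topology):
+
+    {% include_cached copy-clipboard.html %}
+    ~~~ shell
+    $ cockroach start \
+    --certs-dir=certs \
+    --advertise-addr=<node1 address> \
+    --join=<node1 address>,<node2 address>,<node3 address> \
+    --locality=region=us-east,zone=us-east-1 \
+    --cache=.25 \
+    --max-sql-memory=.25 \
+    --background
+    ~~~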
+
+Repeat these steps for each additional node that you want in your cluster.
+
+
+
+
+
+For each initial node of your cluster, complete the following steps:
+
+{{site.data.alerts.callout_info}}
+After completing these steps, nodes will not yet be live. They will complete the startup process and join together to form a cluster as soon as the cluster is initialized in the next step.
+{{site.data.alerts.end}}
+
+1. SSH to the machine where you want the node to run. Ensure you are logged in as the `root` user.
+
+1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, and extract the binary:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz \
+ | tar -xz
+ ~~~
+
+1. Copy the binary into the `PATH`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/
+ ~~~
+
+ If you get a permissions error, prefix the command with `sudo`.
+
+1. CockroachDB uses custom-built versions of the [GEOS]({% link {{ page.version.version }}/architecture/glossary.md %}#geos) libraries. Copy these libraries to the location where CockroachDB expects to find them:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ mkdir -p /usr/local/lib/cockroach
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/lib/libgeos.so /usr/local/lib/cockroach/
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/lib/libgeos_c.so /usr/local/lib/cockroach/
+ ~~~
+
+ If you get a permissions error, prefix the command with `sudo`.
+
+1. Create the Cockroach directory:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ mkdir /var/lib/cockroach
+ ~~~
+
+1. Create a Unix user named `cockroach`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ useradd cockroach
+ ~~~
+
+1. Move the `certs` directory to the `cockroach` directory.
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ mv certs /var/lib/cockroach/
+ ~~~
+
+1. Change the ownership of the `cockroach` directory to the user `cockroach`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ chown -R cockroach /var/lib/cockroach
+ ~~~
+
+1. Download the [sample configuration template](https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/_includes/{{ page.version.version }}/prod-deployment/securecockroachdb.service) and save the file in the `/etc/systemd/system/` directory:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+    curl -o securecockroachdb.service https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/_includes/{{ page.version.version }}/prod-deployment/securecockroachdb.service
+ ~~~
+
+ Alternatively, you can create the file yourself and copy the script into it:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ {% include {{ page.version.version }}/prod-deployment/securecockroachdb.service %}
+ ~~~
+
+1. In the sample configuration template, specify values for the following flags:
+
+ {% include {{ page.version.version }}/prod-deployment/advertise-addr-join.md %}
+
+ When deploying across multiple datacenters, or when there is otherwise high latency between nodes, it is recommended to set `--locality` as well. For more details, see [Locality]({% link {{ page.version.version }}/cockroach-start.md %}#locality).
+
+ For other flags not explicitly set, the command uses default values. For example, the node stores data in `--store=cockroach-data` and binds DB Console HTTP requests to `--http-addr=localhost:8080`. To set these options manually, see [Start a Node]({% link {{ page.version.version }}/cockroach-start.md %}).
+
+1. Start the CockroachDB cluster:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ systemctl start securecockroachdb
+ ~~~
+
+1. Configure `systemd` to start CockroachDB automatically after a reboot:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ systemctl enable securecockroachdb
+ ~~~
+
+1. Repeat these steps for each additional node that you want in your cluster.
+
+{{site.data.alerts.callout_info}}
+`systemd` handles node restarts in case of node failure. To stop a node without `systemd` restarting it, run `systemctl stop securecockroachdb`.
+{{site.data.alerts.end}}
+
+
diff --git a/src/current/_includes/v25.1/prod-deployment/secure-test-cluster.md b/src/current/_includes/v25.1/prod-deployment/secure-test-cluster.md
new file mode 100644
index 00000000000..2eef1f9ef4f
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/secure-test-cluster.md
@@ -0,0 +1,41 @@
+CockroachDB replicates and distributes data behind the scenes and uses a [Gossip protocol](https://wikipedia.org/wiki/Gossip_protocol) to enable each node to locate data across the cluster. Once a cluster is live, any node can be used as a SQL gateway.
+
+When using a load balancer, you should issue commands directly to the load balancer, which then routes traffic to the nodes.
+
+Use the [built-in SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}) locally as follows:
+
+1. On your local machine, launch the built-in SQL client, with the `--host` flag set to the address of the load balancer:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+    $ cockroach sql --certs-dir=certs --host=<address of load balancer>
+ ~~~
+
+1. Create a `securenodetest` database:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > CREATE DATABASE securenodetest;
+ ~~~
+
+1. View the cluster's databases, which will include `securenodetest`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > SHOW DATABASES;
+ ~~~
+
+ ~~~
+ +--------------------+
+ | Database |
+ +--------------------+
+ | crdb_internal |
+ | information_schema |
+ | securenodetest |
+ | pg_catalog |
+ | system |
+ +--------------------+
+ (5 rows)
+ ~~~
+
+1. Use `\q` to exit the SQL shell.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/secure-test-load-balancing.md b/src/current/_includes/v25.1/prod-deployment/secure-test-load-balancing.md
new file mode 100644
index 00000000000..ba1ecd90919
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/secure-test-load-balancing.md
@@ -0,0 +1,77 @@
+CockroachDB comes with a number of [built-in workloads]({% link {{ page.version.version }}/cockroach-workload.md %}) for simulating client traffic. This step features CockroachDB's version of the [TPC-C](http://www.tpc.org/tpcc/) workload.
+
+{{site.data.alerts.callout_info}}
+Be sure that you have configured your network to allow traffic from the application to the load balancer. In this case, you will run the sample workload on one of your machines. The traffic source should therefore be the **internal (private)** IP address of that machine.
+{{site.data.alerts.end}}
+
+For comprehensive guidance on benchmarking CockroachDB with TPC-C, refer to [Performance Benchmarking]({% link {{ page.version.version }}/performance-benchmarking-with-tpcc-local.md %}).
+
+1. SSH to the machine where you want to run the sample TPC-C workload.
+
+ This should be a machine that is not running a CockroachDB node, and it should already have a `certs` directory containing `ca.crt`, `client.root.crt`, and `client.root.key` files.
+
+1. Download the [CockroachDB archive](https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz) for Linux, and extract the binary:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ curl https://binaries.cockroachdb.com/cockroach-{{ page.release_info.version }}.linux-amd64.tgz \
+ | tar -xz
+ ~~~
+
+1. Copy the binary into the `PATH`:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cp -i cockroach-{{ page.release_info.version }}.linux-amd64/cockroach /usr/local/bin/
+ ~~~
+
+ If you get a permissions error, prefix the command with `sudo`.
+
+1. Use the [`cockroach workload`]({% link {{ page.version.version }}/cockroach-workload.md %}) command to load the initial schema and data, pointing it at the IP address of the load balancer:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach workload init tpcc \
+    'postgresql://root@<IP ADDRESS OF LOAD BALANCER>:26257/tpcc?sslmode=verify-full&sslrootcert=certs/ca.crt&sslcert=certs/client.root.crt&sslkey=certs/client.root.key'
+ ~~~
+
+1. Use the `cockroach workload` command to run the workload for 10 minutes:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach workload run tpcc \
+ --duration=10m \
+    'postgresql://root@<IP ADDRESS OF LOAD BALANCER>:26257/tpcc?sslmode=verify-full&sslrootcert=certs/ca.crt&sslcert=certs/client.root.crt&sslkey=certs/client.root.key'
+ ~~~
+
+ You'll see per-operation statistics print to standard output every second:
+
+ ~~~
+ _elapsed___errors__ops/sec(inst)___ops/sec(cum)__p50(ms)__p95(ms)__p99(ms)_pMax(ms)
+ 1s 0 1443.4 1494.8 4.7 9.4 27.3 67.1 transfer
+ 2s 0 1686.5 1590.9 4.7 8.1 15.2 28.3 transfer
+ 3s 0 1735.7 1639.0 4.7 7.3 11.5 28.3 transfer
+ 4s 0 1542.6 1614.9 5.0 8.9 12.1 21.0 transfer
+ 5s 0 1695.9 1631.1 4.7 7.3 11.5 22.0 transfer
+ 6s 0 1569.2 1620.8 5.0 8.4 11.5 15.7 transfer
+ 7s 0 1614.6 1619.9 4.7 8.1 12.1 16.8 transfer
+ 8s 0 1344.4 1585.6 5.8 10.0 15.2 31.5 transfer
+ 9s 0 1351.9 1559.5 5.8 10.0 16.8 54.5 transfer
+ 10s 0 1514.8 1555.0 5.2 8.1 12.1 16.8 transfer
+ ...
+ ~~~
+
+ After the specified duration (10 minutes in this case), the workload will stop and you'll see totals printed to standard output:
+
+ ~~~
+ _elapsed___errors_____ops(total)___ops/sec(cum)__avg(ms)__p50(ms)__p95(ms)__p99(ms)_pMax(ms)__result
+ 600.0s 0 823902 1373.2 5.8 5.5 10.0 15.2 209.7
+ ~~~
+
+ {{site.data.alerts.callout_success}}
+ For more `tpcc` options, use `cockroach workload run tpcc --help`. For details about other workloads built into the `cockroach` binary, use `cockroach workload --help`.
+ {{site.data.alerts.end}}
+
+1. To monitor the load generator's progress, open the [DB Console]({% link {{ page.version.version }}/ui-overview.md %}) by pointing a browser to the address in the `admin` field in the standard output of any node on startup.
+
+ Since the load generator is pointed at the load balancer, the connections will be evenly distributed across nodes. To verify this, click **Metrics** on the left, select the **SQL** dashboard, and then check the **SQL Connections** graph. You can use the **Graph** menu to filter the graph for specific nodes.
diff --git a/src/current/_includes/v25.1/prod-deployment/securecockroachdb.service b/src/current/_includes/v25.1/prod-deployment/securecockroachdb.service
new file mode 100644
index 00000000000..13658ae4cce
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/securecockroachdb.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=Cockroach Database cluster node
+Requires=network.target
+[Service]
+Type=notify
+WorkingDirectory=/var/lib/cockroach
+ExecStart=/usr/local/bin/cockroach start --certs-dir=certs --advertise-addr=<node1 address> --join=<node1 address>,<node2 address>,<node3 address> --cache=.25 --max-sql-memory=.25
+TimeoutStopSec=300
+Restart=always
+RestartSec=10
+StandardOutput=syslog
+StandardError=syslog
+SyslogIdentifier=cockroach
+User=cockroach
+[Install]
+WantedBy=default.target
diff --git a/src/current/_includes/v25.1/prod-deployment/synchronize-clocks.md b/src/current/_includes/v25.1/prod-deployment/synchronize-clocks.md
new file mode 100644
index 00000000000..b120a3a735b
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/synchronize-clocks.md
@@ -0,0 +1,179 @@
+CockroachDB requires moderate levels of [clock synchronization]({% link {{ page.version.version }}/recommended-production-settings.md %}#clock-synchronization) to preserve data consistency. For this reason, when a node detects that its clock is out of sync with at least half of the other nodes in the cluster by 80% of the maximum offset allowed (500ms by default), it spontaneously shuts down. This avoids the risk of consistency anomalies, but it's best to prevent clocks from drifting too far in the first place by running clock synchronization software on each node.
+
+{% if page.title contains "Digital Ocean" or page.title contains "On-Premises" %}
+
+[`ntpd`](http://doc.ntp.org/) should keep offsets in the single-digit milliseconds, so that software is featured here, but other methods of clock synchronization are suitable as well.
+
+1. SSH to the first machine.
+
+1. Disable `timesyncd`, which tends to be active by default on some Linux distributions:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ sudo timedatectl set-ntp no
+ ~~~
+
+ Verify that `timesyncd` is off:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ timedatectl
+ ~~~
+
+ Look for `Network time on: no` or `NTP enabled: no` in the output.
+
+1. Install the `ntp` package:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ sudo apt-get install ntp
+ ~~~
+
+1. Stop the NTP daemon:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ sudo service ntp stop
+ ~~~
+
+1. Sync the machine's clock with Google's NTP service:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ sudo ntpd -b time.google.com
+ ~~~
+
+ To make this change permanent, in the `/etc/ntp.conf` file, remove or comment out any lines starting with `server` or `pool` and add the following lines:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~
+ server time1.google.com iburst
+ server time2.google.com iburst
+ server time3.google.com iburst
+ server time4.google.com iburst
+ ~~~
+
+ Restart the NTP daemon:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ sudo service ntp start
+ ~~~
+
+ {{site.data.alerts.callout_info}}
+ We recommend Google's NTP service because it handles ["smearing" the leap second](https://developers.google.com/time/smear). If you use a different NTP service that doesn't smear the leap second, be sure to configure client-side smearing in the same way on each machine. See the [Production Checklist]({% link {{ page.version.version }}/recommended-production-settings.md %}#considerations) for details.
+ {{site.data.alerts.end}}
+
+1. Verify that the machine is using a Google NTP server:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ sudo ntpq -p
+ ~~~
+
+ The active NTP server will be marked with an asterisk.
+
+1. Repeat these steps for each machine where a CockroachDB node will run.
+
+{% elsif page.title contains "Google" %}
+
+Compute Engine instances are preconfigured to use [NTP](http://www.ntp.org/), which should keep offsets in the single-digit milliseconds. However, Google can’t predict how external NTP services, such as `pool.ntp.org`, will handle the leap second. Therefore, you should:
+
+- [Configure each GCE instance to use Google's internal NTP service](https://cloud.google.com/compute/docs/instances/configure-ntp#configure_ntp_for_your_instances).
+- If you plan to run a hybrid cluster across GCE and other cloud providers or environments, note that all of the nodes must be synced to the same time source, or to different sources that implement leap second smearing in the same way. See the [Production Checklist]({% link {{ page.version.version }}/recommended-production-settings.md %}#considerations) for details.
+
+{% elsif page.title contains "AWS" %}
+
+Amazon provides the [Amazon Time Sync Service](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html), which uses a fleet of satellite-connected and atomic reference clocks in each AWS Region to deliver accurate current time readings. The service also smears the leap second.
+
+- [Configure each AWS instance to use the internal Amazon Time Sync Service](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html#configure-amazon-time-service).
+  - Per the above instructions, ensure that `/etc/chrony.conf` on the instance contains the line `server 169.254.169.123 prefer iburst minpoll 4 maxpoll 4` and that other `server` or `pool` lines are commented out.
+  - To verify that Amazon Time Sync Service is being used, run `chronyc sources -v` and check for a line containing `* 169.254.169.123`. The `*` denotes the preferred time server. See the example after this list.
+- If you plan to run a hybrid cluster across GCE and other cloud providers or environments, note that all of the nodes must be synced to the same time source, or to different sources that implement leap second smearing in the same way. See the [Production Checklist]({% link {{ page.version.version }}/recommended-production-settings.md %}#considerations) for details.
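+
+For example, assuming `chrony` is in use as described in the bullets above, you can spot-check the configuration and the active time source with:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ grep -E '^(server|pool)' /etc/chrony.conf
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ chronyc sources -v
+~~~
+
+The first command should return only the `server 169.254.169.123 prefer iburst minpoll 4 maxpoll 4` line; the second should show `169.254.169.123` marked with `*`.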
+
+{% elsif page.title contains "Azure" %}
+
+[`ntpd`](http://doc.ntp.org/) should keep offsets in the single-digit milliseconds, so that software is featured here. However, to run `ntpd` properly on Azure VMs, it's necessary to first unbind the Time Synchronization device used by the Hyper-V technology running Azure VMs; this device aims to synchronize time between the VM and its host operating system but has been known to cause problems.
+
+1. SSH to the first machine.
+
+1. Find the ID of the Hyper-V Time Synchronization device:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ curl -O https://raw.githubusercontent.com/torvalds/linux/master/tools/hv/lsvmbus
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ python lsvmbus -vv | grep -w "Time Synchronization" -A 3
+ ~~~
+
+ ~~~
+ VMBUS ID 12: Class_ID = {9527e630-d0ae-497b-adce-e80ab0175caf} - [Time Synchronization]
+ Device_ID = {2dd1ce17-079e-403c-b352-a1921ee207ee}
+ Sysfs path: /sys/bus/vmbus/devices/2dd1ce17-079e-403c-b352-a1921ee207ee
+ Rel_ID=12, target_cpu=0
+ ~~~
+
+1. Unbind the device, using the `Device_ID` from the previous command's output:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+    $ echo <Device_ID> | sudo tee /sys/bus/vmbus/drivers/hv_utils/unbind
+ ~~~
+
+1. Install the `ntp` package:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ sudo apt-get install ntp
+ ~~~
+
+1. Stop the NTP daemon:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ sudo service ntp stop
+ ~~~
+
+1. Sync the machine's clock with Google's NTP service:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ sudo ntpd -b time.google.com
+ ~~~
+
+ To make this change permanent, in the `/etc/ntp.conf` file, remove or comment out any lines starting with `server` or `pool` and add the following lines:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~
+ server time1.google.com iburst
+ server time2.google.com iburst
+ server time3.google.com iburst
+ server time4.google.com iburst
+ ~~~
+
+ Restart the NTP daemon:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ sudo service ntp start
+ ~~~
+
+ {{site.data.alerts.callout_info}}
+ We recommend Google's NTP service because it handles ["smearing" the leap second](https://developers.google.com/time/smear). If you use a different NTP service that doesn't smear the leap second, be sure to configure client-side smearing in the same way on each machine. See the [Production Checklist]({% link {{ page.version.version }}/recommended-production-settings.md %}#considerations) for details.
+ {{site.data.alerts.end}}
+
+1. Verify that the machine is using a Google NTP server:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ sudo ntpq -p
+ ~~~
+
+ The active NTP server will be marked with an asterisk.
+
+1. Repeat these steps for each machine where a CockroachDB node will run.
+
+{% endif %}
diff --git a/src/current/_includes/v25.1/prod-deployment/terminology-vcpu.md b/src/current/_includes/v25.1/prod-deployment/terminology-vcpu.md
new file mode 100644
index 00000000000..790ce37a2b9
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/terminology-vcpu.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+In our sizing and production guidance, 1 vCPU is considered equivalent to 1 core in the underlying hardware platform.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/prod-deployment/topology-recommendations.md b/src/current/_includes/v25.1/prod-deployment/topology-recommendations.md
new file mode 100644
index 00000000000..b6cdfdb7510
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/topology-recommendations.md
@@ -0,0 +1,26 @@
+- Do not run multiple node processes on the same VM or machine. This defeats CockroachDB's replication and causes the system to be a single point of failure. Instead, start each node on a separate VM or machine.
+- To start a node with multiple disks or SSDs, you can use either of these approaches:
+ - Configure the disks or SSDs as a single RAID volume, then pass the RAID volume to the `--store` flag when starting the `cockroach` process on the node.
+ - Provide a separate `--store` flag for each disk when starting the `cockroach` process on the node. For more details about stores, see [Start a Node]({% link {{ page.version.version }}/cockroach-start.md %}#store).
+
+ {{site.data.alerts.callout_danger}}
+ If you start a node with multiple `--store` flags, it is not possible to scale back down to only using a single store on the node. Instead, you must decommission the node and start a new node with the updated `--store`.
+ {{site.data.alerts.end}}
+
+- When starting each node, use the [`--locality`]({% link {{ page.version.version }}/cockroach-start.md %}#locality) flag to describe the node's location, for example, `--locality=region=west,zone=us-west-1`. The key-value pairs should be ordered from most to least inclusive, and the keys and order of key-value pairs must be the same on all nodes.
+
+- When deploying in a single availability zone:
+
+ - To be able to tolerate the failure of any 1 node, use at least 3 nodes with the [`default` 3-way replication factor]({% link {{ page.version.version }}/configure-replication-zones.md %}#view-the-default-replication-zone). In this case, if 1 node fails, each range retains 2 of its 3 replicas, a majority.
+
+ - To be able to tolerate 2 simultaneous node failures, use at least 5 nodes and [increase the `default` replication factor for user data]({% link {{ page.version.version }}/configure-replication-zones.md %}#edit-the-default-replication-zone) to 5. The replication factor for [important internal data]({% link {{ page.version.version }}/configure-replication-zones.md %}#create-a-replication-zone-for-a-system-range) is 5 by default, so no adjustments are needed for internal data. In this case, if 2 nodes fail at the same time, each range retains 3 of its 5 replicas, a majority.
+
+- When deploying across multiple availability zones:
+
+ - To be able to tolerate the failure of 1 entire AZ in a region, use at least 3 AZs per region and set `--locality` on each node to spread data evenly across regions and AZs. In this case, if 1 AZ goes offline, the 2 remaining AZs retain a majority of replicas.
+
+ - To ensure that ranges are split evenly across nodes, use the same number of nodes in each AZ. This is to avoid overloading any nodes with excessive resource consumption.
+
+- When deploying across multiple regions:
+
+ - To be able to tolerate the failure of 1 entire region, use at least 3 regions.
diff --git a/src/current/_includes/v25.1/prod-deployment/use-cluster.md b/src/current/_includes/v25.1/prod-deployment/use-cluster.md
new file mode 100644
index 00000000000..0230ff5e682
--- /dev/null
+++ b/src/current/_includes/v25.1/prod-deployment/use-cluster.md
@@ -0,0 +1,12 @@
+Now that your deployment is working, you can:
+
+1. [Implement your data model]({% link {{ page.version.version }}/sql-statements.md %}).
+1. [Create users]({% link {{ page.version.version }}/create-user.md %}) and [grant them privileges]({% link {{ page.version.version }}/grant.md %}).
+1. [Connect your application]({% link {{ page.version.version }}/install-client-drivers.md %}). Be sure to connect your application to the load balancer, not to a CockroachDB node.
+1. [Take backups]({% link {{ page.version.version }}/take-full-and-incremental-backups.md %}) of your data.
+
+You may also want to adjust the way the cluster replicates data. For example, by default, a multi-node cluster replicates all data 3 times; you can change this replication factor or create additional rules for replicating individual databases and tables differently. For more information, see [Replication Controls]({% link {{ page.version.version }}/configure-replication-zones.md %}).
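+
+For example, on a secure cluster, a minimal sketch of raising the replication factor for a single database from your local machine (the database name and load balancer address are placeholders):
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ cockroach sql --certs-dir=certs --host=<address of load balancer> \
+--execute="ALTER DATABASE bank CONFIGURE ZONE USING num_replicas = 5;"
+~~~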
+
+{{site.data.alerts.callout_danger}}
+When running a cluster of 5 nodes or more, it's safest to [increase the replication factor for important internal data]({% link {{ page.version.version }}/configure-replication-zones.md %}#create-a-replication-zone-for-a-system-range) to 5, even if you do not do so for user data. For the cluster as a whole to remain available, the ranges for this internal data must always retain a majority of their replicas.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/resilience/dr-feature-table.md b/src/current/_includes/v25.1/resilience/dr-feature-table.md
new file mode 100644
index 00000000000..2429d8b0675
--- /dev/null
+++ b/src/current/_includes/v25.1/resilience/dr-feature-table.md
@@ -0,0 +1,56 @@
+ | Point-in-time backup & restore | Physical cluster replication (asynchronous)
+----|----|----
+RPO | >=5 minutes | 10s of seconds
+RTO | Minutes to hours, depending on data size and number of nodes | Seconds to minutes, depending on cluster size and time of failover
+Write latency | No impact | No impact
+Recovery | Manual restore | Manual failover
+Fault tolerance | Not applicable | Zero RPO for node and availability zone failures within a cluster; region failures with loss up to RPO in a two-region (or two-datacenter) setup
+Minimum regions to achieve fault tolerance | 1 | 2
diff --git a/src/current/_includes/v25.1/resilience/recovery-objectives-definition.md b/src/current/_includes/v25.1/resilience/recovery-objectives-definition.md
new file mode 100644
index 00000000000..bcbd9102359
--- /dev/null
+++ b/src/current/_includes/v25.1/resilience/recovery-objectives-definition.md
@@ -0,0 +1,2 @@
+- **Recovery Point Objective (RPO)**: The maximum amount of data loss (measured by time) that an organization can tolerate.
+- **Recovery Time Objective (RTO)**: The maximum length of time it should take to restore normal operations following an outage.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/scram-authentication-recommendations.md b/src/current/_includes/v25.1/scram-authentication-recommendations.md
new file mode 100644
index 00000000000..2ad41f75cd8
--- /dev/null
+++ b/src/current/_includes/v25.1/scram-authentication-recommendations.md
@@ -0,0 +1,4 @@
+- Test and adjust your workloads in batches when migrating to SCRAM authentication.
+- Start by enabling SCRAM authentication in a testing environment, and test the performance of your client application against the types of workloads you expect it to handle in production before rolling the changes out to production.
+- Limit the maximum number of connections in the client driver's connection pool.
+- Limit the maximum number of concurrent transactions the client application can issue.
diff --git a/src/current/_includes/v25.1/setup/create-a-free-cluster.md b/src/current/_includes/v25.1/setup/create-a-free-cluster.md
new file mode 100644
index 00000000000..28fbf2e780b
--- /dev/null
+++ b/src/current/_includes/v25.1/setup/create-a-free-cluster.md
@@ -0,0 +1,9 @@
+{% include cockroachcloud/free-cluster-limit.md %}
+
+1. If you haven't already, sign up for a CockroachDB {{ site.data.products.cloud }} account.
+1. [Log in](https://cockroachlabs.cloud/) to your CockroachDB {{ site.data.products.cloud }} account.
+1. On the **Clusters** page, click **Create Cluster**.
+1. On the **Create your cluster** page, select **Serverless**.
+1. Click **Create cluster**.
+
+ Your cluster will be created in a few seconds and the **Create SQL user** dialog will display.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/setup/create-first-sql-user.md b/src/current/_includes/v25.1/setup/create-first-sql-user.md
new file mode 100644
index 00000000000..21c080a0626
--- /dev/null
+++ b/src/current/_includes/v25.1/setup/create-first-sql-user.md
@@ -0,0 +1,8 @@
+The **Create SQL user** dialog allows you to create a new SQL user and password.
+
+1. Enter a username in the **SQL user** field or use the one provided by default.
+1. Click **Generate & save password**.
+1. Copy the generated password and save it in a secure location.
+1. Click **Next**.
+
+ Currently, all new SQL users are created with admin privileges. For more information and to change the default settings, see [Manage SQL users on a cluster]({% link cockroachcloud/managing-access.md %}#manage-sql-users-on-a-cluster).
diff --git a/src/current/_includes/v25.1/setup/init-bank-sample.md b/src/current/_includes/v25.1/setup/init-bank-sample.md
new file mode 100644
index 00000000000..534658659ef
--- /dev/null
+++ b/src/current/_includes/v25.1/setup/init-bank-sample.md
@@ -0,0 +1,38 @@
+1. Set the `DATABASE_URL` environment variable to the connection string for your cluster:
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ export DATABASE_URL="postgresql://root@localhost:26257?sslmode=disable"
+ ~~~
+
+
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ export DATABASE_URL="{connection-string}"
+ ~~~
+
+ Where `{connection-string}` is the connection string you copied earlier.
+
+
+
+
+1. To initialize the example database, use the [`cockroach sql`]({% link {{ page.version.version }}/cockroach-sql.md %}) command to execute the SQL statements in the `dbinit.sql` file:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ cat dbinit.sql | cockroach sql --url $DATABASE_URL
+ ~~~
+
+ The SQL statement in the initialization file should execute:
+
+ ~~~
+ CREATE TABLE
+
+
+ Time: 102ms
+ ~~~
diff --git a/src/current/_includes/v25.1/setup/sample-setup-certs.md b/src/current/_includes/v25.1/setup/sample-setup-certs.md
new file mode 100644
index 00000000000..e3c70dd385e
--- /dev/null
+++ b/src/current/_includes/v25.1/setup/sample-setup-certs.md
@@ -0,0 +1,78 @@
+
+
+
+
+
+
+
+
+
+
+### Choose your installation method
+
+You can create a CockroachDB {{ site.data.products.serverless }} cluster using either the CockroachDB Cloud Console, a web-based graphical user interface (GUI) tool, or ccloud, a command-line interface (CLI) tool.
+
+
+
+
+
+
+
+
+### Create a free cluster
+
+{% include cockroachcloud/quickstart/create-a-free-cluster.md %}
+
+### Create a SQL user
+
+{% include {{ page.version.version }}/setup/create-first-sql-user.md %}
+
+### Get the root certificate
+
+The **Connect to cluster** dialog shows information about how to connect to your cluster.
+
+1. Select **General connection string** from the **Select option** dropdown.
+1. Open a new terminal on your local machine, and run the **CA Cert download command** provided in the **Download CA Cert** section. The client driver used in this tutorial requires this certificate to connect to CockroachDB {{ site.data.products.cloud }}.
+
+### Get the connection string
+
+Open the **General connection string** section, then copy the connection string provided and save it in a secure location.
+
+{{site.data.alerts.callout_info}}
+The connection string is pre-populated with your username, password, cluster name, and other details. Your password, in particular, will be provided *only once*. Save it in a secure place (Cockroach Labs recommends a password manager) to connect to your cluster in the future. If you forget your password, you can reset it by going to the **SQL Users** page for the cluster, found at `https://cockroachlabs.cloud/cluster//users`.
+{{site.data.alerts.end}}
+
+
+
+
+
+Follow these steps to create a CockroachDB {{ site.data.products.serverless }} cluster using the ccloud CLI tool.
+
+{{site.data.alerts.callout_info}}
+The ccloud CLI tool is in Preview.
+{{site.data.alerts.end}}
+
+
+### Install ccloud
+
+{% include cockroachcloud/ccloud/install-ccloud.md %}
+
+### Run `ccloud quickstart` to create a new cluster, create a SQL user, and retrieve the connection string.
+
+{% include cockroachcloud/ccloud/quickstart.md %}
+
+Select **General connection string**, then copy the connection string displayed and save it in a secure location. The connection string is the line starting `postgresql://`.
+
+~~~
+? How would you like to connect? General connection string
+Retrieving cluster info: succeeded
+ Downloading cluster cert to /Users/maxroach/.postgresql/root.crt: succeeded
+postgresql://maxroach:ThisIsNotAGoodPassword@blue-dog-147.6wr.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-full&sslrootcert=%2FUsers%2Fmaxroach%2F.postgresql%2Froot.crt
+~~~
+
+
+You can create a CockroachDB {{ site.data.products.serverless }} cluster using either the CockroachDB Cloud Console, a web-based graphical user interface (GUI) tool, or ccloud, a command-line interface (CLI) tool.
+
+
+
+
+
+
+
+
+### Create a free cluster
+
+{% include cockroachcloud/quickstart/create-a-free-cluster.md %}
+
+### Create a SQL user
+
+{% include {{ page.version.version }}/setup/create-first-sql-user.md %}
+
+### Get the connection string
+
+The **Connect to cluster** dialog shows information about how to connect to your cluster.
+
+1. Select **Java** from the **Select option/language** dropdown.
+1. Select **JDBC** from the **Select tool** dropdown.
+1. Copy the command provided to set the `JDBC_DATABASE_URL` environment variable.
+
+ {{site.data.alerts.callout_info}}
+ The JDBC connection URL is pre-populated with your username, password, cluster name, and other details. Your password, in particular, will be provided *only once*. Save it in a secure place (Cockroach Labs recommends a password manager) to connect to your cluster in the future. If you forget your password, you can reset it by going to the **SQL Users** page for the cluster, found at `https://cockroachlabs.cloud/cluster//users`.
+ {{site.data.alerts.end}}
+
+
+
+
+
+Follow these steps to create a CockroachDB {{ site.data.products.serverless }} cluster using the ccloud CLI tool.
+
+{{site.data.alerts.callout_info}}
+The ccloud CLI tool is in Preview.
+{{site.data.alerts.end}}
+
+
+### Install ccloud
+
+{% include cockroachcloud/ccloud/install-ccloud.md %}
+
+### Run `ccloud quickstart` to create a new cluster, create a SQL user, and retrieve the connection string.
+
+{% include cockroachcloud/ccloud/quickstart.md %}
+
+Select **General connection string**, then copy the connection string displayed and save it in a secure location. The connection string is the line starting `postgresql://`.
+
+~~~
+? How would you like to connect? General connection string
+Retrieving cluster info: succeeded
+ Downloading cluster cert to /Users/maxroach/.postgresql/root.crt: succeeded
+postgresql://maxroach:ThisIsNotAGoodPassword@blue-dog-147.6wr.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-full&sslrootcert=%2FUsers%2Fmaxroach%2F.postgresql%2Froot.crt
+~~~
+
+
+
+
+
+
+{% include {{ page.version.version }}/setup/start-single-node-insecure.md %}
+
+
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/setup/sample-setup-parameters-certs.md b/src/current/_includes/v25.1/setup/sample-setup-parameters-certs.md
new file mode 100644
index 00000000000..7cbf4348f1a
--- /dev/null
+++ b/src/current/_includes/v25.1/setup/sample-setup-parameters-certs.md
@@ -0,0 +1,85 @@
+
+
+
+
+
+
+
+
+
+
+### Choose your installation method
+
+You can install a CockroachDB {{ site.data.products.serverless }} cluster using either the CockroachDB Cloud Console, a web-based graphical user interface (GUI) tool, or ccloud, a command-line interface (CLI) tool.
+
+
+
+
+
+
+
+
+### Create a free cluster
+
+{% include cockroachcloud/quickstart/create-a-free-cluster.md %}
+
+### Create a SQL user
+
+{% include {{ page.version.version }}/setup/create-first-sql-user.md %}
+
+### Get the root certificate
+
+The **Connect to cluster** dialog shows information about how to connect to your cluster.
+
+1. Select **General connection string** from the **Select option** dropdown.
+1. Open a new terminal on your local machine, and run the **CA Cert download command** provided in the **Download CA Cert** section. The client driver used in this tutorial requires this certificate to connect to CockroachDB {{ site.data.products.cloud }}.
+
+### Get the connection information
+
+1. Select **Parameters only** from the **Select option** dropdown.
+1. Copy the connection information for each parameter displayed and save it in a secure location.
+
+
+
+
+
+Follow these steps to create a CockroachDB {{ site.data.products.serverless }} cluster using the ccloud CLI tool.
+
+{{site.data.alerts.callout_info}}
+The ccloud CLI tool is in Preview.
+{{site.data.alerts.end}}
+
+
+### Install ccloud
+
+{% include cockroachcloud/ccloud/install-ccloud.md %}
+
+### Run `ccloud quickstart` to create a new cluster, create a SQL user, and retrieve the connection string.
+
+{% include cockroachcloud/ccloud/quickstart.md %}
+
+Select **Parameters only** then copy the connection parameters displayed and save them in a secure location.
+
+~~~
+? How would you like to connect? Parameters only
+Looking up cluster ID: succeeded
+Creating SQL user: succeeded
+Success! Created SQL user
+ name: maxroach
+ cluster: 37174250-b944-461f-b1c1-3a99edb6af32
+Retrieving cluster info: succeeded
+Connection parameters
+ Database: defaultdb
+ Host: blue-dog-147.6wr.cockroachlabs.cloud
+ Password: ThisIsNotAGoodPassword
+ Port: 26257
+ Username: maxroach
+~~~
+
+
+
+You can install a CockroachDB {{ site.data.products.serverless }} cluster using either the CockroachDB Cloud Console, a web-based graphical user interface (GUI) tool, or ccloud, a command-line interface (CLI) tool.
+
+
+
+
+
+
+
+
+### Create a free cluster
+
+{% include cockroachcloud/quickstart/create-a-free-cluster.md %}
+
+### Create a SQL user
+
+{% include {{ page.version.version }}/setup/create-first-sql-user.md %}
+
+### Get the connection information
+
+The **Connect to cluster** dialog shows information about how to connect to your cluster.
+
+1. Select **Parameters only** from the **Select option** dropdown.
+1. Copy the connection information for each parameter displayed and save it in a secure location.
+
+
+
+
+
+Follow these steps to create a CockroachDB {{ site.data.products.serverless }} cluster using the ccloud CLI tool.
+
+{{site.data.alerts.callout_info}}
+The ccloud CLI tool is in Preview.
+{{site.data.alerts.end}}
+
+
+### Install ccloud
+
+{% include cockroachcloud/ccloud/install-ccloud.md %}
+
+### Run `ccloud quickstart` to create a new cluster, create a SQL user, and retrieve the connection string.
+
+{% include cockroachcloud/ccloud/quickstart.md %}
+
+Select **Parameters only** then copy the connection parameters displayed and save them in a secure location.
+
+~~~
+? How would you like to connect? Parameters only
+Looking up cluster ID: succeeded
+Creating SQL user: succeeded
+Success! Created SQL user
+ name: maxroach
+ cluster: 37174250-b944-461f-b1c1-3a99edb6af32
+Retrieving cluster info: succeeded
+Connection parameters
+ Database: defaultdb
+ Host: blue-dog-147.6wr.cockroachlabs.cloud
+ Password: ThisIsNotAGoodPassword
+ Port: 26257
+ Username: maxroach
+~~~
+
+
+
+You can create a CockroachDB {{ site.data.products.serverless }} cluster using either the CockroachDB Cloud Console, a web-based graphical user interface (GUI) tool, or ccloud, a command-line interface (CLI) tool.
+
+
+
+
+
+
+
+
+### Create a free cluster
+
+{% include cockroachcloud/quickstart/create-a-free-cluster.md %}
+
+### Create a SQL user
+
+{% include {{ page.version.version }}/setup/create-first-sql-user.md %}
+
+### Get the connection string
+
+The **Connect to cluster** dialog shows information about how to connect to your cluster.
+
+1. Select **General connection string** from the **Select option** dropdown.
+1. Open the **General connection string** section, then copy the connection string provided and save it in a secure location.
+
+ The sample application used in this tutorial uses system CA certificates for server certificate verification, so you can skip the **Download CA Cert** instructions.
+
+ {{site.data.alerts.callout_info}}
+ The connection string is pre-populated with your username, password, cluster name, and other details. Your password, in particular, will be provided *only once*. Save it in a secure place (Cockroach Labs recommends a password manager) to connect to your cluster in the future. If you forget your password, you can reset it by going to the **SQL Users** page for the cluster, found at `https://cockroachlabs.cloud/cluster/<cluster-id>/users`.
+ {{site.data.alerts.end}}
+
+
+
+
+
+Follow these steps to create a CockroachDB {{ site.data.products.serverless }} cluster using the ccloud CLI tool.
+
+{{site.data.alerts.callout_info}}
+The ccloud CLI tool is in Preview.
+{{site.data.alerts.end}}
+
+
+### Install ccloud
+
+{% include cockroachcloud/ccloud/install-ccloud.md %}
+
+### Run `ccloud quickstart` to create a new cluster, create a SQL user, and retrieve the connection string.
+
+{% include cockroachcloud/ccloud/quickstart.md %}
+
+Select **General connection string**, then copy the connection string displayed and save it in a secure location. The connection string is the line starting `postgresql://`.
+
+~~~
+? How would you like to connect? General connection string
+Retrieving cluster info: succeeded
+ Downloading cluster cert to /Users/maxroach/.postgresql/root.crt: succeeded
+postgresql://maxroach:ThisIsNotAGoodPassword@blue-dog-147.6wr.cockroachlabs.cloud:26257/defaultdb?sslmode=verify-full&sslrootcert=%2FUsers%2Fmaxroach%2F.postgresql%2Froot.crt
+~~~
+
+**Env Variable:** `COCKROACH_URL` **Default:** no URL
+`--host` | The server host and port number to connect to. This can be the address of any node in the cluster.
+`-u` | The [SQL user]({% link {{ page.version.version }}/create-user.md %}) that will own the client session. **Env Variable:** `COCKROACH_USER` **Default:** `root`
+`--insecure` | Use an insecure connection. **Env Variable:** `COCKROACH_INSECURE` **Default:** `false`
+`--cert-principal-map` | A comma-separated list of `<cert-principal>:<db-principal>` mappings. This allows mapping the principal in a cert to a DB principal such as `node` or `root` or any SQL user. This is intended for use in situations where the certificate management system places restrictions on the `Subject.CommonName` or `SubjectAlternateName` fields in the certificate (e.g., disallowing a `CommonName` like `node` or `root`). If multiple mappings are provided for the same `<cert-principal>`, the last one specified in the list takes precedence. A principal not specified in the map is passed through as-is via the identity function. A cert is allowed to authenticate a DB principal if the DB principal name is contained in the mapped `CommonName` or DNS-type `SubjectAlternateName` fields.
+`--certs-dir` | The path to the [certificate directory]({% link {{ page.version.version }}/cockroach-cert.md %}) containing the CA and client certificates and client key. **Env Variable:** `COCKROACH_CERTS_DIR` **Default:** `${HOME}/.cockroach-certs/`
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/sql/covering-index.md b/src/current/_includes/v25.1/sql/covering-index.md
new file mode 100644
index 00000000000..4ce5b00cf12
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/covering-index.md
@@ -0,0 +1 @@
+An index that stores all the columns needed by a query is also known as a _covering index_ for that query. When a query has a covering index, CockroachDB can use that index directly instead of doing an "index join" with the primary index, which is likely to be slower.
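+
+As a minimal sketch (the `users` table and its columns here are hypothetical), storing an extra column in a secondary index makes that index covering for queries that only need the stored column:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+-- "city" is the index key and "name" is stored in the index, so the
+-- query below can be served from the index alone, with no join back
+-- to the primary index.
+CREATE INDEX users_city_idx ON users (city) STORING (name);
+
+SELECT name FROM users WHERE city = 'seattle';
+~~~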
diff --git a/src/current/_includes/v25.1/sql/crdb-internal-is-not-supported-for-production-use.md b/src/current/_includes/v25.1/sql/crdb-internal-is-not-supported-for-production-use.md
new file mode 100644
index 00000000000..475a8804b04
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/crdb-internal-is-not-supported-for-production-use.md
@@ -0,0 +1 @@
+Many of the tables in the `crdb_internal` system catalog are **not supported for external use in production**. This output is provided **as a debugging aid only**. The output of particular `crdb_internal` facilities may change from patch release to patch release without advance warning. For more information, see [the `crdb_internal` documentation]({% link {{ page.version.version }}/crdb-internal.md %}).
diff --git a/src/current/_includes/v25.1/sql/crdb-internal-partitions-example.md b/src/current/_includes/v25.1/sql/crdb-internal-partitions-example.md
new file mode 100644
index 00000000000..680b0adf261
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/crdb-internal-partitions-example.md
@@ -0,0 +1,43 @@
+## Querying partitions programmatically
+
+The `crdb_internal.partitions` internal table contains information about the partitions in your database. In testing, scripting, and other programmatic environments, we recommend querying this table for partition information instead of using the `SHOW PARTITIONS` statement. For example, to get all `us_west` partitions in your database, you can run the following query:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SELECT * FROM crdb_internal.partitions WHERE name='us_west';
+~~~
+
+~~~
+ table_id | index_id | parent_name | name | columns | column_names | list_value | range_value | zone_id | subzone_id
++----------+----------+-------------+---------+---------+--------------+-------------------------------------------------+-------------+---------+------------+
+ 53 | 1 | NULL | us_west | 1 | city | ('seattle'), ('san francisco'), ('los angeles') | NULL | 0 | 0
+ 54 | 1 | NULL | us_west | 1 | city | ('seattle'), ('san francisco'), ('los angeles') | NULL | 54 | 1
+ 54 | 2 | NULL | us_west | 1 | city | ('seattle'), ('san francisco'), ('los angeles') | NULL | 54 | 2
+ 55 | 1 | NULL | us_west | 1 | city | ('seattle'), ('san francisco'), ('los angeles') | NULL | 55 | 1
+ 55 | 2 | NULL | us_west | 1 | city | ('seattle'), ('san francisco'), ('los angeles') | NULL | 55 | 2
+ 55 | 3 | NULL | us_west | 1 | vehicle_city | ('seattle'), ('san francisco'), ('los angeles') | NULL | 55 | 3
+ 56 | 1 | NULL | us_west | 1 | city | ('seattle'), ('san francisco'), ('los angeles') | NULL | 56 | 1
+ 58 | 1 | NULL | us_west | 1 | city | ('seattle'), ('san francisco'), ('los angeles') | NULL | 58 | 1
+(8 rows)
+~~~
+
+Other internal tables, like `crdb_internal.tables`, include information that could be useful in conjunction with `crdb_internal.partitions`.
+
+For example, if you want the output for your partitions to include the name of the table and database, you can perform a join of the two tables:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SELECT
+ partitions.name AS partition_name, column_names, list_value, tables.name AS table_name, database_name
+ FROM crdb_internal.partitions JOIN crdb_internal.tables ON partitions.table_id=tables.table_id
+ WHERE tables.name='users';
+~~~
+
+~~~
+ partition_name | column_names | list_value | table_name | database_name
++----------------+--------------+-------------------------------------------------+------------+---------------+
+ us_west | city | ('seattle'), ('san francisco'), ('los angeles') | users | movr
+ us_east | city | ('new york'), ('boston'), ('washington dc') | users | movr
+ europe_west | city | ('amsterdam'), ('paris'), ('rome') | users | movr
+(3 rows)
+~~~
diff --git a/src/current/_includes/v25.1/sql/crdb-internal-partitions.md b/src/current/_includes/v25.1/sql/crdb-internal-partitions.md
new file mode 100644
index 00000000000..11faab704cd
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/crdb-internal-partitions.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_success}}
+In testing, scripting, and other programmatic environments, we recommend querying the `crdb_internal.partitions` internal table for partition information instead of using the `SHOW PARTITIONS` statement. For more information, see [Querying partitions programmatically]({% link {{ page.version.version }}/show-partitions.md %}#querying-partitions-programmatically).
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/sql/cursors-vs-keyset-pagination.md b/src/current/_includes/v25.1/sql/cursors-vs-keyset-pagination.md
new file mode 100644
index 00000000000..ba5391b5ace
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/cursors-vs-keyset-pagination.md
@@ -0,0 +1,3 @@
+_Cursors_ are stateful objects that use more database resources than keyset pagination, since each cursor holds open a transaction. However, they are easier to use, and make it easier to get consistent results without having to write complex queries from your application logic. They do not require that the results be returned in a particular order (that is, you don't have to include an `ORDER BY` clause), which makes them more flexible.
+
+_Keyset pagination_ queries are usually much faster than cursors since they order by indexed columns. However, in order to get that performance they require that you return results in some defined order that can be calculated by your application's queries. Because that ordering involves calculating the start/end point of pages of results based on an indexed key, they require more care to write correctly.
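+
+As a rough sketch (the `users` table, its columns, and the page size are hypothetical), the two approaches look like this:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+-- Keyset pagination: page through an indexed column, starting after the
+-- last key returned by the previous page.
+SELECT id, name
+  FROM users
+ WHERE id > '00000000-0000-0000-0000-000000000000'
+ ORDER BY id
+ LIMIT 100;
+
+-- Cursor: declared inside a transaction; no ORDER BY is required.
+BEGIN;
+DECLARE users_cur CURSOR FOR SELECT id, name FROM users;
+FETCH 100 FROM users_cur;
+CLOSE users_cur;
+COMMIT;
+~~~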
diff --git a/src/current/_includes/v25.1/sql/db-terms.md b/src/current/_includes/v25.1/sql/db-terms.md
new file mode 100644
index 00000000000..5776ed951e0
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/db-terms.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+To avoid confusion with the general term "[database](https://en.wikipedia.org/wiki/Database)", throughout this guide we refer to the logical object as a *database*, to CockroachDB by name, and to a deployment of CockroachDB as a [*cluster*]({% link {{ page.version.version }}/architecture/glossary.md %}#cockroachdb-architecture-terms).
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/sql/dev-schema-change-limits.md b/src/current/_includes/v25.1/sql/dev-schema-change-limits.md
new file mode 100644
index 00000000000..e6f10db0bc9
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/dev-schema-change-limits.md
@@ -0,0 +1,3 @@
+Review the [limitations of online schema changes]({% link {{ page.version.version }}/online-schema-changes.md %}#known-limitations). CockroachDB [doesn't guarantee the atomicity of schema changes within transactions with multiple statements]({% link {{ page.version.version }}/online-schema-changes.md %}#schema-changes-within-transactions).
+
+ Cockroach Labs recommends that you perform schema changes outside explicit transactions. When a database [schema management tool]({% link {{ page.version.version }}/third-party-database-tools.md %}#schema-migration-tools) manages transactions on your behalf, include one schema change operation per transaction.
diff --git a/src/current/_includes/v25.1/sql/dev-schema-changes.md b/src/current/_includes/v25.1/sql/dev-schema-changes.md
new file mode 100644
index 00000000000..9e42fd08614
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/dev-schema-changes.md
@@ -0,0 +1 @@
+Use a [database schema migration tool]({% link {{ page.version.version }}/third-party-database-tools.md %}#schema-migration-tools) or the [CockroachDB SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}) instead of a [client library]({% link {{ page.version.version }}/third-party-database-tools.md %}#drivers) to execute [database schema changes](online-schema-changes.html).
diff --git a/src/current/_includes/v25.1/sql/disallow-full-table-scans.md b/src/current/_includes/v25.1/sql/disallow-full-table-scans.md
new file mode 100644
index 00000000000..57701770065
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/disallow-full-table-scans.md
@@ -0,0 +1,12 @@
+- At the cluster level, set `disallow_full_table_scans` for some or all users and roles. For example:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ ALTER ROLE ALL SET disallow_full_table_scans = true;
+ ~~~
+
+- At the application level, add `disallow_full_table_scans` to the connection string using the [`options` parameter]({% link {{page.version.version}}/connection-parameters.md %}#additional-connection-parameters).
+
+If you disable full scans, you can set the [`large_full_scan_rows` session variable]({% link {{ page.version.version }}/set-vars.md %}#large-full-scan-rows) to specify the maximum table size allowed for a full scan. If no alternative plan is possible, the optimizer will return an error.
+
+If you disable full scans, and you provide an [index hint]({% link {{ page.version.version }}/indexes.md %}#selection), the optimizer will try to avoid a full scan while also respecting the index hint. If this is not possible, the optimizer will return an error. If you do not provide an index hint and it is not possible to avoid a full scan, the optimizer will return an error, the full scan will be logged, and the `sql.guardrails.full_scan_rejected.count` [metric]({% link {{ page.version.version }}/ui-overview-dashboard.md %}) will be updated.
\ No newline at end of file
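+
+For example, a session could combine both guardrails; the row threshold shown is arbitrary:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+-- Disallow full scans in this session, except on tables the optimizer
+-- estimates at 1,000 rows or fewer.
+SET disallow_full_table_scans = true;
+SET large_full_scan_rows = 1000;
+~~~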
diff --git a/src/current/_includes/v25.1/sql/drop-role-considerations.md b/src/current/_includes/v25.1/sql/drop-role-considerations.md
new file mode 100644
index 00000000000..585a062934b
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/drop-role-considerations.md
@@ -0,0 +1,4 @@
+- The `admin` user/role cannot be dropped, and `root` must always be a member of `admin`.
+- A user/role cannot be dropped if it has privileges. Use [`REVOKE`]({% link {{ page.version.version }}/revoke.md %}) to remove privileges.
+- Users/roles that [own objects]({% link {{ page.version.version }}/security-reference/authorization.md %}#object-ownership) (such as databases, tables, schemas, and types) cannot be dropped until the [ownership is transferred to another user/role]({% link {{ page.version.version }}/alter-database.md %}#change-a-databases-owner).
+- If a user/role is logged in while a [different session]({% link {{ page.version.version }}/show-sessions.md %}) drops that user, CockroachDB checks that the user exists before allowing it to inherit privileges from [the `public` role]({% link {{ page.version.version }}/security-reference/authorization.md %}). In addition, any active [web]({% link {{ page.version.version }}/ui-overview.md %}#authentication) [sessions]({% link {{ page.version.version }}/cockroach-auth-session.md %}) are revoked when a user is dropped.
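+
+As a sketch of the cleanup order these rules imply (the object, database, and role names are hypothetical):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+-- Remove privileges, transfer ownership, then drop the role.
+REVOKE ALL ON TABLE movr.rides FROM maxroach;
+ALTER DATABASE movr OWNER TO roach_admin;
+DROP ROLE maxroach;
+~~~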
diff --git a/src/current/_includes/v25.1/sql/enable-super-region-primary-region-changes.md b/src/current/_includes/v25.1/sql/enable-super-region-primary-region-changes.md
new file mode 100644
index 00000000000..94920f7d481
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/enable-super-region-primary-region-changes.md
@@ -0,0 +1,23 @@
+By default, you may not change the [primary region]({% link {{ page.version.version }}/alter-database.md %}#set-primary-region) of a [multi-region database]({% link {{ page.version.version }}/multiregion-overview.md %}) when that region is part of a super region. This is a safety setting designed to prevent you from accidentally moving the data for a [regional table]({% link {{ page.version.version }}/regional-tables.md %}) that is meant to be stored in the super region out of that super region, which could break your data domiciling setup.
+
+If you are sure about what you are doing, you can allow modifying the primary region by setting the `alter_primary_region_super_region_override` [session setting]({% link {{ page.version.version }}/set-vars.md %}) to `'on'`:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SET alter_primary_region_super_region_override = 'on';
+~~~
+
+~~~
+SET
+~~~
+
+You can also accomplish this by setting the `sql.defaults.alter_primary_region_super_region_override.enable` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to `true`:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SET CLUSTER SETTING sql.defaults.alter_primary_region_super_region_override.enable = true;
+~~~
+
+~~~
+SET CLUSTER SETTING
+~~~
diff --git a/src/current/_includes/v25.1/sql/enable-super-regions.md b/src/current/_includes/v25.1/sql/enable-super-regions.md
new file mode 100644
index 00000000000..0dd7ac26077
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/enable-super-regions.md
@@ -0,0 +1,21 @@
+To enable super regions, set the `enable_super_regions` [session setting]({% link {{ page.version.version }}/set-vars.md %}) to `'on'`:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SET enable_super_regions = 'on';
+~~~
+
+~~~
+SET
+~~~
+
+You can also set the `sql.defaults.super_regions.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to `true`:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SET CLUSTER SETTING sql.defaults.super_regions.enabled = true;
+~~~
+
+~~~
+SET CLUSTER SETTING
+~~~
diff --git a/src/current/_includes/v25.1/sql/export-csv-tsv.md b/src/current/_includes/v25.1/sql/export-csv-tsv.md
new file mode 100644
index 00000000000..ea1e69968a0
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/export-csv-tsv.md
@@ -0,0 +1,11 @@
+[`IMPORT INTO`](import-into.html) requires that you export one file per table with the following attributes:
+
+- Files must be in valid [CSV](https://tools.ietf.org/html/rfc4180) (comma-separated values) or [TSV](https://www.iana.org/assignments/media-types/text/tab-separated-values) (tab-separated values) format.
+- The delimiter must be a single character. Use the [`delimiter` option](import-into.html#import-options) to set a character other than a comma (such as a tab, for TSV format).
+- Files must be UTF-8 encoded.
+- If one of the following characters appears in a field, the field must be enclosed by double quotes:
+ - Delimiter (`,` by default).
+ - Double quote (`"`). Because the field will be enclosed by double quotes, escape a double quote inside a field by preceding it with another double quote. For example: `"aaa","b""bb","ccc"`.
+ - Newline (`\n`).
+ - Carriage return (`\r`).
+- If a column is of type [`BYTES`](bytes.html), it can either be a valid UTF-8 string or a [hex-encoded byte literal](sql-constants.html#hexadecimal-encoded-byte-array-literals) beginning with `\x`. For example, a field whose value should be the bytes `1`, `2` would be written as `\x0102`.
\ No newline at end of file
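+
+For instance (the table, columns, and cloud storage URL below are placeholders), a TSV file that meets these requirements could be loaded with:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+-- Tab-delimited data requires overriding the default comma delimiter.
+IMPORT INTO users (id, city, name)
+    CSV DATA ('gs://bucket-name/users.tsv')
+    WITH delimiter = e'\t';
+~~~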
diff --git a/src/current/_includes/v25.1/sql/function-special-forms.md b/src/current/_includes/v25.1/sql/function-special-forms.md
new file mode 100644
index 00000000000..b9ac987444a
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/function-special-forms.md
@@ -0,0 +1,29 @@
+| Special form | Equivalent to |
+|-----------------------------------------------------------|---------------------------------------------|
+| `AT TIME ZONE` | `timezone()` |
+| `CURRENT_CATALOG` | `current_catalog()` |
+| `COLLATION FOR` | `pg_collation_for()` |
+| `CURRENT_DATE` | `current_date()` |
+| `CURRENT_ROLE` | `current_user()` |
+| `CURRENT_SCHEMA` | `current_schema()` |
+| `CURRENT_TIMESTAMP` | `current_timestamp()` |
+| `CURRENT_TIME` | `current_time()` |
+| `CURRENT_USER` | `current_user()` |
+| `EXTRACT(<part> FROM <value>)` | `extract("<part>", <value>)` |
+| `EXTRACT_DURATION(<part> FROM <value>)` | `extract_duration("<part>", <value>)` |
+| `OVERLAY(<text> PLACING <replacement> FROM <begin> FOR <length>)` | `overlay(<text>, <replacement>, <begin>, <length>)` |
+| `OVERLAY(<text> PLACING <replacement> FROM <begin>)` | `overlay(<text>, <replacement>, <begin>)` |
+| `POSITION(<substring> IN <string>)` | `strpos(<string>, <substring>)` |
+| `SESSION_USER` | `current_user()` |
+| `SUBSTRING(<text> FOR <length> FROM <begin>)` | `substring(<text>, <begin>, <length>)` |
+| `SUBSTRING(<text> FOR <length>)` | `substring(<text>, 1, <length>)` |
+| `SUBSTRING(<text> FROM <begin> FOR <length>)` | `substring(<text>, <begin>, <length>)` |
+| `SUBSTRING(<text> FROM <begin>)` | `substring(<text>, <begin>)` |
+| `TRIM(<trim_chars> FROM <text>)` | `btrim(<text>, <trim_chars>)` |
+| `TRIM(<text>, <trim_chars>)` | `btrim(<text>, <trim_chars>)` |
+| `TRIM(FROM <text>)` | `btrim(<text>)` |
+| `TRIM(LEADING <trim_chars> FROM <text>)` | `ltrim(<text>, <trim_chars>)` |
+| `TRIM(LEADING FROM <text>)` | `ltrim(<text>)` |
+| `TRIM(TRAILING <trim_chars> FROM <text>)` | `rtrim(<text>, <trim_chars>)` |
+| `TRIM(TRAILING FROM <text>)` | `rtrim(<text>)` |
+| `USER` | `current_user()` |
diff --git a/src/current/_includes/v25.1/sql/global-table-description.md b/src/current/_includes/v25.1/sql/global-table-description.md
new file mode 100644
index 00000000000..515f1dbd4c4
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/global-table-description.md
@@ -0,0 +1,7 @@
+A _global_ table is optimized for low-latency reads from every region in the database. This means that any region can effectively act as the home region of the table. The tradeoff is that writes will incur higher latencies from any given region, since writes have to be replicated across every region to make the global low-latency reads possible. Use global tables when your application has a "read-mostly" table of reference data that is rarely updated, and needs to be available to all regions.
+
+For an example of a table that can benefit from the _global_ table locality setting in a multi-region deployment, see the `promo_codes` table from the [MovR application]({% link {{ page.version.version }}/movr.md %}).
+
+For instructions showing how to set a table's locality to `GLOBAL`, see [`ALTER TABLE ... SET LOCALITY`]({% link {{ page.version.version }}/alter-table.md %}#global).
+
+For more information about global tables, including troubleshooting information, see [Global Tables]({% link {{ page.version.version }}/global-tables.md %}).
diff --git a/src/current/_includes/v25.1/sql/import-into-default-value.md b/src/current/_includes/v25.1/sql/import-into-default-value.md
new file mode 100644
index 00000000000..e7ef86fa18b
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/import-into-default-value.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_danger}}
+Column values cannot be generated by [`DEFAULT`]({% link {{ page.version.version }}/default-value.md %}) when importing; an import must include a value for every column specified in the `IMPORT INTO` statement. To use `DEFAULT` values, your file must contain values for the column upon import, or you can [add the column]({% link {{ page.version.version }}/alter-table.md %}#add-column) or [alter the column]({% link {{ page.version.version }}/alter-table.md %}#set-or-change-a-default-value) after the table has been imported.
+{{site.data.alerts.end}}
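+
+For example (hypothetical table and column), the imported file must supply a value for every column listed in the `IMPORT INTO` statement, and a default can be attached afterward:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+-- After the import finishes, attach a default so later INSERTs can omit the column.
+ALTER TABLE users ALTER COLUMN credits SET DEFAULT 10;
+~~~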
diff --git a/src/current/_includes/v25.1/sql/import-into-regional-by-row-table.md b/src/current/_includes/v25.1/sql/import-into-regional-by-row-table.md
new file mode 100644
index 00000000000..ffb93fc8046
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/import-into-regional-by-row-table.md
@@ -0,0 +1 @@
+`IMPORT INTO` cannot directly import data to [`REGIONAL BY ROW`]({% link {{ page.version.version }}/alter-table.md %}#regional-by-row) tables that are part of [multi-region databases]({% link {{ page.version.version }}/multiregion-overview.md %}). For more information, including a workaround for this limitation, see [Known Limitations]({% link {{ page.version.version }}/known-limitations.md %}#import-into-a-regional-by-row-table).
diff --git a/src/current/_includes/v25.1/sql/indexes-regional-by-row.md b/src/current/_includes/v25.1/sql/indexes-regional-by-row.md
new file mode 100644
index 00000000000..e02a9abcafb
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/indexes-regional-by-row.md
@@ -0,0 +1,3 @@
+ In [multi-region deployments]({% link {{ page.version.version }}/multiregion-overview.md %}), most users should use [`REGIONAL BY ROW` tables]({% link {{ page.version.version }}/table-localities.md %}#regional-by-row-tables) instead of explicit index [partitioning]({% link {{ page.version.version }}/partitioning.md %}). When you add an index to a `REGIONAL BY ROW` table, it is automatically partitioned on the [`crdb_region` column](alter-table.html#crdb_region). Explicit index partitioning is not required.
+
+ While CockroachDB is processing an [`ADD REGION`]({% link {{ page.version.version }}/alter-database.md %}#add-region) or [`DROP REGION`]({% link {{ page.version.version }}/alter-database.md %}#drop-region) statement on a particular database, creating or modifying an index will throw an error. Similarly, all [`ADD REGION`]({% link {{ page.version.version }}/alter-database.md %}#add-region) and [`DROP REGION`]({% link {{ page.version.version }}/alter-database.md %}#drop-region) statements will be blocked while an index is being modified on a `REGIONAL BY ROW` table within the same database.
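+
+ For illustration, adding an ordinary secondary index to a hypothetical `REGIONAL BY ROW` table needs no `PARTITION BY` clause:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ -- The new index on this REGIONAL BY ROW table is partitioned on
+ -- crdb_region automatically; nothing partition-related is written here.
+ CREATE INDEX rides_start_time_idx ON rides (start_time);
+ ~~~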
diff --git a/src/current/_includes/v25.1/sql/insert-vs-upsert.md b/src/current/_includes/v25.1/sql/insert-vs-upsert.md
new file mode 100644
index 00000000000..f22d20ea511
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/insert-vs-upsert.md
@@ -0,0 +1,3 @@
+When inserting or updating columns on a table that does not have [secondary indexes]({% link {{ page.version.version }}/indexes.md %}), Cockroach Labs recommends using an `UPSERT` statement instead of `INSERT ON CONFLICT DO UPDATE`. Whereas `INSERT ON CONFLICT` always performs a read, the `UPSERT` statement writes without reading, making it faster. This may be useful if you are using a simple SQL table of two columns to [simulate direct KV access]({% link {{ page.version.version }}/sql-faqs.md %}#can-i-use-cockroachdb-as-a-key-value-store).
+
+If the table has a secondary index, there is no performance difference between `UPSERT` and `INSERT ON CONFLICT`. However, `INSERT` without an `ON CONFLICT` clause may not scan the table for existing values. This can provide a performance improvement over `UPSERT`.
\ No newline at end of file
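+
+A minimal sketch of the two write paths on a hypothetical two-column `kv` table:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+-- Blind write: no read first, which is faster when "kv" has no secondary indexes.
+UPSERT INTO kv (k, v) VALUES ('k1', 'v1');
+
+-- Read-then-write: the existing row is always read before the update.
+INSERT INTO kv (k, v) VALUES ('k1', 'v1')
+    ON CONFLICT (k) DO UPDATE SET v = excluded.v;
+~~~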
diff --git a/src/current/_includes/v25.1/sql/inverted-joins.md b/src/current/_includes/v25.1/sql/inverted-joins.md
new file mode 100644
index 00000000000..a66d0a651fe
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/inverted-joins.md
@@ -0,0 +1,97 @@
+To run these examples, initialize a demo cluster with the MovR workload.
+
+{% include {{ page.version.version }}/demo_movr.md %}
+
+Create a GIN index on the `vehicles` table's `ext` column.
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE INVERTED INDEX idx_vehicle_details ON vehicles(ext);
+~~~
+
+Check the statement plan for a `SELECT` statement that uses an inner inverted join.
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+EXPLAIN SELECT * FROM vehicles@vehicles_pkey AS v2 INNER INVERTED JOIN vehicles@idx_vehicle_details AS v1 ON v1.ext @> v2.ext;
+~~~
+
+~~~
+ info
+---------------------------------------------
+ distribution: full
+ vectorized: true
+
+ • lookup join
+ │ table: vehicles@vehicles_pkey
+ │ equality: (city, id) = (city,id)
+ │ equality cols are key
+ │ pred: ext @> ext
+ │
+ └── • inverted join
+ │ table: vehicles@idx_vehicle_details
+ │
+ └── • scan
+ estimated row count: 3,750 (100% of the table; stats collected 1 hour ago)
+ table: vehicles@vehicles_pkey
+ spans: FULL SCAN
+(16 rows)
+~~~
+
+You can omit the `INNER INVERTED JOIN` statement by putting `v1.ext` on the left side of a `@>` join condition in a `WHERE` clause and using an [index hint]({% link {{ page.version.version }}/table-expressions.md %}#force-index-selection) for the GIN index.
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+EXPLAIN SELECT * FROM vehicles@idx_vehicle_details AS v1, vehicles AS v2 WHERE v1.ext @> v2.ext;
+~~~
+
+~~~
+ info
+--------------------------------------------------------------------------------------------
+ distribution: full
+ vectorized: true
+
+ • lookup join
+ │ table: vehicles@vehicles_pkey
+ │ equality: (city, id) = (city,id)
+ │ equality cols are key
+ │ pred: ext @> ext
+ │
+ └── • inverted join
+ │ table: vehicles@idx_vehicle_details
+ │
+ └── • scan
+ estimated row count: 3,750 (100% of the table; stats collected 1 hour ago)
+ table: vehicles@vehicles_pkey
+ spans: FULL SCAN
+(16 rows)
+~~~
+
+Use the `LEFT INVERTED JOIN` hint to perform a left inverted join.
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+EXPLAIN SELECT * FROM vehicles AS v2 LEFT INVERTED JOIN vehicles AS v1 ON v1.ext @> v2.ext;
+~~~
+
+~~~
+ info
+------------------------------------------------------------------------------------------
+ distribution: full
+ vectorized: true
+
+ • lookup join (left outer)
+ │ table: vehicles@vehicles_pkey
+ │ equality: (city, id) = (city,id)
+ │ equality cols are key
+ │ pred: ext @> ext
+ │
+ └── • inverted join (left outer)
+ │ table: vehicles@idx_vehicle_details
+ │
+ └── • scan
+ estimated row count: 3,750 (100% of the table; stats collected 1 hour ago)
+ table: vehicles@vehicles_pkey
+ spans: FULL SCAN
+(16 rows)
+~~~
diff --git a/src/current/_includes/v25.1/sql/isolation-levels.md b/src/current/_includes/v25.1/sql/isolation-levels.md
new file mode 100644
index 00000000000..29afd53f2b0
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/isolation-levels.md
@@ -0,0 +1,7 @@
+Isolation is an element of [ACID transactions](https://en.wikipedia.org/wiki/ACID) that determines how concurrency is controlled, and ultimately guarantees consistency. CockroachDB offers two transaction isolation levels: [`SERIALIZABLE`]({% link {{ page.version.version }}/demo-serializable.md %}) and [`READ COMMITTED`]({% link {{ page.version.version }}/read-committed.md %}).
+
+By default, CockroachDB executes all transactions at the strongest ANSI transaction isolation level: `SERIALIZABLE`, which permits no concurrency anomalies. To place all transactions in a serializable ordering, `SERIALIZABLE` isolation may require [transaction restarts]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}) and [client-side retry handling]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#client-side-retry-handling). For a demonstration of how `SERIALIZABLE` prevents anomalies such as write skew, see [Serializable Transactions]({% link {{ page.version.version }}/demo-serializable.md %}).
+
+CockroachDB can be configured to execute transactions at [`READ COMMITTED`]({% link {{ page.version.version }}/read-committed.md %}) instead of `SERIALIZABLE` isolation. If [enabled]({% link {{ page.version.version }}/read-committed.md %}#enable-read-committed-isolation), `READ COMMITTED` is no longer an alias for `SERIALIZABLE`. `READ COMMITTED` permits some concurrency anomalies in exchange for minimizing transaction aborts and removing the need for client-side retries. Depending on your workload requirements, this may be desirable. For more information, see [Read Committed Transactions]({% link {{ page.version.version }}/read-committed.md %}).
+
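+For example, once `READ COMMITTED` has been [enabled]({% link {{ page.version.version }}/read-committed.md %}#enable-read-committed-isolation) for the cluster, an individual transaction can opt into it (the table queried below is hypothetical):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+-- Assumes READ COMMITTED has been enabled cluster-wide; see the linked page.
+BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
+  SELECT * FROM rides WHERE city = 'seattle'; -- sees data committed before this statement began
+COMMIT;
+~~~
+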
+{% include {{ page.version.version }}/sql/mixed-isolation-levels.md %}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/sql/limit-row-size.md b/src/current/_includes/v25.1/sql/limit-row-size.md
new file mode 100644
index 00000000000..ae9e95a1391
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/limit-row-size.md
@@ -0,0 +1,22 @@
+## Limit the size of rows
+
+To help you avoid failures arising from misbehaving applications that bloat the size of rows, you can specify the behavior when a row or individual [column family]({% link {{ page.version.version }}/column-families.md %}) larger than a specified size is written to the database. Use the [cluster settings]({% link {{ page.version.version }}/cluster-settings.md %}) `sql.guardrails.max_row_size_log` to discover large rows and `sql.guardrails.max_row_size_err` to reject large rows.
+
+When you write a row that exceeds `sql.guardrails.max_row_size_log`:
+
+- `INSERT`, `UPSERT`, `UPDATE`, `CREATE TABLE AS`, `CREATE INDEX`, `ALTER TABLE`, `ALTER INDEX`, `IMPORT`, or `RESTORE` statements will log a `LargeRow` to the [`SQL_PERF`]({% link {{ page.version.version }}/logging.md %}#sql_perf) channel.
+- `SELECT`, `DELETE`, `TRUNCATE`, and `DROP` are not affected.
+
+When you write a row that exceeds `sql.guardrails.max_row_size_err`:
+
+- `INSERT`, `UPSERT`, and `UPDATE` statements will fail with a code `54000 (program_limit_exceeded)` error.
+
+- `CREATE TABLE AS`, `CREATE INDEX`, `ALTER TABLE`, `ALTER INDEX`, `IMPORT`, and `RESTORE` statements will log a `LargeRowInternal` event to the [`SQL_INTERNAL_PERF`]({% link {{ page.version.version }}/logging.md %}#sql_internal_perf) channel.
+
+- `SELECT`, `DELETE`, `TRUNCATE`, and `DROP` are not affected.
+
+You **cannot** update existing rows that violate the limit unless the update shrinks the size of the
+row below the limit. You **can** select, delete, alter, back up, and restore such rows. We
+recommend using the accompanying setting `sql.guardrails.max_row_size_log` in conjunction with
+`SELECT pg_column_size()` queries to detect and fix any existing large rows before lowering
+`sql.guardrails.max_row_size_err`.
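+
+For example, the two guardrails might be set as follows, with an approximate check for existing oversized values (the thresholds, table, and column are illustrative):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+-- Log rows larger than 1 MiB and reject rows larger than 4 MiB.
+SET CLUSTER SETTING sql.guardrails.max_row_size_log = '1MiB';
+SET CLUSTER SETTING sql.guardrails.max_row_size_err = '4MiB';
+
+-- Approximate check on a single column of a hypothetical table.
+SELECT id, pg_column_size(payload) AS payload_bytes
+  FROM events
+ WHERE pg_column_size(payload) > 4194304;
+~~~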
diff --git a/src/current/_includes/v25.1/sql/locality-optimized-search.md b/src/current/_includes/v25.1/sql/locality-optimized-search.md
new file mode 100644
index 00000000000..23cac1bc9d9
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/locality-optimized-search.md
@@ -0,0 +1 @@
+Note that the [SQL engine]({% link {{ page.version.version }}/architecture/sql-layer.md %}) will avoid sending requests to nodes in other regions when it can instead read a value from a unique column that is stored locally. This capability is known as [_locality optimized search_]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters).
diff --git a/src/current/_includes/v25.1/sql/macos-terminal-configuration.md b/src/current/_includes/v25.1/sql/macos-terminal-configuration.md
new file mode 100644
index 00000000000..5b636259ce1
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/macos-terminal-configuration.md
@@ -0,0 +1,14 @@
+In **Apple Terminal**:
+
+1. Navigate to "Preferences", then "Profiles", then "Keyboard".
+1. Enable the checkbox "Use Option as Meta Key".
+
+
+
+In **iTerm2**:
+
+1. Navigate to "Preferences", then "Profiles", then "Keys".
+1. Select the radio button "Esc+" for the behavior of the Left Option Key.
+
+
+
diff --git a/src/current/_includes/v25.1/sql/mixed-isolation-levels.md b/src/current/_includes/v25.1/sql/mixed-isolation-levels.md
new file mode 100644
index 00000000000..440e29f24bd
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/mixed-isolation-levels.md
@@ -0,0 +1,12 @@
+{% if page.name == "transactions.md" %}### Mixed isolation levels{% elsif page.name == "transaction-layer.md" %}#### Mixed isolation levels{% endif %}
+
+Regardless of the isolation levels of other transactions, transactions behave according to their respective isolation levels: Statements in `SERIALIZABLE` transactions see data that committed before the transaction began, whereas statements in `READ COMMITTED` transactions see data that committed before each **statement** began. Therefore:
+
+- If a `READ COMMITTED` transaction `R` commits before a `SERIALIZABLE` transaction `S`, every statement in `S` will observe all writes from `R`. Otherwise, `S` will not observe any writes from `R`.
+- If a `SERIALIZABLE` transaction `S` commits before a `READ COMMITTED` transaction `R`, every **subsequent** statement in `R` will observe all writes from `S`. Otherwise, `R` will not observe any writes from `S`.
+
+However, there is one difference in how `SERIALIZABLE` writes affect non-locking reads: While writes in a `SERIALIZABLE` transaction can block reads in concurrent `SERIALIZABLE` transactions, they will **not** block reads in concurrent `READ COMMITTED` transactions. Writes in a `READ COMMITTED` transaction will never block reads in concurrent transactions, regardless of their isolation levels. Therefore:
+
+- If a `READ COMMITTED` transaction `R` writes but does not commit before a `SERIALIZABLE` transaction `S`, no statement in `S` will observe or be blocked by any uncommitted writes from `R`.
+- If a `SERIALIZABLE` transaction `S` writes but does not commit before a `READ COMMITTED` transaction `R`, no statement in `R` will observe or be blocked by any uncommitted writes from `S`.
+- If a `SERIALIZABLE` transaction `S1` writes but does not commit before a `SERIALIZABLE` transaction `S2`, the first statement in `S2` that would observe an uncommitted write from `S1` will be blocked until `S1` commits or aborts.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/sql/movr-start-nodes.md b/src/current/_includes/v25.1/sql/movr-start-nodes.md
new file mode 100644
index 00000000000..3af9ecfbf2b
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/movr-start-nodes.md
@@ -0,0 +1,6 @@
+Run [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) with the [`--nodes`]({% link {{ page.version.version }}/cockroach-demo.md %}#flags) and [`--demo-locality`]({% link {{ page.version.version }}/cockroach-demo.md %}#flags) flags. This command opens an interactive SQL shell to a temporary, multi-node in-memory cluster with the `movr` database preloaded and set as the [current database]({% link {{ page.version.version }}/sql-name-resolution.md %}#current-database).
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach demo --nodes=3 --demo-locality=region=us-east1:region=us-central1:region=us-west1
+ ~~~
diff --git a/src/current/_includes/v25.1/sql/movr-start.md b/src/current/_includes/v25.1/sql/movr-start.md
new file mode 100644
index 00000000000..2c1bb50abf2
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/movr-start.md
@@ -0,0 +1,62 @@
+- Run [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) to start a temporary, in-memory cluster with the `movr` dataset preloaded:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach demo
+ ~~~
+
+- Load the `movr` dataset into a persistent local cluster and open an interactive SQL shell:
+ 1. Start a [secure]({% link {{ page.version.version }}/secure-a-cluster.md %}) or [insecure]({% link {{ page.version.version }}/start-a-local-cluster.md %}) local cluster.
+ 1. Use [`cockroach workload`]({% link {{ page.version.version }}/cockroach-workload.md %}) to load the `movr` dataset:
+
+
+
+
+
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach workload init movr 'postgresql://root@localhost:26257?sslcert=certs%2Fclient.root.crt&sslkey=certs%2Fclient.root.key&sslmode=verify-full&sslrootcert=certs%2Fca.crt'
+ ~~~
+
+
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach workload init movr 'postgresql://root@localhost:26257?sslmode=disable'
+ ~~~
+
+
+ 1. Use [`cockroach sql`]({% link {{ page.version.version }}/cockroach-sql.md %}) to open an interactive SQL shell and set `movr` as the [current database]({% link {{ page.version.version }}/sql-name-resolution.md %}#current-database):
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --certs-dir=certs --host=localhost:26257
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > USE movr;
+ ~~~
+
+
+
+
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ shell
+ $ cockroach sql --insecure --host=localhost:26257
+ ~~~
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ > USE movr;
+ ~~~
+
+
diff --git a/src/current/_includes/v25.1/sql/movr-statements-geo-partitioned-replicas.md b/src/current/_includes/v25.1/sql/movr-statements-geo-partitioned-replicas.md
new file mode 100644
index 00000000000..a6a72589436
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/movr-statements-geo-partitioned-replicas.md
@@ -0,0 +1,10 @@
+#### Setup
+
+The following examples use MovR, a fictional vehicle-sharing application, to demonstrate CockroachDB SQL statements. For more information about the MovR example application and dataset, see [MovR: A Global Vehicle-sharing App]({% link {{ page.version.version }}/movr.md %}).
+
+To follow along, run [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) with the `--geo-partitioned-replicas` flag. This command opens an interactive SQL shell to a temporary, 9-node in-memory cluster with the `movr` database.
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ cockroach demo --geo-partitioned-replicas
+~~~
diff --git a/src/current/_includes/v25.1/sql/movr-statements-nodes.md b/src/current/_includes/v25.1/sql/movr-statements-nodes.md
new file mode 100644
index 00000000000..603cf823a13
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/movr-statements-nodes.md
@@ -0,0 +1,10 @@
+### Setup
+
+The following examples use MovR, a fictional vehicle-sharing application, to demonstrate CockroachDB SQL statements. For more information about the MovR example application and dataset, see [MovR: A Global Vehicle-sharing App]({% link {{ page.version.version }}/movr.md %}).
+
+To follow along, run [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) with the [`--nodes`]({% link {{ page.version.version }}/cockroach-demo.md %}#flags) and [`--demo-locality`]({% link {{ page.version.version }}/cockroach-demo.md %}#flags) flags. This command opens an interactive SQL shell to a temporary, multi-node in-memory cluster with the `movr` database preloaded and set as the [current database]({% link {{ page.version.version }}/sql-name-resolution.md %}#current-database).
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ cockroach demo --nodes=6 --demo-locality=region=us-east,zone=us-east-a:region=us-east,zone=us-east-b:region=us-central,zone=us-central-a:region=us-central,zone=us-central-b:region=us-west,zone=us-west-a:region=us-west,zone=us-west-b
+~~~
diff --git a/src/current/_includes/v25.1/sql/movr-statements-partitioning.md b/src/current/_includes/v25.1/sql/movr-statements-partitioning.md
new file mode 100644
index 00000000000..d0e2468269a
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/movr-statements-partitioning.md
@@ -0,0 +1,10 @@
+The following examples use MovR, a fictional vehicle-sharing application, to demonstrate CockroachDB SQL statements. For more information about the MovR example application and dataset, see [MovR: A Global Vehicle-sharing App]({% link {{ page.version.version }}/movr.md %}).
+
+To follow along with the examples below, open a new terminal and run [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) with the [`--nodes`]({% link {{ page.version.version }}/cockroach-demo.md %}#flags) and [`--demo-locality`]({% link {{ page.version.version }}/cockroach-demo.md %}#flags) flags. This command opens an interactive SQL shell to a temporary, multi-node in-memory cluster with the `movr` database preloaded and set as the [current database]({% link {{ page.version.version }}/sql-name-resolution.md %}#current-database).
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ cockroach demo \
+--nodes=9 \
+--demo-locality=region=us-east1:region=us-east1:region=us-east1:region=us-central1:region=us-central1:region=us-central1:region=us-west1:region=us-west1:region=us-west1
+~~~
diff --git a/src/current/_includes/v25.1/sql/movr-statements.md b/src/current/_includes/v25.1/sql/movr-statements.md
new file mode 100644
index 00000000000..457e2b3a38a
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/movr-statements.md
@@ -0,0 +1,8 @@
+#### Setup
+
+To follow along, run [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) to start a temporary, in-memory cluster with the [`movr`]({% link {{ page.version.version }}/movr.md %}) sample dataset preloaded:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ cockroach demo
+~~~
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/sql/multiregion-example-setup.md b/src/current/_includes/v25.1/sql/multiregion-example-setup.md
new file mode 100644
index 00000000000..9b99539ed4c
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/multiregion-example-setup.md
@@ -0,0 +1,26 @@
+#### Setup
+
+Only a [cluster region]({% link {{ page.version.version }}/multiregion-overview.md %}#cluster-regions) specified [at node startup]({% link {{ page.version.version }}/cockroach-start.md %}#locality) can be used as a [database region]({% link {{ page.version.version }}/multiregion-overview.md %}#database-regions).
+
+To follow along with the examples in this section, start a [demo cluster]({% link {{ page.version.version }}/cockroach-demo.md %}) with the [`--global` flag]({% link {{ page.version.version }}/cockroach-demo.md %}#general) to simulate a multi-region cluster:
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+$ cockroach demo --global --nodes 9
+~~~
+
+To see the regions available to the databases in the cluster, use a [`SHOW REGIONS FROM CLUSTER`]({% link {{ page.version.version }}/show-regions.md %}#view-the-regions-in-a-cluster) statement:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SHOW REGIONS FROM CLUSTER;
+~~~
+
+~~~
+ region | zones
+---------------+----------
+ europe-west1 | {b,c,d}
+ us-east1 | {b,c,d}
+ us-west1 | {a,b,c}
+(3 rows)
+~~~
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/sql/multiregion-movr-add-regions.md b/src/current/_includes/v25.1/sql/multiregion-movr-add-regions.md
new file mode 100644
index 00000000000..f5cf62f6dd6
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/multiregion-movr-add-regions.md
@@ -0,0 +1,8 @@
+Execute the following statements. They will tell CockroachDB about the database's regions. This information is necessary so that CockroachDB can later move data around to optimize access to particular data from particular regions. For more information about how this works at a high level, see [Database Regions]({% link {{ page.version.version }}/multiregion-overview.md %}#database-regions).
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+ALTER DATABASE movr PRIMARY REGION "us-east1";
+ALTER DATABASE movr ADD REGION "europe-west1";
+ALTER DATABASE movr ADD REGION "us-west1";
+~~~
diff --git a/src/current/_includes/v25.1/sql/multiregion-movr-global.md b/src/current/_includes/v25.1/sql/multiregion-movr-global.md
new file mode 100644
index 00000000000..e1571108f3a
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/multiregion-movr-global.md
@@ -0,0 +1,17 @@
+Because the data in `promo_codes` is not updated frequently (a.k.a., "read-mostly"), and needs to be available from any region, the right table locality is [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables).
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+ALTER TABLE promo_codes SET LOCALITY GLOBAL;
+~~~
+
+Next, alter the `user_promo_codes` table to have a foreign key into the global `promo_codes` table. This will enable fast reads of the `promo_codes.code` column from any region in the cluster.
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+ALTER TABLE user_promo_codes
+ ADD CONSTRAINT user_promo_codes_code_fk
+ FOREIGN KEY (code)
+ REFERENCES promo_codes (code)
+ ON UPDATE CASCADE;
+~~~
diff --git a/src/current/_includes/v25.1/sql/multiregion-movr-regional-by-row.md b/src/current/_includes/v25.1/sql/multiregion-movr-regional-by-row.md
new file mode 100644
index 00000000000..a2c1cfdb5b9
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/multiregion-movr-regional-by-row.md
@@ -0,0 +1,103 @@
+All of the tables except `promo_codes` contain rows which are partitioned by region, and updated very frequently. For these tables, the right table locality for optimizing access to their data is [`REGIONAL BY ROW`]({% link {{ page.version.version }}/table-localities.md %}#regional-by-row-tables).
+
+Apply this table locality to the remaining tables. These statements use a `CASE` statement to put data for a given city in the right region and can take around 1 minute to complete for each table.
+
+- `rides`
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ ALTER TABLE rides ADD COLUMN region crdb_internal_region AS (
+ CASE WHEN city = 'amsterdam' THEN 'europe-west1'
+ WHEN city = 'paris' THEN 'europe-west1'
+ WHEN city = 'rome' THEN 'europe-west1'
+ WHEN city = 'new york' THEN 'us-east1'
+ WHEN city = 'boston' THEN 'us-east1'
+ WHEN city = 'washington dc' THEN 'us-east1'
+ WHEN city = 'san francisco' THEN 'us-west1'
+ WHEN city = 'seattle' THEN 'us-west1'
+ WHEN city = 'los angeles' THEN 'us-west1'
+ END
+ ) STORED;
+ ALTER TABLE rides ALTER COLUMN REGION SET NOT NULL;
+ ALTER TABLE rides SET LOCALITY REGIONAL BY ROW AS "region";
+ ~~~
+
+- `user_promo_codes`
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ ALTER TABLE user_promo_codes ADD COLUMN region crdb_internal_region AS (
+ CASE WHEN city = 'amsterdam' THEN 'europe-west1'
+ WHEN city = 'paris' THEN 'europe-west1'
+ WHEN city = 'rome' THEN 'europe-west1'
+ WHEN city = 'new york' THEN 'us-east1'
+ WHEN city = 'boston' THEN 'us-east1'
+ WHEN city = 'washington dc' THEN 'us-east1'
+ WHEN city = 'san francisco' THEN 'us-west1'
+ WHEN city = 'seattle' THEN 'us-west1'
+ WHEN city = 'los angeles' THEN 'us-west1'
+ END
+ ) STORED;
+ ALTER TABLE user_promo_codes ALTER COLUMN REGION SET NOT NULL;
+ ALTER TABLE user_promo_codes SET LOCALITY REGIONAL BY ROW AS "region";
+ ~~~
+
+- `users`
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ ALTER TABLE users ADD COLUMN region crdb_internal_region AS (
+ CASE WHEN city = 'amsterdam' THEN 'europe-west1'
+ WHEN city = 'paris' THEN 'europe-west1'
+ WHEN city = 'rome' THEN 'europe-west1'
+ WHEN city = 'new york' THEN 'us-east1'
+ WHEN city = 'boston' THEN 'us-east1'
+ WHEN city = 'washington dc' THEN 'us-east1'
+ WHEN city = 'san francisco' THEN 'us-west1'
+ WHEN city = 'seattle' THEN 'us-west1'
+ WHEN city = 'los angeles' THEN 'us-west1'
+ END
+ ) STORED;
+ ALTER TABLE users ALTER COLUMN REGION SET NOT NULL;
+ ALTER TABLE users SET LOCALITY REGIONAL BY ROW AS "region";
+ ~~~
+
+- `vehicle_location_histories`
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ ALTER TABLE vehicle_location_histories ADD COLUMN region crdb_internal_region AS (
+ CASE WHEN city = 'amsterdam' THEN 'europe-west1'
+ WHEN city = 'paris' THEN 'europe-west1'
+ WHEN city = 'rome' THEN 'europe-west1'
+ WHEN city = 'new york' THEN 'us-east1'
+ WHEN city = 'boston' THEN 'us-east1'
+ WHEN city = 'washington dc' THEN 'us-east1'
+ WHEN city = 'san francisco' THEN 'us-west1'
+ WHEN city = 'seattle' THEN 'us-west1'
+ WHEN city = 'los angeles' THEN 'us-west1'
+ END
+ ) STORED;
+ ALTER TABLE vehicle_location_histories ALTER COLUMN REGION SET NOT NULL;
+ ALTER TABLE vehicle_location_histories SET LOCALITY REGIONAL BY ROW AS "region";
+ ~~~
+
+- `vehicles`
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ ALTER TABLE vehicles ADD COLUMN region crdb_internal_region AS (
+ CASE WHEN city = 'amsterdam' THEN 'europe-west1'
+ WHEN city = 'paris' THEN 'europe-west1'
+ WHEN city = 'rome' THEN 'europe-west1'
+ WHEN city = 'new york' THEN 'us-east1'
+ WHEN city = 'boston' THEN 'us-east1'
+ WHEN city = 'washington dc' THEN 'us-east1'
+ WHEN city = 'san francisco' THEN 'us-west1'
+ WHEN city = 'seattle' THEN 'us-west1'
+ WHEN city = 'los angeles' THEN 'us-west1'
+ END
+ ) STORED;
+ ALTER TABLE vehicles ALTER COLUMN REGION SET NOT NULL;
+ ALTER TABLE vehicles SET LOCALITY REGIONAL BY ROW AS "region";
+ ~~~
diff --git a/src/current/_includes/v25.1/sql/no-full-scan.md b/src/current/_includes/v25.1/sql/no-full-scan.md
new file mode 100644
index 00000000000..304c2ab9697
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/no-full-scan.md
@@ -0,0 +1,15 @@
+- To prevent the optimizer from planning a full scan for a specific table, specify the `NO_FULL_SCAN` index hint. For example:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ SELECT * FROM table_name@{NO_FULL_SCAN};
+ ~~~
+
+- To prevent a full scan of a [partial index]({% link {{ page.version.version }}/partial-indexes.md %}) for a specific table, you must specify `NO_FULL_SCAN` in combination with the index name using [`FORCE_INDEX`]({% link {{ page.version.version }}/table-expressions.md %}#force-index-selection). For example:
+
+ {% include_cached copy-clipboard.html %}
+ ~~~ sql
+ SELECT * FROM table_name@{FORCE_INDEX=index_name,NO_FULL_SCAN} WHERE b > 0;
+ ~~~
+
+ This forces a constrained scan of the partial index. If a constrained scan of the partial index is not possible, an error is returned.
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/sql/partially-visible-indexes.md b/src/current/_includes/v25.1/sql/partially-visible-indexes.md
new file mode 100644
index 00000000000..83f251c8374
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/partially-visible-indexes.md
@@ -0,0 +1 @@
+For the purposes of [index recommendations]({% link {{ page.version.version }}/explain.md %}#success-responses), partially visible indexes are treated as [not visible]({% link {{ page.version.version }}/alter-index.md %}#not-visible). If a partially visible index can be used to improve a query plan, the {% if page.name != "cost-based-optimizer.md" %}[optimizer]({% link {{ page.version.version }}/cost-based-optimizer.md %}){% else %}optimizer{% endif %} will recommend making it fully visible. For an example, refer to [Set an index as partially visible]({% link {{ page.version.version }}/alter-index.md %}#set-an-index-as-partially-visible).
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/sql/physical-plan-url.md b/src/current/_includes/v25.1/sql/physical-plan-url.md
new file mode 100644
index 00000000000..7ad2957a996
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/physical-plan-url.md
@@ -0,0 +1 @@
+The generated physical statement plan is encoded into a byte string after the [fragment identifier (`#`)](https://wikipedia.org/wiki/Fragment_identifier) in the generated URL. The fragment is not sent to the web server; instead, the browser waits for the web server to return a `decode.html` resource, and then JavaScript on the web page decodes the fragment into a physical statement plan diagram. The statement plan is, therefore, not logged by a server external to the CockroachDB cluster and not exposed to the public internet.
diff --git a/src/current/_includes/v25.1/sql/preloaded-databases.md b/src/current/_includes/v25.1/sql/preloaded-databases.md
new file mode 100644
index 00000000000..f3418633c98
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/preloaded-databases.md
@@ -0,0 +1,12 @@
+New clusters and existing clusters [upgraded]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}) to {{ page.version.version }} or later will include auto-generated databases, with the following purposes:
+
+- The empty `defaultdb` database is used if a client does not specify a database in the [connection parameters]({% link {{ page.version.version }}/connection-parameters.md %}).
+- The `movr` database contains data about users, vehicles, and rides for the vehicle-sharing app [MovR]({% link {{ page.version.version }}/movr.md %}) (only when the cluster is started using the [`demo` command]({% link {{ page.version.version }}/cockroach-demo.md %})).
+- The empty `postgres` database is provided for compatibility with PostgreSQL client applications that require it.
+- The `system` database contains CockroachDB metadata and is read-only.
+
+All databases except for the `system` database can be [deleted]({% link {{ page.version.version }}/drop-database.md %}) if they are not needed.
+
+{{site.data.alerts.callout_danger}}
+Do not query the `system` database directly. Instead, use objects within the [system catalogs]({% link {{ page.version.version }}/system-catalogs.md %}).
+{{site.data.alerts.end}}
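+
+For example, you can list the auto-generated databases and, if your applications do not rely on it, remove one (shown here with `defaultdb`):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SHOW DATABASES;
+
+-- Optional: remove an unused auto-generated database (anything except system).
+DROP DATABASE defaultdb;
+~~~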
diff --git a/src/current/_includes/v25.1/sql/privileges.md b/src/current/_includes/v25.1/sql/privileges.md
new file mode 100644
index 00000000000..428e58091cb
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/privileges.md
@@ -0,0 +1,34 @@
+Privilege | Levels | Description
+----------|--------|------------
+`ALL` | System, Database, Schema, Table, Sequence, Type | For the object to which `ALL` is applied, grants all privileges at the system, database, schema, table, sequence, or type level.
+`BACKUP` | System, Database, Table | Grants the ability to create [backups]({% link {{ page.version.version }}/backup-and-restore-overview.md %}) at the system, database, or table level.
+`CANCELQUERY` | System | Grants the ability to cancel queries.
+`CHANGEFEED` | Table | Grants the ability to create [changefeeds]({% link {{ page.version.version }}/change-data-capture-overview.md %}) on a table.
+`CONNECT` | Database | Grants the ability to view a database's metadata, which consists of objects in a database's `information_schema` and `pg_catalog` system catalogs. This allows the role to view the database's tables, schemas, and user-defined types, and to list the database when running `SHOW DATABASES`. The `CONNECT` privilege is also required to run backups of the database.
+`CONTROLJOB` | System | Grants the ability to [pause]({% link {{ page.version.version }}/pause-job.md %}), [resume]({% link {{ page.version.version }}/resume-job.md %}), and [cancel]({% link {{ page.version.version }}/cancel-job.md %}) jobs. Non-admin roles cannot control jobs created by admin roles.
+`CREATE` | Database, Schema, Table, Sequence | Grants the ability to create objects at the database, schema, table, or sequence level. When applied at the database level, grants the ability to configure [multi-region zone configs]({% link {{ page.version.version }}/zone-config-extensions.md %}). In CockroachDB v23.2 and later, the [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) `sql.auth.public_schema_create_privilege.enabled` controls whether users receive `CREATE` privileges on the public schema or not. The setting applies at the time that the [public schema is created]({% link {{ page.version.version }}/create-schema.md %}), which happens whenever [a database is created]({% link {{ page.version.version }}/create-database.md %}). The setting is `true` by default, but can be set to `false` for increased compatibility with [PostgreSQL version 15](https://www.postgresql.org/about/news/postgresql-15-released-2526/) as described in [this commit](https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=b073c3ccd06e4cb845e121387a43faa8c68a7b62).
+`CREATEDB` | System | Grants the ability to [create]({% link {{ page.version.version }}/create-database.md %}) or [rename]({% link {{ page.version.version }}/alter-database.md %}#rename-to) a database.
+`CREATELOGIN` | System | Grants the ability to manage authentication using the `WITH PASSWORD`, `VALID UNTIL`, and `LOGIN`/`NOLOGIN` role options.
+`CREATEROLE` | System | Grants the ability to [create]({% link {{ page.version.version }}/create-role.md %}), modify, or [delete]({% link {{ page.version.version }}/drop-role.md %}) non-admin roles.
+`DELETE` | Table, Sequence | Grants the ability to delete objects at the table or sequence level.
+`DROP` | Database, Table, Sequence | Grants the ability to drop objects at the database, table, or sequence level.
+`EXECUTE` | Function | Grants the ability to execute [functions]({% link {{ page.version.version }}/functions-and-operators.md %}).
+`EXTERNALCONNECTION` | System | Grants the ability to connect to external systems such as object stores, key management systems, Kafka feeds, or external file systems. Often used in conjunction with the `BACKUP`, `RESTORE`, and `CHANGEFEED` privileges.
+`EXTERNALIOIMPLICITACCESS` | System | Grants the ability to interact with external resources that require implicit access.
+`INSERT` | Table, Sequence | Grants the ability to insert objects at the table or sequence level.
+`MODIFYCLUSTERSETTING` | System | Grants the ability to modify [cluster settings]({% link {{ page.version.version }}/cluster-settings.md %}).
+`MODIFYSQLCLUSTERSETTING` | System | Grants the ability to modify SQL [cluster settings]({% link {{ page.version.version }}/cluster-settings.md %}) (cluster settings prefixed with `sql.`).
+`NOSQLLOGIN` | System | Prevents roles from connecting to the SQL interface of a cluster.
+`REPLICATION` | System | Grants the ability to create a [logical data replication]({% link {{ page.version.version }}/logical-data-replication-overview.md %}) or [physical cluster replication]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}) stream.
+`RESTORE` | System, Database | Grants the ability to restore [backups]({% link {{ page.version.version }}/backup-and-restore-overview.md %}) at the system or database level. Refer to `RESTORE` [Required privileges]({% link {{ page.version.version }}/restore.md %}#required-privileges) for more details.
+`SELECT` | Table, Sequence | Grants the ability to run [selection queries]({% link {{ page.version.version }}/query-data.md %}) at the table or sequence level.
+`UPDATE` | Table, Sequence | Grants the ability to run [update statements]({% link {{ page.version.version }}/update-data.md %}) at the table or sequence level.
+`USAGE` | Schema, Sequence, Type | Grants the ability to use [schemas]({% link {{ page.version.version }}/schema-design-overview.md %}), [sequences]({% link {{ page.version.version }}/create-sequence.md %}), or [user-defined types]({% link {{ page.version.version }}/create-type.md %}).
+`VIEWACTIVITY` | System | Grants the ability to view other users' activity statistics for a cluster.
+`VIEWACTIVITYREDACTED` | System | Grants the ability to view other users' activity statistics, but prevents the role from accessing the statement diagnostics bundle in the DB Console and from viewing some columns in introspection queries that contain data about the cluster.
+`VIEWCLUSTERMETADATA` | System | Grants the ability to view range information, data distribution, store information, and Raft information.
+`VIEWCLUSTERSETTING` | System | Grants the ability to view [cluster settings]({% link {{ page.version.version }}/cluster-settings.md %}) and their values.
+`VIEWDEBUG` | System | Grants the ability to view the [Advanced Debug Page]({% link {{ page.version.version }}/ui-debug-pages.md %}) of the DB Console and work with the debugging and profiling endpoints.
+`VIEWJOB` | System | Grants the ability to view [jobs]({% link {{ page.version.version }}/show-jobs.md %}) on the cluster.
+`VIEWSYSTEMTABLE` | System | Grants read-only access (`SELECT`) on all tables in the `system` database, without granting the ability to modify the cluster. This privilege was introduced in v23.1.11.
+`ZONECONFIG` | Database, Table, Sequence | Grants the ability to configure [replication zones]({% link {{ page.version.version }}/configure-replication-zones.md %}) at the database, table, and sequence level.
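+
+For example, database-object privileges and system privileges are granted with `GRANT`. The following sketch grants a table-level and a system-level privilege to a hypothetical role named `maria`:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+-- Table-level privilege on a hypothetical movr.users table.
+GRANT SELECT ON TABLE movr.users TO maria;
+-- System-level privilege.
+GRANT SYSTEM VIEWACTIVITY TO maria;
+~~~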
diff --git a/src/current/_includes/v25.1/sql/querying-partitions.md b/src/current/_includes/v25.1/sql/querying-partitions.md
new file mode 100644
index 00000000000..4491428eada
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/querying-partitions.md
@@ -0,0 +1,163 @@
+## Query partitions
+
+Similar to [indexes]({% link {{ page.version.version }}/indexes.md %}), partitions can improve query performance by limiting the numbers of rows that a query must scan. In the case of [geo-partitioned data]({% link {{ page.version.version }}/regional-tables.md %}), partitioning can limit a query scan to data in a specific region.
+
+### Filter on an indexed column
+
+If you filter the query of a partitioned table on a [column in the index directly following the partition prefix]({% link {{ page.version.version }}/indexes.md %}), the [cost-based optimizer]({% link {{ page.version.version }}/cost-based-optimizer.md %}) creates a query plan that scans each partition in parallel, rather than performing a costly sequential scan of the entire table.
+
+For example, suppose that the tables in the [`movr`]({% link {{ page.version.version }}/movr.md %}) database are geo-partitioned by region, and you want to query the `users` table for information about a specific user.
+
+Here is the `CREATE TABLE` statement for the `users` table:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SHOW CREATE TABLE users;
+~~~
+
+~~~
+ table_name | create_statement
++------------+-------------------------------------------------------------------------------------+
+ users | CREATE TABLE users (
+ | id UUID NOT NULL,
+ | city VARCHAR NOT NULL,
+ | name VARCHAR NULL,
+ | address VARCHAR NULL,
+ | credit_card VARCHAR NULL,
+ | CONSTRAINT "primary" PRIMARY KEY (city ASC, id ASC),
+ | FAMILY "primary" (id, city, name, address, credit_card)
+ | ) PARTITION BY LIST (city) (
+ | PARTITION us_west VALUES IN (('seattle'), ('san francisco'), ('los angeles')),
+ | PARTITION us_east VALUES IN (('new york'), ('boston'), ('washington dc')),
+ | PARTITION europe_west VALUES IN (('amsterdam'), ('paris'), ('rome'))
+ | );
+ | ALTER PARTITION europe_west OF INDEX movr.public.users@primary CONFIGURE ZONE USING
+ | constraints = '[+region=europe-west1]';
+ | ALTER PARTITION us_east OF INDEX movr.public.users@primary CONFIGURE ZONE USING
+ | constraints = '[+region=us-east1]';
+ | ALTER PARTITION us_west OF INDEX movr.public.users@primary CONFIGURE ZONE USING
+ | constraints = '[+region=us-west1]'
+(1 row)
+~~~
+
+If you know the user's id, you can filter on the `id` column:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SELECT * FROM users WHERE id='00000000-0000-4000-8000-000000000000';
+~~~
+
+~~~
+ id | city | name | address | credit_card
++--------------------------------------+----------+---------------+----------------------+-------------+
+ 00000000-0000-4000-8000-000000000000 | new york | Robert Murphy | 99176 Anderson Mills | 8885705228
+(1 row)
+~~~
+
+An [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %}) statement shows more detail about the cost-based optimizer's plan:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> EXPLAIN SELECT * FROM users WHERE id='00000000-0000-4000-8000-000000000000';
+~~~
+
+~~~
+ tree | field | description
++------+-------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+ | distributed | true
+ | vectorized | false
+ scan | |
+ | table | users@primary
+ | spans | -/"amsterdam" /"amsterdam"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"amsterdam"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"amsterdam\x00"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"boston" /"boston"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"boston"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"boston\x00"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"los angeles" /"los angeles"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"los angeles"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"los angeles\x00"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"new york" /"new york"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"new york"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"new york\x00"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"paris" /"paris"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"paris"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"paris\x00"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"rome" /"rome"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"rome"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"rome\x00"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"san francisco" /"san francisco"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"san francisco"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"san francisco\x00"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"seattle" /"seattle"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"seattle"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"seattle\x00"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"washington dc" /"washington dc"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"washington dc"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"washington dc\x00"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-
+ | filter | id = '00000000-0000-4000-8000-000000000000'
+(6 rows)
+~~~
+
+Because the `id` column is in the primary index, directly after the partition prefix (`city`), the optimizer constrains the query to the partitioned values. This means that the query scans each partition in parallel for the unique `id` value.
+
+If you know the set of all possible partitioned values, adding a check constraint on the partitioned column can also improve performance. For example:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> ALTER TABLE users ADD CONSTRAINT check_city CHECK (city IN ('amsterdam','boston','los angeles','new york','paris','rome','san francisco','seattle','washington dc'));
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> EXPLAIN SELECT * FROM users WHERE id='00000000-0000-4000-8000-000000000000';
+~~~
+
+~~~
+ tree | field | description
++------+-------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+ | distributed | false
+ | vectorized | false
+ scan | |
+ | table | users@primary
+ | spans | /"amsterdam"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"amsterdam"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"boston"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"boston"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"los angeles"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"los angeles"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"new york"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"new york"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"paris"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"paris"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"rome"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"rome"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"san francisco"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"san francisco"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"seattle"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"seattle"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/# /"washington dc"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"-/"washington dc"/"\x00\x00\x00\x00\x00\x00@\x00\x80\x00\x00\x00\x00\x00\x00\x00"/#
+ | parallel |
+(6 rows)
+~~~
+
+To see the performance improvement over a query that performs a full table scan, compare these queries to a query with a filter on a column that is not in the index.
+
+### Filter on a non-indexed column
+
+Suppose that you want to query the `users` table for information about a specific user, but you only know the user's name.
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SELECT * FROM users WHERE name='Robert Murphy';
+~~~
+
+~~~
+ id | city | name | address | credit_card
++--------------------------------------+----------+---------------+----------------------+-------------+
+ 00000000-0000-4000-8000-000000000000 | new york | Robert Murphy | 99176 Anderson Mills | 8885705228
+(1 row)
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> EXPLAIN SELECT * FROM users WHERE name='Robert Murphy';
+~~~
+
+~~~
+ tree | field | description
++------+-------------+------------------------+
+ | distributed | true
+ | vectorized | false
+ scan | |
+ | table | users@primary
+ | spans | ALL
+ | filter | name = 'Robert Murphy'
+(6 rows)
+~~~
+
+The query returns the same result, but because `name` is not an indexed column, the query performs a full table scan across all partition values.
+
+### Filter on a partitioned column
+
+If you know which partition contains the data that you are querying, filtering on the partitioned column (e.g., with a [`WHERE` clause]({% link {{ page.version.version }}/select-clause.md %}#filter-rows)) can further improve performance by limiting the scan to the specific partitions that contain that data.
+
+Now suppose that you know the user's name and location. You can query the table with a filter on the user's name and city:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> EXPLAIN SELECT * FROM users WHERE name='Robert Murphy' AND city='new york';
+~~~
+
+~~~
+ tree | field | description
++------+-------------+-----------------------------------+
+ | distributed | true
+ | vectorized | false
+ scan | |
+ | table | users@primary
+ | spans | /"new york"-/"new york"/PrefixEnd
+ | filter | name = 'Robert Murphy'
+(6 rows)
+~~~
+
+The query returns the same results as before, but at a much lower cost, because the scan now spans only the `new york` partition value.
diff --git a/src/current/_includes/v25.1/sql/range-splits.md b/src/current/_includes/v25.1/sql/range-splits.md
new file mode 100644
index 00000000000..a612774afc0
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/range-splits.md
@@ -0,0 +1,7 @@
+CockroachDB breaks data into ranges. By default, CockroachDB attempts to keep ranges below [the default range size]({% link {{ page.version.version }}/configure-replication-zones.md %}#range-max-bytes). To do this, the system will automatically [split a range]({% link {{ page.version.version }}/architecture/distribution-layer.md %}#range-splits) if it grows larger than this limit. For most use cases, this automatic range splitting is sufficient, and you should never need to worry about when or where the system decides to split ranges.
+
+However, there are reasons why you may want to perform manual splits on the ranges that store tables or indexes:
+
+- When a table only consists of a single range, all writes and reads to the table will be served by that range's [leaseholder]({% link {{ page.version.version }}/architecture/replication-layer.md %}#leases). If a table only holds a small amount of data but is serving a large amount of traffic, load distribution can become unbalanced and a [hot spot]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#hot-spots) can occur. Splitting the table's ranges manually can allow the load on the table to be more evenly distributed across multiple nodes. For tables consisting of more than a few ranges, load will naturally be distributed across multiple nodes and this will not be a concern.
+
+- When a table is created, it will only consist of a single range. If you know that a new table will immediately receive significant write traffic, you may want to preemptively split the table based on the expected distribution of writes before applying the load. This can help avoid reduced workload performance that results when automatic splits are unable to keep up with write traffic and a [hot spot]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#hot-spots) occurs.
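+
+As a sketch, assuming a hypothetical table `t` with an integer primary key, you could pre-split its ranges at chosen key values with `ALTER TABLE ... SPLIT AT`:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+-- Manually split the table's ranges at these primary key values.
+ALTER TABLE t SPLIT AT VALUES (1000), (2000), (3000);
+~~~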
diff --git a/src/current/_includes/v25.1/sql/regional-by-row-table-description.md b/src/current/_includes/v25.1/sql/regional-by-row-table-description.md
new file mode 100644
index 00000000000..ac788d7b946
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/regional-by-row-table-description.md
@@ -0,0 +1,15 @@
+In a _regional by row_ table, individual rows are optimized for access from different home regions. Each row's home region is specified in a hidden [`crdb_region` column]({% link {{ page.version.version }}/alter-table.md %}#crdb_region), and is by default the region of the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) from which the row is inserted. The `REGIONAL BY ROW` setting automatically divides a table and all of [its indexes]({% link {{ page.version.version }}/table-localities.md %}#indexes-on-regional-by-row-tables) into [partitions]({% link {{ page.version.version }}/partitioning.md %}) that use `crdb_region` as the prefix.
+
+Use regional by row tables when your application requires low-latency reads and writes at a row level where individual rows are primarily accessed from a single region. For an example of a table in a multi-region cluster that can benefit from the `REGIONAL BY ROW` setting, see the `users` table from the [MovR application]({% link {{ page.version.version }}/movr.md %}), which could store users' data in specific regions for better performance.
+
+To take advantage of regional by row tables:
+
+- Use unique key lookups or queries with [`LIMIT`]({% link {{ page.version.version }}/limit-offset.md %}) clauses to enable [locality optimized searches]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) that prioritize rows in the gateway node's region. If there is a possibility that the results of the query all live in local rows, CockroachDB will first search for rows in the gateway node's region. The search only continues in remote regions if rows in the local region did not satisfy the query.
+
+- Use [foreign keys]({% link {{ page.version.version }}/foreign-key.md %}#rules-for-creating-foreign-keys) that reference the [`crdb_region` column]({% link {{ page.version.version }}/alter-table.md %}#crdb_region) in [`REGIONAL BY ROW`]({% link {{ page.version.version }}/table-localities.md %}#regional-by-row-tables) tables, unless [auto-rehoming is enabled]({% link {{ page.version.version }}/alter-table.md %}#turn-on-auto-rehoming-for-regional-by-row-tables) for those tables.
+
+- [Turn on auto-rehoming for regional by row tables]({% link {{ page.version.version }}/alter-table.md %}#turn-on-auto-rehoming-for-regional-by-row-tables). With auto-rehoming enabled, a row's home region is automatically set to the gateway region of any [`UPDATE`]({% link {{ page.version.version }}/update.md %}) or [`UPSERT`]({% link {{ page.version.version }}/upsert.md %}) statement that writes to the row.
+
+For instructions showing how to set a table's locality to `REGIONAL BY ROW` and configure the home regions of its rows, see [`ALTER TABLE ... SET LOCALITY`]({% link {{ page.version.version }}/alter-table.md %}#crdb_region).
+
+For more information on regional by row tables, see the [Cockroach Labs blog post](https://www.cockroachlabs.com/blog/regional-by-row/).
\ No newline at end of file
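+
+As a minimal sketch, assuming a multi-region database that already contains a `users` table, the locality is set as follows:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+ALTER TABLE users SET LOCALITY REGIONAL BY ROW;
+~~~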
diff --git a/src/current/_includes/v25.1/sql/regional-table-description.md b/src/current/_includes/v25.1/sql/regional-table-description.md
new file mode 100644
index 00000000000..e4d60da08db
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/regional-table-description.md
@@ -0,0 +1,5 @@
+In a _regional_ table, access to the table will be fast in the table's home region and slower in other regions. In other words, CockroachDB optimizes access to data in a regional table from a single region. By default, a regional table's home region is the [database's primary region]({% link {{ page.version.version }}/multiregion-overview.md %}#database-regions), but that can be changed to use any region in the database. Regional tables work well when your application requires low-latency reads and writes for an entire table from a single region.
+
+For instructions showing how to set a table's locality to `REGIONAL BY TABLE` and configure its home region, see [`ALTER TABLE ... SET LOCALITY`]({% link {{ page.version.version }}/alter-table.md %}#regional-by-table).
+
+By default, all tables in a multi-region database are _regional_ tables that use the database's primary region. Unless you know your application needs different performance characteristics than regional tables provide, there is no need to change this setting.
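+
+As a minimal sketch, assuming the database has a region named `us-east1` and a table `t`, the home region is configured as follows:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+ALTER TABLE t SET LOCALITY REGIONAL BY TABLE IN "us-east1";
+~~~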
diff --git a/src/current/_includes/v25.1/sql/rename-index.md b/src/current/_includes/v25.1/sql/rename-index.md
new file mode 100644
index 00000000000..b92cec05255
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/rename-index.md
@@ -0,0 +1,49 @@
+### Rename an index
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> CREATE INDEX on users(name);
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SHOW INDEXES FROM users;
+~~~
+
+~~~
+ table_name | index_name | non_unique | seq_in_index | column_name | direction | storing | implicit | visible
+-------------+------------+------------+--------------+-------------+-----------+---------+----------+----------
+ users | name_idx | t | 1 | name | DESC | f | f | t
+ users | name_idx | t | 2 | city | ASC | f | t | t
+ users | name_idx | t | 3 | id | ASC | f | t | t
+ users | users_pkey | f | 1 | city | ASC | f | f | t
+ users | users_pkey | f | 2 | id | ASC | f | f | t
+ users | users_pkey | f | 3 | name | N/A | t | f | t
+ users | users_pkey | f | 4 | address | N/A | t | f | t
+ users | users_pkey | f | 5 | credit_card | N/A | t | f | t
+(8 rows)
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> ALTER INDEX users@name_idx RENAME TO users_name_idx;
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SHOW INDEXES FROM users;
+~~~
+
+~~~
+ table_name | index_name | non_unique | seq_in_index | column_name | direction | storing | implicit | visible
+-------------+----------------+------------+--------------+-------------+-----------+---------+----------+----------
+ users | users_name_idx | t | 1 | name | DESC | f | f | t
+ users | users_name_idx | t | 2 | city | ASC | f | t | t
+ users | users_name_idx | t | 3 | id | ASC | f | t | t
+ users | users_pkey | f | 1 | city | ASC | f | f | t
+ users | users_pkey | f | 2 | id | ASC | f | f | t
+ users | users_pkey | f | 3 | name | N/A | t | f | t
+ users | users_pkey | f | 4 | address | N/A | t | f | t
+ users | users_pkey | f | 5 | credit_card | N/A | t | f | t
+(8 rows)
+~~~
diff --git a/src/current/_includes/v25.1/sql/replication-zone-patterns-to-multiregion-sql-mapping.md b/src/current/_includes/v25.1/sql/replication-zone-patterns-to-multiregion-sql-mapping.md
new file mode 100644
index 00000000000..6fdea66fc89
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/replication-zone-patterns-to-multiregion-sql-mapping.md
@@ -0,0 +1,5 @@
+| Replication Zone Pattern | Multi-Region SQL |
+|--------------------------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| [Duplicate indexes]({% link v20.2/topology-duplicate-indexes.md %}) | [`GLOBAL` tables]({% link {{ page.version.version }}/global-tables.md %}) |
+| [Geo-partitioned replicas]({% link v20.2/topology-geo-partitioned-replicas.md %}) | [`REGIONAL BY ROW` tables]({% link {{ page.version.version }}/regional-tables.md %}#regional-by-row-tables) with [`ZONE` survival goals](multiregion-survival-goals.html#survive-zone-failures) |
+| [Geo-partitioned leaseholders]({% link v20.2/topology-geo-partitioned-leaseholders.md %}) | [`REGIONAL BY ROW` tables]({% link {{ page.version.version }}/regional-tables.md %}#regional-by-row-tables) with [`REGION` survival goals](multiregion-survival-goals.html#survive-region-failures) |
diff --git a/src/current/_includes/v25.1/sql/retry-savepoints.md b/src/current/_includes/v25.1/sql/retry-savepoints.md
new file mode 100644
index 00000000000..bc3454195c0
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/retry-savepoints.md
@@ -0,0 +1 @@
+A savepoint defined with the name `cockroach_restart` is a "retry savepoint" and is used to implement [advanced client-side transaction retries]({% link {{ page.version.version }}/advanced-client-side-transaction-retries.md %}). For more information, see [Retry savepoints]({% link {{ page.version.version }}/advanced-client-side-transaction-retries.md %}#retry-savepoints).
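+
+For illustration, a retry savepoint wraps the body of a transaction as in the following sketch (the client must handle retry errors by rolling back to the savepoint, as described in the linked page):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+BEGIN;
+SAVEPOINT cockroach_restart;
+-- transaction statements go here
+RELEASE SAVEPOINT cockroach_restart;
+COMMIT;
+~~~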
diff --git a/src/current/_includes/v25.1/sql/role-options.md b/src/current/_includes/v25.1/sql/role-options.md
new file mode 100644
index 00000000000..44288bff11f
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/role-options.md
@@ -0,0 +1,16 @@
+Role option | Description
+------------|-------------
+`CANCELQUERY`/`NOCANCELQUERY` | **Deprecated in v22.2: Use the `CANCELQUERY` [system privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges).** Allow or disallow a role to cancel [queries]({% link {{ page.version.version }}/cancel-query.md %}) and [sessions]({% link {{ page.version.version }}/cancel-session.md %}) of other roles. Without this role option, roles can only cancel their own queries and sessions. Even with the `CANCELQUERY` role option, non-`admin` roles cannot cancel `admin` queries or sessions. This option should usually be combined with `VIEWACTIVITY` so that the role can view other roles' query and session information. By default, the role option is set to `NOCANCELQUERY` for all non-`admin` roles.
+`CONTROLCHANGEFEED`/`NOCONTROLCHANGEFEED` | **Deprecated in v23.1: Use the `CHANGEFEED` [privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges).** Allow or disallow a role to run [`CREATE CHANGEFEED`]({% link {{ page.version.version }}/create-changefeed.md %}) on tables they have `SELECT` privileges on. By default, the role option is set to `NOCONTROLCHANGEFEED` for all non-`admin` roles.
+`CONTROLJOB`/`NOCONTROLJOB` | Allow or disallow a role to [pause]({% link {{ page.version.version }}/pause-job.md %}), [resume]({% link {{ page.version.version }}/resume-job.md %}), and [cancel]({% link {{ page.version.version }}/cancel-job.md %}) jobs. Non-`admin` roles cannot control jobs created by `admin` roles. By default, the role option is set to `NOCONTROLJOB` for all non-`admin` roles.
+`CREATEDB`/`NOCREATEDB` | Allow or disallow a role to [create]({% link {{ page.version.version }}/create-database.md %}) or [rename]({% link {{ page.version.version }}/alter-database.md %}#rename-to) a database. The role is assigned as the owner of the database. By default, the role option is set to `NOCREATEDB` for all non-`admin` roles.
+`CREATELOGIN`/`NOCREATELOGIN` | Allow or disallow a role to manage authentication using the `WITH PASSWORD`, `VALID UNTIL`, and `LOGIN/NOLOGIN` role options. By default, the role option is set to `NOCREATELOGIN` for all non-`admin` roles.
+`CREATEROLE`/`NOCREATEROLE` | Allow or disallow the new role to [create]({% link {{ page.version.version }}/create-role.md %}), alter, and [drop]({% link {{ page.version.version }}/drop-role.md %}) other non-`admin` roles. By default, the role option is set to `NOCREATEROLE` for all non-`admin` roles.
+`LOGIN`/`NOLOGIN` | Allow or disallow a role to log in with one of the [client authentication methods]({% link {{ page.version.version }}/authentication.md %}#client-authentication). Setting the role option to `NOLOGIN` prevents the role from logging in using any authentication method.
+`MODIFYCLUSTERSETTING`/`NOMODIFYCLUSTERSETTING` | Allow or disallow a role to modify the [cluster settings]({% link {{ page.version.version }}/cluster-settings.md %}) with the `sql.defaults` prefix. By default, the role option is set to `NOMODIFYCLUSTERSETTING` for all non-`admin` roles.
+`PASSWORD password`/`PASSWORD NULL` | The credential the role uses to [authenticate their access to a secure cluster]({% link {{ page.version.version }}/authentication.md %}#client-authentication). A password should be entered as a [string literal]({% link {{ page.version.version }}/sql-constants.md %}#string-literals). For compatibility with PostgreSQL, a password can also be entered as an identifier. To prevent a role from using [password authentication]({% link {{ page.version.version }}/authentication.md %}#client-authentication) and to mandate [certificate-based client authentication]({% link {{ page.version.version }}/authentication.md %}#client-authentication), [set the password as `NULL`]({% link {{ page.version.version }}/create-role.md %}#prevent-a-role-from-using-password-authentication).
+`SQLLOGIN`/`NOSQLLOGIN` | **Deprecated in v22.2: Use the `NOSQLLOGIN` [system privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges).** Allow or disallow a role to log in using the SQL CLI with one of the [client authentication methods]({% link {{ page.version.version }}/authentication.md %}#client-authentication). Setting the role option to `NOSQLLOGIN` prevents the role from logging in using the SQL CLI with any authentication method while retaining the ability to log in to the DB Console. It is possible to set both `NOSQLLOGIN` and `LOGIN` for a role; in that case, the `NOSQLLOGIN` restriction takes precedence. Without either role option, all login behavior is permitted.
+`VALID UNTIL` | The date and time (in the [`timestamp`]({% link {{ page.version.version }}/timestamp.md %}) format) after which the [password](#parameters) is not valid.
+`VIEWACTIVITY`/`NOVIEWACTIVITY` | **Deprecated in v22.2: Use the `VIEWACTIVITY` [system privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges).** Allow or disallow a role to see other roles' [queries]({% link {{ page.version.version }}/show-statements.md %}) and [sessions]({% link {{ page.version.version }}/show-sessions.md %}) using `SHOW STATEMENTS`, `SHOW SESSIONS`, and the [**Statements**](ui-statements-page.html) and [**Transactions**](ui-transactions-page.html) pages in the DB Console. `VIEWACTIVITY` also permits visibility of node hostnames and IP addresses in the DB Console. With `NOVIEWACTIVITY`, the `SHOW` commands show only the role's own data, and DB Console pages redact node hostnames and IP addresses. By default, the role option is set to `NOVIEWACTIVITY` for all non-`admin` roles.
+`VIEWCLUSTERSETTING`/`NOVIEWCLUSTERSETTING` | **Deprecated in v22.2: Use the `VIEWCLUSTERSETTING` [system privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges).** Allow or disallow a role to view the [cluster settings]({% link {{ page.version.version }}/cluster-settings.md %}) with `SHOW CLUSTER SETTING` or to access the [**Cluster Settings**]({% link {{ page.version.version }}/ui-debug-pages.md %}) page in the DB Console. By default, the role option is set to `NOVIEWCLUSTERSETTING` for all non-`admin` roles.
+`VIEWACTIVITYREDACTED`/`NOVIEWACTIVITYREDACTED` | **Deprecated in v22.2: Use the `VIEWACTIVITYREDACTED` [system privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges).** Allow or disallow a role to see other roles' queries and sessions using `SHOW STATEMENTS`, `SHOW SESSIONS`, and the Statements and Transactions pages in the DB Console. With `VIEWACTIVITYREDACTED`, a role cannot access the statement diagnostics bundle (which can contain PII) in the DB Console, and cannot list queries containing [constants]({% link {{ page.version.version }}/sql-constants.md %}) for other roles when using the `listSessions` endpoint through the [Cluster API]({% link {{ page.version.version }}/cluster-api.md %}). It is possible to set both `VIEWACTIVITY` and `VIEWACTIVITYREDACTED`; in that case, the `VIEWACTIVITYREDACTED` restrictions take precedence. If a role has `VIEWACTIVITY` but not `VIEWACTIVITYREDACTED`, it can view DB Console pages and access the statement diagnostics bundle. By default, the role option is set to `NOVIEWACTIVITYREDACTED` for all non-`admin` roles.
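+
+For example, role options are specified when creating or altering a role. The following sketch creates a hypothetical role named `analyst` that can log in with a password until the end of 2025:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE ROLE analyst WITH LOGIN PASSWORD 'hypothetical-password' VALID UNTIL '2025-12-31 23:59:59+00';
+~~~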
diff --git a/src/current/_includes/v25.1/sql/role-subject-option.md b/src/current/_includes/v25.1/sql/role-subject-option.md
new file mode 100644
index 00000000000..c8a71e9af1b
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/role-subject-option.md
@@ -0,0 +1 @@
+You can associate an [X.509](https://en.wikipedia.org/wiki/X.509) certificate's Subject with a [role]({% link {{ page.version.version }}/security-reference/authorization.md %}#roles) as shown below. The Subject fields in the certificate must exactly match the value you pass in via the SQL statement; in particular, the order of attributes in the SQL statement must match the order of attributes in the certificate.
diff --git a/src/current/_includes/v25.1/sql/row-level-ttl-prefer-ttl-expiration-expressions.md b/src/current/_includes/v25.1/sql/row-level-ttl-prefer-ttl-expiration-expressions.md
new file mode 100644
index 00000000000..a75ed05e6c6
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/row-level-ttl-prefer-ttl-expiration-expressions.md
@@ -0,0 +1,5 @@
+Most users should use `ttl_expiration_expression` instead of `ttl_expire_after` for the following reasons:
+
+- If you add `ttl_expire_after` to an existing table, it **will cause a full table rewrite, which can affect performance**. Specifically, it will result in a [schema change]({% link {{ page.version.version }}/online-schema-changes.md %}) that (1) creates a new [hidden column]({% link {{page.version.version}}/show-create.md%}#show-the-create-table-statement-for-a-table-with-a-hidden-column) `crdb_internal_expiration` for all rows, and (2) backfills the value of that new column to `now()` + `ttl_expire_after`.
+- You cannot use `ttl_expire_after` with an existing [`TIMESTAMPTZ`]({% link {{ page.version.version }}/timestamp.md %}) column.
+- If you use `ttl_expiration_expression`, you can reference an existing [`TIMESTAMPTZ`]({% link {{ page.version.version }}/timestamp.md %}) column, such as an `updated_at` column, as shown in the sketch below.
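+
+As a minimal sketch (the table and column names are hypothetical), a TTL based on an existing `updated_at` column can be configured with an expression that evaluates to a `TIMESTAMPTZ`:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE TABLE events (
+  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+  updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
+) WITH (ttl_expiration_expression = 'updated_at + INTERVAL ''30 days''');
+~~~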
diff --git a/src/current/_includes/v25.1/sql/row-level-ttl.md b/src/current/_includes/v25.1/sql/row-level-ttl.md
new file mode 100644
index 00000000000..d10ea9b8e87
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/row-level-ttl.md
@@ -0,0 +1 @@
+CockroachDB has support for Time to Live ("TTL") expiration on table rows, also known as _Row-Level TTL_. Row-Level TTL is a mechanism whereby rows from a table are considered "expired" and can be automatically deleted once those rows have been stored longer than a specified expiration time.
diff --git a/src/current/_includes/v25.1/sql/savepoint-ddl-rollbacks.md b/src/current/_includes/v25.1/sql/savepoint-ddl-rollbacks.md
new file mode 100644
index 00000000000..57da82ae775
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/savepoint-ddl-rollbacks.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_danger}}
+Rollbacks to savepoints over [DDL](https://en.wikipedia.org/wiki/Data_definition_language) statements are only supported if you're rolling back to a savepoint created at the beginning of the transaction.
+{{site.data.alerts.end}}
diff --git a/src/current/_includes/v25.1/sql/savepoints-and-high-priority-transactions.md b/src/current/_includes/v25.1/sql/savepoints-and-high-priority-transactions.md
new file mode 100644
index 00000000000..c6de489e641
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/savepoints-and-high-priority-transactions.md
@@ -0,0 +1 @@
+[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue [#46414](https://www.github.com/cockroachdb/cockroach/issues/46414).
diff --git a/src/current/_includes/v25.1/sql/savepoints-and-row-locks.md b/src/current/_includes/v25.1/sql/savepoints-and-row-locks.md
new file mode 100644
index 00000000000..39568092558
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/savepoints-and-row-locks.md
@@ -0,0 +1,12 @@
+CockroachDB supports exclusive row locks.
+
+- In PostgreSQL, row locks are released/cancelled upon [`ROLLBACK TO SAVEPOINT`][rts].
+- In CockroachDB, row locks are preserved upon [`ROLLBACK TO SAVEPOINT`][rts].
+
+This is an architectural difference that may or may not be lifted in a later CockroachDB version.
+
+The code of client applications that rely on row locks must be reviewed and possibly modified to account for this difference. In particular, if an application is relying on [`ROLLBACK TO SAVEPOINT`][rts] to release row locks and allow a concurrent transaction touching the same rows to proceed, this behavior will not work with CockroachDB.
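+
+As an illustrative sketch (assuming a `kv` table like the one used in the `SELECT FOR UPDATE` examples), a row lock acquired after a savepoint behaves differently in the two databases:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+BEGIN;
+SAVEPOINT s;
+SELECT * FROM kv WHERE k = 1 FOR UPDATE; -- acquires an exclusive row lock
+ROLLBACK TO SAVEPOINT s;
+-- PostgreSQL releases the row lock on k = 1 at this point;
+-- CockroachDB preserves it until the transaction commits or aborts.
+COMMIT;
+~~~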
+
+{% comment %} Reference Links {% endcomment %}
+
+[rts]: rollback-transaction.html
diff --git a/src/current/_includes/v25.1/sql/schema-changes.md b/src/current/_includes/v25.1/sql/schema-changes.md
new file mode 100644
index 00000000000..c61e9c9a046
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/schema-changes.md
@@ -0,0 +1 @@
+- Schema changes through [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`DROP DATABASE`]({% link {{ page.version.version }}/drop-database.md %}), [`DROP TABLE`]({% link {{ page.version.version }}/drop-table.md %}), and [`TRUNCATE`](truncate.html)
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/sql/schema-terms.md b/src/current/_includes/v25.1/sql/schema-terms.md
new file mode 100644
index 00000000000..d066d5d979b
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/schema-terms.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+To avoid confusion with the general term "[schema](https://wiktionary.org/wiki/schema)", in this guide we refer to the logical object as a *user-defined schema*, and to the relationship structure of logical objects in a cluster as a *database schema*.
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/sql/select-for-update-example-partial.md b/src/current/_includes/v25.1/sql/select-for-update-example-partial.md
new file mode 100644
index 00000000000..62a2bf9c066
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/select-for-update-example-partial.md
@@ -0,0 +1,50 @@
+This example assumes you are running a [local unsecured cluster]({% link {{ page.version.version }}/start-a-local-cluster.md %}).
+
+First, connect to the running cluster (call this Terminal 1):
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+cockroach sql --insecure
+~~~
+
+Next, create a table and insert some rows:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+CREATE TABLE kv (k INT PRIMARY KEY, v INT);
+INSERT INTO kv (k, v) VALUES (1, 5), (2, 10), (3, 15);
+~~~
+
+Next, we'll start a [transaction]({% link {{ page.version.version }}/transactions.md %}) and lock the row we want to operate on:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+BEGIN;
+SELECT * FROM kv WHERE k = 1 FOR UPDATE;
+~~~
+
+Press **Enter** twice in the [SQL client]({% link {{ page.version.version }}/cockroach-sql.md %}) to send the statements to be evaluated. This will result in the following output:
+
+~~~
+ k | v
++---+----+
+ 1 | 5
+(1 row)
+~~~
+
+Now open another terminal and connect to the database from a second client (call this Terminal 2):
+
+{% include_cached copy-clipboard.html %}
+~~~ shell
+cockroach sql --insecure
+~~~
+
+From Terminal 2, start a transaction and try to lock the same row for updates that is already being accessed by the transaction we opened in Terminal 1:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+BEGIN;
+SELECT * FROM kv WHERE k = 1 FOR UPDATE;
+~~~
+
+Press **Enter** twice to send the statements to be evaluated. Because Terminal 1 has already locked this row, the `SELECT FOR UPDATE` statement from Terminal 2 will appear to "wait".
diff --git a/src/current/_includes/v25.1/sql/select-for-update-overview.md b/src/current/_includes/v25.1/sql/select-for-update-overview.md
new file mode 100644
index 00000000000..812b9c0eb17
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/select-for-update-overview.md
@@ -0,0 +1,22 @@
+{% if page.name != "select-for-update.md" %}`SELECT ... FOR UPDATE` exclusively locks the rows returned by a [selection query][selection], such that other transactions trying to access those rows must wait for the transaction that locked the rows to commit or roll back.{% endif %}
+
+`SELECT ... FOR UPDATE` can be used to:
+
+- Strengthen the isolation of a [`READ COMMITTED`]({% link {{ page.version.version }}/read-committed.md %}) transaction. If you need to read and later update a row within a transaction, use `SELECT ... FOR UPDATE` to acquire an exclusive lock on the row. This guarantees data integrity between the transaction's read and write operations. For details, see [Locking reads]({% link {{ page.version.version }}/read-committed.md %}#locking-reads).
+
+- Order [`SERIALIZABLE`]({% link {{ page.version.version }}/demo-serializable.md %}) transactions by controlling concurrent access to one or more rows of a table. These other transactions are placed into a queue based on when they tried to read the values of the locked rows.
+
+ Because this queueing happens during the read operation, the [thrashing](https://wikipedia.org/wiki/Thrashing_(computer_science)) that would otherwise occur if multiple concurrently executing transactions attempt to `SELECT` the same data and then `UPDATE` the results of that selection is prevented. By preventing thrashing, `SELECT ... FOR UPDATE` also prevents [transaction retries][retries] that would otherwise occur due to [contention]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention).
+
+ As a result, using `SELECT ... FOR UPDATE` leads to increased throughput and decreased tail latency for contended operations.
+
+Note that using `SELECT ... FOR UPDATE` does not completely eliminate the chance of [serialization errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). These errors can also arise due to [time uncertainty]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#transaction-conflicts). To eliminate the need for application-level retry logic, in addition to `SELECT ... FOR UPDATE`, your application also needs to use a [driver that implements automatic retry handling]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#client-side-retry-handling).
+
+{{site.data.alerts.callout_info}}
+By default, CockroachDB uses the `SELECT ... FOR UPDATE` locking mechanism during the initial row scan performed in [`UPDATE`]({% link {{ page.version.version }}/update.md %}) and [`UPSERT`]({% link {{ page.version.version }}/upsert.md %}) statement execution. To turn off implicit `SELECT ... FOR UPDATE` locking for `UPDATE` and `UPSERT` statements, set the `enable_implicit_select_for_update` [session variable]({% link {{ page.version.version }}/set-vars.md %}) to `false`.
+{{site.data.alerts.end}}
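+
+For example, to turn off implicit `SELECT ... FOR UPDATE` locking for the current session:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SET enable_implicit_select_for_update = false;
+~~~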
+
+{% comment %} Reference Links {% endcomment %}
+
+[retries]: transactions.html#transaction-retries
+[selection]: selection-queries.html
diff --git a/src/current/_includes/v25.1/sql/select-lock-strengths.md b/src/current/_includes/v25.1/sql/select-lock-strengths.md
new file mode 100644
index 00000000000..fc0b2cd590e
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/select-lock-strengths.md
@@ -0,0 +1,5 @@
+- `SELECT FOR UPDATE` obtains an *exclusive lock* on each qualifying row, blocking concurrent writes and locking reads on the row. Only one transaction can hold an exclusive lock on a row at a time, and only the transaction holding the exclusive lock can write to the row. {% if page.name == "read-committed.md" %}For an example, see [Reserve rows for updates using exclusive locks](#reserve-rows-for-updates-using-exclusive-locks).{% endif %}
+
+- `SELECT FOR SHARE` obtains a *shared lock* on each qualifying row, blocking concurrent writes and **exclusive** locking reads on the row. Multiple transactions can hold a shared lock on a row at the same time. When multiple transactions hold a shared lock on a row, none can write to the row. A shared lock grants transactions mutual read-only access to a row, and ensures that they read the latest version of the row. {% if page.name == "read-committed.md" %}For an example, see [Reserve values using shared locks](#reserve-row-values-using-shared-locks).{% endif %}
+
+When a `SELECT FOR UPDATE` or `SELECT FOR SHARE` read is issued on a row, only the latest version of the row is returned to the client. Under {% if page.name == "read-committed.md" %}`READ COMMITTED`{% else %}[`READ COMMITTED`]({% link {{ page.version.version }}/read-committed.md %}){% endif %} isolation, neither statement will block concurrent, non-locking reads.
\ No newline at end of file
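+
+For illustration (a sketch, assuming a `kv` table like the one used in the locking examples), the two lock strengths are requested as follows:
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SELECT * FROM kv WHERE k = 1 FOR UPDATE; -- exclusive lock on the matching row
+SELECT * FROM kv WHERE k = 1 FOR SHARE;  -- shared lock on the matching row
+~~~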
diff --git a/src/current/_includes/v25.1/sql/serializable-tutorial.md b/src/current/_includes/v25.1/sql/serializable-tutorial.md
new file mode 100644
index 00000000000..2a03b091b58
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/serializable-tutorial.md
@@ -0,0 +1,3 @@
+{{site.data.alerts.callout_info}}
+This tutorial assumes you are running under [`SERIALIZABLE`]({% link {{ page.version.version }}/demo-serializable.md %}) isolation, which requires client-side retry handling for [serialization errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}).
+{{site.data.alerts.end}}
\ No newline at end of file
diff --git a/src/current/_includes/v25.1/sql/server-side-connection-limit.md b/src/current/_includes/v25.1/sql/server-side-connection-limit.md
new file mode 100644
index 00000000000..62300b45619
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/server-side-connection-limit.md
@@ -0,0 +1 @@
+To control the maximum number of non-superuser connections (that is, connections from users other than [`root`]({% link {{ page.version.version }}/security-reference/authorization.md %}#root-user) and members of the [`admin` role]({% link {{ page.version.version }}/security-reference/authorization.md %}#admin-role)) that a [gateway node]({% link {{ page.version.version }}/architecture/sql-layer.md %}#gateway-node) can have open at one time, use the `server.max_connections_per_gateway` [cluster setting](cluster-settings.html). If a new non-superuser connection would exceed this limit, the error message `"sorry, too many clients already"` is returned, along with error code `53300`.
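+
+For example, to cap non-superuser connections per gateway node at 1000 (the value is illustrative):
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+SET CLUSTER SETTING server.max_connections_per_gateway = 1000;
+~~~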
diff --git a/src/current/_includes/v25.1/sql/set-transaction-as-of-system-time-example.md b/src/current/_includes/v25.1/sql/set-transaction-as-of-system-time-example.md
new file mode 100644
index 00000000000..8e758f1c303
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/set-transaction-as-of-system-time-example.md
@@ -0,0 +1,24 @@
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> BEGIN;
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SET TRANSACTION AS OF SYSTEM TIME '2019-04-09 18:02:52.0+00:00';
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SELECT * FROM orders;
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> SELECT * FROM products;
+~~~
+
+{% include_cached copy-clipboard.html %}
+~~~ sql
+> COMMIT;
+~~~
diff --git a/src/current/_includes/v25.1/sql/shell-commands.md b/src/current/_includes/v25.1/sql/shell-commands.md
new file mode 100644
index 00000000000..7586108f3d1
--- /dev/null
+++ b/src/current/_includes/v25.1/sql/shell-commands.md
@@ -0,0 +1,54 @@
+The following commands can be used within the interactive SQL shell:
+
+Command | Usage
+--------|------------
+`\?`,`help` | View this help within the shell.
+`\q`,`quit`,`exit`,`ctrl-d` | Exit the shell. When no text follows the prompt, `ctrl-c` exits the shell as well; otherwise, `ctrl-c` clears the line.
+`\!` | Run an external command and print its results to `stdout`. [See an example]({% link {{ page.version.version }}/cockroach-sql.md %}#run-external-commands-from-the-sql-shell).
+\| | Run the output of an external command as SQL statements. [See an example]({% link {{ page.version.version }}/cockroach-sql.md %}#run-external-commands-from-the-sql-shell).
+`\set