From 62f7121acaca30416826d0ba1ff47fef67399aa0 Mon Sep 17 00:00:00 2001
From: Ryan Fox-Tyler <60440289+ryanfoxtyler@users.noreply.github.com>
Date: Sun, 9 Feb 2025 13:29:31 -0500
Subject: [PATCH 01/53] initial port
All files are unlisted (not yet added to navigation) at this point.
---
.../reference/cloud/admin/authentication.mdx | 47 +
dgraph/reference/cloud/admin/clone.mdx | 18 +
dgraph/reference/cloud/admin/drop-data.mdx | 44 +
.../reference/cloud/admin/import-export.mdx | 141 +++
dgraph/reference/cloud/admin/index.mdx | 3 +
dgraph/reference/cloud/admin/monitoring.mdx | 88 ++
dgraph/reference/cloud/admin/overview.mdx | 25 +
dgraph/reference/cloud/admin/schema-modes.mdx | 87 ++
dgraph/reference/cloud/admin/schema.mdx | 43 +
dgraph/reference/cloud/advanced-queries.mdx | 219 ++++
.../cloud/cloud-api/authentication.mdx | 64 +
dgraph/reference/cloud/cloud-api/backend.mdx | 498 ++++++++
dgraph/reference/cloud/cloud-api/backup.mdx | 179 +++
dgraph/reference/cloud/cloud-api/index.mdx | 3 +
dgraph/reference/cloud/cloud-api/lambda.mdx | 247 ++++
dgraph/reference/cloud/cloud-api/overview.mdx | 69 +
dgraph/reference/cloud/cloud-api/schema.mdx | 146 +++
dgraph/reference/cloud/cloud-multitenancy.mdx | 83 ++
dgraph/reference/cloud/index.mdx | 3 +
dgraph/reference/cloud/introduction.mdx | 99 ++
.../cloud/migrating-from-hosted-dgraph.mdx | 14 +
dgraph/reference/cloud/provision-backend.mdx | 37 +
.../deploy/admin/data-compression.mdx | 40 +
.../deploy/admin/dgraph-administration.mdx | 319 +++++
dgraph/reference/deploy/admin/index.mdx | 3 +
dgraph/reference/deploy/admin/log-format.mdx | 130 ++
dgraph/reference/deploy/admin/metrics.mdx | 121 ++
dgraph/reference/deploy/admin/tracing.mdx | 63 +
.../deploy/cli-command-reference.mdx | 1114 +++++++++++++++++
dgraph/reference/deploy/cluster-checklist.mdx | 15 +
dgraph/reference/deploy/cluster-setup.mdx | 48 +
dgraph/reference/deploy/config.mdx | 199 +++
dgraph/reference/deploy/decrypt.mdx | 76 ++
dgraph/reference/deploy/dgraph-alpha.mdx | 89 ++
dgraph/reference/deploy/dgraph-zero.mdx | 279 +++++
dgraph/reference/deploy/index.mdx | 32 +
.../deploy/installation/download.mdx | 83 ++
.../reference/deploy/installation/index.mdx | 7 +
.../installation/kubernetes/cluster-types.mdx | 175 +++
.../installation/kubernetes/ha-cluster.mdx | 418 +++++++
.../deploy/installation/kubernetes/index.mdx | 3 +
.../kubernetes/monitoring-cluster.mdx | 343 +++++
.../kubernetes/single-server-cluster.mdx | 95 ++
.../deploy/installation/lambda-server.mdx | 110 ++
.../installation/production-checklist.mdx | 207 +++
.../deploy/installation/single-host-setup.mdx | 229 ++++
dgraph/reference/deploy/monitoring.mdx | 159 +++
dgraph/reference/deploy/security/index.mdx | 3 +
.../reference/deploy/security/ports-usage.mdx | 117 ++
.../deploy/security/tls-configuration.mdx | 424 +++++++
dgraph/reference/deploy/troubleshooting.mdx | 61 +
.../reference/design-concepts/acl-concept.mdx | 20 +
.../design-concepts/badger-concept.mdx | 15 +
.../design-concepts/clients-concept.mdx | 26 +
.../design-concepts/consistency-model.mdx | 91 ++
.../design-concepts/discovery-concept.mdx | 9 +
.../reference/design-concepts/dql-concept.mdx | 11 +
.../dql-graphql-layering-concept.mdx | 27 +
.../design-concepts/facets-concept.mdx | 15 +
.../design-concepts/graphql-concept.mdx | 17 +
.../design-concepts/group-concept.mdx | 39 +
.../index-tokenize-concept.mdx | 27 +
dgraph/reference/design-concepts/index.mdx | 8 +
.../design-concepts/lambda-concept.mdx | 8 +
.../minimizing-network-calls.mdx | 116 ++
.../namespace-tenant-concept.mdx | 14 +
.../network-call-minimization-concept.mdx | 11 +
.../design-concepts/posting-list-concept.mdx | 124 ++
.../protocol-buffers-concept.mdx | 8 +
.../design-concepts/queries-process.mdx | 53 +
dgraph/reference/design-concepts/raft.mdx | 212 ++++
.../design-concepts/relationships-concept.mdx | 34 +
.../design-concepts/replication-concept.mdx | 15 +
.../transaction-mutation-concept.mdx | 12 +
.../design-concepts/transactions-concept.mdx | 32 +
.../design-concepts/wal-memtable-concept.mdx | 14 +
.../design-concepts/workers-concept.mdx | 13 +
dgraph/reference/dgraph-glossary.mdx | 154 +++
dgraph/reference/dgraph-overview.md | 206 +++
dgraph/reference/dgraph-overview.mdx | 206 +++
dgraph/reference/dql/clients/csharp.mdx | 196 +++
dgraph/reference/dql/clients/go.mdx | 526 ++++++++
dgraph/reference/dql/clients/index.mdx | 58 +
dgraph/reference/dql/clients/java.mdx | 595 +++++++++
.../reference/dql/clients/javascript/grpc.mdx | 443 +++++++
.../reference/dql/clients/javascript/http.mdx | 365 ++++++
.../dql/clients/javascript/index.mdx | 31 +
dgraph/reference/dql/clients/python.mdx | 454 +++++++
dgraph/reference/dql/clients/raw-http.mdx | 469 +++++++
.../dql/clients/unofficial-clients.mdx | 30 +
dgraph/reference/dql/dql-get-started.mdx | 194 +++
dgraph/reference/dql/dql-schema.mdx | 512 ++++++++
.../reference/dql/dql-syntax/dql-mutation.mdx | 280 +++++
dgraph/reference/dql/dql-syntax/dql-query.mdx | 218 ++++
dgraph/reference/dql/dql-syntax/dql-rdf.mdx | 185 +++
dgraph/reference/dql/dql-syntax/index.mdx | 10 +
.../dql/dql-syntax/json-mutation-format.mdx | 436 +++++++
.../reference/dql/dql-syntax/to-sort.md.txt | 631 ++++++++++
dgraph/reference/dql/index.mdx | 9 +
.../mutations/external-ids-upsert-block.mdx | 88 ++
dgraph/reference/dql/mutations/index.mdx | 5 +
dgraph/reference/dql/mutations/uid-upsert.mdx | 212 ++++
dgraph/reference/dql/mutations/val-upsert.mdx | 77 ++
dgraph/reference/dql/predicate-indexing.mdx | 283 +++++
dgraph/reference/dql/tips/index.mdx | 76 ++
.../access-control-lists.mdx | 877 +++++++++++++
.../enterprise-features/audit-logs.mdx | 151 +++
.../enterprise-features/binary-backups.mdx | 850 +++++++++++++
.../change-data-capture.mdx | 157 +++
.../encryption-at-rest.mdx | 176 +++
.../reference/enterprise-features/index.mdx | 76 ++
.../enterprise-features/learner-nodes.mdx | 92 ++
.../reference/enterprise-features/license.mdx | 56 +
.../enterprise-features/lsbackup.mdx | 179 +++
.../enterprise-features/multitenancy.mdx | 333 +++++
.../reference/graphql-dql/dql-for-graphql.mdx | 13 +
.../graphql-dql/graphql-data-loading.mdx | 22 +
.../graphql-dql/graphql-data-migration.mdx | 70 ++
.../reference/graphql-dql/graphql-dgraph.mdx | 51 +
.../graphql-dql/graphql-dql-schema.mdx | 205 +++
dgraph/reference/graphql-dql/index.mdx | 19 +
dgraph/reference/graphql/admin/index.mdx | 1067 ++++++++++++++++
.../reference/graphql/custom/custom-dql.mdx | 140 +++
.../graphql/custom/custom-overview.mdx | 60 +
dgraph/reference/graphql/custom/directive.mdx | 562 +++++++++
dgraph/reference/graphql/custom/field.mdx | 93 ++
dgraph/reference/graphql/custom/index.mdx | 3 +
dgraph/reference/graphql/custom/mutation.mdx | 69 +
dgraph/reference/graphql/custom/query.mdx | 90 ++
dgraph/reference/graphql/federation/index.mdx | 218 ++++
.../endpoint/graphql-get-request.mdx | 20 +
.../endpoint/graphql-request.mdx | 386 ++++++
.../endpoint/graphql-response.mdx | 211 ++++
.../graphql-clients/endpoint/index.mdx | 45 +
.../graphql/graphql-clients/graphql-ide.mdx | 21 +
.../graphql/graphql-clients/graphql-ui.mdx | 18 +
.../graphql/graphql-clients/index.mdx | 5 +
dgraph/reference/graphql/index.mdx | 29 +
dgraph/reference/graphql/lambda/field.mdx | 221 ++++
dgraph/reference/graphql/lambda/index.mdx | 3 +
.../graphql/lambda/lambda-overview.mdx | 350 ++++++
dgraph/reference/graphql/lambda/mutation.mdx | 114 ++
dgraph/reference/graphql/lambda/query.mdx | 100 ++
dgraph/reference/graphql/lambda/webhook.mdx | 110 ++
dgraph/reference/graphql/mutations/add.mdx | 91 ++
dgraph/reference/graphql/mutations/deep.mdx | 113 ++
dgraph/reference/graphql/mutations/delete.mdx | 65 +
dgraph/reference/graphql/mutations/index.mdx | 3 +
.../graphql/mutations/mutations-overview.mdx | 302 +++++
dgraph/reference/graphql/mutations/update.mdx | 124 ++
dgraph/reference/graphql/mutations/upsert.mdx | 129 ++
.../reference/graphql/queries/aggregate.mdx | 191 +++
.../reference/graphql/queries/and-or-not.mdx | 102 ++
.../graphql/queries/cached-results.mdx | 49 +
dgraph/reference/graphql/queries/cascade.mdx | 141 +++
dgraph/reference/graphql/queries/index.mdx | 3 +
.../reference/graphql/queries/order-page.mdx | 32 +
.../graphql/queries/persistent-queries.mdx | 80 ++
.../graphql/queries/queries-overview.mdx | 52 +
.../graphql/queries/search-filtering.mdx | 321 +++++
.../graphql/queries/skip-include.mdx | 65 +
.../graphql/queries/vector-similarity.mdx | 76 ++
.../reference/graphql/quick-start/index.mdx | 302 +++++
.../graphql/schema/dgraph-schema.mdx | 284 +++++
.../graphql/schema/directives/auth.mdx | 12 +
.../graphql/schema/directives/deprecated.mdx | 25 +
.../schema/directives/directive-dgraph.mdx | 63 +
.../directives/directive-withsubscription.mdx | 25 +
.../graphql/schema/directives/embedding.mdx | 14 +
.../graphql/schema/directives/generate.mdx | 61 +
.../graphql/schema/directives/ids.mdx | 141 +++
.../graphql/schema/directives/index.mdx | 131 ++
.../graphql/schema/directives/search.mdx | 697 +++++++++++
.../graphql/schema/documentation.mdx | 75 ++
.../reference/graphql/schema/graph-links.mdx | 131 ++
dgraph/reference/graphql/schema/index.mdx | 17 +
dgraph/reference/graphql/schema/migration.mdx | 230 ++++
dgraph/reference/graphql/schema/reserved.mdx | 57 +
dgraph/reference/graphql/schema/types.mdx | 528 ++++++++
.../reference/graphql/security/RBAC-rules.mdx | 146 +++
.../graphql/security/anonymous-access.mdx | 83 ++
.../reference/graphql/security/auth-tips.mdx | 91 ++
dgraph/reference/graphql/security/cors.mdx | 28 +
.../graphql/security/graphtraversal-rules.mdx | 191 +++
dgraph/reference/graphql/security/index.mdx | 109 ++
dgraph/reference/graphql/security/jwt.mdx | 215 ++++
.../reference/graphql/security/mutations.mdx | 143 +++
.../reference/graphql/subscriptions/index.mdx | 229 ++++
.../reference/howto/commandline/about-cli.mdx | 28 +
.../howto/commandline/create-cli.mdx | 89 ++
dgraph/reference/howto/commandline/index.mdx | 3 +
dgraph/reference/howto/completion.mdx | 111 ++
...urrent-modification-java-multithreaded.mdx | 93 ++
.../howto/dgraph-sentry-integration.mdx | 86 ++
dgraph/reference/howto/dql-schema-request.mdx | 70 ++
dgraph/reference/howto/drop-data.mdx | 84 ++
.../howto/exportdata/about-export.mdx | 21 +
.../howto/exportdata/export-data-cloud.mdx | 92 ++
.../howto/exportdata/export-data.mdx | 353 ++++++
dgraph/reference/howto/exportdata/index.mdx | 3 +
.../howto/importdata/about_import.mdx | 21 +
.../howto/importdata/bulk-loader.mdx | 443 +++++++
dgraph/reference/howto/importdata/index.mdx | 3 +
.../howto/importdata/live-loader.mdx | 340 +++++
dgraph/reference/howto/index.mdx | 3 +
dgraph/reference/howto/jepsen-tests.mdx | 33 +
.../reference/howto/load-balancing-nginx.mdx | 184 +++
dgraph/reference/howto/login-system.mdx | 59 +
.../howto/retrieving-debug-information.mdx | 224 ++++
.../reference/howto/update-dgraph-types.mdx | 84 ++
dgraph/reference/howto/upserts.mdx | 75 ++
dgraph/reference/howto/using-debug-tool.mdx | 273 ++++
.../reference/howto/using-increment-tool.mdx | 105 ++
.../reference/learn/administrator/index.mdx | 19 +
.../data-model-101/01-dm-101-introduction.mdx | 48 +
.../02-relational-data-model.mdx | 77 ++
.../data-model-101/03-graph-data-model.mdx | 66 +
.../data-model-101/04-rel-query.mdx | 69 +
.../data-model-101/05-graph-query.mdx | 86 ++
.../data-model-101/06-dm-101-conclusion.mdx | 20 +
.../data-engineer/data-model-101/index.mdx | 47 +
.../get-started-with-dgraph/index.mdx | 107 ++
.../tutorial-1/index.mdx | 236 ++++
.../tutorial-2/index.mdx | 395 ++++++
.../tutorial-3/index.mdx | 649 ++++++++++
.../tutorial-4/index.mdx | 441 +++++++
.../tutorial-5/index.mdx | 665 ++++++++++
.../tutorial-6/index.mdx | 455 +++++++
.../tutorial-7/index.mdx | 325 +++++
.../tutorial-8/index.mdx | 851 +++++++++++++
.../reference/learn/data-engineer/index.mdx | 24 +
dgraph/reference/learn/developer/index.mdx | 26 +
.../react/graphql/design-app-schema.mdx | 168 +++
.../react/graphql/graphql-operations.mdx | 45 +
.../react/graphql/graphql-schema.mdx | 258 ++++
.../learn/developer/react/graphql/index.mdx | 18 +
.../graphql/load-schema-to-dgraph-cloud.mdx | 20 +
.../react/graphql/react-graphql-mutations.mdx | 302 +++++
.../react/graphql/react-graphql-queries.mdx | 302 +++++
.../reference/learn/developer/react/index.mdx | 23 +
.../developer/react/react-conclusion.mdx | 32 +
.../developer/react/react-introduction.mdx | 109 ++
.../react/react-provision-backend.mdx | 27 +
.../react-ui/connect-to-dgraph-cloud.mdx | 80 ++
.../learn/developer/react/react-ui/index.mdx | 20 +
.../react/react-ui/react-app-boiler-plate.mdx | 67 +
.../react/react-ui/react-routing.mdx | 108 ++
.../react-ui/react-ui-graphql-mutations.mdx | 453 +++++++
.../react-ui/react-ui-graphql-queries.mdx | 564 +++++++++
.../developer/react/react-ui/tech-stack.mdx | 21 +
.../learn/developer/sample-apps/charts.mdx | 37 +
.../learn/developer/sample-apps/devjokes.mdx | 48 +
.../learn/developer/sample-apps/index.mdx | 21 +
.../learn/developer/sample-apps/pokedex.mdx | 28 +
.../learn/developer/sample-apps/surveyo.mdx | 45 +
.../learn/developer/sample-apps/todos.mdx | 43 +
.../developer/todo-app-tutorial/index.mdx | 3 +
.../developer/todo-app-tutorial/todo-UI.mdx | 468 +++++++
.../todo-app-tutorial/todo-auth-rules.mdx | 75 ++
.../todo-app-tutorial/todo-auth0-jwt.mdx | 263 ++++
.../todo-app-tutorial/todo-deploy.mdx | 77 ++
.../todo-app-tutorial/todo-firebase-jwt.mdx | 307 +++++
.../todo-app-tutorial/todo-overview.mdx | 21 +
.../todo-app-tutorial/todo-schema-design.mdx | 245 ++++
dgraph/reference/learn/index.mdx | 6 +
.../migration/about-data-migration.mdx | 13 +
dgraph/reference/migration/index.mdx | 3 +
.../reference/migration/loading-csv-data.mdx | 200 +++
dgraph/reference/migration/migrate-tool.mdx | 87 ++
.../reference/query-language/aggregation.mdx | 133 ++
dgraph/reference/query-language/alias.mdx | 43 +
.../query-language/cascade-directive.mdx | 146 +++
.../query-language/connecting-filters.mdx | 24 +
dgraph/reference/query-language/count.mdx | 41 +
dgraph/reference/query-language/debug.mdx | 67 +
.../query-language/expand-predicates.mdx | 71 ++
dgraph/reference/query-language/facets.mdx | 349 ++++++
dgraph/reference/query-language/fragments.mdx | 34 +
dgraph/reference/query-language/functions.mdx | 760 +++++++++++
.../query-language/graphql-fundamentals.mdx | 57 +
.../query-language/graphql-variables.mdx | 104 ++
dgraph/reference/query-language/groupby.mdx | 44 +
.../query-language/ignorereflex-directive.mdx | 14 +
dgraph/reference/query-language/index.mdx | 3 +
.../indexing-custom-tokenizers.mdx | 582 +++++++++
.../query-language/kshortest-path-queries.mdx | 323 +++++
.../query-language/language-support.mdx | 78 ++
.../math-on-value-variables.mdx | 73 ++
.../query-language/multiple-query-blocks.mdx | 115 ++
.../query-language/normalize-directive.mdx | 17 +
.../reference/query-language/pagination.mdx | 112 ++
.../query-language/query-variables.mdx | 57 +
.../query-language/recurse-query.mdx | 26 +
dgraph/reference/query-language/sorting.mdx | 66 +
.../query-language/value-variables.mdx | 136 ++
dgraph/reference/ratel/backups.mdx | 16 +
dgraph/reference/ratel/cluster.mdx | 22 +
dgraph/reference/ratel/connection.mdx | 57 +
dgraph/reference/ratel/console.mdx | 45 +
dgraph/reference/ratel/index.mdx | 3 +
dgraph/reference/ratel/overview.mdx | 26 +
dgraph/reference/ratel/schema.mdx | 49 +
dgraph/reference/releases/index.mdx | 48 +
snippets/sdk-header.mdx | 2 +-
304 files changed, 44103 insertions(+), 1 deletion(-)
create mode 100644 dgraph/reference/cloud/admin/authentication.mdx
create mode 100644 dgraph/reference/cloud/admin/clone.mdx
create mode 100644 dgraph/reference/cloud/admin/drop-data.mdx
create mode 100644 dgraph/reference/cloud/admin/import-export.mdx
create mode 100644 dgraph/reference/cloud/admin/index.mdx
create mode 100644 dgraph/reference/cloud/admin/monitoring.mdx
create mode 100644 dgraph/reference/cloud/admin/overview.mdx
create mode 100644 dgraph/reference/cloud/admin/schema-modes.mdx
create mode 100644 dgraph/reference/cloud/admin/schema.mdx
create mode 100644 dgraph/reference/cloud/advanced-queries.mdx
create mode 100644 dgraph/reference/cloud/cloud-api/authentication.mdx
create mode 100644 dgraph/reference/cloud/cloud-api/backend.mdx
create mode 100644 dgraph/reference/cloud/cloud-api/backup.mdx
create mode 100644 dgraph/reference/cloud/cloud-api/index.mdx
create mode 100644 dgraph/reference/cloud/cloud-api/lambda.mdx
create mode 100644 dgraph/reference/cloud/cloud-api/overview.mdx
create mode 100644 dgraph/reference/cloud/cloud-api/schema.mdx
create mode 100644 dgraph/reference/cloud/cloud-multitenancy.mdx
create mode 100644 dgraph/reference/cloud/index.mdx
create mode 100644 dgraph/reference/cloud/introduction.mdx
create mode 100644 dgraph/reference/cloud/migrating-from-hosted-dgraph.mdx
create mode 100644 dgraph/reference/cloud/provision-backend.mdx
create mode 100644 dgraph/reference/deploy/admin/data-compression.mdx
create mode 100644 dgraph/reference/deploy/admin/dgraph-administration.mdx
create mode 100644 dgraph/reference/deploy/admin/index.mdx
create mode 100644 dgraph/reference/deploy/admin/log-format.mdx
create mode 100644 dgraph/reference/deploy/admin/metrics.mdx
create mode 100644 dgraph/reference/deploy/admin/tracing.mdx
create mode 100644 dgraph/reference/deploy/cli-command-reference.mdx
create mode 100644 dgraph/reference/deploy/cluster-checklist.mdx
create mode 100644 dgraph/reference/deploy/cluster-setup.mdx
create mode 100644 dgraph/reference/deploy/config.mdx
create mode 100644 dgraph/reference/deploy/decrypt.mdx
create mode 100644 dgraph/reference/deploy/dgraph-alpha.mdx
create mode 100644 dgraph/reference/deploy/dgraph-zero.mdx
create mode 100644 dgraph/reference/deploy/index.mdx
create mode 100644 dgraph/reference/deploy/installation/download.mdx
create mode 100644 dgraph/reference/deploy/installation/index.mdx
create mode 100644 dgraph/reference/deploy/installation/kubernetes/cluster-types.mdx
create mode 100644 dgraph/reference/deploy/installation/kubernetes/ha-cluster.mdx
create mode 100644 dgraph/reference/deploy/installation/kubernetes/index.mdx
create mode 100644 dgraph/reference/deploy/installation/kubernetes/monitoring-cluster.mdx
create mode 100644 dgraph/reference/deploy/installation/kubernetes/single-server-cluster.mdx
create mode 100644 dgraph/reference/deploy/installation/lambda-server.mdx
create mode 100644 dgraph/reference/deploy/installation/production-checklist.mdx
create mode 100644 dgraph/reference/deploy/installation/single-host-setup.mdx
create mode 100644 dgraph/reference/deploy/monitoring.mdx
create mode 100644 dgraph/reference/deploy/security/index.mdx
create mode 100644 dgraph/reference/deploy/security/ports-usage.mdx
create mode 100644 dgraph/reference/deploy/security/tls-configuration.mdx
create mode 100644 dgraph/reference/deploy/troubleshooting.mdx
create mode 100644 dgraph/reference/design-concepts/acl-concept.mdx
create mode 100644 dgraph/reference/design-concepts/badger-concept.mdx
create mode 100644 dgraph/reference/design-concepts/clients-concept.mdx
create mode 100644 dgraph/reference/design-concepts/consistency-model.mdx
create mode 100644 dgraph/reference/design-concepts/discovery-concept.mdx
create mode 100644 dgraph/reference/design-concepts/dql-concept.mdx
create mode 100644 dgraph/reference/design-concepts/dql-graphql-layering-concept.mdx
create mode 100644 dgraph/reference/design-concepts/facets-concept.mdx
create mode 100644 dgraph/reference/design-concepts/graphql-concept.mdx
create mode 100644 dgraph/reference/design-concepts/group-concept.mdx
create mode 100644 dgraph/reference/design-concepts/index-tokenize-concept.mdx
create mode 100644 dgraph/reference/design-concepts/index.mdx
create mode 100644 dgraph/reference/design-concepts/lambda-concept.mdx
create mode 100644 dgraph/reference/design-concepts/minimizing-network-calls.mdx
create mode 100644 dgraph/reference/design-concepts/namespace-tenant-concept.mdx
create mode 100644 dgraph/reference/design-concepts/network-call-minimization-concept.mdx
create mode 100644 dgraph/reference/design-concepts/posting-list-concept.mdx
create mode 100644 dgraph/reference/design-concepts/protocol-buffers-concept.mdx
create mode 100644 dgraph/reference/design-concepts/queries-process.mdx
create mode 100644 dgraph/reference/design-concepts/raft.mdx
create mode 100644 dgraph/reference/design-concepts/relationships-concept.mdx
create mode 100644 dgraph/reference/design-concepts/replication-concept.mdx
create mode 100644 dgraph/reference/design-concepts/transaction-mutation-concept.mdx
create mode 100644 dgraph/reference/design-concepts/transactions-concept.mdx
create mode 100644 dgraph/reference/design-concepts/wal-memtable-concept.mdx
create mode 100644 dgraph/reference/design-concepts/workers-concept.mdx
create mode 100644 dgraph/reference/dgraph-glossary.mdx
create mode 100644 dgraph/reference/dgraph-overview.md
create mode 100644 dgraph/reference/dgraph-overview.mdx
create mode 100644 dgraph/reference/dql/clients/csharp.mdx
create mode 100644 dgraph/reference/dql/clients/go.mdx
create mode 100644 dgraph/reference/dql/clients/index.mdx
create mode 100644 dgraph/reference/dql/clients/java.mdx
create mode 100644 dgraph/reference/dql/clients/javascript/grpc.mdx
create mode 100644 dgraph/reference/dql/clients/javascript/http.mdx
create mode 100644 dgraph/reference/dql/clients/javascript/index.mdx
create mode 100644 dgraph/reference/dql/clients/python.mdx
create mode 100644 dgraph/reference/dql/clients/raw-http.mdx
create mode 100644 dgraph/reference/dql/clients/unofficial-clients.mdx
create mode 100644 dgraph/reference/dql/dql-get-started.mdx
create mode 100644 dgraph/reference/dql/dql-schema.mdx
create mode 100644 dgraph/reference/dql/dql-syntax/dql-mutation.mdx
create mode 100644 dgraph/reference/dql/dql-syntax/dql-query.mdx
create mode 100644 dgraph/reference/dql/dql-syntax/dql-rdf.mdx
create mode 100644 dgraph/reference/dql/dql-syntax/index.mdx
create mode 100644 dgraph/reference/dql/dql-syntax/json-mutation-format.mdx
create mode 100644 dgraph/reference/dql/dql-syntax/to-sort.md.txt
create mode 100644 dgraph/reference/dql/index.mdx
create mode 100644 dgraph/reference/dql/mutations/external-ids-upsert-block.mdx
create mode 100644 dgraph/reference/dql/mutations/index.mdx
create mode 100644 dgraph/reference/dql/mutations/uid-upsert.mdx
create mode 100644 dgraph/reference/dql/mutations/val-upsert.mdx
create mode 100644 dgraph/reference/dql/predicate-indexing.mdx
create mode 100644 dgraph/reference/dql/tips/index.mdx
create mode 100644 dgraph/reference/enterprise-features/access-control-lists.mdx
create mode 100644 dgraph/reference/enterprise-features/audit-logs.mdx
create mode 100644 dgraph/reference/enterprise-features/binary-backups.mdx
create mode 100644 dgraph/reference/enterprise-features/change-data-capture.mdx
create mode 100644 dgraph/reference/enterprise-features/encryption-at-rest.mdx
create mode 100644 dgraph/reference/enterprise-features/index.mdx
create mode 100644 dgraph/reference/enterprise-features/learner-nodes.mdx
create mode 100644 dgraph/reference/enterprise-features/license.mdx
create mode 100644 dgraph/reference/enterprise-features/lsbackup.mdx
create mode 100644 dgraph/reference/enterprise-features/multitenancy.mdx
create mode 100644 dgraph/reference/graphql-dql/dql-for-graphql.mdx
create mode 100644 dgraph/reference/graphql-dql/graphql-data-loading.mdx
create mode 100644 dgraph/reference/graphql-dql/graphql-data-migration.mdx
create mode 100644 dgraph/reference/graphql-dql/graphql-dgraph.mdx
create mode 100644 dgraph/reference/graphql-dql/graphql-dql-schema.mdx
create mode 100644 dgraph/reference/graphql-dql/index.mdx
create mode 100644 dgraph/reference/graphql/admin/index.mdx
create mode 100644 dgraph/reference/graphql/custom/custom-dql.mdx
create mode 100644 dgraph/reference/graphql/custom/custom-overview.mdx
create mode 100644 dgraph/reference/graphql/custom/directive.mdx
create mode 100644 dgraph/reference/graphql/custom/field.mdx
create mode 100644 dgraph/reference/graphql/custom/index.mdx
create mode 100644 dgraph/reference/graphql/custom/mutation.mdx
create mode 100644 dgraph/reference/graphql/custom/query.mdx
create mode 100644 dgraph/reference/graphql/federation/index.mdx
create mode 100644 dgraph/reference/graphql/graphql-clients/endpoint/graphql-get-request.mdx
create mode 100644 dgraph/reference/graphql/graphql-clients/endpoint/graphql-request.mdx
create mode 100644 dgraph/reference/graphql/graphql-clients/endpoint/graphql-response.mdx
create mode 100644 dgraph/reference/graphql/graphql-clients/endpoint/index.mdx
create mode 100644 dgraph/reference/graphql/graphql-clients/graphql-ide.mdx
create mode 100644 dgraph/reference/graphql/graphql-clients/graphql-ui.mdx
create mode 100644 dgraph/reference/graphql/graphql-clients/index.mdx
create mode 100644 dgraph/reference/graphql/index.mdx
create mode 100644 dgraph/reference/graphql/lambda/field.mdx
create mode 100644 dgraph/reference/graphql/lambda/index.mdx
create mode 100644 dgraph/reference/graphql/lambda/lambda-overview.mdx
create mode 100644 dgraph/reference/graphql/lambda/mutation.mdx
create mode 100644 dgraph/reference/graphql/lambda/query.mdx
create mode 100644 dgraph/reference/graphql/lambda/webhook.mdx
create mode 100644 dgraph/reference/graphql/mutations/add.mdx
create mode 100644 dgraph/reference/graphql/mutations/deep.mdx
create mode 100644 dgraph/reference/graphql/mutations/delete.mdx
create mode 100644 dgraph/reference/graphql/mutations/index.mdx
create mode 100644 dgraph/reference/graphql/mutations/mutations-overview.mdx
create mode 100644 dgraph/reference/graphql/mutations/update.mdx
create mode 100644 dgraph/reference/graphql/mutations/upsert.mdx
create mode 100644 dgraph/reference/graphql/queries/aggregate.mdx
create mode 100644 dgraph/reference/graphql/queries/and-or-not.mdx
create mode 100644 dgraph/reference/graphql/queries/cached-results.mdx
create mode 100644 dgraph/reference/graphql/queries/cascade.mdx
create mode 100644 dgraph/reference/graphql/queries/index.mdx
create mode 100644 dgraph/reference/graphql/queries/order-page.mdx
create mode 100644 dgraph/reference/graphql/queries/persistent-queries.mdx
create mode 100644 dgraph/reference/graphql/queries/queries-overview.mdx
create mode 100644 dgraph/reference/graphql/queries/search-filtering.mdx
create mode 100644 dgraph/reference/graphql/queries/skip-include.mdx
create mode 100644 dgraph/reference/graphql/queries/vector-similarity.mdx
create mode 100644 dgraph/reference/graphql/quick-start/index.mdx
create mode 100644 dgraph/reference/graphql/schema/dgraph-schema.mdx
create mode 100644 dgraph/reference/graphql/schema/directives/auth.mdx
create mode 100644 dgraph/reference/graphql/schema/directives/deprecated.mdx
create mode 100644 dgraph/reference/graphql/schema/directives/directive-dgraph.mdx
create mode 100644 dgraph/reference/graphql/schema/directives/directive-withsubscription.mdx
create mode 100644 dgraph/reference/graphql/schema/directives/embedding.mdx
create mode 100644 dgraph/reference/graphql/schema/directives/generate.mdx
create mode 100644 dgraph/reference/graphql/schema/directives/ids.mdx
create mode 100644 dgraph/reference/graphql/schema/directives/index.mdx
create mode 100644 dgraph/reference/graphql/schema/directives/search.mdx
create mode 100644 dgraph/reference/graphql/schema/documentation.mdx
create mode 100644 dgraph/reference/graphql/schema/graph-links.mdx
create mode 100644 dgraph/reference/graphql/schema/index.mdx
create mode 100644 dgraph/reference/graphql/schema/migration.mdx
create mode 100644 dgraph/reference/graphql/schema/reserved.mdx
create mode 100644 dgraph/reference/graphql/schema/types.mdx
create mode 100644 dgraph/reference/graphql/security/RBAC-rules.mdx
create mode 100644 dgraph/reference/graphql/security/anonymous-access.mdx
create mode 100644 dgraph/reference/graphql/security/auth-tips.mdx
create mode 100644 dgraph/reference/graphql/security/cors.mdx
create mode 100644 dgraph/reference/graphql/security/graphtraversal-rules.mdx
create mode 100644 dgraph/reference/graphql/security/index.mdx
create mode 100644 dgraph/reference/graphql/security/jwt.mdx
create mode 100644 dgraph/reference/graphql/security/mutations.mdx
create mode 100644 dgraph/reference/graphql/subscriptions/index.mdx
create mode 100644 dgraph/reference/howto/commandline/about-cli.mdx
create mode 100644 dgraph/reference/howto/commandline/create-cli.mdx
create mode 100644 dgraph/reference/howto/commandline/index.mdx
create mode 100644 dgraph/reference/howto/completion.mdx
create mode 100644 dgraph/reference/howto/concurrent-modification-java-multithreaded.mdx
create mode 100644 dgraph/reference/howto/dgraph-sentry-integration.mdx
create mode 100644 dgraph/reference/howto/dql-schema-request.mdx
create mode 100644 dgraph/reference/howto/drop-data.mdx
create mode 100644 dgraph/reference/howto/exportdata/about-export.mdx
create mode 100644 dgraph/reference/howto/exportdata/export-data-cloud.mdx
create mode 100644 dgraph/reference/howto/exportdata/export-data.mdx
create mode 100644 dgraph/reference/howto/exportdata/index.mdx
create mode 100644 dgraph/reference/howto/importdata/about_import.mdx
create mode 100644 dgraph/reference/howto/importdata/bulk-loader.mdx
create mode 100644 dgraph/reference/howto/importdata/index.mdx
create mode 100644 dgraph/reference/howto/importdata/live-loader.mdx
create mode 100644 dgraph/reference/howto/index.mdx
create mode 100644 dgraph/reference/howto/jepsen-tests.mdx
create mode 100644 dgraph/reference/howto/load-balancing-nginx.mdx
create mode 100644 dgraph/reference/howto/login-system.mdx
create mode 100644 dgraph/reference/howto/retrieving-debug-information.mdx
create mode 100644 dgraph/reference/howto/update-dgraph-types.mdx
create mode 100644 dgraph/reference/howto/upserts.mdx
create mode 100644 dgraph/reference/howto/using-debug-tool.mdx
create mode 100644 dgraph/reference/howto/using-increment-tool.mdx
create mode 100644 dgraph/reference/learn/administrator/index.mdx
create mode 100644 dgraph/reference/learn/data-engineer/data-model-101/01-dm-101-introduction.mdx
create mode 100644 dgraph/reference/learn/data-engineer/data-model-101/02-relational-data-model.mdx
create mode 100644 dgraph/reference/learn/data-engineer/data-model-101/03-graph-data-model.mdx
create mode 100644 dgraph/reference/learn/data-engineer/data-model-101/04-rel-query.mdx
create mode 100644 dgraph/reference/learn/data-engineer/data-model-101/05-graph-query.mdx
create mode 100644 dgraph/reference/learn/data-engineer/data-model-101/06-dm-101-conclusion.mdx
create mode 100644 dgraph/reference/learn/data-engineer/data-model-101/index.mdx
create mode 100644 dgraph/reference/learn/data-engineer/get-started-with-dgraph/index.mdx
create mode 100644 dgraph/reference/learn/data-engineer/get-started-with-dgraph/tutorial-1/index.mdx
create mode 100644 dgraph/reference/learn/data-engineer/get-started-with-dgraph/tutorial-2/index.mdx
create mode 100644 dgraph/reference/learn/data-engineer/get-started-with-dgraph/tutorial-3/index.mdx
create mode 100644 dgraph/reference/learn/data-engineer/get-started-with-dgraph/tutorial-4/index.mdx
create mode 100644 dgraph/reference/learn/data-engineer/get-started-with-dgraph/tutorial-5/index.mdx
create mode 100644 dgraph/reference/learn/data-engineer/get-started-with-dgraph/tutorial-6/index.mdx
create mode 100644 dgraph/reference/learn/data-engineer/get-started-with-dgraph/tutorial-7/index.mdx
create mode 100644 dgraph/reference/learn/data-engineer/get-started-with-dgraph/tutorial-8/index.mdx
create mode 100644 dgraph/reference/learn/data-engineer/index.mdx
create mode 100644 dgraph/reference/learn/developer/index.mdx
create mode 100644 dgraph/reference/learn/developer/react/graphql/design-app-schema.mdx
create mode 100644 dgraph/reference/learn/developer/react/graphql/graphql-operations.mdx
create mode 100644 dgraph/reference/learn/developer/react/graphql/graphql-schema.mdx
create mode 100644 dgraph/reference/learn/developer/react/graphql/index.mdx
create mode 100644 dgraph/reference/learn/developer/react/graphql/load-schema-to-dgraph-cloud.mdx
create mode 100644 dgraph/reference/learn/developer/react/graphql/react-graphql-mutations.mdx
create mode 100644 dgraph/reference/learn/developer/react/graphql/react-graphql-queries.mdx
create mode 100644 dgraph/reference/learn/developer/react/index.mdx
create mode 100644 dgraph/reference/learn/developer/react/react-conclusion.mdx
create mode 100644 dgraph/reference/learn/developer/react/react-introduction.mdx
create mode 100644 dgraph/reference/learn/developer/react/react-provision-backend.mdx
create mode 100644 dgraph/reference/learn/developer/react/react-ui/connect-to-dgraph-cloud.mdx
create mode 100644 dgraph/reference/learn/developer/react/react-ui/index.mdx
create mode 100644 dgraph/reference/learn/developer/react/react-ui/react-app-boiler-plate.mdx
create mode 100644 dgraph/reference/learn/developer/react/react-ui/react-routing.mdx
create mode 100644 dgraph/reference/learn/developer/react/react-ui/react-ui-graphql-mutations.mdx
create mode 100644 dgraph/reference/learn/developer/react/react-ui/react-ui-graphql-queries.mdx
create mode 100644 dgraph/reference/learn/developer/react/react-ui/tech-stack.mdx
create mode 100644 dgraph/reference/learn/developer/sample-apps/charts.mdx
create mode 100644 dgraph/reference/learn/developer/sample-apps/devjokes.mdx
create mode 100644 dgraph/reference/learn/developer/sample-apps/index.mdx
create mode 100644 dgraph/reference/learn/developer/sample-apps/pokedex.mdx
create mode 100644 dgraph/reference/learn/developer/sample-apps/surveyo.mdx
create mode 100644 dgraph/reference/learn/developer/sample-apps/todos.mdx
create mode 100644 dgraph/reference/learn/developer/todo-app-tutorial/index.mdx
create mode 100644 dgraph/reference/learn/developer/todo-app-tutorial/todo-UI.mdx
create mode 100644 dgraph/reference/learn/developer/todo-app-tutorial/todo-auth-rules.mdx
create mode 100644 dgraph/reference/learn/developer/todo-app-tutorial/todo-auth0-jwt.mdx
create mode 100644 dgraph/reference/learn/developer/todo-app-tutorial/todo-deploy.mdx
create mode 100644 dgraph/reference/learn/developer/todo-app-tutorial/todo-firebase-jwt.mdx
create mode 100644 dgraph/reference/learn/developer/todo-app-tutorial/todo-overview.mdx
create mode 100644 dgraph/reference/learn/developer/todo-app-tutorial/todo-schema-design.mdx
create mode 100644 dgraph/reference/learn/index.mdx
create mode 100644 dgraph/reference/migration/about-data-migration.mdx
create mode 100644 dgraph/reference/migration/index.mdx
create mode 100644 dgraph/reference/migration/loading-csv-data.mdx
create mode 100644 dgraph/reference/migration/migrate-tool.mdx
create mode 100644 dgraph/reference/query-language/aggregation.mdx
create mode 100644 dgraph/reference/query-language/alias.mdx
create mode 100644 dgraph/reference/query-language/cascade-directive.mdx
create mode 100644 dgraph/reference/query-language/connecting-filters.mdx
create mode 100644 dgraph/reference/query-language/count.mdx
create mode 100644 dgraph/reference/query-language/debug.mdx
create mode 100644 dgraph/reference/query-language/expand-predicates.mdx
create mode 100644 dgraph/reference/query-language/facets.mdx
create mode 100644 dgraph/reference/query-language/fragments.mdx
create mode 100644 dgraph/reference/query-language/functions.mdx
create mode 100644 dgraph/reference/query-language/graphql-fundamentals.mdx
create mode 100644 dgraph/reference/query-language/graphql-variables.mdx
create mode 100644 dgraph/reference/query-language/groupby.mdx
create mode 100644 dgraph/reference/query-language/ignorereflex-directive.mdx
create mode 100644 dgraph/reference/query-language/index.mdx
create mode 100644 dgraph/reference/query-language/indexing-custom-tokenizers.mdx
create mode 100644 dgraph/reference/query-language/kshortest-path-queries.mdx
create mode 100644 dgraph/reference/query-language/language-support.mdx
create mode 100644 dgraph/reference/query-language/math-on-value-variables.mdx
create mode 100644 dgraph/reference/query-language/multiple-query-blocks.mdx
create mode 100644 dgraph/reference/query-language/normalize-directive.mdx
create mode 100644 dgraph/reference/query-language/pagination.mdx
create mode 100644 dgraph/reference/query-language/query-variables.mdx
create mode 100644 dgraph/reference/query-language/recurse-query.mdx
create mode 100644 dgraph/reference/query-language/sorting.mdx
create mode 100644 dgraph/reference/query-language/value-variables.mdx
create mode 100644 dgraph/reference/ratel/backups.mdx
create mode 100644 dgraph/reference/ratel/cluster.mdx
create mode 100644 dgraph/reference/ratel/connection.mdx
create mode 100644 dgraph/reference/ratel/console.mdx
create mode 100644 dgraph/reference/ratel/index.mdx
create mode 100644 dgraph/reference/ratel/overview.mdx
create mode 100644 dgraph/reference/ratel/schema.mdx
create mode 100644 dgraph/reference/releases/index.mdx
diff --git a/dgraph/reference/cloud/admin/authentication.mdx b/dgraph/reference/cloud/admin/authentication.mdx
new file mode 100644
index 00000000..86aba6f7
--- /dev/null
+++ b/dgraph/reference/cloud/admin/authentication.mdx
@@ -0,0 +1,47 @@
+---
+title: API keys
+description:
+---
+
+Client applications accessing the Dgraph Cloud cluster endpoints
+
+- `/query`
+- `/mutate`
+- `/commit`
+
+must present a valid **client** or **admin** **API key** in the `Dg-Auth` or
+`X-Auth-Token` header of every HTTP request.
+
+Client applications accessing the Dgraph Cloud cluster endpoints
+
+- `/admin`
+- `/admin/slash`
+- `/alter`
+
+must present a valid **admin API key** in the `Dg-Auth` or `X-Auth-Token` header
+of every HTTP request.
+
+Client applications accessing the Dgraph Cloud cluster endpoint
+
+- `/graphql`
+
+with [anonymous access](dgraph/reference/graphql/security/anonymous-access) not
+set on the requested operation, must present a valid **client** or **admin API
+key** in the `Dg-Auth` or `X-Auth-Token` header of every HTTP request.
+
+**Client API keys** can only be used to perform query, mutation, and commit
+operations. **Admin API keys** can be used to perform both client operations and
+admin operations like drop data, destroy backend, and update schema.
+
+## Generate a new API key
+
+To generate a new API key:
+
+1. Go to the [Settings](https://cloud.dgraph.io/_/settings) section of Dgraph
+ Cloud console.
+2. Access the [API Keys](https://cloud.dgraph.io/_/settings?tab=api-keys) tab.
+3. Click the **Create New** button.
+4. Give the key a name, select the **Client** or **Admin** type, and click
+   **Create**.
+5. Copy the key to a safe place; it will not be accessible once you leave the
+   page.
diff --git a/dgraph/reference/cloud/admin/clone.mdx b/dgraph/reference/cloud/admin/clone.mdx
new file mode 100644
index 00000000..bbef737c
--- /dev/null
+++ b/dgraph/reference/cloud/admin/clone.mdx
@@ -0,0 +1,18 @@
+---
+title: Cloning Backend
+---
+
+Cloning a backend allows making a copy of an existing backend. The clone will be
+created with all the data and schema of the original backend present at the time
+of cloning. The clone will have its own endpoint and will be independent of the
+original backend once it is created. Any further changes in either backend will
+not be reflected in the other. Currently, a clone can only be created in the
+same zone as the original backend.
+
+In order to clone your backend, click on the `Clone Backend` button under the
+[Settings](https://cloud.dgraph.io/_/settings) tab in the dashboard's sidebar.
+
+You can also perform the restore operation on an existing backend if you have an
+unused backend or want to reuse an existing endpoint. But note that the restore
+operation will drop all the existing data along with schema on the current
+backend and replace it with the original backend's data and schema.
diff --git a/dgraph/reference/cloud/admin/drop-data.mdx b/dgraph/reference/cloud/admin/drop-data.mdx
new file mode 100644
index 00000000..5edc5e4f
--- /dev/null
+++ b/dgraph/reference/cloud/admin/drop-data.mdx
@@ -0,0 +1,44 @@
+---
+title: Dropping Data from your Backend
+description:
+---
+
+It is possible to drop all data from your Dgraph Cloud backend, and start afresh
+while retaining the same endpoint. Be careful, as this operation is not
+reversible, and all data will be lost. It is highly recommended that you
+[export](./cloud/admin/import-export) your data before you drop your data.
+
+In order to drop all data while retaining the schema, click the `Drop Data`
+button under the [Schema](https://cloud.dgraph.io/_/schema) tab in the sidebar.
+
+
+
+### Dropping Data Programmatically
+
+In order to do this, call the `dropData` mutation on `/admin/slash`. As an
+example, if your GraphQL endpoint is
+`https://frozen-mango.us-west-2.aws.cloud.dgraph.io/graphql`, then the admin
+endpoint will be at
+`https://frozen-mango.us-west-2.aws.cloud.dgraph.io/admin/slash`.
+
+Please note that this endpoint requires
+[Authentication](./cloud/admin/authentication).
+
+Please see the following curl as an example.
+
+```
+curl 'https:///admin/slash' \
+ -H 'X-Auth-Token: ' \
+ -H 'Content-Type: application/graphql' \
+ --data-binary 'mutation { dropData(allData: true) { response { code message } } }'
+```
+
+If you would like to drop the schema along with the data, then you can set the
+`allDataAndSchema` flag.
+
+```
+curl 'https:///admin/slash' \
+ -H 'X-Auth-Token: ' \
+ -H 'Content-Type: application/graphql' \
+ --data-binary 'mutation { dropData(allDataAndSchema: true) { response { code message } } }'
+```
diff --git a/dgraph/reference/cloud/admin/import-export.mdx b/dgraph/reference/cloud/admin/import-export.mdx
new file mode 100644
index 00000000..1dfe7e6f
--- /dev/null
+++ b/dgraph/reference/cloud/admin/import-export.mdx
@@ -0,0 +1,141 @@
+---
+title: Importing and Exporting data from Dgraph Cloud
+---
+
+## Exporting and Importing Data in Dgraph Cloud
+
+You can export your data as an Administrator from one Dgraph Cloud backend, and
+then import this data back into another Dgraph instance or Dgraph Cloud backend.
+For more information about how to export data in Dgraph Cloud, see
+[Export data](./howto/exportdata/export-data-cloud). You can also export data
+from Dgraph Cloud programmatically using the Dgraph Cloud API. For more
+information, see [Cloud API documentation](./cloud/cloud-api/backup).
+
+To import data to Dgraph Cloud, see
+[live loader](./howto/importdata/live-loader).
+
+## Exporting Data with Multi-Tenancy feature enabled in Dgraph Cloud
+
+
+ With Multi-Tenancy feature enabled, for any GraphQL request you need to
+ provide the `accessJWT` for the specific user in the `X-Dgraph-AccessToken`
+ header.
+
+
+You can trigger two types of exports:
+
+- Cluster-wide export: this is an export of the entire backend (including all
+ namespaces). This request can be only triggered by the
+ [_Guardian of Galaxy_](https://dgraph.io/docs/enterprise-features/multitenancy/#guardians-of-the-galaxy)
+ users.
+- Namespace-specific export: this is an export of a specific namespace. This
+ request can be triggered by the _Guardian of Galaxy_ users and by the
+ _Guardian of Namespace_ users.
+
+### Cluster-wide Exports
+
+This can only be done by the _Guardian of Galaxy_ users (AKA Super Admin), the
+steps are:
+
+1. Get the `accessJWT` token for the _Guardian of Galaxy_ user. Send the
+ following GraphQL mutation to the `/admin` endpoint:
+
+```graphql
+mutation login($userId: String, $password: String, $namespace: Int) {
+ login(userId: $userId, password: $password, namespace: $namespace) {
+ response {
+ accessJWT
+ refreshJWT
+ }
+ }
+}
+```
+
+Your variables should be referring to the _Guardian of Galaxy_ user:
+
+```json
+{
+ "userId": "groot",
+ "password": "password",
+ "namespace": 0
+}
+```
+
+2. Once you have obtained the `accessJWT` token, pass it in the
+   `X-Dgraph-AccessToken` header; only then can you send the following
+   GraphQL mutation to the `/admin/slash` endpoint:
+
+```graphql
+mutation {
+ export(namespace: -1) {
+ response {
+ code
+ message
+ }
+ exportId
+ taskId
+ }
+}
+```
+
+3. Once done, you can now send the following GraphQL mutation to get the
+ `signedUrls` from where you can download your export files:
+
+```graphql
+query {
+ exportStatus(
+ exportId: ""
+ taskId: ""
+ ) {
+ kind
+ lastUpdated
+ signedUrls
+ status
+ }
+}
+```
+
+### Namespace-specific Exports
+
+Namespace-specific exports can be triggered by the _Guardian of Galaxy_ users.
+In this case you can follow the same steps as for the cluster-wide exports and
+change the namespace value from `-1` to the namespace you want to export. It's
+important that you get the `accessJWT` token for the _Guardian of Galaxy_ user
+and pass it in the `X-Dgraph-AccessToken` header.
+
+E.g. if you want to export the namespace `123`, your GraphQL request sent to
+the `/admin/slash` endpoint would look like:
+
+```graphql
+mutation {
+ export(namespace: 123) {
+ response {
+ code
+ message
+ }
+ exportId
+ taskId
+ }
+}
+```
+
+You can also trigger namespace-specific export using the _Guardian of Namespace_
+users, in this case there is no need to specify any namespace in the GraphQL
+request as these users can only export their own namespace. It's important that
+you get the `accessJWT` token for the _Guardian of Namespace_ user and pass it
+in the `X-Dgraph-AccessToken` header.
+
+The GraphQL request sent to the `/admin/slash` endpoint would be:
+
+```graphql
+mutation {
+ export {
+ response {
+ code
+ message
+ }
+ exportId
+ taskId
+ }
+}
+```
diff --git a/dgraph/reference/cloud/admin/index.mdx b/dgraph/reference/cloud/admin/index.mdx
new file mode 100644
index 00000000..02eb5517
--- /dev/null
+++ b/dgraph/reference/cloud/admin/index.mdx
@@ -0,0 +1,3 @@
+---
+title: Administering Your Backend
+---
diff --git a/dgraph/reference/cloud/admin/monitoring.mdx b/dgraph/reference/cloud/admin/monitoring.mdx
new file mode 100644
index 00000000..6d7cb91a
--- /dev/null
+++ b/dgraph/reference/cloud/admin/monitoring.mdx
@@ -0,0 +1,88 @@
+---
+title: Monitoring with Prometheus
+---
+
+Dgraph Cloud provides enterprises with real-time observability and high-fidelity
+telemetry of their instances with [Prometheus](https://prometheus.io/). Once
+enabled, Dgraph exposes real-time values for **Dedicated** backends at any given
+instant via the `/prometheus` endpoint. You can also configure Grafana for
+real-time visualization and analysis thus allowing for in-depth visibility into
+the behavior, performance and health of your instances.
+
+
+ Prometheus integration is only available to users of **Dedicated Instance**
+ types and not **Free** or **Shared Instance**.
+
+
+### Enable Prometheus for your instance
+
+To enable Prometheus with your Dgraph Cloud instance:
+
+1. Login to Dgraph Cloud Dashboard, select **Settings** under the **Admin**
+ subsection and then select **Modify Backend**. Alternately, you can also
+ enable Prometheus while launching a new backend.
+
+2. For your existing **Dgraph Cloud Backend**, enable the **Prometheus** option
+ under **Additional Settings**. Review and select one of the available
+ configurations viz. 1C (1 vCPU 4GB RAM), 2C, 4C, or 8C.
+
+3. Review the estimated hourly cost which should include additional charges for
+ enabling Prometheus. Click **Launch** to submit changes.
+
+
+
+### Configure your instance endpoint with Prometheus
+
+1. For all dedicated backends with Prometheus enabled, a new endpoint called
+ `/prometheus` would be available. For example, a backend at URL
+   https://subdomain.region.cloud-provider.cloud.dgraph.io/graphql would expose
+   metrics at URL -
+   https://subdomain.region.cloud-provider.cloud.dgraph.io/prometheus
+
+2. The `/prometheus` endpoint is protected with the **Admin API key**. Upon
+ accessing the URL for this endpoint, you will be prompted to enter the key.
+ More information on creating an Admin API key can be found
+ [here](https://dgraph.io/docs/cloud/admin/authentication/).
+
+ 
+
+3. Once you enter the Admin API token click **Submit** to launch the
+ **Prometheus Dashboard**.
+
+ 
+
+### Integrating with Grafana
+
+To visualize Prometheus metrics within the **Grafana Dashboard** for Dgraph
+Cloud, perform the following actions:
+
+1. Launch the Grafana Dashboard and follow the same steps to add a **Prometheus
+ Datasource** to Grafana as described
+ **[here](https://prometheus.io/docs/visualization/grafana/#creating-a-prometheus-data-source)**
+ but with the following changes:
+
+2. Under the section **HTTP**, for the option **URL**, enter the URL for your
+ Prometheus endpoint (Example -
+   https://subdomain.region.cloud-provider.cloud.dgraph.io/prometheus). For the
+ **Access** option select **Server (default)** from the dropdown menu.
+
+3. Lastly, under **Auth**, within the **Custom HTTP Headers** subsection, click
+ **Add Header** and add a new **Header** called `X-Auth-Token`. Enter your
+ Admin API key as its **Value**. The following image shows an example data
+ source configuration.
+
+ 
+
+4. Click **Save & Test** to save and test the new Prometheus data source.
+
+5. Create and populate your **Grafana Dashboard**. Select the **Prometheus Data
+ Source** that was configured earlier and select the metrics to visualize
+ (e.g. dgraph_memory_inuse_bytes, dgraph_alpha_health_status etc). If
+ correctly configured the metrics can be visualized as below:
+
+ 
+
+Your new monitoring and observability stack for Dgraph Cloud leveraging the
+Prometheus and Grafana solutions should now be ready for use. The same is useful
+to monitor your Dgraph backend efficiently, without the overhead of installing,
+maintaining, and scaling your own observability stack.
diff --git a/dgraph/reference/cloud/admin/overview.mdx b/dgraph/reference/cloud/admin/overview.mdx
new file mode 100644
index 00000000..7964d4cd
--- /dev/null
+++ b/dgraph/reference/cloud/admin/overview.mdx
@@ -0,0 +1,25 @@
+---
+title: Overview
+---
+
+Here is a guide to programmatically administering your Dgraph Cloud backend.
+
+Wherever possible, we have maintained compatibility with the corresponding
+Dgraph API, with the additional step of requiring
+[authentication](./authentication) via the `X-Auth-Token` header.
+
+
+ Keep in mind that free Dgraph Cloud backends will be frozen automatically
+ after 4 hours of inactivity.
+
+
+Please see the following topics:
+
+- [Authentication](./authentication) will guide you in creating an API token.
+ Since all admin APIs require an auth token, this is a good place to start.
+- [Schema](./schema) describes how to programmatically query and update your
+ GraphQL schema.
+- [Importing and Exporting Data](./import-export) is a guide for exporting your
+  data from a Dgraph Cloud backend, and how to import it into another cluster.
+- [Dropping Data](./drop-data) will guide you through dropping all data from
+ your Dgraph Cloud backend.
diff --git a/dgraph/reference/cloud/admin/schema-modes.mdx b/dgraph/reference/cloud/admin/schema-modes.mdx
new file mode 100644
index 00000000..36c9211b
--- /dev/null
+++ b/dgraph/reference/cloud/admin/schema-modes.mdx
@@ -0,0 +1,87 @@
+---
+title: Switch Dgraph Cloud Schema Modes
+description:
+ Dgraph Cloud provides a variety of schema modes that let you configure how the
+ underlying Dgraph Cloud instance responds to schema changes or mutation
+ requests that seek to change data stored in your backend.
+---
+
+Dgraph Cloud uses the following three schema modes, which control how the
+underlying Dgraph database instance is configured:
+
+- [Read-Only mode](#read-only-mode) (_dedicated instances only_): In this mode,
+ no schema changes or mutations are allowed
+- [Strict mode](#strict-mode): In this mode, only mutations on predicates that
+ are already present in the schema are allowed
+- [Flexible mode](#flexible-mode) (_dedicated instances only_): In this mode,
+ there are no global restrictions on schemas and mutations; this mode also
+ provides access to advanced Dgraph features
+
+Each mode is pre-configured to provide simplicity and ease-of-use. By default,
+your Dgraph Cloud schema will run in [Strict mode](#strict-mode). If you want
+your dedicated instance to have the same behavior as a local Dgraph instance,
+change your schema to [Flexible mode](#flexible-mode).
+
+### Read-Only mode
+
+In read-only mode, all mutations and attempts to alter the Cloud schema are
+blocked. You can still access your data through read-only queries.
+
+### Strict mode
+
+Strict mode is the default mode on Dgraph Cloud, and the only mode available for
+free and shared instances. In this mode, Dgraph Cloud enforces a
+[strict schema](https://dgraph.io/docs/deploy/dgraph-administration/#restricting-mutation-operations),
+only allowing mutations on predicates already present in the schema.
+
+You can use GraphQL and DQL (formerly _GraphQL+-_) queries and mutations in this
+mode, as described in the [advanced queries](/advanced-queries/) section.
+However, all queries and mutations must be valid for the applied schema.
+
+
+ In **Strict** mode, before executing a mutation on a predicate that doesn’t
+ exist in the schema, you need to add that predicate to the schema. To add a
+ predicate, perform an [`alter`
+ operation](https://dgraph.io/docs/clients/raw-http/#alter-the-database) with
+ that predicate and its schema type (_dedicated instances only_), or [update
+ your schema](./schema) to include that predicate and its schema type.
+
+
+### Flexible mode
+
+Flexible mode is suitable for users who are already familiar with Dgraph. It
+removes global restrictions on schemas and mutations, and also provides access
+to advanced Dgraph features like the following:
+
+- Directly altering the schema with the
+ [`alter`](https://dgraph.io/docs/clients/raw-http/#alter-the-database) HTTP
+ and GRPC endpoints
+- Support for access control lists
+ ([ACLs](https://dgraph.io/docs/enterprise-features/access-control-lists/))
+
+## Switch schema modes with the Dgraph Cloud console
+
+To change your schema mode on a dedicated instance, go to the
+[settings page](https://cloud.dgraph.io/_/settings), click the
+**General** tab, and then select a mode from the **Schema Mode** list
+box.
+
+## Switch schema modes with the `/admin` endpoint
+
+
+ Dgraph Labs recommends using the Dgraph Cloud [settings
+ page](https://cloud.dgraph.io/_/settings) to change your dedicated instance's
+ schema mode for most scenarios, instead of directly modifying your schema.
+
+
+You can change the schema mode for your dedicated instance directly in the
+schema using the `updateGQLSchema` mutation on the `/admin` HTTP and GRPC
+endpoints. To learn more, see [Fetch and Update Your Schema](./schema).
+
+To set your schema mode, configure `UpdateOption` to use one of the following
+values in your schema:
+
+- To use Read-Only mode, set `UpdateOption` to `readonly`
+- To use Strict mode, set `UpdateOption` to `graphql`
+- To use Flexible mode, set `UpdateOption` to `flexible`
diff --git a/dgraph/reference/cloud/admin/schema.mdx b/dgraph/reference/cloud/admin/schema.mdx
new file mode 100644
index 00000000..f92cedb0
--- /dev/null
+++ b/dgraph/reference/cloud/admin/schema.mdx
@@ -0,0 +1,43 @@
+---
+title: Fetching and Updating Your Schema
+---
+
+Your GraphQL schema can be fetched and updated using the `/admin` endpoint of
+your cluster. As an example, if your GraphQL endpoint is
+`https://frozen-mango.us-west-2.aws.cloud.dgraph.io/graphql`, then the admin
+endpoint for schema will be at
+`https://frozen-mango.us-west-2.aws.cloud.dgraph.io/admin`.
+
+This endpoint works in a similar way to the
+[/admin](https://dgraph.io/docs/graphql/admin) endpoint of Dgraph, with the
+additional constraint of [requiring authentication](/admin/authentication).
+
+### Fetching the Current Schema
+
+It is possible to fetch your current schema using the `getGQLSchema` query on
+`/admin`. Below is a sample GraphQL query which will fetch this schema.
+
+```graphql
+{
+ getGQLSchema {
+ schema
+ }
+}
+```
+
+### Setting a New Schema
+
+You can save a new schema using the `updateGQLSchema` mutation on `/admin`.
+Below is an example GraphQL body, with a variable called `sch` which must be
+passed in as a [variable](https://graphql.org/graphql-js/passing-arguments/)
+
+```graphql
+mutation ($sch: String!) {
+ updateGQLSchema(input: { set: { schema: $sch } }) {
+ gqlSchema {
+ schema
+ generatedSchema
+ }
+ }
+}
+```
diff --git a/dgraph/reference/cloud/advanced-queries.mdx b/dgraph/reference/cloud/advanced-queries.mdx
new file mode 100644
index 00000000..db9f753b
--- /dev/null
+++ b/dgraph/reference/cloud/advanced-queries.mdx
@@ -0,0 +1,219 @@
+---
+title: Advanced Queries with DQL
+---
+
+_You can now
+[embed DQL queries inside your GraphQL schema](https://dgraph.io/docs/graphql/custom/graphqlpm),
+which is recommended for most use cases. The rest of this document covers how to
+connect to your Dgraph Cloud backend with existing Dgraph clients._
+
+In addition to GraphQL support, Dgraph Cloud also supports running advanced
+queries using Dgraph Query Language (DQL) (previously named GraphQL+-). DQL is
+based on GraphQL, but adds and removes features to better support graph database
+operations. Advanced users can use DQL to send queries and mutations to Dgraph
+Cloud's HTTP or gRPC endpoints using the Dgraph client libraries. To learn more
+about the Dgraph client libraries, see the
+[client library documentation](https://dgraph.io/docs/clients/). To learn more
+about DQL, see
+[DQL Fundamentals](https://dgraph.io/docs/query-language/graphql-fundamentals/).
+
+If you are getting started with Dgraph Cloud, you should probably start out by
+using Dgraph's [GraphQL API](https://dgraph.io/docs/graphql/overview) instead.
+Dgraph's GraphQL API lets you quickly use Dgraph Cloud before moving on to the
+advanced features available using DQL.
+
+
+ Dgraph Cloud's [schema modes](/admin/schema-modes/) let you configure whether
+ and how schema changes are allowed. To alter your schema using the `/alter`
+ HTTP and GRPC endpoints, you'll need to use **Flexible Mode**.
+
+
+## Authentication
+
+The APIs documented here all require an API token for access. To learn how to
+create an API token, please see [Authentication](/admin/authentication).
+
+### HTTP
+
+You can query your backend with DQL using your cluster's `/query` endpoint. As
+an example, if your GraphQL endpoint is
+`https://frozen-mango.us-west-2.aws.cloud.dgraph.io/graphql`, then the query
+endpoint is
+`https://frozen-mango.us-west-2.aws.cloud.dgraph.io/query`.
+
+You can also access the [`/mutate`](https://dgraph.io/docs/mutations/) and
+`/commit` endpoints.
+
+For example, let's say you have the following GraphQL schema:
+
+```graphql
+type Person {
+ name: String! @search(by: [fulltext])
+ age: Int
+ country: String
+}
+```
+
+Here is an example of a cURL command with the `/mutate` endpoint:
+
+```
+curl -H "Content-Type: application/rdf" -H "x-auth-token: " -X POST "/mutate?commitNow=true" -d $'
+{
+ set {
+ _:x "John" .
+ _:x "30" .
+ _:x "US" .
+ }
+}'
+```
+
+Here is an example of a cURL command with the `/query` endpoint:
+
+```
+curl -H "Content-Type: application/dql" -H "x-auth-token: " -XPOST "/query" -d '{
+ queryPerson(func: type(Person)) {
+ Person.name
+ Person.age
+ Person.country
+ }
+}'
+```
+
+### gRPC
+
+Dgraph Cloud is compatible with most existing Dgraph clients. You can use the
+helper methods from each library to connect to your backend, passing in a Dgraph
+Cloud endpoint and an API token.
+
+Here is an example which uses the
+[pydgraph client](https://github.com/dgraph-io/pydgraph) to make gRPC requests.
+
+```python
+import pydgraph
+
+client_stub = pydgraph.DgraphClientStub.from_slash_endpoint("https://frozen-mango.eu-central-1.aws.cloud.dgraph.io/graphql", "")
+client = pydgraph.DgraphClient(client_stub)
+```
+
+Here is an example of a mutation using the `pydgraph` client:
+
+```python
+mut = {
+ "Person.name": "John Doe",
+ "Person.age": "32",
+ "Person.country": "US"
+}
+
+txn = client.txn()
+try:
+ res = txn.mutate(set_obj=mut)
+finally:
+ txn.discard()
+```
+
+Here is an example of a query using the `pydgraph` client:
+
+```python
+query = """
+{
+ queryPerson(func: type(Person)) {
+ Person.name
+ Person.age
+ Person.country
+ }
+}"""
+txn = client.txn()
+try:
+ res = txn.query(query)
+ ppl = json.loads(res.json)
+ print(ppl)
+finally:
+ txn.discard()
+```
+
+#### Connecting from Dgraph Clients
+
+Below are snippets to connect to your Dgraph Cloud backend from various Dgraph
+clients.
+
+**Python**
+
+```python
+import pydgraph
+
+client_stub = pydgraph.DgraphClientStub.from_slash_endpoint("https://frozen-mango.eu-central-1.aws.cloud.dgraph.io/graphql", "")
+client = pydgraph.DgraphClient(client_stub)
+```
+
+**JavaScript**
+
+```javascript
+const dgraph = require("dgraph-js")
+
+const clientStub = dgraph.clientStubFromSlashGraphQLEndpoint(
+ "https://frozen-mango.eu-central-1.aws.cloud.dgraph.io/graphql",
+ "",
+)
+const dgraphClient = new dgraph.DgraphClient(clientStub)
+```
+
+**Go**
+
+```golang
+// This example uses dgo
+conn, err := dgo.DialSlashEndpoint("https://frozen-mango.eu-central-1.aws.cloud.dgraph.io/graphql", "")
+if err != nil {
+ log.Fatal(err)
+}
+defer conn.Close()
+dgraphClient := dgo.NewDgraphClient(api.NewDgraphClient(conn))
+```
+
+**Java**
+
+```java
+// This example uses dgraph4j
+DgraphStub stub = DgraphClient.clientStubFromSlashEndpoint("https://frozen-mango.eu-central-1.aws.cloud.dgraph.io/graphql", "");
+DgraphClient dgraphClient = new DgraphClient(stub);
+```
+
+**C# / .NET**
+
+```c#
+var client = new DgraphClient(SlashChannel.Create("frozen-mango.eu-central-1.aws.cloud.dgraph.io:443", ""));
+```
+
+### Visualizing your Graph with Ratel
+
+You can use Ratel to visualize your Dgraph Cloud backend with DQL. You can host
+Ratel yourself, or you can use Ratel online at
+[Dgraph Play](https://play.dgraph.io/?latest#connection).
+
+To configure Ratel:
+
+1. Click the Dgraph logo in the top left to bring up the connection screen (by
+ default, it has the caption: play.dgraph.io)
+2. Enter your backend's host in the Dgraph Server URL field. This is obtained by
+ removing `/graphql` from the end of your `/graphql` endpoint URL. For
+ example, if your `/graphql` endpoint is
+ `https://frozen-mango.us-west-2.aws.cloud.dgraph.io/graphql`, then the host
+ for Ratel is `https://frozen-mango.us-west-2.aws.cloud.dgraph.io`
+3. Click the **Connect** button. You should see a green check mark next to the
+ word **Connected**.
+4. Click on the **Extra Settings** tab, and then enter your API token into the
+ **API Key** field. To create a new API token, see
+ [Authentication](/admin/authentication).
+5. Click on the **Continue** button.
+
+You can now run queries and mutations using Ratel, and see visualizations of
+your data.
+
+Ratel has certain limitations: it doesn't support backups, modifying ACLs, or
+removing nodes from the cluster.
+
+### Switching Schema Modes
+
+If you want to use DQL as your primary mode of interaction with the Dgraph Cloud
+backend (instead of primarily using the GraphQL API), you can switch your
+backend to flexible mode. To learn more, see
+[Schema Modes](/admin/schema-modes).
diff --git a/dgraph/reference/cloud/cloud-api/authentication.mdx b/dgraph/reference/cloud/cloud-api/authentication.mdx
new file mode 100644
index 00000000..cc70cbf9
--- /dev/null
+++ b/dgraph/reference/cloud/cloud-api/authentication.mdx
@@ -0,0 +1,64 @@
+---
+title: Authentication
+---
+
+## Login
+
+Login will generate a JWT token that can be used to access other Dgraph Cloud
+APIs.
+
+This API requires an email address and password. If you have signed up with a
+social media provider, you may create a new password by selecting
+`Forgot Password` on the login page.
+
+### Cloud Endpoint
+
+```
+https://cerebro.cloud.dgraph.io/graphql
+```
+
+### API Command
+
+```graphql
+query Login($email: String!, $password: String!) {
+ login(email: $email, password: $password) {
+ token
+ }
+}
+```
+
+**Arguments**
+
+- `email`: your email address
+- `password`: your password
+
+### Example
+
+Below is an example request and response. The token below must be passed to all
+future API calls as a bearer token in the `Authorization` header.
+
+
+
+```bash
+curl 'https://cerebro.cloud.dgraph.io/graphql' \
+ -H 'Content-Type: application/json' \
+ --data-binary '{"query":"query Login($email: String!, $password: String!) {\n login(email: $email, password: $password) { \n token\n }\n}","variables":{"email":"","password":""}}' \
+ --compressed
+```
+
+```json
+{
+ "data": {
+ "login": {
+ "token": ""
+ }
+ }
+}
+```
+
+
+
+## Using the authentication token
+
+The token returned from the login API must be passed to all future API calls as
+a bearer token in the `Authorization` header.
diff --git a/dgraph/reference/cloud/cloud-api/backend.mdx b/dgraph/reference/cloud/cloud-api/backend.mdx
new file mode 100644
index 00000000..b1c824e2
--- /dev/null
+++ b/dgraph/reference/cloud/cloud-api/backend.mdx
@@ -0,0 +1,498 @@
+---
+title: Backend
+---
+
+## List Backends
+
+List backends that you have access to.
+
+
+ This API requires authentication, please see
+ [Authentication](./authentication) for instructions on issuing and passing a
+ JWT token to the API.
+
+
+### Cloud Endpoint
+
+```
+https://cerebro.cloud.dgraph.io/graphql
+```
+
+### API Command
+
+```graphql
+{
+ deployments {
+ uid
+ name
+ zone
+ url
+ owner
+ jwtToken
+ deploymentMode
+ deploymentType
+ lambdaScript
+ }
+}
+```
+
+### Example
+
+- `` is the JWT returned from [Authentication](./authentication).
+- `` is a base64 string that will be non-empty if you have saved
+ [Lambdas](./lambda) on your backend
+
+
+
+```bash
+#!/usr/bin/env bash
+
+CEREBRO_JWT=""
+
+curl 'https://cerebro.cloud.dgraph.io/graphql' \
+ -H 'Content-Type: application/json' \
+ -H "Authorization: Bearer ${CEREBRO_JWT}" \
+ --data-binary '{"query":"{\n deployments {\n uid\n name\n zone\n url\n owner\n jwtToken\n deploymentMode\n deploymentType\n lambdaScript\n }\n}","variables":{}}' \
+ --compressed
+```
+
+```json
+{
+ "data": {
+ "deployments": [
+ {
+ "uid": "0xf0ffe9",
+ "name": "testing",
+ "zone": "us-east-1",
+ "url": "polished-violet.us-east-1.aws.cloud.dgraph.io",
+ "owner": "486c69b4-e09b-48f9-a28a-86314fe232cd",
+ "jwtToken": "",
+ "deploymentMode": "graphql",
+ "deploymentType": "free",
+ "lambdaScript": ""
+ }
+ ]
+ }
+}
+```
+
+
+
+ For any `/admin` or `/admin/slash` requests to
+`https://`, you **must use the `` returned above
+in the `X-Auth-Token` header.** The Cerebro JWT is only used in the
+`Authorization` header for requests to `https://cerebro.cloud.dgraph.io/graphql`.
+
+## Deploy Backend
+
+Launch a new backend.
+
+
+ This API requires authentication, please see
+ [Authentication](./authentication) for instructions on issuing and passing a
+ JWT to the API.
+
+
+### Cloud Endpoint
+
+```
+https://cerebro.cloud.dgraph.io/graphql
+```
+
+### API Command
+
+```graphql
+mutation CreateDeployment($newDeployment: NewDeployment!) {
+ createDeployment(input: $newDeployment) {
+ uid
+ name
+ url
+ jwtToken
+ }
+}
+```
+
+**Arguments**
+
+- `newDeployment`: parameter object for new deployment
+- `newDeployment.name`: name of the deployment
+- `newDeployment.zone`: region to launch
+- `newDeployment.deploymentType`: type of deployment `(free|shared|dedicated)`
+
+### Example
+
+- `` is the JWT returned from [Authentication](./authentication).
+
+
+
+```bash
+#!/usr/bin/env bash
+
+CEREBRO_JWT=""
+
+curl 'https://cerebro.cloud.dgraph.io/graphql' \
+ -H 'Content-Type: application/json' \
+ -H "Authorization: Bearer ${CEREBRO_JWT}" \
+ --data-binary '{"query":"mutation CreateDeployment($deployment: NewDeployment!) {\n createDeployment(input: $deployment) {\n uid\n name\n url\n jwtToken\n }\n}","variables":{"deployment":{"name":"My New Deployment","zone":"us-east-1","deploymentType":"dedicated"}}}' \
+ --compressed
+```
+
+```json
+{
+ "data": {
+ "createDeployment": {
+ "uid": "0x42",
+ "name": "My New Deployment",
+ "url": "my-new-deployment.us-east-1.aws.cloud.dgraph.io",
+ "jwtToken": ""
+ }
+ }
+}
+```
+
+
+
+## Update Backend
+
+Update backend.
+
+
+ This API requires authentication, please see
+ [Authentication](./authentication) for instructions on issuing and passing a
+ JWT token to the API.
+
+
+### Cloud Endpoint
+
+```
+https://cerebro.cloud.dgraph.io/graphql
+```
+
+### API Command
+
+```graphql
+mutation UpdateDeployment($updateDeploymentInput: UpdateDeploymentInput!) {
+ updateDeployment(input: $updateDeploymentInput)
+}
+```
+
+**Arguments**
+
+- `updateDeploymentInput`: parameter object for update deployment
+- `updateDeploymentInput.uid` (required): deployment `uid`
+
+### Example
+
+- `` is the JWT returned from [Authentication](./authentication).
+- `` is the UID returned from [List Backends](#list-backends).
+
+
+
+```bash
+#!/usr/bin/env bash
+
+CEREBRO_JWT=""
+
+curl 'https://cerebro.cloud.dgraph.io/graphql' \
+ -H 'Content-Type: application/json' \
+ -H "Authorization: Bearer ${CEREBRO_JWT}" \
+ --data-binary '{"query":"mutation UpdateDeployment($dep: UpdateDeploymentInput!) {\n updateDeployment(input: $dep)\n}","variables":{"dep":{"uid":"","name":"My Deployment!"}}}' \
+ --compressed
+```
+
+```json
+{
+ "data": {
+ "updateDeployment": "Successfully Updated the backend"
+ }
+}
+```
+
+
+
+## Destroy Backend
+
+Destroy (i.e., delete) a backend by id.
+
+
+ This API requires authentication, please see
+ [Authentication](./authentication) for instructions on issuing and passing a
+ JWT token to the API.
+
+
+### Cloud Endpoint
+
+```
+https://cerebro.cloud.dgraph.io/graphql
+```
+
+### API Command
+
+```graphql
+mutation DeleteDeployment($deploymentID: String!) {
+ deleteDeployment(deploymentID: $deploymentID)
+}
+```
+
+**Arguments**
+
+- `deploymentID` (required): deployment `uid` returned from a
+ [List Backends](#list-backends) request
+
+### Example
+
+- `` is the JWT returned from [Authentication](./authentication).
+
+
+
+```bash
+#!/usr/bin/env bash
+
+CEREBRO_JWT=""
+
+curl 'https://cerebro.cloud.dgraph.io/graphql' \
+ -H 'Content-Type: application/json' \
+ -H "Authorization: Bearer ${CEREBRO_JWT}" \
+ --data-binary '{"query":"mutation DeleteDeployment($deploymentUid: String!) {\n deleteDeployment(deploymentID: $deploymentUid)\n}","variables":{"deploymentUid":""}}' \
+ --compressed
+```
+
+```
+{
+ "data": {
+ "deleteDeployment": "Successfully deleted the Deployment"
+ }
+}
+```
+
+
+
+## Restore Backends
+
+Restore into a backend by source backend ID.
+
+### Cloud Endpoint
+
+```bash
+https://${DEPLOYMENT_URL}/admin/slash
+```
+
+### API Command
+
+```graphql
+mutation ($uid: String!, $backupFolder: String, $backupNum: Int) {
+ restore(uid: $uid, backupFolder: $backupFolder, backupNum: $backupNum) {
+ response {
+ code
+ message
+ restoreId
+ }
+ errors {
+ message
+ }
+ }
+}
+```
+
+**Arguments**
+
+- `uid` (required): the deployment `uid` from List Backends
+- `backupFolder` (required): TODO
+- `backupNum` (required): TODO
+
+### Example
+
+
+
+```bash
+#!/usr/bin/env bash
+
+DEPLOYMENT_URL="polished-violet.us-east-1.aws.cloud.dgraph.io"
+DEPLOYMENT_JWT=""
+
+curl "https://${DEPLOYMENT_URL}/admin/slash" \
+ -H 'Content-Type: application/json' \
+ -H "X-Auth-Token: ${DEPLOYMENT_JWT}" \
+ --data-binary '{"query":"mutation($uid: String!, $backupFolder: String, $backupNum: Int) {\n restore(uid: $uid, backupFolder: $backupFolder, backupNum: $backupNum) {\n response {\n code\n message\n restoreId\n }, errors {\n message\n }\n}\n}","variables":{"uid":"","backupFolder":"","backupNum":}}' \
+ --compressed
+```
+
+```json
+{
+ "data": {
+ "restore": {
+ "errors": null,
+ "response": {
+ "code": "Success",
+ "message": "Restore operation started.",
+ "restoreId": 1
+ }
+ }
+ }
+}
+```
+
+
+## Restore Backend Status
+
+Retrieve the status of a restore operation.
+
+### Cloud Endpoint
+
+```bash
+https://${DEPLOYMENT_URL}/admin/slash
+```
+
+### API Command
+
+```graphql
+query ($restoreId: Int!) {
+ restoreStatus(restoreId: $restoreId) {
+ response {
+ status
+ errors
+ }
+ }
+}
+```
+
+**Arguments**
+
+- `restoreId` (required): the id of the restore operation returned from
+ [Restore Backends](#restore-backends) request
+
+### Example
+
+
+
+```bash
+#!/usr/bin/env bash
+
+DEPLOYMENT_URL="polished-violet.us-east-1.aws.cloud.dgraph.io"
+DEPLOYMENT_JWT=""
+
+curl "https://${DEPLOYMENT_URL}/admin/slash" \
+ -H 'Content-Type: application/json' \
+ -H "X-Auth-Token: ${DEPLOYMENT_JWT}" \
+ --data-binary '{"query":"query($restoreId: Int!) {\n restoreStatus(restoreId: $restoreId) {\n response {status errors}\n}\n}","variables":{"restoreId":1}}' \
+ --compressed
+```
+
+```json
+{
+ "data": {
+ "restoreStatus": {
+ "response": {
+ "errors": [],
+ "status": "OK"
+ }
+ }
+ }
+}
+```
+
+
+
+## Drop
+
+Drop (i.e., delete) all data in your backend.
+
+### Cloud Endpoint
+
+```bash
+https://${DEPLOYMENT_URL}/admin/slash
+```
+
+### API Command
+
+#### Drop Data
+
+```graphql
+mutation {
+ dropData(allData: true) {
+ response {
+ code
+ message
+ }
+ }
+}
+```
+
+#### Drop Schema
+
+```graphql
+mutation {
+ dropData(allDataAndSchema: true) {
+ response {
+ code
+ message
+ }
+ }
+}
+```
+
+#### Drop Types
+
+```graphql
+mutation ($types: [String!]) {
+ dropData(types: $types) {
+ response {
+ code
+ message
+ }
+ }
+}
+```
+
+**Arguments**
+
+- `types`: string array containing type Names
+
+#### Drop Fields
+
+```graphql
+mutation ($fields: [String!]) {
+ dropData(fields: $fields) {
+ response {
+ code
+ message
+ }
+ }
+}
+```
+
+**Arguments**
+
+- `fields`: string array containing field Names
+
+### Example
+
+
+
+```bash
+#!/usr/bin/env bash
+
+DEPLOYMENT_URL="polished-violet.us-east-1.aws.cloud.dgraph.io"
+DEPLOYMENT_JWT=""
+
+curl "https://${DEPLOYMENT_URL}/admin/slash" \
+ -H 'Content-Type: application/json' \
+ -H "X-Auth-Token: ${DEPLOYMENT_JWT}" \
+ --data-binary '{"query":"mutation {\n dropData(allDataAndSchema: true) {\n response { code message }\n}\n}","variables":{}}' \
+ --compressed
+```
+
+```json
+{
+ "data": {
+ "dropData": {
+ "response": {
+ "code": "Success",
+ "message": "Done"
+ }
+ }
+ }
+}
+```
+
+
diff --git a/dgraph/reference/cloud/cloud-api/backup.mdx b/dgraph/reference/cloud/cloud-api/backup.mdx
new file mode 100644
index 00000000..dbc4c363
--- /dev/null
+++ b/dgraph/reference/cloud/cloud-api/backup.mdx
@@ -0,0 +1,179 @@
+---
+title: Backup
+---
+
+
+ Backup feature is only available for Dedicated Instances. This feature is not
+ available for the Free and Shared Instances.
+
+
+## Periodic Backups
+
+Periodic Backups are created at a given schedule that by default is:
+
+- Full Backup every week
+- Incremental Backups every 4 hours
+
+You can trigger the Backup on-demand directly from your Dgraph Cloud Dashboard,
+simply go to Admin>Settings>Backups and click the "Create Backup" button on the
+top left.
+
+In case you would like to change your default Backup schedule please contact us
+and we will be happy to set you up.
+
+## List Backups
+
+List all backups of the current backend.
+
+### Cloud Endpoint
+
+```bash
+https://${DEPLOYMENT_URL}/admin/slash
+```
+
+### API Command
+
+```graphql
+query {
+ listBackups {
+ response {
+ type
+ backupNum
+ folder
+ timestamp
+ }
+ errors {
+ message
+ }
+ }
+}
+```
+
+### Example
+
+
+
+```bash
+#!/usr/bin/env bash
+
+DEPLOYMENT_URL="polished-violet.us-east-1.aws.cloud.dgraph.io"
+DEPLOYMENT_JWT=""
+
+curl "https://${DEPLOYMENT_URL}/admin/slash" \
+ -H 'Content-Type: application/json' \
+ -H "X-Auth-Token: ${DEPLOYMENT_JWT}" \
+ --data-binary '{"query":"{\n listBackups {\n response {\n type\n backupNum\n folder\n timestamp\n }, errors {\n message\n }\n} \n}","variables":{}}' \
+ --compressed
+```
+
+```json
+{
+ "data": {
+ "listBackups": {
+ "errors": [],
+ "response": [
+ [
+ {
+ "backupNum": 1,
+ "folder": "2021-15",
+ "timestamp": "2021-04-15T18:00:58+0000",
+ "type": "full"
+ },
+ {
+ "backupNum": 2,
+ "folder": "2021-15",
+ "timestamp": "2021-04-15T18:04:29+0000",
+ "type": "incremental"
+ }
+ ]
+ ]
+ }
+ }
+}
+```
+
+
+
+## Export Data
+
+Export data from your backend.
+
+### Cloud Endpoint
+
+```bash
+https://${DEPLOYMENT_URL}/admin/slash
+```
+
+### API Command
+
+```graphql
+mutation {
+ export {
+ signedUrls
+ }
+}
+```
+
+### Example
+
+
+
+```bash
+#!/usr/bin/env bash
+
+DEPLOYMENT_URL="polished-violet.us-east-1.aws.cloud.dgraph.io"
+DEPLOYMENT_JWT=""
+
+curl "https://${DEPLOYMENT_URL}/admin/slash" \
+ -H 'Content-Type: application/json' \
+ -H "X-Auth-Token: ${DEPLOYMENT_JWT}" \
+ --data-binary '{"query":"mutation {\n export {\n signedUrls\n }\n }","variables":{}}' \
+ --compressed
+```
+
+```json
+{
+ "data": {
+ "export": {
+ "signedUrls": ["", "", ""]
+ }
+ }
+}
+```
+
+
+
+## Import Data
+
+Import your data back using Dgraph
+[Live Loader](./cloud/admin/import-export#importing-data-with-live-loader)
+(requires Docker).
+
+### Shell Command
+
+Live loader command (via Docker):
+
+```sh
+docker run -it --rm -v /tmp/file:/tmp/g01.json.gz dgraph/dgraph:v21.03-slash \
+ dgraph live --slash_grpc_endpoint=${DEPLOYMENT_URL} -f /tmp/g01.json.gz -t ${DEPLOYMENT_JWT}
+```
+
+### Example
+
+
+
+```bash
+#!/usr/bin/env bash
+
+DEPLOYMENT_URL="lively-dream.grpc.us-east-1.aws.cloud.dgraph.io"
+DEPLOYMENT_JWT=""
+
+docker run -it --rm -v /users/dgraph/downloads:/tmp dgraph/dgraph:v21.03-slash \
+ dgraph live --slash_grpc_endpoint=${DEPLOYMENT_URL}:443 -f /tmp/1million.rdf.gz -t ${DEPLOYMENT_JWT}
+```
+
+```json
+
+```
+
+
diff --git a/dgraph/reference/cloud/cloud-api/index.mdx b/dgraph/reference/cloud/cloud-api/index.mdx
new file mode 100644
index 00000000..d9e99a22
--- /dev/null
+++ b/dgraph/reference/cloud/cloud-api/index.mdx
@@ -0,0 +1,3 @@
+---
+title: Dgraph Cloud API
+---
diff --git a/dgraph/reference/cloud/cloud-api/lambda.mdx b/dgraph/reference/cloud/cloud-api/lambda.mdx
new file mode 100644
index 00000000..7a9ca3a8
--- /dev/null
+++ b/dgraph/reference/cloud/cloud-api/lambda.mdx
@@ -0,0 +1,247 @@
+---
+title: Lambda
+---
+
+# Introduction
+
+For Shared and Free backends, a Dgraph lambda script is uniquely identified by a
+deployment id associated with the backend. You can identify your backend and the
+associated deployment id by using the
+[List Backends API](./backend#list-backends). In case of a Dedicated, and
+multi-tenant backend, an additional key, the tenant id is also required to
+uniquely identify a lambda script. This tenant id key is ignored when used in
+the context of Shared and Free backends.
+
+As a first step, you will need to identify your backend.
+
+## List Deployment and Get Lambda Script
+
+Use the [List Backends API](./backend#list-backends) to identify your backend,
+as well as get the lambda script deployed. In order to list the backends, you
+will need to pass a Bearer token as an `Authorization` header. This token is
+generated by logging in via the [Login query](./authentication#login)
+first.
+
+### Cloud Endpoint
+
+```
+https://cerebro.cloud.dgraph.io/graphql
+```
+
+### API Command
+
+```graphql
+query {
+ deployments {
+ uid
+ name
+ zone
+ subdomain
+ url
+ tenantID
+ lambdaScript
+ }
+}
+```
+
+**Arguments**
+
+None
+
+### Example
+
+
+
+```bash
+#!/usr/bin/env bash
+
+CEREBRO_JWT=""
+
+curl "https://cerebro.cloud.dgraph.io/graphql" \
+ -H 'Content-Type: application/json' \
+ -H "Authorization: Bearer ${CEREBRO_JWT}" \
+ --data-binary '{"query":"query{\n deployments {\n uid\n name\n zone\n subdomain\n url\n tenantID\n lambdaScript\n }\n}","variables":{}}' \
+ --compressed
+```
+
+```json
+{
+ "data": {
+ "deployments": [
+ {
+ "uid": "0x6238",
+ "name": "OrderJourney",
+ "zone": "us-west-2",
+ "subdomain": "vacuous-wash",
+ "url": "vacuous-wash.us-west-2.aws.cloud.dgraph.io",
+ "tenantID": 107,
+ "lambdaScript": "Ly8gWW91IGNhbiB0eXBlL3Bhc3RlIHlvdXIgc2NyaXB0IGhlcmUKY29uc3QgTmFtZVJlc29sdmVyID0gKHtwYXJlbnQ6IHtuYW1lfX0pID0+IGBNeSBuYW1lIGlzICR7bmFtZX0uYAoKc2VsZi5hZGRHcmFwaFFMUmVzb2x2ZXJzKHsKICAgICJQZXJzb24ubmFtZSI6IE5hbWVSZXNvbHZlcgp9KQ=="
+ }
+ ]
+ }
+}
+```
+
+
+
+The `uid` field in the response of the query is to be used as the deployment id.
+In the above example, the deployment id for the backend with name "OrderJourney"
+is "0x6238". The field `lambdaScript` contains the lambda script in the form of
+a Base64 encoded string.
+
+#### Decode the Base64 encoded `lambdaScript`
+
+In order to decode the Base64 encoded string into the actual lambda code, please
+use the command as shown below.
+
+```bash
+$ echo "Ly8gWW91IGNhbiB0eXBlL3Bhc3RlIHlvdXIgc2NyaXB0IGhlcmUKY29uc3QgTmFtZVJlc29sdmVyID0gKHtwYXJlbnQ6IHtuYW1lfX0pID0+IGBNeSBuYW1lIGlzICR7bmFtZX0uYAoKc2VsZi5hZGRHcmFwaFFMUmVzb2x2ZXJzKHsKICAgICJQZXJzb24ubmFtZSI6IE5hbWVSZXNvbHZlcgp9KQ==" | base64 -d
+```
+
+**Output**
+
+```js
+// You can type/paste your script here
+const NameResolver = ({ parent: { name } }) => `My name is ${name}.`
+
+self.addGraphQLResolvers({
+ "Person.name": NameResolver,
+})
+```
+
+## Lambda Logs
+
+You can fetch the logs for your lambda by using the `getLambdaLogs` query.
+
+### Cloud Endpoint
+
+```
+https://cerebro.cloud.dgraph.io/graphql
+```
+
+### API Command
+
+```graphql
+query GetLambdaLogs($lambdaLogsInput: LambdaLogsInput!) {
+ getLambdaLogs(input: $lambdaLogsInput)
+}
+```
+
+**Arguments**
+
+- `lambdaLogsInput`: a LambdaLogsInput object
+- `lambdaLogsInput.deploymentID`: the deployment UID returned from
+  [List Backends](./backend#list-backends)
+- `lambdaLogsInput.tenantID`: In case of a multi-tenant, and dedicated backend,
+ you will need to pass the tenant Id as well
+- `lambdaLogsInput.start`: start time
+- `lambdaLogsInput.end`: end time
+
+### Example
+
+
+
+```bash
+#!/usr/bin/env bash
+
+CEREBRO_JWT=""
+
+curl "https://cerebro.cloud.dgraph.io/graphql" \
+ -H 'Content-Type: application/json' \
+ -H "Authorization: Bearer ${CEREBRO_JWT}" \
+ --data-binary '{"query":"query GetLambdaLogs($input: LambdaLogsInput!) {\n getLambdaLogs(input: $input)\n}","variables":{"input":{"deploymentID":"0xf0ffe9"}}}' \
+ --compressed
+```
+
+```json
+{
+ "data": {
+ "getLambdaLogs": [
+ "2021-04-16 19:03:54.009209524 +0000 UTC Server Listening on port 8686!",
+ "2021-04-16 19:03:54.202216548 +0000 UTC Server Listening on port 8686!",
+ "2021-04-16 19:03:54.51171317 +0000 UTC Server Listening on port 8686!",
+ "2021-04-16 19:03:54.707496343 +0000 UTC Server Listening on port 8686!"
+ ]
+ }
+}
+```
+
+
+
+## Update or Delete Lambda
+
+You can update or delete your lambda by using the `updateLambda` mutation. You
+will need to pass a deployment id to uniquely identify your lambda. In case your
+backend is multi-tenant, you will need to pass the tenant Id as well.
+
+In order to update your lambda, you would need to convert your lambda script
+into a Base64 encoded format and send it as the `lambdaScript` argument. In
+order to delete your lambda, you can simply send in an empty string in the
+`lambdaScript` argument.
+
+### Cloud Endpoint
+
+```
+https://cerebro.cloud.dgraph.io/graphql
+```
+
+### API Command
+
+```graphql
+mutation updateLambda($input: UpdateLambdaInput!) {
+ updateLambda(input: $input)
+}
+```
+
+**Arguments**
+
+- `updateLambdaInput`: an UpdateLambdaInput object
+- `updateLambdaInput.deploymentID`: the deployment UID returned from
+  [List Backends](./backend#list-backends)
+- `updateLambdaInput.tenantID`: the tenant ID in case your backend is a
+ dedicated, and multi-tenant backend. In case you have any other type of
+ backend, you can leave it as the default `0` value
+- `updateLambdaInput.lambdaScript`: the base64-encoded Javascript string
+ containing your [Lambda Resolver](./lambda-overview)
+
+### Example
+
+1. Create your [Lambda Resolver](./lambda-overview) script
+
+```js
+//your lambda resolver
+```
+
+2. Base64 encode your script
+
+```bash
+$ echo "//your lambda resolver" | base64
+
+Ly95b3VyIGxhbWJkYSByZXNvbHZlcgo=
+```
+
+3. Send in a cURL request
+
+
+
+```bash
+#!/usr/bin/env bash
+
+CEREBRO_JWT=""
+
+curl "https://cerebro.cloud.dgraph.io/graphql" \
+ -H 'Content-Type: application/json' \
+ -H "Authorization: Bearer ${CEREBRO_JWT}" \
+ --data-binary '{"query":"mutation updateLambda($input: UpdateLambdaInput!){\n updateLambda(input: $input)\n}","variables":{"input":{"deploymentID":"0x6238","tenantID":0,"lambdaScript":"Ly95b3VyIGxhbWJkYSByZXNvbHZlcgo="}}}' \
+ --compressed
+```
+
+```json
+{
+ "data": {
+ "updateLambda": "Successfully added the lambda function"
+ }
+}
+```
+
+
diff --git a/dgraph/reference/cloud/cloud-api/overview.mdx b/dgraph/reference/cloud/cloud-api/overview.mdx
new file mode 100644
index 00000000..e82381f6
--- /dev/null
+++ b/dgraph/reference/cloud/cloud-api/overview.mdx
@@ -0,0 +1,69 @@
+---
+title: Overview
+---
+
+Dgraph Cloud now includes an API so you can programmatically launch and manage
+your Cloud backends.
+
+The majority of these APIs use `https://cerebro.cloud.dgraph.io/graphql` as the
+primary endpoint, and will require you to log in with a username and password.
+Please see [Authentication](./authentication) for instructions on generating a
+JWT token.
+
+### Commands
+
+Please see the following topics:
+
+- [Authentication](./authentication) describes how to authenticate with the
+ Dgraph Cloud API.
+- [Backend](./backend) lists commands related to backend.
+- [Backup](./backup) lists commands related to backup.
+- [Lambda](./lambda) lists commands related to Lambda.
+- [Schema](./schema) lists commands related to schema.
+
+## Understanding Headers used across the APIs
+
+Dgraph Cloud has two layers of security: Dgraph Cloud Authentication and Dgraph
+Access Control Lists Authentication. The following section introduces the usage
+of the headers involved in the authentication process. These include the
+`Dg-Auth`, `X-Auth-Token`, `Authorization`, and `X-Dgraph-AccessToken` headers.
+
+### Dgraph Cloud Authentication
+
+The `Dg-Auth` or `X-Auth-Token` headers are for Dgraph Cloud’s API key
+authentication (where you pass in any API key you would generate from the
+["API Keys" tab](https://cloud.dgraph.io/_/settings?tab=api-keys) on the
+Settings page). The API key passed can be one of two kinds: Admin API key or
+Client API key. The tokens generated from the admin API grant access to the
+`/admin` or `/admin/slash` endpoints to perform schema alterations and similar
+operations. The tokens generated via the Client API key provides access to the
+`/graphql` endpoint to run GraphQL queries and mutations.
+
+Dgraph Cloud also offers the Dgraph Cloud API, hosted at
+[this endpoint](https://cerebro.cloud.dgraph.io/graphql), that helps to automate
+tasks such as deployment of a lambda. In order to use this API, users need to
+pass an `Authorization` header. In order to generate this header, the user must
+first [Authenticate](./authentication) and generate a token. The token is then
+set in `Authorization` header as a Bearer token (e.g. `Bearer {token}`).
+
+
+ The `Dg-Auth`, `X-Auth-Token` and the `Authorization` headers are relevant to
+ all types of backends, including Free, Shared, and Dedicated Backends.
+
+
+### Dgraph Access Control Lists Authentication
+
+The `X-Dgraph-AccessToken` header is used for accessing backends using Dgraph’s
+Access Control Lists or the Multitenancy feature. This lets you pass in an
+access JWT generated via a login mutation for a Dgraph user from the access
+control list permissions and/or log into a specific namespace with
+multi-tenancy. The Login mutation relevant for ACL is documented
+[here](https://dgraph.io/docs/enterprise-features/access-control-lists/#logging-in).
+
+If you’re using ACLs or multitenancy, then you’ll need to set the
+`X-Dgraph-AccessToken` with a JWT token to access your backend.
+
+
+ The `X-Dgraph-AccessToken` header is relevant only for the Dedicated backends.
+ Users with Free or Shared backends can ignore this header.
+
diff --git a/dgraph/reference/cloud/cloud-api/schema.mdx b/dgraph/reference/cloud/cloud-api/schema.mdx
new file mode 100644
index 00000000..f0df69ea
--- /dev/null
+++ b/dgraph/reference/cloud/cloud-api/schema.mdx
@@ -0,0 +1,146 @@
+---
+title: Schema
+---
+
+## Get Schema
+
+Fetch the schema from your backend.
+
+### Cloud Endpoint
+
+```bash
+https://${DEPLOYMENT_URL}/admin
+```
+
+### API Command
+
+```graphql
+{
+ getGQLSchema {
+ schema
+ generatedSchema
+ }
+}
+```
+
+### Example
+
+
+
+```bash
+#!/usr/bin/env bash
+
+DEPLOYMENT_URL="polished-violet.us-east-1.aws.cloud.dgraph.io"
+DEPLOYMENT_JWT=""
+
+curl "https://${DEPLOYMENT_URL}/admin" \
+ -H "Content-Type: application/json" \
+ -H "X-Auth-Token: ${DEPLOYMENT_JWT}" \
+ --data-binary '{"query":"{\n getGQLSchema {\n schema\n generatedSchema\n }\n}","variables":{}}' \
+ --compressed
+```
+
+```json
+{
+ "data": {
+ "getGQLSchema": {
+ "schema": "type Person { name: String! }",
+ "generatedSchema": ""
+ }
+ },
+ "extensions": {
+ "tracing": {
+ "version": 1,
+ "startTime": "2021-04-15T19:58:33.412544782Z",
+ "endTime": "2021-04-15T19:58:33.412851891Z",
+ "duration": 307129,
+ "execution": {
+ "resolvers": [
+ {
+ "path": ["getGQLSchema"],
+ "parentType": "Query",
+ "fieldName": "getGQLSchema",
+ "returnType": "GQLSchema",
+ "startOffset": 115909,
+ "duration": 159961,
+ "dgraph": [
+ {
+ "label": "query",
+ "startOffset": 118110,
+ "duration": 53165
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+}
+```
+
+
+
+## Update Schema
+
+Update the schema in your backend.
+
+### Cloud Endpoint
+
+```bash
+https://${DEPLOYMENT_URL}/admin
+```
+
+### API Command
+
+```graphql
+mutation ($schema: String!) {
+ updateGQLSchema(input: { set: { schema: $schema } }) {
+ gqlSchema {
+ schema
+ }
+ }
+}
+```
+
+**Arguments**
+
+- `schema`: your desired schema string in GraphQL format
+
+### Example
+
+
+
+```bash
+#!/usr/bin/env bash
+
+DEPLOYMENT_URL="polished-violet.us-east-1.aws.cloud.dgraph.io"
+DEPLOYMENT_JWT=""
+
+curl "https://${DEPLOYMENT_URL}/admin" \
+ -H "Content-Type: application/json" \
+ -H "X-Auth-Token: ${DEPLOYMENT_JWT}" \
+ --data-binary '{"query":"mutation($sch: String!) {\n updateGQLSchema(input: { set: { schema: $sch } })\n {\n gqlSchema {\n schema\n }\n }\n}","variables":{"sch": "type Person { name: String! }"}}' \
+ --compressed
+```
+
+```json
+{
+ "data": {
+ "updateGQLSchema": {
+ "gqlSchema": {
+ "schema": "type Person { name: String! }"
+ }
+ }
+ },
+ "extensions": {
+ "tracing": {
+ "version": 1,
+ "startTime": "2021-04-15T19:53:16.283198298Z",
+ "endTime": "2021-04-15T19:53:16.286478152Z",
+ "duration": 3279886
+ }
+ }
+}
+```
+
+
diff --git a/dgraph/reference/cloud/cloud-multitenancy.mdx b/dgraph/reference/cloud/cloud-multitenancy.mdx
new file mode 100644
index 00000000..96f8e087
--- /dev/null
+++ b/dgraph/reference/cloud/cloud-multitenancy.mdx
@@ -0,0 +1,83 @@
+---
+title: Multi-tenancy in Dgraph Cloud
+---
+
+Multi-Tenancy in Dgraph Cloud is a dedicated cluster feature. It is not
+supported in free/shared clusters. Multi-tenancy is built upon Access Control
+Lists (ACL), and enables multiple tenants to share a Dgraph cluster using unique
+namespaces. The tenants are logically separated, and their data lies in the same
+p directory. Each namespace has a group
+[guardian(admin)](https://dgraph.io/docs/enterprise-features/multitenancy/#guardians-of-the-galaxy),
+which has root access to that namespace.
+[Read more about Multi-Tenancy in Dgraph](https://dgraph.io/docs/enterprise-features/multitenancy/)
+
+### Enabling Multi-Tenancy in Dgraph Cloud
+
+In order to enable multi-tenancy you need to do the following -
+
+1. **Enable ACL**
+
+ - If you are launching a dedicated cluster for the first time, you need to
+ click on the checkbox for ACLs under `Additional Settings`.
+ - For existing dedicated cluster, you need to go to Settings>Modify Backend
+ and click on the checkbox for ACLs under `Additional Settings`. Note - This
+ restarts your backend hence causing some downtime.
+ 
+
+2. **Create namespaces**
+
+ - Go to `Namespaces` page under `Admin` section on the sidebar to the left.
+ - Click on `Create New` button, enter the description of the namespace and
+ click `Create`.
+ - You can view the created namespaces on the `Namespaces` page.
+ - Please note that Tenant 0 cannot be deleted.
+ 
+
+3. **Toggle namespaces**
+ - Now you should be able to see a select box next to selected backend select
+ box on the navbar.
+ - You can switch between namespaces with the selection.
+ - Also, each namespace has its own Schema, ACLs & Lambdas. Also has its own
+ view of Dgraph Studio, API Explorer, DQL Page.
+ - After switching namespaces, you can update view/update the namespace's
+ schema and query/mutate as well.
+ 
+
+### Accessing namespaces via client
+
+Namespaces are protected via ACLs. You need to create a user with username &
+password in the namespace. This can be done using `ACLs` page under `Admin`
+section on the sidebar to the left.
+
+**Example - we want to give all read access to a user for a namespace**
+
+1. First, you should create an ACL group and select all predicates that you want
+ to provision read access. 
+2. Next, create a user with access to the created group. While creating the
+ user, you will be asked to provide with a username & password. Keep it handy.
+ 
+3. You can go to the API Explorer and use the login mutation to fetch the API
+   access token. Click on the admin radio button at the top to query the admin
+   endpoint. Use the mutation shown below to get the access token. (Note -
+ NamespaceID can be found on the namespace page)
+
+ ```
+ mutation MyMutation {
+ login(namespace: 1, password: "password", userId: "userID") {
+ response {
+ accessJWT
+ refreshJWT
+ }
+ }
+ }
+
+ ```
+
+4. You have the access token which you need to pass it in `X-Dgraph-AccessToken`
+ header.
+5. On client side you will need to use the above mutation programmatically to
+ generate the access token for your namespace.
+6. If you are using a [dgraph client](./dql/clients) you need to set the
+ username & password and the client handles fetching the token & refresh logic
+ for you. Note - Most of the dgraph clients will have a special method to
+ login to a specific namespace.
diff --git a/dgraph/reference/cloud/index.mdx b/dgraph/reference/cloud/index.mdx
new file mode 100644
index 00000000..720e26e8
--- /dev/null
+++ b/dgraph/reference/cloud/index.mdx
@@ -0,0 +1,3 @@
+---
+title: Dgraph Cloud
+---
diff --git a/dgraph/reference/cloud/introduction.mdx b/dgraph/reference/cloud/introduction.mdx
new file mode 100644
index 00000000..3d349943
--- /dev/null
+++ b/dgraph/reference/cloud/introduction.mdx
@@ -0,0 +1,99 @@
+---
+title: Dgraph Cloud Overview
+---
+
+## Dgraph
+
+Designed from day one to be distributed for scale and speed, **Dgraph** is the
+native Graph database with native GraphQL support. It is open-source, scalable,
+distributed, highly-available, and lightning fast.
+
+Dgraph is different from other graph databases in a number of ways, including:
+
+- **Distributed Scale**: _Built from day 1 to be distributed, to handle
+ larger data sets._
+
+- **GraphQL Support**: _GraphQL is built in to make data access simple
+ and standards-compliant. Unlike most GraphQL solutions, no resolvers are
+ needed - Dgraph resolves queries automatically through graph navigation._
+
+- **Fully Transactional and ACID Compliant**: _Dgraph satisfies demanding
+ OLTP workloads that require frequent inserts and updates._
+
+- **Language support & Text Search**: _Full-text searching is included
+ and strings can be expressed in multiple languages_
+
+- **Geolocation data and geo queries**: _Dgraph can store Points and Shapes
+  and queries can use near, within, contains, intersects geo functions_
+
+More details at [Dgraph Database Overview](./dgraph-overview)
+
+## Dgraph Cloud
+
+Dgraph Cloud gives you the power of Dgraph database including performance,
+high-availability, horizontal scalability and the support of GraphQL for rapid
+application development in a hosted and fully-managed environment. Dgraph Cloud
+lets you focus on building apps, not managing infrastructure.
+
+### Dgraph Cloud Cluster Types
+
+- **Shared Instance**: Dgraph Cloud with
+ [shared instances](https://cloud.dgraph.io/pricing?type=shared) is a fast and
+ easy way to get started with GraphQL, and does not require any graph database
+ knowledge to start and run. Shared instances run in a common database using
+ Dgraph multi-tenancy. Your data is protected but you share resources and will
+ have limited scale.
+
+- **Dedicated instances** run on their own dedicated hardware to ensure
+ consistent performance. This option extends the capabilities of the lower-cost
+ shared instances to support enterprise, production workloads, and includes a
+ high availability option.
+
+## Key features
+
+| Feature | Notes |
+| :-------------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Production-ready | Dgraph Cloud is built to meet the needs of your business as it grows with built-in authorization, encryption at rest, TLS, incremental backups, and more. |
+| Scale and expand your app without rebuilding your backend | Dgraph Cloud stores and distributes data to optimize the execution of GraphQL traversals, joins, and retrievals. Dgraph natively parses and executes GraphQL, achieving great performance and the ability to scale horizontally up to terabytes of data. |
+| A high-performance, graph-first database | Dgraph Cloud runs Dgraph database, which is built to support the needs of modern apps with lightning-fast queries at any depth. |
+| Custom logic | Use JavaScript to add programmatic custom logic on the backend, adding power to your apps without sacrificing client-side performance. |
+| Power your app with native GraphQL support | Dgraph is built for graph data, so you don’t need to configure and maintain a cumbersome GraphQL layer over a traditional relational database. |
+| Evolve your schema without downtime | When it comes time to deploy a new schema, you can do that in seconds, not hours. |
+| GraphQL-based authorization and management | GraphQL is used throughout Dgraph Cloud, so you don’t need to use another tool or learn another syntax to handle user authorization or database administration tasks such as schema changes and data exports. |
+| Work with the Open Source ecosystem | Because Dgraph is open-source, your app relies on a codebase that you can contribute to, not an opaque “black box”. |
+
+## Next steps
+
+To learn more about how Dgraph Cloud makes it easier to develop apps, create a
+trial account at [Dgraph Cloud](https://cloud.dgraph.io) and try the
+[Introduction to GraphQL](https://dgraph.io/tour/graphqlintro/2/) tutorial to
+define a GraphQL schema, insert and query data in just a few minutes.
+
+## Recommended Reading
+
+Please see the following topics to learn more about how to use Dgraph Cloud:
+
+- [Administering your Backend](./admin/_index) covers topics such as how to
+ programmatically set your schema, and import or export your data
+  - [Authentication](./admin/authentication) will guide you in creating an API
+ token. Since all admin APIs require an auth token, this is a good place to
+ start.
+ - [Schema](./admin/schema) describes how to programmatically query and update
+ your GraphQL schema.
+  - [Importing and Exporting Data](./admin/import-export) is a guide for exporting
+ your data from a Dgraph Cloud backend, and how to import it into another
+ cluster
+ - [Dropping Data](./admin/drop-data) will guide you through dropping all data
+ from your Dgraph Cloud backend.
+ - [Switching Schema Modes](./admin/schema-modes) will guide you through
+ changing Dgraph Cloud schema mode.
+- [Dgraph Cloud API](./cloud-api/overview) Dgraph Cloud now includes an API so
+  you can programmatically manage your backends
+- [Schema](./cloud-api/schema) lists commands related to schema.
+
+You might also be interested in:
+
+- [Dgraph GraphQL Schema Reference](./graphql/schema), which lists all the types
+ and directives supported by Dgraph
+- [Dgraph GraphQL API Reference](./graphql-clients), which serves as a guide to
+ using your new `/graphql` endpoint
diff --git a/dgraph/reference/cloud/migrating-from-hosted-dgraph.mdx b/dgraph/reference/cloud/migrating-from-hosted-dgraph.mdx
new file mode 100644
index 00000000..fde93a65
--- /dev/null
+++ b/dgraph/reference/cloud/migrating-from-hosted-dgraph.mdx
@@ -0,0 +1,14 @@
+---
+title: Migrating from Self-Managed Dgraph
+---
+
+Dgraph Cloud is compatible with the majority of Dgraph features, so you can
+easily migrate your existing Dgraph-powered app over to Dgraph Cloud.
+
+### To migrate data from self-managed Dgraph to Dgraph Cloud
+
+1. Create a new backend. You can do this using the Dgraph Cloud interface.
+2. (optional) Switch your backend to
+ [flexible mode](/admin/schema-modes#flexible-mode).
+3. Connect to your backend with your favorite client. To learn more, see
+ [Connecting from Dgraph Clients](/advanced-queries#connecting-from-dgraph-clients).
diff --git a/dgraph/reference/cloud/provision-backend.mdx b/dgraph/reference/cloud/provision-backend.mdx
new file mode 100644
index 00000000..c5fcc9af
--- /dev/null
+++ b/dgraph/reference/cloud/provision-backend.mdx
@@ -0,0 +1,37 @@
+---
+title: Provision a backend
+---
+
+### Before you begin
+
+Log in to [Dgraph Cloud](https://cloud.dgraph.io) using **Sign in with Google**,
+**Sign in with GitHub** or any other email account that you prefer to use.
+
+### Provision a backend
+
+1. After you have signed up and verified your email, log into
+ [Dgraph Cloud](https://cloud.dgraph.io/) and you'll arrive at the dashboard
+ screen.
+
+ 
+
+2. Click the **Launch New Backend** button and you'll be taken to a screen to
+ enter the details of the backend.
+
+Name the backend, optionally set a subdomain (if left blank, Dgraph Cloud picks
+a random domain for you), and then pick a region to deploy your GraphQL backend
+to.
+
+
+
+3. Click **Launch** and your backend will be deployed in a few seconds.
+
+
+
+That's it! You now have a running Dgraph backend. Time to build an app.
+
+The URL listed in "GraphQL Endpoint" is the URL at which Dgraph Cloud will serve
+data to your app.
+
+You can copy it at any time to use in a GraphQL client application.
diff --git a/dgraph/reference/deploy/admin/data-compression.mdx b/dgraph/reference/deploy/admin/data-compression.mdx
new file mode 100644
index 00000000..dece16b2
--- /dev/null
+++ b/dgraph/reference/deploy/admin/data-compression.mdx
@@ -0,0 +1,40 @@
+---
+title: Data compression on Disk
+---
+
+Dgraph Alpha lets you configure the compression of data on disk using the
+`--badger` superflag's `compression` option. You can choose between the
+[Snappy](https://github.com/golang/snappy) and
+[Zstandard](https://github.com/facebook/zstd) compression algorithms, or choose
+not to compress data on disk.
+
+
+ This option replaces the `--badger.compression_level` and
+ `--badger.compression` options used in earlier Dgraph versions.
+
+
+The following disk compression settings are available:
+
+| Setting | Notes |
+| ------------ | -------------------------------------------------------------------- |
+| `none` | Data on disk will not be compressed. |
+| `zstd:level` | Use Zstandard compression, with a compression level specified (1-3). |
+| `snappy` | Use Snappy compression (this is the default value). |
+
+For example, you could choose to use Zstandard compression with the highest
+compression level using the following command:
+
+```sh
+dgraph alpha --badger compression=zstd:3
+```
+
+This compression setting (Zstandard, level 3) is more CPU-intensive than other
+options, but offers the highest compression ratio. To change back to the default
+compression setting, use the following command:
+
+```sh
+dgraph alpha --badger compression=snappy
+```
+
+Using this compression setting (Snappy) provides a good compromise between the
+need for a high compression ratio and efficient CPU usage.
diff --git a/dgraph/reference/deploy/admin/dgraph-administration.mdx b/dgraph/reference/deploy/admin/dgraph-administration.mdx
new file mode 100644
index 00000000..ad4b6867
--- /dev/null
+++ b/dgraph/reference/deploy/admin/dgraph-administration.mdx
@@ -0,0 +1,319 @@
+---
+title: Dgraph Administration
+---
+
+Each Dgraph Alpha exposes various administrative (admin) endpoints both over
+HTTP and GraphQL, for example endpoints to export data and to perform a clean
+shutdown. All such admin endpoints are protected by three layers of
+authentication:
+
+1. IP White-listing (use the `--security` superflag's `whitelist` option on
+ Dgraph Alpha to whitelist IP addresses other than localhost).
+2. Poor-man's auth, if Dgraph Alpha is started with the `--security` superflag's
+ `token` option, then you should pass the token as an `X-Dgraph-AuthToken`
+ header while making the HTTP request.
+3. Guardian-only access, if ACL is enabled. In this case you should pass the
+ ACL-JWT of a Guardian user using the `X-Dgraph-AccessToken` header while
+ making the HTTP request.
+
+An admin endpoint is any HTTP endpoint which provides admin functionality. Admin
+endpoints usually start with the `/admin` path. The current list of admin
+endpoints includes the following:
+
+- `/admin`
+- `/admin/config/cache_mb`
+- `/admin/draining`
+- `/admin/shutdown`
+- `/admin/schema`
+- `/admin/schema/validate`
+- `/alter`
+- `/login`
+
+There are a few exceptions to the general rule described above:
+
+- `/login`: This endpoint logs-in an ACL user, and provides them with a JWT.
+ Only IP Whitelisting and Poor-man's auth checks are performed for this
+ endpoint.
+- `/admin`: This endpoint provides GraphQL queries/mutations corresponding to
+ the HTTP admin endpoints. All of the queries/mutations on `/admin` have all
+ three layers of authentication, except for `login (mutation)`, which has the
+ same behavior as the above HTTP `/login` endpoint.
+
+## Whitelisting Admin Operations
+
+By default, admin operations can only be initiated from the machine on which the
+Dgraph Alpha runs.
+
+You can use the `--security` superflag's `whitelist` option to specify a
+comma-separated whitelist of IP addresses, IP ranges, CIDR ranges, or hostnames
+for hosts from which admin operations can be initiated.
+
+**IP Address**
+
+```sh
+dgraph alpha --security whitelist=127.0.0.1 ...
+```
+
+This would allow admin operations from hosts with IP 127.0.0.1 (i.e., localhost
+only).
+
+**IP Range**
+
+```sh
+dgraph alpha --security whitelist=172.17.0.0:172.20.0.0,192.168.1.1 ...
+```
+
+This would allow admin operations from hosts with IP between `172.17.0.0` and
+`172.20.0.0` along with the server which has IP address as `192.168.1.1`.
+
+**CIDR Range**
+
+```sh
+dgraph alpha --security whitelist=172.17.0.0/16,172.18.0.0/15,172.20.0.0/32,192.168.1.1/32 ...
+```
+
+This would allow admin operations from hosts that match the CIDR range
+`172.17.0.0/16`, `172.18.0.0/15`, `172.20.0.0/32`, or `192.168.1.1/32` (the same
+range as the IP Range example).
+
+You can set whitelist IP to `0.0.0.0/0` to whitelist all IP addresses.
+
+**Hostname**
+
+```sh
+dgraph alpha --security whitelist=admin-bastion,host.docker.internal ...
+```
+
+This would allow admin operations from hosts with hostnames `admin-bastion` and
+`host.docker.internal`.
+
+## Restrict Mutation Operations
+
+By default, you can perform mutation operations for any predicate. If the
+predicate in mutation doesn't exist in the schema, the predicate gets added to
+the schema with an appropriate [Dgraph Type](./dql-schema).
+
+You can use `--limit "mutations=disallow"` to disable all mutations, which is
+set to `allow` by default.
+
+```sh
+dgraph alpha --limit "mutations=disallow;"
+```
+
+Enforce a strict schema by setting `--limit "mutations=strict"`. This mode allows
+mutations only on predicates already in the schema. Before performing a mutation
+on a predicate that doesn't exist in the schema, you need to perform an alter
+operation with that predicate and its schema type.
+
+```sh
+dgraph alpha --limit "mutations=strict; mutations-nquad=1000000"
+```
+
+## Secure Alter Operations
+
+Clients can use alter operations to apply schema updates and drop particular or
+all predicates from the database. By default, all clients are allowed to perform
+alter operations. You can configure Dgraph to only allow alter operations when
+the client provides a specific token. You can use this "Simple ACL" token to
+prevent clients from making unintended or accidental schema updates or predicate
+drops.
+
+You can specify the auth token with the `--security` superflag's `token` option
+for each Dgraph Alpha in the cluster. Clients must include the same auth token
+to make alter requests.
+
+```sh
+$ dgraph alpha --security token=
+```
+
+```sh
+$ curl -s localhost:8080/alter -d '{ "drop_all": true }'
+# Permission denied. No token provided.
+```
+
+```sh
+$ curl -s -H 'X-Dgraph-AuthToken: ' localhost:8080/alter -d '{ "drop_all": true }'
+# Permission denied. Incorrect token.
+```
+
+```sh
+$ curl -H 'X-Dgraph-AuthToken: ' localhost:8080/alter -d '{ "drop_all": true }'
+# Success. Token matches.
+```
+
+
+ To fully secure alter operations in the cluster, the authentication token must
+ be set for every Alpha node.
+
+
+## Export database
+
+As an `Administrator` you might want to export data from Dgraph to:
+
+- backup your data
+- move the data from a Dgraph Cloud instance to another Dgraph instance or
+  Dgraph Cloud instance
+- share your data
+
+For more information about exporting your database, see
+[Export data](./howto/exportdata/about-export)
+
+## Shut down database
+
+A clean exit of a single Dgraph node is initiated by running the following
+GraphQL mutation on /admin endpoint.
+
+
+ This won't work if called from outside the server where Dgraph is running. You
+ can specify a list or range of whitelisted IP addresses from which shutdown or
+ other admin operations can be initiated using the `--security` superflag's
+ `whitelist` option on `dgraph alpha`.
+
+
+```graphql
+mutation {
+ shutdown {
+ response {
+ message
+ code
+ }
+ }
+}
+```
+
+This stops the Alpha on which the command is executed and not the entire
+cluster.
+
+## Delete database
+
+To drop all data, you could send a `DropAll` request via `/alter` endpoint.
+
+Alternatively, you could:
+
+- [Shutdown Dgraph](./#shut-down-database) and wait for all writes to complete,
+- Delete (maybe do an export first) the `p` and `w` directories, then
+- Restart Dgraph.
+
+## Upgrade database
+
+Doing periodic exports is always a good idea. This is particularly useful if you
+wish to upgrade Dgraph or reconfigure the sharding of a cluster. The following
+are the right steps to safely export and restart.
+
+1. Start an [export](./#export-database)
+2. Ensure it is successful
+3. [Shutdown Dgraph](./#shut-down-database) and wait for all writes to complete
+4. Start a new Dgraph cluster using new data directories (this can be done by
+ passing empty directories to the options `-p` and `-w` for Alphas and `-w`
+ for Zeros)
+5. Reload the data via [bulk loader](./bulk-loader)
+6. Verify the correctness of the new Dgraph cluster. If all looks good, you can
+ delete the old directories (export serves as an insurance)
+
+These steps are necessary because Dgraph's underlying data format could have
+changed, and reloading the export avoids encoding incompatibilities.
+
+Blue-green deployment is a common approach to minimize downtime during the
+upgrade process. This approach involves switching your application to read-only
+mode. To make sure that no mutations are executed during the maintenance window
+you can do a rolling restart of all your Alpha using the option
+`--mutations disallow` when you restart the Alpha nodes. This will ensure the
+cluster is in read-only mode.
+
+At this point your application can still read from the old cluster and you can
+perform the steps 4. and 5. described above. When the new cluster (that uses the
+upgraded version of Dgraph) is up and running, you can point your application to
+it, and shutdown the old cluster.
+
+### Upgrade from v1.2.2 to v20.03.0 for Enterprise customers
+
+{/* TODO: Redirect(s) */}
+
+1. Use [binary backup](./enterprise-features/binary-backups.md) to export data
+ from old cluster
+2. Ensure it is successful
+3. [Shutdown Dgraph](./#shut-down-database) and wait for all writes to complete
+4. Upgrade `dgraph` binary to `v20.03.0`
+5. [Restore](/enterprise-features/binary-backups.md#restore-from-backup) from
+ the backups using upgraded `dgraph` binary
+6. Start a new Dgraph cluster using the restored data directories
+7. Upgrade ACL data using the following command:
+
+```sh
+dgraph upgrade --acl -a localhost:9080 -u groot -p password
+```
+
+### Upgrade from v20.03.0 to v20.07.0 for Enterprise customers
+
+1. Use [binary backup](./enterprise-features/binary-backups.md) to export data
+ from old cluster
+2. Ensure it is successful
+3. [Shutdown Dgraph](./#shut-down-database) and wait for all writes to complete
+4. Upgrade `dgraph` binary to `v20.07.0`
+5. [Restore](/enterprise-features/binary-backups.md#restore-from-backup) from
+ the backups using upgraded `dgraph` binary
+6. Start a new Dgraph cluster using the restored data directories
+7. Upgrade ACL data using the following command:
+
+ ```sh
+ dgraph upgrade --acl -a localhost:9080 -u groot -p password -f v20.03.0 -t v20.07.0
+ ```
+
+ This is required because previously the type-names `User`, `Group` and `Rule`
+ were used by ACL. They have now been renamed as `dgraph.type.User`,
+ `dgraph.type.Group` and `dgraph.type.Rule`, to keep them in Dgraph's internal
+ namespace. This upgrade just changes the type-names for the ACL nodes to the
+ new type-names.
+
+ You can use `--dry-run` option in `dgraph upgrade` command to see a dry run
+ of what the upgrade command will do.
+
+8. If you have types or predicates in your schema whose names start with
+ `dgraph.`, then you would need to manually alter schema to change their names
+ to something else which isn't prefixed with `dgraph.`, and also do mutations
+ to change the value of `dgraph.type` edge to the new type name and copy data
+ from old predicate name to new predicate name for all the nodes which are
+ affected. Then, you can drop the old types and predicates from DB.
+
+### Upgrade from v20.11.0 to v21.03.0 for Enterprise customers
+
+1. Use [binary backup](./enterprise-features/binary-backups.md) to export data
+ from the old cluster
+2. Ensure it is successful
+3. [Shutdown Dgraph](./#shut-down-database) and wait for all writes to complete
+4. Upgrade `dgraph` binary to `v21.03.0`
+5. [Restore](/enterprise-features/binary-backups.md#restore-from-backup) from
+ the backups using the upgraded `dgraph` binary
+6. Start a new Dgraph cluster using the restored data directories
+7. Upgrade the CORS and persisted queries. To upgrade an ACL cluster use:
+
+ ```sh
+ dgraph upgrade --from v20.11.0 --to v21.03.0 --user groot --password password --alpha http://localhost:9080 --alpha-http http://localhost:8080 --deleteOld
+ ```
+
+ To upgrade a non-ACL cluster use:
+
+ ```sh
+ dgraph upgrade --from v20.11.0 --to v21.03.0 --alpha http://localhost:9080 --alpha-http http://localhost:8080 --deleteOld
+ ```
+
+ This is required because previously CORS information was stored in
+ `dgraph.cors` predicate which has now been moved to be a part of the GraphQL
+ schema. Also, the format of persisted queries has changed. Some of the
+ internal deprecated predicates will be removed by this change.
+
+ You can use `--dry-run` option in `dgraph upgrade` command to see a dry run
+ of what the upgrade command will do.
+
+
+ The above steps are valid for migration from a cluster in `v20.11` to a
+ single-tenant cluster in `v21.03`, as backup and restore are cluster-wide
+ operations and a single namespace cannot be restored in a multi-tenant
+ cluster.
+
+
+## Post Installation
+
+Now that Dgraph is up and running, to understand how to add and query data to
+Dgraph, follow [Query Language Spec](/query-language). Also, have a look at
+[Frequently asked questions](/faq).
diff --git a/dgraph/reference/deploy/admin/index.mdx b/dgraph/reference/deploy/admin/index.mdx
new file mode 100644
index 00000000..9f7ab9b1
--- /dev/null
+++ b/dgraph/reference/deploy/admin/index.mdx
@@ -0,0 +1,3 @@
+---
+title: Administration
+---
diff --git a/dgraph/reference/deploy/admin/log-format.mdx b/dgraph/reference/deploy/admin/log-format.mdx
new file mode 100644
index 00000000..d23b279b
--- /dev/null
+++ b/dgraph/reference/deploy/admin/log-format.mdx
@@ -0,0 +1,130 @@
+---
+title: Logging
+description:
+ Dgraph logs requests for queries and mutations, and also provides audit
+ logging capabilities with a Dgraph Enterprise license
+---
+
+Dgraph logs requests for queries and mutations, and also provides audit logging
+capabilities with a Dgraph [enterprise license](/enterprise-features/license).
+
+Dgraph's log format comes from the glog library and is
+[formatted](https://github.com/golang/glog/blob/23def4e6c14b4da8ac2ed8007337bc5eb5007998/glog.go#L523-L533)
+as follows:
+
+```
+Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
+```
+
+The fields shown above are defined as follows:
+
+| Field | Definition |
+| ----------------- | ---------------------------------------------------------------- |
+| `L` | A single character, representing the log level (eg 'I' for INFO) |
+| `mm` | Month (zero padded; ie May is '05') |
+| `dd` | Day (zero padded) |
+| `hh:mm:ss.uuuuuu` | Time in hours, minutes and fractional seconds |
+| `threadid` | Space-padded thread ID as returned by GetTID() |
+| `file` | Filename |
+| `line` | Line number |
+| `msg` | User-supplied message |
+
+## Log verbosity
+
+To increase log verbosity, set the flag `-v=3` (or `-v=2`) which will enable
+verbose logging for everything. You can set this flag on both Zero and Alpha
+nodes.
+
+Changing log verbosity requires a restart of the node.
+
+## Request logging
+
+Request logging, sometimes called _query logging_, lets you log queries and
+mutations. You can dynamically turn request logging on or off. To toggle request
+logging on, send the following GraphQL mutation to the `/admin` endpoint of an
+Alpha node (e.g. `localhost:8080/admin`):
+
+```graphql
+mutation {
+ config(input: { logDQLRequest: true }) {
+ response {
+ code
+ message
+ }
+ }
+}
+```
+
+Note: this input flag was named `logRequest` until Dgraph version v23.
+
+The response should look like the following:
+
+```json
+{
+ "data": {
+ "config": {
+ "response": {
+ "code": "Success",
+ "message": "Config updated successfully"
+ }
+ }
+ },
+ "extensions": {
+ "tracing": {
+ "version": 1,
+ "startTime": "2020-12-07T14:53:28.240420495Z",
+ "endTime": "2020-12-07T14:53:28.240569604Z",
+ "duration": 149114
+ }
+ }
+}
+```
+
+Also, the Alpha node will print the following INFO message to confirm that the
+mutation has been applied:
+
+```
+I1207 14:53:28.240516 20143 config.go:39] Got config update through GraphQL admin API
+```
+
+When enabling request logging this prints the requests that Dgraph Alpha
+receives from Ratel or other clients. In this case, the Alpha log will print
+something similar to:
+
+```
+I1201 13:06:26.686466 10905 server.go:908] Got a query: query:"{\n query(func: allofterms(name@en, \"Marc Caro\")) {\n uid\n name@en\n director.film\n }\n}"
+```
+
+As you can see, we got the query that Alpha received. To read it in the original
+DQL format just replace every `\n` with a new line, any `\t` with a tab
+character and `\"` with `"`:
+
+```
+{
+ query(func: allofterms(name@en, "Marc Caro")) {
+ uid
+ name@en
+ director.film
+ }
+}
+```
+
+Similarly, you can turn off request logging by setting `logDQLRequest` to
+`false` in the `/admin` mutation.
+
+```graphql
+mutation {
+  config(input: { logDQLRequest: false }) {
+    response {
+      code
+      message
+    }
+  }
+}
+```
+
+## Audit logging (enterprise feature)
+
+With a Dgraph enterprise license, you can enable audit logging so that all
+requests are tracked and available for use in security audits. To learn more,
+see [Audit Logging](./enterprise-features/audit-logs).
diff --git a/dgraph/reference/deploy/admin/metrics.mdx b/dgraph/reference/deploy/admin/metrics.mdx
new file mode 100644
index 00000000..977afe19
--- /dev/null
+++ b/dgraph/reference/deploy/admin/metrics.mdx
@@ -0,0 +1,121 @@
+---
+title: Metrics
+description:
+ Dgraph database helps administrators by providing metrics on Dgraph instance
+ activity, disk activity, server node health, memory, and Raft leadership
+---
+
+Dgraph database provides metrics on Dgraph instance activity, disk activity,
+server node health, memory, and Raft leadership. It also provides built-in
+metrics provided by Go. Dgraph metrics follow the
+[metric and label conventions for the Prometheus](https://prometheus.io/docs/practices/naming/)
+monitoring and alerting toolkit.
+
+## Activity Metrics
+
+Activity metrics let you track the mutations, queries, and proposals of a Dgraph
+instance.
+
+| Metric | Description |
+| -------------------------------------------------- | ------------------------------------------------------- |
+| `go_goroutines` | Total number of Goroutines currently running in Dgraph. |
+| `dgraph_active_mutations_total` | Total number of mutations currently running. |
+| `dgraph_pending_proposals_total` | Total pending Raft proposals. |
+| `dgraph_pending_queries_total` | Total number of queries in progress. |
+| `dgraph_num_queries_total{method="Server.Mutate"}` | Total number of mutations run in Dgraph. |
+| `dgraph_num_queries_total{method="Server.Query"}` | Total number of queries run in Dgraph. |
+
+## Disk metrics
+
+Disk metrics let you track the disk activity of the Dgraph process. Dgraph does
+not interact directly with the filesystem. Instead it relies on
+[Badger](https://github.com/dgraph-io/badger) to read from and write to disk.
+
+| Metric | Description |
+| ----------------------------------- | ----------------------------------------------------------- |
+| `badger_read_num_vlog`              | Total count of reads by Badger in vlog.                     |
+| `badger_write_num_vlog`             | Total count of writes by Badger in vlog.                    |
+| `badger_read_bytes_vlog`            | Total bytes read by Badger.                                 |
+| `badger_write_bytes_vlog`           | Total bytes written by Badger.                              |
+| `badger_read_bytes_lsm`             | Total bytes read by Badger.                                 |
+| `badger_write_bytes_l0`             | Total bytes written by Badger.                              |
+| `badger_write_bytes_compaction`     | Total bytes written by Badger.                              |
+| `badger_get_num_lsm`                | Total count of LSM gets.                                    |
+| `badger_get_num_memtable`           | Total count of LSM gets from memtable.                      |
+| `badger_hit_num_lsm_bloom_filter`   | Total count of LSM bloom hits.                              |
+| `badger_get_num_user`               | Total count of calls to Badger's `get`.                     |
+| `badger_put_num_user`               | Total count of calls to Badger's `put`.                     |
+| `badger_write_bytes_user`           | Total bytes written by user.                                |
+| `badger_get_with_result_num_user`   | Total count of calls to Badger's `get` that returned value. |
+| `badger_iterator_num_user`          | Total count of iterators made in Badger.                    |
+| `badger_size_bytes_lsm`             | Size of the LSM in bytes.                                   |
+| `badger_size_bytes_vlog`            | Size of the value log in bytes.                             |
+| `badger_write_pending_num_memtable` | Total count of pending writes.                              |
+| `badger_compaction_current_num_lsm` | Number of tables being actively compacted.                  |
+
+Old Metrics (Pre 23.1.0)
+
+| Metric                        | Description                                       |
+| ----------------------------- | ------------------------------------------------- |
+| `badger_disk_reads_total`     | Total count of disk reads in Badger.              |
+| `badger_disk_writes_total`    | Total count of disk writes in Badger.             |
+| `badger_gets_total`           | Total count of calls to Badger's `get`.           |
+| `badger_memtable_gets_total`  | Total count of memtable accesses to Badger's `get`. |
+| `badger_puts_total`           | Total count of calls to Badger's `put`.           |
+| `badger_read_bytes`           | Total bytes read from Badger.                     |
+| `badger_lsm_bloom_hits_total` | Total number of LSM tree bloom hits.              |
+| `badger_written_bytes`        | Total bytes written to Badger.                    |
+| `badger_lsm_size_bytes`       | Total size in bytes of the LSM tree.              |
+| `badger_vlog_size_bytes`      | Total size in bytes of the value log.             |
+
+## Go Metrics
+
+Go's built-in metrics may also be useful to measure memory usage and garbage
+collection time.
+
+| Metric | Description |
+| ------------------------------ | ------------------------------------------------------------------------------------------- |
+| `go_memstats_gc_cpu_fraction` | The fraction of this program's available CPU time used by the GC since the program started. |
+| `go_memstats_heap_idle_bytes` | Number of heap bytes waiting to be used. |
+| `go_memstats_heap_inuse_bytes` | Number of heap bytes that are in use. |
+
+## Health Metrics
+
+Health metrics let you check the health of a server node.
+
+ Health metrics are only available for Dgraph Alpha server nodes.
+
+| Metric | Description |
+| ---------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `dgraph_alpha_health_status` | Value is 1 when the Alpha node is ready to accept requests; otherwise 0. |
+| `dgraph_max_assigned_ts` | This shows the latest max assigned timestamp. All Alpha nodes within the same Alpha group should show the same timestamp if they are in sync. |
+| `dgraph_txn_aborts_total` | Shows the total number of server-initiated transaction aborts that have occurred on the Alpha node. |
+| `dgraph_txn_commits_total` | Shows the total number of successful commits that have occurred on the Alpha node. |
+| `dgraph_txn_discards_total` | Shows the total number of client-initiated transaction discards that have occurred on the Alpha node. This is incremented when the client calls for a transaction discard, such as using the Dgraph Go client's `txn.Discard` function. |
+
+## Memory metrics
+
+Memory metrics let you track the memory usage of the Dgraph process. The `idle`
+and `inuse` metrics give you a better sense of the active memory usage of the
+Dgraph process. The process memory metric shows the memory usage as measured by
+the operating system.
+
+By looking at all three metrics you can see how much memory a Dgraph process is
+holding from the operating system and how much is actively in use.
+
+| Metric | Description |
+| --------------------------- | ----------------------------------------------------------------------------------------------------------- |
+| `dgraph_memory_idle_bytes` | Estimated amount of memory that is being held idle that could be reclaimed by the OS. |
+| `dgraph_memory_inuse_bytes` | Total memory usage in bytes (sum of heap usage and stack usage). |
+| `dgraph_memory_proc_bytes` | Total memory usage in bytes of the Dgraph process. This metric is equivalent to resident set size on Linux. |
+
+## Raft leadership metrics
+
+Raft leadership metrics let you track changes in Raft leadership for Dgraph
+Alpha and Dgraph Zero nodes in your Cluster. These metrics include a group label
+along with the node name, so that you can determine which metrics apply to which
+Raft groups.
+
+| Metric | Description |
+| ---------------------------------- | ----------------------------------------------------------------- |
+| `dgraph_raft_has_leader` | Value is 1 when the node has a leader; otherwise 0. |
+| `dgraph_raft_is_leader` | Value is 1 when the node is the leader of its group; otherwise 0. |
+| `dgraph_raft_leader_changes_total` | The total number of leader changes seen by this node. |
diff --git a/dgraph/reference/deploy/admin/tracing.mdx b/dgraph/reference/deploy/admin/tracing.mdx
new file mode 100644
index 00000000..ec30288b
--- /dev/null
+++ b/dgraph/reference/deploy/admin/tracing.mdx
@@ -0,0 +1,63 @@
+---
+title: Tracing
+---
+
+Dgraph is integrated with [OpenCensus](https://opencensus.io/zpages/) to collect
+distributed traces from the Dgraph cluster.
+
+Trace data is always collected within Dgraph. You can adjust the trace sampling
+rate for Dgraph queries using the `--trace`
+[superflag's](./deploy/cli-command-reference) `ratio` option when running Dgraph
+Alpha nodes. By default, `--trace ratio` is set to 0.01 to trace 1% of queries.
+
+## Examining Traces with zPages
+
+The most basic way to view traces is with the integrated trace pages.
+
+OpenCensus's [zPages](https://opencensus.io/zpages/) are accessible via the Zero
+or Alpha HTTP port at `/z/tracez`.
+
+## Examining Traces with Jaeger
+
+Jaeger collects distributed traces and provides a UI to view and query traces
+across different services. This provides the necessary observability to figure
+out what is happening in the system.
+
+Dgraph can be configured to send traces directly to a Jaeger collector with the
+`trace` superflag's `jaeger` option. For example, if the Jaeger collector is
+running on `http://localhost:14268`, then pass this option to the Dgraph Zero
+and Dgraph Alpha instances as `--trace jaeger=http://localhost:14268`.
+
+See
+[Jaeger's Getting Started docs](https://www.jaegertracing.io/docs/getting-started/)
+to get up and running with Jaeger.
+
+### Setting up multiple Dgraph clusters with Jaeger
+
+Jaeger allows you to examine traces from multiple Dgraph clusters. To do this,
+use the `--collector.tags` on a Jaeger collector to set custom trace tags. For
+example, run one collector with `--collector.tags env=qa` and then another
+collector with `--collector.tags env=dev`. In Dgraph, set the `--trace jaeger`
+option in the Dgraph QA cluster to the first collector and set this option in
+the Dgraph Dev cluster to the second collector. You can run multiple Jaeger
+collector components for the same single Jaeger backend (e.g., many Jaeger
+collectors to a single Cassandra backend). This is still a single Jaeger
+installation but with different collectors customizing the tags per environment.
+
+Once you have this configured, you can filter by tags in the Jaeger UI. Filter
+traces by tags matching `env=dev`:
+
+
+
+Every trace has your custom tags set under the “Process” section of each span:
+
+
+
+Filter traces by tags matching `env=qa`:
+
+
+
+
+
+To learn more about Jaeger, see
+[Jaeger's Deployment Guide](https://www.jaegertracing.io/docs/deployment/).
diff --git a/dgraph/reference/deploy/cli-command-reference.mdx b/dgraph/reference/deploy/cli-command-reference.mdx
new file mode 100644
index 00000000..b474e350
--- /dev/null
+++ b/dgraph/reference/deploy/cli-command-reference.mdx
@@ -0,0 +1,1114 @@
+---
+title: Dgraph CLI Reference
+---
+
+You can use the Dgraph command-line interface (CLI) to deploy and manage Dgraph.
+You use it in self-managed deployment scenarios, such as running Dgraph on
+on-premises servers hosted on your physical infrastructure, or running Dgraph in
+the cloud on your AWS, GCP, or Azure infrastructure.
+
+Dgraph has a root command used throughout its CLI: `dgraph`. The `dgraph`
+command is supported by multiple subcommands (such as `alpha` or `update`), some
+of which are also supported by their own subcommands. For example, the
+`dgraph acl` command requires you to specify one of its subcommands: `add`,
+`del`, `info` or `mod`. As with other CLIs, you provide command options using
+flags like `--help` or `--telemetry`.
+
+
+ The term _command_ is used instead of _subcommand_ throughout this document,
+ except when clarifying relationships in the CLI command hierarchy. The term
+ _command_ is also used for combinations of commands and their subcommands,
+ such as `dgraph alpha debug`.
+
+
+## Dgraph CLI superflags in release v21.03
+
+Some flags are deprecated and replaced in release v21.03. In previous Dgraph
+releases, multiple related flags are often used in a command, causing some
+commands to be very long. Starting in release v21.03, Dgraph uses _superflags_
+for some flags used by the most complex commands: `alpha`, `backup`, `bulk`,
+`debug`, `live` and `zero`. Superflags are compound flags: they contain one or
+more options that let you define multiple settings in a semicolon-delimited
+list. Semicolons are required between superflag options, but a semicolon after
+the last superflag option is optional.
+
+The general syntax for superflags is as follows:
+`--<superflag> option-a=value; option-b=value`
+
+ You should encapsulate the options for a superflag in
+double-quotes (`"`) if any of those option values include spaces. You can
+encapsulate options in double-quotes to improve readability. You can also use
+the following syntax for superflags:
+`--<superflag> "option-a=value; option-b=value"`.
+
+Release v21.03 includes the following superflags:
+
+- `--acl`
+- `--badger`
+- `--cache`
+- `--encryption`
+- `--graphql`
+- `--limit`
+- `--raft`
+- `--security`
+- `--telemetry`
+- `--tls`
+- `--trace`
+- `--vault`
+
+The following table maps Dgraph CLI flags from release v20.11 and earlier that
+have been replaced by superflags (and their options) in release v21.03. Any
+flags not shown here are unchanged in release v21.03.
+
+### ACL superflag
+
+| Old flag | Old type | New superflag and options | New type | Applies to | Notes |
+| ------------------: | :------------ | ------------------------: | :------------------------------------------------------------------------------ | :--------: | :-------------------------------------------------------------------------: |
+| | | **`--acl`** | | | [Access Control List](./enterprise-features/access-control-lists) superflag |
+| `--acl_secret_file` | string | `secret-file` | string | `alpha` | File that stores the HMAC secret that is used for signing the JWT |
+| `--acl_access_ttl` | time.Duration | `access-ttl` | [string](https://github.com/dgraph-io/ristretto/blob/master/z/flags.go#L80-L98) | `alpha` | TTL for the access JWT |
+| `--acl_refresh_ttl` | time.Duration | `refresh-ttl` | [string](https://github.com/dgraph-io/ristretto/blob/master/z/flags.go#L80-L98) | `alpha` | The TTL for the refresh JWT |
+
+### Badger superflag
+
+| Old flag | Old type | New superflag and options | New type | Applies to | Notes |
+| ---------------------: | :------- | -----------------------------------------: | :------- | :-----------------------: | :-----------------------------------------------: |
+| | | **`--badger`** | | | [Badger](https://dgraph.io/docs/badger) superflag |
+| `--badger.compression` | string | `compression` | string | `alpha`, `bulk`, `backup` | Specifies the compression level and algorithm |
+| | | (new) [`numgoroutines`](./troubleshooting) | int | `alpha`, `bulk`, `backup` | Number of Go routines used by Dgraph |
+
+
+ The `--badger` superflag allows you to set many advanced [Badger
+ options](https://pkg.go.dev/github.com/dgraph-io/badger/v3#Options),
+ including: `dir`, `valuedir`, `syncwrites`, `numversionstokeep`, `readonly`,
+ `inmemory`, `metricsenabled`, `memtablesize`, `basetablesize`,
+ `baselevelsize`, `levelsizemultiplier`, `tablesizemultiplier`, `maxlevels`,
+ `vlogpercentile`, `valuethreshold`, `nummemtables`, `blocksize`,
+ `bloomfalsepositive`, `blockcachesize`, `indexcachesize`,
+ `numlevelzerotables`, `numlevelzerotablesstall`, `valuelogfilesize`,
+ `valuelogmaxentries`, `numcompactors`, `compactl0onclose`, `lmaxcompaction`,
+ `zstdcompressionlevel`, `verifyvaluechecksum`,
+ `encryptionkeyrotationduration`, `bypasslockguard`,
+ `checksumverificationmode`, `detectconflicts`, `namespaceoffset`.
+
+
+### Cache superflag
+
+| Old flag | Old type | New superflag and options | New type | Applies to | Notes |
+| -----------------: | :------- | ------------------------: | :------- | :--------: | :--------------------------------------------------: |
+| | | **`--cache`** | | | Cache superflag |
+| `cache_mb` | string | `size-mb` | string | `alpha` | Total size of cache (in MB) per shard in the reducer |
+| `cache_percentage` | string | `percentage` | string | `alpha` | Cache percentages for block cache and index cache |
+
+### Encryption superflag
+
+| Old flag | Old type | New superflag and options | New type | Applies to | Notes |
+| ----------------------: | :------- | ------------------------: | :------- | :---------------------------------------------------------------------: | :------------------------------------: |
+| | | **`--encryption`** | | | Encryption superflag |
+| `--encryption_key_file` | string | `key-file` | string | `alpha`, `bulk`, `live`, `restore`, `debug`, `decrypt`, `export_backup` | The file that stores the symmetric key |
+
+### GraphQL superflag
+
+| Old flag | Old type | New superflag and options | New type | Applies to | Notes |
+| ------------------------: | :------------ | ------------------------: | :------------------------------------------------------------------------------ | :--------: | :----------------------------------------------------------------------------: |
+| | | **`--graphql`** | | | GraphQL superflag |
+| `--graphql_introspection` | bool | `introspection` | bool | `alpha` | Enables GraphQL schema introspection |
+| `--graphql_debug` | bool | `debug` | bool | `alpha` | Enables debug mode in GraphQL |
+| `--graphql_extensions` | bool | `extensions` | bool | `alpha` | Enables extensions in GraphQL response body |
+| `--graphql_poll_interval` | time.Duration | `poll-interval` | [string](https://github.com/dgraph-io/ristretto/blob/master/z/flags.go#L80-L98) | `alpha` | The polling interval for GraphQL subscriptions |
+| `--graphql_lambda_url` | string | `lambda-url` | string | `alpha` | The URL of a lambda server that implements custom GraphQL JavaScript resolvers |
+
+### Limit superflag
+
+| Old flag | Old type | New superflag and options | New type | Applies to | Notes |
+| ------------------------: | :------- | ------------------------: | :------- | :--------: | :----------------------------------------------------------------------------------------------------------------: |
+| | | **`--limit`** | | | Limit-setting superflag for Dgraph Alpha |
+| `--abort_older_than` | string | `txn-abort-after` | string | `alpha` | Abort any pending transactions older than this duration |
+| `--disable_admin_http` | string | `disable-admin-http` | string | `zero` | Turn on/off the administrative endpoints |
+| `--max_retries` | int | `max-retries` | int | `alpha` | Maximum number of retries |
+| `--mutations` | string | `mutations` | string | `alpha` | Mutation mode: `allow`, `disallow`, or `strict` |
+| `--query_edge_limit` | uint64 | `query-edge` | uint64 | `alpha` | Maximum number of edges that can be returned in a query |
+| `--normalize_node_limit` | int | `normalize-node` | int | `alpha` | Maximum number of nodes that can be returned in a query that uses the normalize directive |
+| `--mutations_nquad_limit` | int | `mutations-nquad` | int | `alpha` | Maximum number of nquads that can be inserted in a mutation request |
+| `--max-pending-queries` | int | `max-pending-queries` | int | `alpha` | Maximum number of concurrently processing requests allowed before requests are rejected with 429 Too Many Requests |
+
+### Raft superflag
+
+| Old flag | Old type | New superflag and options | New type | Applies to | Notes |
+| --------------------: | :------- | -----------------------------: | :------- | :-------------: | :--------------------------------------------------------------------------------------------------------: |
+| | | **`--raft`** | | | [Raft](./design-concepts/raft) superflag |
+| `--pending_proposals` | int | `pending-proposals` | int | `alpha` | Maximum number of pending mutation proposals; useful for rate limiting |
+| `--idx` | int | `idx` | int | `alpha`, `zero` | Provides an optional Raft ID that an Alpha node can use to join Raft groups |
+| `--group` | int | `group` | int | `alpha` | Provides an optional Raft group ID that an Alpha node can use to request group membership from a Zero node |
+| | | (new) `learner` | bool | `alpha`, `zero` | Make this Alpha a learner node (used for read-only replicas) |
+| | | (new) `snapshot-after-duration` | int | `alpha` | Frequency at which Raft snapshots are created |
+| `--snapshot-after` | int | `snapshot-after-entries` | int | `alpha` | Create a new Raft snapshot after the specified number of Raft entries |
+
+### Security and telemetry superflags
+
+| Old flag | Old type | New superflag and options | New type | Applies to | Notes |
+| ----------------: | :------- | ------------------------: | :------- | :----------------: | :---------------------------------------------------------------------------------------------: |
+| | | **`--security`** | | | Security superflag |
+| `--auth_token` | string | `token` | string | `alpha` | Authentication token |
+| `--whitelist` | string | `whitelist` | string | `alpha` | A comma separated list of IP addresses, IP ranges, CIDR blocks, or hostnames for administration |
+| | | **`--telemetry`** | | | Telemetry superflag |
+| `--telemetry` | bool | `reports` | bool | `alpha` and `zero` | Sends anonymous telemetry data to Dgraph |
+| `--enable_sentry` | bool | `sentry` | bool | `alpha` and `zero` | Enable sending crash events to Sentry |
+
+### TLS superflag
+
+| Old flag | Old type | New superflag and options | New type | Applies to | Notes |
+| ----------------------------: | :------- | ------------------------: | :------- | :---------------------------------------: | :--------------------------------------------------------------------------------: |
+| | | **`--tls`** | | | [TLS](./tls-configuration) superflag |
+| `--tls_cacert` | string | `ca-cert` | string | `alpha`, `zero`, `bulk`, `backup`, `live` | The CA cert file used to verify server certificates |
+| `--tls_use_system_ca` | bool | `use-system-ca` | bool | `alpha`, `zero`, `bulk`, `backup`, `live` | Include System CA with Dgraph Root CA |
+| `--tls_server_name` | string | `server-name` | string | `alpha`, `zero`, `bulk`, `backup`, `live` | Server name, used for validating the server’s TLS host name |
+| `--tls_client_auth` | string | `client-auth-type` | string | `alpha`, `zero` | TLS client authentication used to validate client connections from external ports |
+| `--tls_node_cert` | string | `server-cert` | string | `alpha` and `zero` | Path and filename of the node certificate (for example, `node.crt`) |
+| `--tls_node_key` | string | `server-key` | string | `alpha` and `zero` | Path and filename of the node certificate private key (for example, `node.key`) |
+| `--tls_internal_port_enabled` | bool | `internal-port` | bool | `alpha`, `zero`, `bulk`, `backup`, `live` | Makes internal ports (by default, 5080 and 7080) use the REQUIREANDVERIFY setting. |
+| `--tls_cert` | string | `client-cert` | string | `alpha`, `zero`, `bulk`, `backup`, `live` | User cert file provided by the client to the Alpha node |
+| `--tls_key` | string | `client-key` | string | `alpha`, `zero`, `bulk`, `backup`, `live` | User private key file provided by the client to the Alpha node |
+
+### Trace superflag
+
+| Old flag | Old type | New superflag and options | New type | Applies to | Notes |
+| --------------------: | :------- | ------------------------: | :------- | :-------------: | :--------------------------------------: |
+| | | **`--trace`** | | | [Tracing](./tracing) superflag |
+| `--trace` | float64 | `ratio` | float64 | `alpha`, `zero` | The ratio of queries to trace |
+| `--jaeger.collector` | string | `jaeger` | string | `alpha`, `zero` | URL of Jaeger to send OpenCensus traces |
+| `--datadog.collector` | string | `datadog` | string | `alpha`, `zero` | URL of Datadog to send OpenCensus traces |
+
+### Vault superflag
+
+| Old flag | Old type | New superflag and options | New type | Applies to | Notes |
+| ----------------------: | :------- | ------------------------: | :------- | :----------------------------------------: | :-----------------------------------------------------------------------------------------: |
+| | | **`--vault`** | | | Vault superflag |
+| `--vault_addr` | string | `addr` | string | `alpha`, `bulk`, `backup`, `live`, `debug` | Vault server address, formatted as `http://ip-address:port` |
+| `--vault_roleid_file` | string | `role-id-file` | string | `alpha`, `bulk`, `backup`, `live`, `debug` | File containing Vault `role-id` used for AppRole authentication |
+| `--vault_secretid_file` | string | `secret-id-file` | string | `alpha`, `bulk`, `backup`, `live`, `debug` | File containing Vault `secret-id` used for AppRole authentication |
+| `--vault_path` | string | `path` | string | `alpha`, `bulk`, `backup`, `live`, `debug` | Vault key=value store path (example: `secret/data/dgraph` for kv-v2, `kv/dgraph` for kv-v1) |
+| `--vault_field` | string | `field` | string | `alpha`, `bulk`, `backup`, `live`, `debug` | Vault key=value store field whose value is the base64 encoded encryption key |
+| `--vault_format` | string | `format` | string | `alpha`, `bulk`, `backup`, `live`, `debug` | Vault field format (`raw` or `base64`) |
+
+To learn more about each superflag and its options, see the `--help` output of
+the Dgraph CLI commands listed in the following section.
+
+## Dgraph CLI command help listing
+
+The Dgraph CLI includes the root `dgraph` command and its subcommands. The CLI
+help for these commands is replicated inline below for your reference, or you
+can find help by calling these commands (or their subcommands) using the
+`--help` flag.
+
+
+ Although many of the commands listed below have subcommands, only `dgraph` and
+ subcommands of `dgraph` are included in this listing.
+
+
+The Dgraph CLI has several commands, which are organized into the following
+groups:
+
+- [Dgraph core](#dgraph-core-commands)
+- [Data loading](#data-loading-commands)
+- [Dgraph security](#dgraph-security-commands)
+- [Dgraph debug](#dgraph-debug-commands)
+- [Dgraph tools](#dgraph-tools-commands)
+
+The commands in these groups are shown in the following table:
+
+| Group | Command | Note |
+| --------------- | ---------------------------------------- | --------------------------------------------------------------------------------------------------------------- |
+| (root) | [`dgraph`](#dgraph-root-command) | Root command for Dgraph CLI |
+| Dgraph core | [`alpha`](#dgraph-alpha) | Dgraph Alpha database node commands |
+| Dgraph core | [`zero`](#dgraph-zero) | Dgraph Zero management node commands |
+| Data loading | [`bulk`](#dgraph-bulk) | Dgraph [Bulk Loader](./bulk-loader) commands |
+| Data loading | [`live`](#dgraph-live) | Dgraph [Live Loader](./live-loader) commands |
+| Data loading | [`restore`](#dgraph-restore) | Command used to restore backups created using Dgraph Enterprise Edition |
+| Dgraph security | [`acl`](#dgraph-acl) | Dgraph [Access Control List (ACL)](./enterprise-features/access-control-lists) commands |
+| Dgraph security | [`audit`](#dgraph-audit) | Decrypt audit files |
+| Dgraph security | [`cert`](#dgraph-cert) | Configure TLS and manage TLS certificates |
+| Dgraph debug | [`debug`](#dgraph-debug) | Used to debug issues with Dgraph |
+| Dgraph debug | [`debuginfo`](#dgraph-debuginfo) | Generates information about the current node for use in debugging issues with Dgraph clusters |
+| Dgraph tools | [`completion`](#dgraph-completion) | Generates shell completion scripts for `bash` and `zsh` |
+| Dgraph tools | [`conv`](#dgraph-conv) | Converts geographic files into RDF so that they can be consumed by Dgraph |
+| Dgraph tools | [`decrypt`](#dgraph-decrypt) | Decrypts an export file created by an encrypted Dgraph Cluster |
+| Dgraph tools | [`export_backup`](#dgraph-export_backup) | Converts a binary backup created using Dgraph Enterprise Edition into an exported folder. |
+| Dgraph tools | [`increment`](#dgraph-increment) | Increments a counter transactionally to confirm that a Dgraph Alpha node can handle query and mutation requests |
+| Dgraph tools | [`lsbackup`](#dgraph-lsbackup) | Lists information on backups in a given location |
+| Dgraph tools | [`migrate`](#dgraph-migrate) | Migrates data from a MySQL database to Dgraph |
+| Dgraph tools | [`raftmigrate`](#dgraph-raftmigrate) | Dgraph Raft migration tool |
+| Dgraph tools | [`upgrade`](#dgraph-upgrade) | Upgrades Dgraph to a newer version |
+
+### `dgraph` root command
+
+This command is the root for all commands in the Dgraph CLI. Key information
+from the help listing for `dgraph --help` is shown below:
+
+```shell
+Usage:
+ dgraph [command]
+
+Generic:
+ help Help about any command
+ version Prints the dgraph version details
+
+Available Commands:
+
+Dgraph Core:
+ alpha Run Dgraph Alpha database server
+ zero Run Dgraph Zero management server
+
+Data Loading:
+ bulk Run Dgraph Bulk Loader
+ live Run Dgraph Live Loader
+ restore Restore backup from Dgraph Enterprise Edition
+
+Dgraph Security:
+ acl Run the Dgraph Enterprise Edition ACL tool
+ audit Dgraph audit tool
+ cert Dgraph TLS certificate management
+
+Dgraph Debug:
+ debug Debug Dgraph instance
+ debuginfo Generate debug information on the current node
+
+Dgraph Tools:
+ completion Generates shell completion scripts for bash or zsh
+ conv Dgraph Geo file converter
+ decrypt Run the Dgraph decryption tool
+ export_backup Export data inside single full or incremental backup
+ increment Increment a counter transactionally
+ lsbackup List info on backups in a given location
+ migrate Run the Dgraph migration tool from a MySQL database to Dgraph
+ raftmigrate Run the Raft migration tool
+ upgrade Run the Dgraph upgrade tool
+
+Flags:
+ --alsologtostderr log to standard error as well as files
+ --bindall Use 0.0.0.0 instead of localhost to bind to all addresses on local machine. (default true)
+ --block_rate int Block profiling rate. Must be used along with block profile_mode
+ --config string Configuration file. Takes precedence over default values, but is overridden to values set with environment variables and flags.
+ --cwd string Change working directory to the path specified. The parent must exist.
+ --expose_trace Allow trace endpoint to be accessible from remote
+ -h, --help help for dgraph
+ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_dir string If non-empty, write log files in this directory
+ --logtostderr log to standard error instead of files
+ --profile_mode string Enable profiling mode, one of [cpu, mem, mutex, block]
+ -v, --v Level log level for V logs
+ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+
+```
+
+### Dgraph core commands
+
+Dgraph core commands provide core deployment and management functionality for
+the Dgraph Alpha database nodes and Dgraph Zero management nodes in your
+deployment.
+
+#### `dgraph alpha`
+
+This command is used to configure and run the Dgraph Alpha database nodes in
+your deployment. The following replicates the help listing for
+`dgraph alpha --help`:
+
+```shell
+A Dgraph Alpha instance stores the data. Each Dgraph Alpha is responsible for
+storing and serving one data group. If multiple Alphas serve the same group,
+they form a Raft group and provide synchronous replication.
+
+Usage:
+ dgraph alpha [flags]
+
+Flags:
+ --acl string [Enterprise Feature] ACL options
+ access-ttl=6h; The TTL for the access JWT.
+ refresh-ttl=30d; The TTL for the refresh JWT.
+ secret-file=; The file that stores the HMAC secret, which is used for signing the JWT and should have at least 32 ASCII characters. Required to enable ACLs.
+ (default "access-ttl=6h; refresh-ttl=30d; secret-file=;")
+ --audit string Audit options
+ compress=false; Enables the compression of old audit logs.
+ days=10; The number of days audit logs will be preserved.
+ encrypt-file=; The path to the key file to be used for audit log encryption.
+ output=; [stdout, /path/to/dir] This specifies where audit logs should be output to.
+ "stdout" is for standard output. You can also specify the directory where audit logs
+ will be saved. When stdout is specified as output other fields will be ignored.
+ size=100; The audit log max size in MB after which it will be rolled over.
+ (default "compress=false; days=10; size=100; dir=; output=; encrypt-file=;")
+ --badger string Badger options
+ compression=snappy; [none, zstd:level, snappy] Specifies the compression algorithm and
+ compression level (if applicable) for the postings directory."none" would disable
+ compression, while "zstd:1" would set zstd compression at level 1.
+ numgoroutines=8; The number of goroutines to use in badger.Stream.
+ max-retries=-1; Commits to disk will give up after these number of retries to prevent locking the worker in a failed state. Use -1 to retry infinitely.
+ (default "compression=snappy; numgoroutines=8; max-retries=-1;")
+ --cache string Cache options
+ percentage=0,65,35; Cache percentages summing up to 100 for various caches (FORMAT: PostingListCache,PstoreBlockCache,PstoreIndexCache)
+ size-mb=1024; Total size of cache (in MB) to be used in Dgraph.
+ (default "size-mb=1024; percentage=0,65,35;")
+ --cdc string Change Data Capture options
+ ca-cert=; The path to CA cert file for TLS encryption.
+ client-cert=; The path to client cert file for TLS encryption.
+ client-key=; The path to client key file for TLS encryption.
+ file=; The path where audit logs will be stored.
+ kafka=; A comma separated list of Kafka hosts.
+ sasl-password=; The SASL password for Kafka.
+ sasl-user=; The SASL username for Kafka.
+ (default "file=; kafka=; sasl_user=; sasl_password=; ca_cert=; client_cert=; client_key=;")
+ --custom_tokenizers string Comma separated list of tokenizer plugins for custom indices.
+ --encryption string [Enterprise Feature] Encryption At Rest options
+ key-file=; The file that stores the symmetric key of length 16, 24, or 32 bytes. The key size determines the chosen AES cipher (AES-128, AES-192, and AES-256 respectively).
+ (default "key-file=;")
+ --export string Folder in which to store exports. (default "export")
+ --graphql string GraphQL options
+ debug=false; Enables debug mode in GraphQL. This returns auth errors to clients, and we do not recommend turning it on for production.
+ extensions=true; Enables extensions in GraphQL response body.
+ introspection=true; Enables GraphQL schema introspection.
+ lambda-url=; The URL of a lambda server that implements custom GraphQL Javascript resolvers.
+ poll-interval=1s; The polling interval for GraphQL subscription.
+ (default "introspection=true; debug=false; extensions=true; poll-interval=1s; lambda-url=;")
+ -h, --help help for alpha
+ --limit string Limit options
+ disallow-drop=false; Set disallow-drop to true to block drop-all and drop-data operation. It still allows dropping attributes and types.
+ mutations-nquad=1000000; The maximum number of nquads that can be inserted in a mutation request.
+ mutations=allow; [allow, disallow, strict] The mutations mode to use.
+ normalize-node=10000; The maximum number of nodes that can be returned in a query that uses the normalize directive.
+ query-edge=1000000; The maximum number of edges that can be returned in a query. This applies to shortest path and recursive queries.
+ query-timeout=0ms; Maximum time after which a query execution will fail. If set to 0, the timeout is infinite.
+ txn-abort-after=5m; Abort any pending transactions older than this duration. The liveness of a transaction is determined by its last mutation.
+ max-pending-queries=10000; Number of maximum pending queries before we reject them as too many requests.
+ (default "mutations=allow; query-edge=1000000; normalize-node=10000; mutations-nquad=1000000; disallow-drop=false; query-timeout=0ms; txn-abort-after=5m; max-pending-queries=10000")
+ --my string addr:port of this server, so other Dgraph servers can talk to this.
+ -o, --port_offset int Value added to all listening port numbers. [Internal=7080, HTTP=8080, Grpc=9080]
+ -p, --postings string Directory to store posting lists. (default "p")
+ --raft string Raft options
+ group=; Provides an optional Raft Group ID that this Alpha would indicate to Zero to join.
+ idx=; Provides an optional Raft ID that this Alpha would use to join Raft groups.
+ learner=false; Make this Alpha a "learner" node. In learner mode, this Alpha will not participate in Raft elections. This can be used to achieve a read-only replica.
+ pending-proposals=256; Number of pending mutation proposals. Useful for rate limiting.
+ snapshot-after-duration=30m; Frequency at which we should create a new raft snapshots. Set to 0 to disable duration based snapshot.
+ snapshot-after-entries=10000; Create a new Raft snapshot after N number of Raft entries. The lower this number, the more frequent snapshot creation will be. Snapshots are created only if both snapshot-after-duration and snapshot-after-entries threshold are crossed.
+ (default "learner=false; snapshot-after-entries=10000; snapshot-after-duration=30m; pending-proposals=256; idx=; group=;")
+ --security string Security options
+ token=; If set, all Admin requests to Dgraph will need to have this token. The token can be passed as follows: for HTTP requests, in the X-Dgraph-AuthToken header. For Grpc, in auth-token key in the context.
+ whitelist=; A comma separated list of IP addresses, IP ranges, CIDR blocks, or hostnames you wish to whitelist for performing admin actions (i.e., --security "whitelist=144.142.126.254,127.0.0.1:127.0.0.3,192.168.0.0/16,host.docker.internal").
+ (default "token=; whitelist=;")
+ --survive string Choose between "process" or "filesystem".
+ If set to "process", there would be no data loss in case of process crash, but the behavior would be nondeterministic in case of filesystem crash.
+ If set to "filesystem", blocking sync would be called after every write, hence guaranteeing no data loss in case of hard reboot.
+ Most users should be OK with choosing "process". (default "process")
+ --telemetry string Telemetry (diagnostic) options
+ reports=true; Send anonymous telemetry data to Dgraph devs.
+ sentry=true; Send crash events to Sentry.
+ (default "reports=true; sentry=true;")
+ --tls string TLS Server options
+ ca-cert=; The CA cert file used to verify server certificates. Required for enabling TLS.
+ client-auth-type=VERIFYIFGIVEN; The TLS client authentication method.
+ client-cert=; (Optional) The client Cert file which is needed to connect as a client with the other nodes in the cluster.
+ client-key=; (Optional) The private client Key file which is needed to connect as a client with the other nodes in the cluster.
+ internal-port=false; (Optional) Enable inter-node TLS encryption between cluster nodes.
+ server-cert=; The server Cert file which is needed to initiate the server in the cluster.
+ server-key=; The server Key file which is needed to initiate the server in the cluster.
+ use-system-ca=true; Includes System CA into CA Certs.
+ (default "use-system-ca=true; client-auth-type=VERIFYIFGIVEN; internal-port=false;")
+ --tmp string Directory to store temporary buffers. (default "t")
+ --trace string Trace options
+ datadog=; URL of Datadog to send OpenCensus traces. As of now, the trace exporter does not support annotation logs and discards them.
+ jaeger=; URL of Jaeger to send OpenCensus traces.
+ ratio=0.01; The ratio of queries to trace.
+ (default "ratio=0.01; jaeger=; datadog=;")
+ --vault string Vault options
+ acl-field=; Vault field containing ACL key.
+ acl-format=base64; ACL key format, can be 'raw' or 'base64'.
+ addr=http://localhost:8200; Vault server address (format: http://ip:port).
+ enc-field=; Vault field containing encryption key.
+ enc-format=base64; Encryption key format, can be 'raw' or 'base64'.
+ path=secret/data/dgraph; Vault KV store path (e.g. 'secret/data/dgraph' for KV V2, 'kv/dgraph' for KV V1).
+ role-id-file=; Vault RoleID file, used for AppRole authentication.
+ secret-id-file=; Vault SecretID file, used for AppRole authentication.
+ (default "addr=http://localhost:8200; role-id-file=; secret-id-file=; path=secret/data/dgraph; acl-field=; acl-format=base64; enc-field=; enc-format=base64")
+ -w, --wal string Directory to store raft write-ahead logs. (default "w")
+ -z, --zero string Comma separated list of Dgraph Zero addresses of the form IP_ADDRESS:PORT. (default "localhost:5080")
+
+Use "dgraph alpha [command] --help" for more information about a command.
+```
+
+#### `dgraph zero`
+
+This command is used to configure and run the Dgraph Zero management nodes in
+your deployment. The following replicates the help listing shown when you run
+`dgraph zero --help`:
+
+```shell
+A Dgraph Zero instance manages the Dgraph cluster. Typically, a single Zero
+instance is sufficient for the cluster; however, one can run multiple Zero
+instances to achieve high-availability.
+
+Usage:
+ dgraph zero [flags]
+
+Flags:
+ --audit string Audit options
+ compress=false; Enables the compression of old audit logs.
+ days=10; The number of days audit logs will be preserved.
+ encrypt-file=; The path to the key file to be used for audit log encryption.
+ output=; [stdout, /path/to/dir] This specifies where audit logs should be output to.
+ "stdout" is for standard output. You can also specify the directory where audit logs
+ will be saved. When stdout is specified as output other fields will be ignored.
+ size=100; The audit log max size in MB after which it will be rolled over.
+ (default "compress=false; days=10; size=100; dir=; output=; encrypt-file=;")
+ --enterprise_license string Path to the enterprise license file.
+ -h, --help help for zero
+ --limit string Limit options
+ disable-admin-http=false; Turn on/off the administrative endpoints exposed over Zero's HTTP port.
+ refill-interval=30s; The interval after which the tokens for UID lease are replenished.
+ uid-lease=0; The maximum number of UIDs that can be leased by namespace (except default namespace)
+ in an interval specified by refill-interval. Set it to 0 to remove limiting.
+ (default "uid-lease=0; refill-interval=30s; disable-admin-http=false;")
+ --my string addr:port of this server, so other Dgraph servers can talk to this.
+ --peer string Address of another dgraphzero server.
+ -o, --port_offset int Value added to all listening port numbers. [Grpc=5080, HTTP=6080]
+ --raft string Raft options
+ idx=1; Provides an optional Raft ID that this Alpha would use to join Raft groups.
+ learner=false; Make this Zero a "learner" node. In learner mode, this Zero will not participate in Raft elections. This can be used to achieve a read-only replica.
+ (default "idx=1; learner=false;")
+ --rebalance_interval duration Interval for trying a predicate move. (default 8m0s)
+ --replicas int How many Dgraph Alpha replicas to run per data shard group. The count includes the original shard. (default 1)
+ --survive string Choose between "process" or "filesystem".
+ If set to "process", there would be no data loss in case of process crash, but the behavior would be nondeterministic in case of filesystem crash.
+ If set to "filesystem", blocking sync would be called after every write, hence guaranteeing no data loss in case of hard reboot.
+ Most users should be OK with choosing "process". (default "process")
+ --telemetry string Telemetry (diagnostic) options
+ reports=true; Send anonymous telemetry data to Dgraph devs.
+ sentry=true; Send crash events to Sentry.
+ (default "reports=true; sentry=true;")
+ --tls string TLS Server options
+ ca-cert=; The CA cert file used to verify server certificates. Required for enabling TLS.
+ client-auth-type=VERIFYIFGIVEN; The TLS client authentication method.
+ client-cert=; (Optional) The client Cert file which is needed to connect as a client with the other nodes in the cluster.
+ client-key=; (Optional) The private client Key file which is needed to connect as a client with the other nodes in the cluster.
+ internal-port=false; (Optional) Enable inter-node TLS encryption between cluster nodes.
+ server-cert=; The server Cert file which is needed to initiate the server in the cluster.
+ server-key=; The server Key file which is needed to initiate the server in the cluster.
+ use-system-ca=true; Includes System CA into CA Certs.
+ (default "use-system-ca=true; client-auth-type=VERIFYIFGIVEN; internal-port=false;")
+ --trace string Trace options
+ datadog=; URL of Datadog to send OpenCensus traces. As of now, the trace exporter does not support annotation logs and discards them.
+ jaeger=; URL of Jaeger to send OpenCensus traces.
+ ratio=0.01; The ratio of queries to trace.
+ (default "ratio=0.01; jaeger=; datadog=;")
+ -w, --wal string Directory storing WAL. (default "zw")
+
+Use "dgraph zero [command] --help" for more information about a command.
+```
+
+### Data loading commands
+
+#### `dgraph bulk`
+
+This command is used to bulk load data with the Dgraph
+[Bulk Loader](./bulk-loader) tool. The following replicates the help listing
+shown when you run `dgraph bulk --help`:
+
+```shell
+ Run Dgraph Bulk Loader
+Usage:
+ dgraph bulk [flags]
+
+Flags:
+ --badger string Badger options (Refer to badger documentation for all possible options)
+ compression=snappy; Specifies the compression algorithm and compression level (if applicable) for the postings directory. "none" would disable compression, while "zstd:1" would set zstd compression at level 1.
+ numgoroutines=8; The number of goroutines to use in badger.Stream.
+ (default "compression=snappy; numgoroutines=8;")
+ --cleanup_tmp Clean up the tmp directory after the loader finishes. Setting this to false allows the bulk loader can be re-run while skipping the map phase. (default true)
+ --custom_tokenizers string Comma separated list of tokenizer plugins
+ --encrypted Flag to indicate whether schema and data files are encrypted. Must be specified with --encryption or vault option(s).
+ --encrypted_out Flag to indicate whether to encrypt the output. Must be specified with --encryption or vault option(s).
+ --encryption string [Enterprise Feature] Encryption At Rest options
+ key-file=; The file that stores the symmetric key of length 16, 24, or 32 bytes. The key size determines the chosen AES cipher (AES-128, AES-192, and AES-256 respectively).
+ (default "key-file=;")
+ -f, --files string Location of *.rdf(.gz) or *.json(.gz) file(s) to load.
+ --force-namespace uint Namespace onto which to load the data. If not set, will preserve the namespace. (default 18446744073709551615)
+ --format string Specify file format (rdf or json) instead of getting it from filename.
+ -g, --graphql_schema string Location of the GraphQL schema file.
+ -h, --help help for bulk
+ --http string Address to serve http (pprof). (default "localhost:8080")
+ --ignore_errors ignore line parsing errors in rdf files
+ --map_shards int Number of map output shards. Must be greater than or equal to the number of reduce shards. Increasing allows more evenly sized reduce shards, at the expense of increased memory usage. (default 1)
+ --mapoutput_mb int The estimated size of each map file output. Increasing this increases memory usage. (default 2048)
+ --new_uids Ignore UIDs in load files and assign new ones.
+ -j, --num_go_routines int Number of worker threads to use. MORE THREADS LEAD TO HIGHER RAM USAGE. (default 1)
+ --out string Location to write the final dgraph data directories. (default "./out")
+ --partition_mb int Pick a partition key every N megabytes of data. (default 4)
+ --reduce_shards int Number of reduce shards. This determines the number of dgraph instances in the final cluster. Increasing this potentially decreases the reduce stage runtime by using more parallelism, but increases memory usage. (default 1)
+ --reducers int Number of reducers to run concurrently. Increasing this can improve performance, and must be less than or equal to the number of reduce shards. (default 1)
+ --replace_out Replace out directory and its contents if it exists.
+ -s, --schema string Location of schema file.
+ --skip_map_phase Skip the map phase (assumes that map output files already exist).
+ --store_xids Generate an xid edge for each node.
+ --tls string TLS Client options
+ ca-cert=; The CA cert file used to verify server certificates. Required for enabling TLS.
+ client-cert=; (Optional) The Cert file provided by the client to the server.
+ client-key=; (Optional) The private Key file provided by the clients to the server.
+ internal-port=false; (Optional) Enable inter-node TLS encryption between cluster nodes.
+ server-name=; Used to verify the server hostname.
+ use-system-ca=true; Includes System CA into CA Certs.
+ (default "use-system-ca=true; internal-port=false;")
+ --tmp string Temp directory used to use for on-disk scratch space. Requires free space proportional to the size of the RDF file and the amount of indexing used. (default "tmp")
+ --vault string Vault options
+ acl-field=; Vault field containing ACL key.
+ acl-format=base64; ACL key format, can be 'raw' or 'base64'.
+ addr=http://localhost:8200; Vault server address (format: http://ip:port).
+ enc-field=; Vault field containing encryption key.
+ enc-format=base64; Encryption key format, can be 'raw' or 'base64'.
+ path=secret/data/dgraph; Vault KV store path (e.g. 'secret/data/dgraph' for KV V2, 'kv/dgraph' for KV V1).
+ role-id-file=; Vault RoleID file, used for AppRole authentication.
+ secret-id-file=; Vault SecretID file, used for AppRole authentication.
+ (default "addr=http://localhost:8200; role-id-file=; secret-id-file=; path=secret/data/dgraph; acl-field=; acl-format=base64; enc-field=; enc-format=base64")
+ --version Prints the version of Dgraph Bulk Loader.
+ --xidmap string Directory to store xid to uid mapping
+ -z, --zero string gRPC address for Dgraph zero (default "localhost:5080")
+
+Use "dgraph bulk [command] --help" for more information about a command.
+```
+
+#### `dgraph live`
+
+This command is used to load live data with the Dgraph
+[Live Loader](./live-loader) tool. The following replicates the help listing
+shown when you run `dgraph live --help`:
+
+```shell
+ Run Dgraph Live Loader
+Usage:
+ dgraph live [flags]
+
+Flags:
+ -a, --alpha string Comma-separated list of Dgraph alpha gRPC server addresses (default "127.0.0.1:9080")
+ -t, --auth_token string The auth token passed to the server for Alter operation of the schema file. If used with --slash_grpc_endpoint, then this should be set to the API token issuedby Slash GraphQL
+ -b, --batch int Number of N-Quads to send as part of a mutation. (default 1000)
+ -m, --bufferSize string Buffer for each thread (default "100")
+ -c, --conc int Number of concurrent requests to make to Dgraph (default 10)
+ --creds string Various login credentials if login is required.
+ user defines the username to login.
+ password defines the password of the user.
+ namespace defines the namespace to log into.
+ Sample flag could look like --creds user=username;password=mypass;namespace=2
+ --encryption string [Enterprise Feature] Encryption At Rest options
+ key-file=; The file that stores the symmetric key of length 16, 24, or 32 bytes. The key size determines the chosen AES cipher (AES-128, AES-192, and AES-256 respectively).
+ (default "key-file=;")
+ -f, --files string Location of *.rdf(.gz) or *.json(.gz) file(s) to load
+ --force-namespace int Namespace onto which to load the data.Only guardian of galaxy should use this for loading data into multiple namespaces or somespecific namespace. Setting it to negative value will preserve the namespace.
+ --format string Specify file format (rdf or json) instead of getting it from filename
+ -h, --help help for live
+ --http string Address to serve http (pprof). (default "localhost:6060")
+ --new_uids Ignore UIDs in load files and assign new ones.
+ -s, --schema string Location of schema file
+ --slash_grpc_endpoint string Path to Slash GraphQL GRPC endpoint. If --slash_grpc_endpoint is set, all other TLS options and connection options will beignored
+ --tls string TLS Client options
+ ca-cert=; The CA cert file used to verify server certificates. Required for enabling TLS.
+ client-cert=; (Optional) The Cert file provided by the client to the server.
+ client-key=; (Optional) The private Key file provided by the clients to the server.
+ internal-port=false; (Optional) Enable inter-node TLS encryption between cluster nodes.
+ server-name=; Used to verify the server hostname.
+ use-system-ca=true; Includes System CA into CA Certs.
+ (default "use-system-ca=true; internal-port=false;")
+ --tmp string Directory to store temporary buffers. (default "t")
+ -U, --upsertPredicate string run in upsertPredicate mode. the value would be used to store blank nodes as an xid
+ -C, --use_compression Enable compression on connection to alpha server
+ --vault string Vault options
+ acl-field=; Vault field containing ACL key.
+ acl-format=base64; ACL key format, can be 'raw' or 'base64'.
+ addr=http://localhost:8200; Vault server address (format: http://ip:port).
+ enc-field=; Vault field containing encryption key.
+ enc-format=base64; Encryption key format, can be 'raw' or 'base64'.
+ path=secret/data/dgraph; Vault KV store path (e.g. 'secret/data/dgraph' for KV V2, 'kv/dgraph' for KV V1).
+ role-id-file=; Vault RoleID file, used for AppRole authentication.
+ secret-id-file=; Vault SecretID file, used for AppRole authentication.
+ (default "addr=http://localhost:8200; role-id-file=; secret-id-file=; path=secret/data/dgraph; acl-field=; acl-format=base64; enc-field=; enc-format=base64")
+ --verbose Run the live loader in verbose mode
+ -x, --xidmap string Directory to store xid to uid mapping
+ -z, --zero string Dgraph zero gRPC server address (default "127.0.0.1:5080")
+
+Use "dgraph live [command] --help" for more information about a command.
+```
+
+#### `dgraph restore`
+
+This command loads objects from available backups. The following replicates the
+help listing shown when you run `dgraph restore --help`:
+
+```shell
+Restore loads objects created with the backup feature in Dgraph Enterprise Edition (EE).
+
+Backups taken using the GraphQL API can be restored using CLI restore
+command. Restore is intended to be used with new Dgraph clusters in offline state.
+
+The --location flag indicates a source URI with Dgraph backup objects. This URI supports all
+the schemes used for backup.
+
+Source URI formats:
+ [scheme]://[host]/[path]?[args]
+ [scheme]:///[path]?[args]
+ /[path]?[args] (only for local or NFS)
+
+Source URI parts:
+ scheme - service handler, one of: "s3", "minio", "file"
+ host - remote address. ex: "dgraph.s3.amazonaws.com"
+ path - directory, bucket or container at target. ex: "/dgraph/backups/"
+ args - specific arguments that are ok to appear in logs.
+
+The --posting flag sets the posting list parent dir to store the loaded backup files.
+
+Using the --zero flag will use a Dgraph Zero address to update the start timestamp using
+the restored version. Otherwise, the timestamp must be manually updated through Zero's HTTP
+'assign' command.
+
+Dgraph backup creates a unique backup object for each node group, and restore will create
+a posting directory 'p' matching the backup group ID. Such that a backup file
+named '.../r32-g2.backup' will be loaded to posting dir 'p2'.
+
+Usage examples:
+
+# Restore from local dir or NFS mount:
+$ dgraph restore -p . -l /var/backups/dgraph
+
+# Restore from S3:
+$ dgraph restore -p /var/db/dgraph -l s3://s3.us-west-2.amazonaws.com/srfrog/dgraph
+
+# Restore from dir and update Ts:
+$ dgraph restore -p . -l /var/backups/dgraph -z localhost:5080
+
+
+Usage:
+ dgraph restore [flags]
+
+Flags:
+ --backup_id string The ID of the backup series to restore. If empty, it will restore the latest series.
+ -b, --badger string Badger options
+ compression=snappy; Specifies the compression algorithm and compression level (if applicable) for the postings directory. "none" would disable compression, while "zstd:1" would set zstd compression at level 1.
+ goroutines=; The number of goroutines to use in badger.Stream.
+ (default "compression=snappy; numgoroutines=8;")
+ --encryption string [Enterprise Feature] Encryption At Rest options
+ key-file=; The file that stores the symmetric key of length 16, 24, or 32 bytes. The key size determines the chosen AES cipher (AES-128, AES-192, and AES-256 respectively).
+ (default "key-file=;")
+ --force_zero If false, no connection to a zero in the cluster will be required. Keep in mind this requires you to manually update the timestamp and max uid when you start the cluster. The correct values are printed near the end of this command's output. (default true)
+ -h, --help help for restore
+ -l, --location string Sets the source location URI (required).
+ -p, --postings string Directory where posting lists are stored (required).
+ --tls string TLS Client options
+ ca-cert=; The CA cert file used to verify server certificates. Required for enabling TLS.
+ client-cert=; (Optional) The Cert file provided by the client to the server.
+ client-key=; (Optional) The private Key file provided by the clients to the server.
+ internal-port=false; (Optional) Enable inter-node TLS encryption between cluster nodes.
+ server-name=; Used to verify the server hostname.
+ use-system-ca=true; Includes System CA into CA Certs.
+ (default "use-system-ca=true; internal-port=false;")
+ --vault string Vault options
+ acl-field=; Vault field containing ACL key.
+ acl-format=base64; ACL key format, can be 'raw' or 'base64'.
+ addr=http://localhost:8200; Vault server address (format: http://ip:port).
+ enc-field=; Vault field containing encryption key.
+ enc-format=base64; Encryption key format, can be 'raw' or 'base64'.
+ path=secret/data/dgraph; Vault KV store path (e.g. 'secret/data/dgraph' for KV V2, 'kv/dgraph' for KV V1).
+ role-id-file=; Vault RoleID file, used for AppRole authentication.
+ secret-id-file=; Vault SecretID file, used for AppRole authentication.
+ (default "addr=http://localhost:8200; role-id-file=; secret-id-file=; path=secret/data/dgraph; acl-field=; acl-format=base64; enc-field=; enc-format=base64")
+ -z, --zero string gRPC address for Dgraph zero. ex: localhost:5080
+
+Use "dgraph restore [command] --help" for more information about a command.
+```
+
+### Dgraph security commands
+
+Dgraph security commands let you manage access control lists (ACLs), manage
+certificates, and audit database usage.
+
+#### `dgraph acl`
+
+This command runs the Dgraph Enterprise Edition ACL tool. The following
+replicates the help listing shown when you run `dgraph acl --help`:
+
+```shell
+Run the Dgraph Enterprise Edition ACL tool
+Usage:
+ dgraph acl [command]
+
+Available Commands:
+ add Run Dgraph acl tool to add a user or group
+ del Run Dgraph acl tool to delete a user or group
+ info Show info about a user or group
+ mod Run Dgraph acl tool to modify a user's password, a user's group list, or agroup's predicate permissions
+
+Flags:
+ -a, --alpha string Dgraph Alpha gRPC server address (default "127.0.0.1:9080")
+ --guardian-creds string Login credentials for the guardian
+ user defines the username to login.
+ password defines the password of the user.
+ namespace defines the namespace to log into.
+ Sample flag could look like --guardian-creds user=username;password=mypass;namespace=2
+ -h, --help help for acl
+ --tls string TLS Client options
+ ca-cert=; The CA cert file used to verify server certificates. Required for enabling TLS.
+ client-cert=; (Optional) The Cert file provided by the client to the server.
+ client-key=; (Optional) The private Key file provided by the clients to the server.
+ internal-port=false; (Optional) Enable inter-node TLS encryption between cluster nodes.
+ server-name=; Used to verify the server hostname.
+ use-system-ca=true; Includes System CA into CA Certs.
+ (default "use-system-ca=true; internal-port=false;")
+
+Use "dgraph acl [command] --help" for more information about a command.
+```
+
+#### `dgraph audit`
+
+This command decrypts audit files. These files are created using the `--audit`
+flag when you run the `dgraph alpha` command. The following replicates the help
+listing shown when you run `dgraph audit --help`:
+
+```shell
+Dgraph audit tool
+Usage:
+ dgraph audit [command]
+
+Available Commands:
+ decrypt Run Dgraph Audit tool to decrypt audit files
+
+Flags:
+ -h, --help help for audit
+
+Use "dgraph audit [command] --help" for more information about a command.
+```
+
+#### `dgraph cert`
+
+This command lets you manage [TLS certificates](./tls-configuration). The
+following replicates the help listing shown when you run `dgraph cert --help`:
+
+```shell
+Dgraph TLS certificate management
+Usage:
+ dgraph cert [flags]
+ dgraph cert [command]
+
+Available Commands:
+ ls lists certificates and keys
+
+Flags:
+ -k, --ca-key string path to the CA private key (default "ca.key")
+ -c, --client string create cert/key pair for a client name
+ -d, --dir string directory containing TLS certs and keys (default "tls")
+ --duration int duration of cert validity in days (default 365)
+ -e, --elliptic-curve string ECDSA curve for private key. Values are: "P224", "P256", "P384", "P521".
+ --force overwrite any existing key and cert
+ -h, --help help for cert
+ -r, --keysize int RSA key bit size for creating new keys (default 2048)
+ -n, --nodes strings creates cert/key pair for nodes
+ --verify verify certs against root CA when creating (default true)
+
+Use "dgraph cert [command] --help" for more information about a command.
+```
+
+### Dgraph debug commands
+
+Dgraph debug commands provide support for debugging issues with Dgraph
+deployments. To learn more, see
+[Using the Debug Tool](./howto/using-debug-tool).
+
+#### `dgraph debug`
+
+This command is used to debug issues with a Dgraph database instance. The
+following replicates the help listing shown when you run `dgraph debug --help`:
+
+```shell
+ Debug Dgraph instance
+Usage:
+ dgraph debug [flags]
+
+Flags:
+ --at uint Set read timestamp for all txns. (default 18446744073709551615)
+ --encryption string [Enterprise Feature] Encryption At Rest options
+ key-file=; The file that stores the symmetric key of length 16, 24, or 32 bytes. The key size determines the chosen AES cipher (AES-128, AES-192, and AES-256 respectively).
+ (default "key-file=;")
+ -h, --help help for debug
+ --histogram Show a histogram of the key and value sizes.
+ -y, --history Show all versions of a key.
+ --item Output item meta as well. Set to false for diffs. (default true)
+ --jepsen string Disect Jepsen output. Can be linear/binary.
+ -l, --lookup string Hex of key to lookup.
+ --nokeys Ignore key_. Only consider amount when calculating total.
+ --only-summary If true, only show the summary of the p directory.
+ -p, --postings string Directory where posting lists are stored.
+ -r, --pred string Only output specified predicate.
+ --prefix string Uses a hex prefix.
+ -o, --readonly Open in read only mode. (default true)
+ --rollup string Hex of key to rollup.
+ -s, --snap string Set snapshot term,index,readts to this. Value must be comma-separated list containing the value for these vars in that order.
+ -t, --truncate uint Remove data from Raft entries until but not including this index.
+ --vals Output values along with keys.
+ --vault string Vault options
+ acl-field=; Vault field containing ACL key.
+ acl-format=base64; ACL key format, can be 'raw' or 'base64'.
+ addr=http://localhost:8200; Vault server address (format: http://ip:port).
+ enc-field=; Vault field containing encryption key.
+ enc-format=base64; Encryption key format, can be 'raw' or 'base64'.
+ path=secret/data/dgraph; Vault KV store path (e.g. 'secret/data/dgraph' for KV V2, 'kv/dgraph' for KV V1).
+ role-id-file=; Vault RoleID file, used for AppRole authentication.
+ secret-id-file=; Vault SecretID file, used for AppRole authentication.
+ (default "addr=http://localhost:8200; role-id-file=; secret-id-file=; path=secret/data/dgraph; acl-field=; acl-format=base64; enc-field=; enc-format=base64")
+ -w, --wal string Directory where Raft write-ahead logs are stored.
+
+Use "dgraph debug [command] --help" for more information about a command.
+```
+
+#### `dgraph debuginfo`
+
+This command generates information about the current node that is useful for
+debugging. The following replicates the help listing shown when you run
+`dgraph debuginfo --help`:
+
+```shell
+Generate debug information on the current node
+Usage:
+ dgraph debuginfo [flags]
+
+Flags:
+ -a, --alpha string Address of running dgraph alpha. (default "localhost:8080")
+ -x, --archive Whether to archive the generated report (default true)
+ -d, --directory string Directory to write the debug info into.
+ -h, --help help for debuginfo
+ -p, --profiles strings List of pprof profiles to dump in the report. (default [goroutine,heap,threadcreate,block,mutex,profile,trace])
+ -s, --seconds uint32 Duration for time-based profile collection. (default 15)
+ -z, --zero string Address of running dgraph zero.
+
+Use "dgraph debuginfo [command] --help" for more information about a command.
+```
+
+### Dgraph tools commands
+
+Dgraph tools provide a variety of tools to make it easier for you to deploy and
+manage Dgraph.
+
+#### `dgraph completion`
+
+This command generates shell completion scripts for `bash` and `zsh` CLIs. The
+following replicates the help listing shown when you run
+`dgraph completion --help`:
+
+```shell
+Generates shell completion scripts for bash or zsh
+Usage:
+ dgraph completion [command]
+
+Available Commands:
+ bash bash shell completion
+ zsh zsh shell completion
+
+Flags:
+ -h, --help help for completion
+
+Use "dgraph completion [command] --help" for more information about a command.
+```
+
+#### `dgraph conv`
+
+This command runs the Dgraph geographic file converter, which converts
+geographic files into RDF so that they can be consumed by Dgraph. The following
+replicates the help listing shown when you run `dgraph conv --help`:
+
+```shell
+Dgraph Geo file converter
+Usage:
+ dgraph conv [flags]
+
+Flags:
+ --geo string Location of geo file to convert
+ --geopred string Predicate to use to store geometries (default "loc")
+ -h, --help help for conv
+ --out string Location of output rdf.gz file (default "output.rdf.gz")
+
+Use "dgraph conv [command] --help" for more information about a command.
+```
+
+#### `dgraph decrypt`
+
+This command lets you decrypt an export file created by an encrypted Dgraph
+Cluster. The following replicates the help listing shown when you run
+`dgraph decrypt --help`:
+
+```shell
+ A tool to decrypt an export file created by an encrypted Dgraph cluster
+Usage:
+ dgraph decrypt [flags]
+
+Flags:
+ --encryption string [Enterprise Feature] Encryption At Rest options
+ key-file=; The file that stores the symmetric key of length 16, 24, or 32 bytes. The key size determines the chosen AES cipher (AES-128, AES-192, and AES-256 respectively).
+ (default "key-file=;")
+ -f, --file string Path to file to decrypt.
+ -h, --help help for decrypt
+ -o, --out string Path to the decrypted file.
+ --vault string Vault options
+ acl-field=; Vault field containing ACL key.
+ acl-format=base64; ACL key format, can be 'raw' or 'base64'.
+ addr=http://localhost:8200; Vault server address (format: http://ip:port).
+ enc-field=; Vault field containing encryption key.
+ enc-format=base64; Encryption key format, can be 'raw' or 'base64'.
+ path=secret/data/dgraph; Vault KV store path (e.g. 'secret/data/dgraph' for KV V2, 'kv/dgraph' for KV V1).
+ role-id-file=; Vault RoleID file, used for AppRole authentication.
+ secret-id-file=; Vault SecretID file, used for AppRole authentication.
+ (default "addr=http://localhost:8200; role-id-file=; secret-id-file=; path=secret/data/dgraph; acl-field=; acl-format=base64; enc-field=; enc-format=base64")
+
+Use "dgraph decrypt [command] --help" for more information about a command.
+```
+
+#### `dgraph export_backup`
+
+This command is used to convert a
+[binary backup](./enterprise-features/binary-backups) created using Dgraph
+Enterprise Edition into an exported folder. The following replicates key
+information from the help listing shown when you run
+`dgraph export_backup --help`:
+
+```shell
+Export data inside single full or incremental backup
+Usage:
+ dgraph export_backup [flags]
+
+Flags:
+ -d, --destination string The folder to which export the backups.
+ --encryption string [Enterprise Feature] Encryption At Rest options
+ key-file=; The file that stores the symmetric key of length 16, 24, or 32 bytes. The key size determines the chosen AES cipher (AES-128, AES-192, and AES-256 respectively).
+ (default "key-file=;")
+ -f, --format string The format of the export output. Accepts a value of either rdf or json (default "rdf")
+ -h, --help help for export_backup
+ -l, --location string Sets the location of the backup. Both file URIs and s3 are supported.
+ This command will take care of all the full + incremental backups present in the location.
+ --upgrade If true, retrieve the CORS from DB and append at the end of GraphQL schema.
+ It also deletes the deprecated types and predicates.
+ Use this option when exporting a backup of 20.11 for loading onto 21.03.
+ --vault string Vault options
+ acl-field=; Vault field containing ACL key.
+ acl-format=base64; ACL key format, can be 'raw' or 'base64'.
+ addr=http://localhost:8200; Vault server address (format: http://ip:port).
+ enc-field=; Vault field containing encryption key.
+ enc-format=base64; Encryption key format, can be 'raw' or 'base64'.
+ path=secret/data/dgraph; Vault KV store path (e.g. 'secret/data/dgraph' for KV V2, 'kv/dgraph' for KV V1).
+ role-id-file=; Vault RoleID file, used for AppRole authentication.
+ secret-id-file=; Vault SecretID file, used for AppRole authentication.
+ (default "addr=http://localhost:8200; role-id-file=; secret-id-file=; path=secret/data/dgraph; acl-field=; acl-format=base64; enc-field=; enc-format=base64")
+
+Use "dgraph export_backup [command] --help" for more information about a command.
+```
+
+#### `dgraph increment`
+
+This command increments a counter transactionally, so that you can confirm that
+an Alpha node is able to handle both query and mutation requests. To learn more,
+see [Using the Increment Tool](./howto/using-increment-tool). The following
+replicates the help listing shown when you run `dgraph increment --help`:
+
+```shell
+Increment a counter transactionally
+Usage:
+ dgraph increment [flags]
+
+Flags:
+ --alpha string Address of Dgraph Alpha. (default "localhost:9080")
+ --be Best-effort. Read counter value without retrieving timestamp from Zero.
+ --creds string Various login credentials if login is required.
+ user defines the username to login.
+ password defines the password of the user.
+ namespace defines the namespace to log into.
+ Sample flag could look like --creds user=username;password=mypass;namespace=2
+ -h, --help help for increment
+ --jaeger string Send opencensus traces to Jaeger.
+ --num int How many times to run. (default 1)
+ --pred string Predicate to use for storing the counter. (default "counter.val")
+ --retries int How many times to retry setting up the connection. (default 10)
+ --ro Read-only. Read the counter value without updating it.
+ --tls string TLS Client options
+ ca-cert=; The CA cert file used to verify server certificates. Required for enabling TLS.
+ client-cert=; (Optional) The Cert file provided by the client to the server.
+ client-key=; (Optional) The private Key file provided by the clients to the server.
+ internal-port=false; (Optional) Enable inter-node TLS encryption between cluster nodes.
+ server-name=; Used to verify the server hostname.
+ use-system-ca=true; Includes System CA into CA Certs.
+ (default "use-system-ca=true; internal-port=false;")
+ --wait duration How long to wait.
+
+Use "dgraph increment [command] --help" for more information about a command.
+```
+
+#### `dgraph lsbackup`
+
+This command lists information on backups in a given location for Dgraph
+Enterprise Edition. To learn more, see
+[Backup List Tool](./enterprise-features/lsbackup). The following replicates the
+help listing shown when you run `dgraph lsbackup --help`:
+
+```shell
+List info on backups in a given location
+Usage:
+ dgraph lsbackup [flags]
+
+Flags:
+ -h, --help help for lsbackup
+ -l, --location string Sets the source location URI (required).
+ --verbose Outputs additional info in backup list.
+
+Use "dgraph lsbackup [command] --help" for more information about a command.
+```
+
+#### `dgraph migrate`
+
+This command runs the Dgraph [migration tool](./migration/migrate-tool) to move
+data from a MySQL database to Dgraph. The following replicates the help listing
+shown when you run `dgraph migrate --help`:
+
+```shell
+Run the Dgraph migration tool from a MySQL database to Dgraph
+Usage:
+ dgraph migrate [flags]
+
+Flags:
+ --db string The database to import
+ -h, --help help for migrate
+ --host string The hostname or IP address of the database server. (default "localhost")
+ -o, --output_data string The data output file (default "sql.rdf")
+ -s, --output_schema string The schema output file (default "schema.txt")
+ --password string The password used for logging in
+ --port string The port of the database server. (default "3306")
+ -q, --quiet Enable quiet mode to suppress the warning logs
+ -p, --separator string The separator for constructing predicate names (default ".")
+ --tables string The comma separated list of tables to import, an empty string means importing all tables in the database
+ --user string The user for logging in
+
+Use "dgraph migrate [command] --help" for more information about a command.
+```
+
+#### `dgraph upgrade`
+
+This command helps you to upgrade from an earlier Dgraph release to a newer
+release. The following replicates the help listing shown when you run
+`dgraph upgrade --help`:
+
+```shell
+This tool is supported only for the mainstream release versions of Dgraph, not for the beta releases.
+Usage:
+ dgraph upgrade [flags]
+
+Flags:
+ --acl upgrade ACL from v1.2.2 to >=v20.03.0
+ -a, --alpha string Dgraph Alpha gRPC server address (default "127.0.0.1:9080")
+ -d, --deleteOld Delete the older ACL types/predicates (default true)
+ --dry-run dry-run the upgrade
+ -f, --from string The version string from which to upgrade, e.g.: v1.2.2
+ -h, --help help for upgrade
+ -p, --password string Password of ACL user
+ -t, --to string The version string till which to upgrade, e.g.: v20.03.0
+ -u, --user string Username of ACL user
+
+Use "dgraph upgrade [command] --help" for more information about a command.
+```
diff --git a/dgraph/reference/deploy/cluster-checklist.mdx b/dgraph/reference/deploy/cluster-checklist.mdx
new file mode 100644
index 00000000..b293d27a
--- /dev/null
+++ b/dgraph/reference/deploy/cluster-checklist.mdx
@@ -0,0 +1,15 @@
+---
+title: Cluster Checklist
+---
+
+When setting up a cluster, be sure to check the following.
+
+- Is at least one Dgraph Zero node running?
+- Is each Dgraph Alpha instance in the cluster set up correctly?
+- Will each Dgraph Alpha instance be accessible to all peers on 7080 (+ any port
+ offset)?
+- Does each instance have a unique ID on startup?
+- Has `--bindall=true` been set for networked communication?
+
+See the [Production Checklist](./installation/production-checklist) docs for
+more info.
diff --git a/dgraph/reference/deploy/cluster-setup.mdx b/dgraph/reference/deploy/cluster-setup.mdx
new file mode 100644
index 00000000..91c80d01
--- /dev/null
+++ b/dgraph/reference/deploy/cluster-setup.mdx
@@ -0,0 +1,48 @@
+---
+title: Cluster Setup
+---
+
+## Understanding Dgraph cluster
+
+Dgraph is a truly distributed graph database. It shards by predicate and
+replicates predicates across the cluster; queries can be run on any node and
+joins are handled over the distributed data. A query is resolved locally for
+predicates the node stores, and using distributed joins for predicates stored on
+other nodes.
+
+To effectively run a Dgraph cluster, it's important to understand how
+sharding, replication, and rebalancing work.
+
+### Sharding
+
+Dgraph colocates data per predicate (_P_, in RDF terminology), thus the
+smallest unit of data is one predicate. To shard the graph, one or many
+predicates are assigned to a group. Each Alpha node in the cluster serves a
+single group. Dgraph Zero assigns a group to each Alpha node.
+
+### Shard rebalancing
+
+Dgraph Zero tries to rebalance the cluster based on the disk usage in each
+group. If Zero detects an imbalance, it will try to move a predicate along with
+its indices to a group that has lower disk usage. This can make the predicate
+temporarily read-only. Queries for the predicate will still be serviced, but any
+mutations for the predicate will be rejected and should be retried after the
+move is finished.
+
+Zero would continuously try to keep the amount of data on each server even,
+typically running this check on a 10-min frequency. Thus, each additional Dgraph
+Alpha instance would allow Zero to further split the predicates from groups and
+move them to the new node.
+
+### Consistent Replication
+
+When starting Zero nodes, you can pass, to each one, the `--replicas` flag to
+assign the same group to multiple nodes. The number passed to the `--replicas`
+flag causes that Zero node to assign the same group to the specified number of
+nodes. These nodes will then form a Raft group (or quorum), and every write will
+be consistently replicated to the quorum.
+
+To achieve consensus, it's important that the size of the quorum be an odd
+number.
+Therefore, we recommend setting `--replicas` to 1, 3 or 5 (not 2 or 4). This
+allows 0, 1, or 2 nodes serving the same group to be down, respectively, without
+affecting the overall health of that group.
diff --git a/dgraph/reference/deploy/config.mdx b/dgraph/reference/deploy/config.mdx
new file mode 100644
index 00000000..cbf3b9a3
--- /dev/null
+++ b/dgraph/reference/deploy/config.mdx
@@ -0,0 +1,199 @@
+---
+title: Config
+---
+
+You can see the list of available subcommands with `dgraph --help`. You can view
+the full set of configuration options for a given subcommand with
+`dgraph [subcommand] --help` (for example, `dgraph zero --help`).
+
+You can configure options in multiple ways, which are listed below from highest
+precedence to lowest precedence:
+
+- Using command line flags (as described in the help output).
+- Using environment variables.
+- Using a configuration file.
+
+If no configuration for an option is used, then the default value as described
+in the `--help` output applies.
+
+You can use multiple configuration methods at the same time, so a core set of
+options could be set in a config file, and instance specific options could be
+set using environment vars or flags.
+
+## Command line flags
+
+Dgraph has _global flags_ that apply to all subcommands and flags specific to a
+subcommand.
+
+Some flags have been deprecated and replaced in release `v21.03`, and flags for
+several commands (`alpha`, `backup`, `bulk`,`debug`, `live`, and `zero`) now
+have superflags. Superflags are compound flags that contain one or more options
+that let you define multiple settings in a semicolon-delimited list. The general
+syntax for superflags is as follows:
+`--superflagName option-a=value-a;option-b=value-b`.
+
+The following example shows how to use superflags when running the
+`dgraph alpha` command.
+
+```bash
+dgraph alpha --my=alpha.example.com:7080 --zero=zero.example.com:5080 \
+ --badger "compression=zstd:1" \
+ --block_rate "10" \
+ --trace "jaeger=http://jaeger:14268" \
+ --tls "ca-cert=/dgraph/tls/ca.crt;client-auth-type=REQUIREANDVERIFY;server-cert=/dgraph/tls/node.crt;server-key=/dgraph/tls/node.key;use-system-ca=true;internal-port=true;client-cert=/dgraph/tls/client.dgraphuser.crt;client-key=/dgraph/tls/client.dgraphuser.key"
+ --security "whitelist=10.0.0.0/8,172.0.0.0/8,192.168.0.0/16"
+```
+
+## Environment variables
+
+The environment variable names for Dgraph mirror the flag names shown in the
+Dgraph CLI `--help` output. These environment variable names are formed by the
+concatenation of `DGRAPH`, the subcommand invoked (`ALPHA`, `ZERO`, `LIVE`, or
+`BULK`), and then the name of the flag (in uppercase). For example, instead of
+running a command like `dgraph alpha --block_rate 10`, you could set the
+following environment variable: `DGRAPH_ALPHA_BLOCK_RATE=10 dgraph alpha`.
+
+So, the environment variable syntax for a superflag
+(`--superflagName option-a=value;option-b=value`) is
+`DGRAPH_SUBCOMMAND_SUPERFLAGNAME="option-a=value;option-b=value"`.
+
+The following is an example of environment variables for `dgraph alpha`:
+
+```bash
+DGRAPH_ALPHA_BADGER="compression=zstd:1"
+DGRAPH_ALPHA_BLOCK_RATE="10"
+DGRAPH_ALPHA_TRACE="jaeger=http://jaeger:14268"
+DGRAPH_ALPHA_TLS="ca-cert=/dgraph/tls/ca.crt;client-auth-type=REQUIREANDVERIFY;server-cert=/dgraph/tls/node.crt;server-key=/dgraph/tls/node.key;use-system-ca=true;internal-port=true;client-cert=/dgraph/tls/client.dgraphuser.crt;client-key=/dgraph/tls/client.dgraphuser.key"
+DGRAPH_ALPHA_SECURITY="whitelist=10.0.0.0/8,172.0.0.0/8,192.168.0.0/16"
+```
+
+## Configuration file
+
+You can specify a configuration file using the Dgraph CLI with the `--config`
+flag (for example, `dgraph alpha --config my_config.json`), or using an
+environment variable, (for example,
+`DGRAPH_ALPHA_CONFIG=my_config.json dgraph alpha`).
+
+Dgraph supports configuration file formats that it detects based on file
+extensions ([`.json`](https://www.json.org/json-en.html),
+[`.yml`](https://yaml.org/) or [`.yaml`](https://yaml.org/)). In these files,
+the name of the superflag is used as a key that points to a hash. The hash
+consists of `key: value` pairs that correspond to the superflag's list of
+`option=value` pairs.
+
+
+ The formats [`.toml`](https://toml.io/en/),
+ [`.hcl`](https://github.com/hashicorp/hcl), and
+ [`.properties`](https://en.wikipedia.org/wiki/.properties) are not supported
+ in release `v21.03.0`.
+
+
+
+ When representing the superflag options in the hash, you can use either
+ _kebab-case_ or _snake_case_ for names of the keys.
+
+
+### JSON config file
+
+In JSON, you can represent a superflag and its options
+(`--superflagName option-a=value;option-b=value`) as follows:
+
+```json
+{
+ "": {
+ "option-a": "value",
+ "option-b": "value"
+ }
+}
+```
+
+The following example JSON config file (`config.json`) uses _kebab-case_:
+
+```json
+{
+ "badger": { "compression": "zstd:1" },
+ "trace": { "jaeger": "http://jaeger:14268" },
+ "security": { "whitelist": "10.0.0.0/8,172.0.0.0/8,192.168.0.0/16" },
+ "tls": {
+ "ca-cert": "/dgraph/tls/ca.crt",
+ "client-auth-type": "REQUIREANDVERIFY",
+ "server-cert": "/dgraph/tls/node.crt",
+ "server-key": "/dgraph/tls/node.key",
+ "use-system-ca": true,
+ "internal-port": true,
+ "client-cert": "/dgraph/tls/client.dgraphuser.crt",
+ "client-key": "/dgraph/tls/client.dgraphuser.key"
+ }
+}
+```
+
+The following example JSON config file (`config.json`) uses _snake_case_:
+
+```json
+{
+ "badger": { "compression": "zstd:1" },
+ "trace": { "jaeger": "http://jaeger:14268" },
+ "security": { "whitelist": "10.0.0.0/8,172.0.0.0/8,192.168.0.0/16" },
+ "tls": {
+ "ca_cert": "/dgraph/tls/ca.crt",
+ "client_auth_type": "REQUIREANDVERIFY",
+ "server_cert": "/dgraph/tls/node.crt",
+ "server_key": "/dgraph/tls/node.key",
+ "use_system_ca": true,
+ "internal_port": true,
+ "client_cert": "/dgraph/tls/client.dgraphuser.crt",
+ "client_key": "/dgraph/tls/client.dgraphuser.key"
+ }
+}
+```
+
+### YAML config file
+
+In YAML, you can represent a superflag and its options
+(`--superflagName option-a=value;option-b=value`) as follows:
+
+```yaml
+superflagName:
+ option-a: value
+ option-b: value
+```
+
+The following example YAML config file (`config.yml`) uses _kebab-case_:
+
+```yaml
+badger:
+ compression: zstd:1
+trace:
+ jaeger: http://jaeger:14268
+security:
+ whitelist: 10.0.0.0/8,172.0.0.0/8,192.168.0.0/16
+tls:
+ ca-cert: /dgraph/tls/ca.crt
+ client-auth-type: REQUIREANDVERIFY
+ server-cert: /dgraph/tls/node.crt
+ server-key: /dgraph/tls/node.key
+ use-system-ca: true
+ internal-port: true
+ client-cert: /dgraph/tls/client.dgraphuser.crt
+ client-key: /dgraph/tls/client.dgraphuser.key
+```
+
+The following example YAML config file (`config.yml`) uses _snake_case_:
+
+```yaml
+badger:
+ compression: zstd:1
+trace:
+ jaeger: http://jaeger:14268
+security:
+ whitelist: 10.0.0.0/8,172.0.0.0/8,192.168.0.0/16
+tls:
+ ca_cert: /dgraph/tls/ca.crt
+ client_auth_type: REQUIREANDVERIFY
+ server_cert: /dgraph/tls/node.crt
+ server_key: /dgraph/tls/node.key
+ use_system_ca: true
+ internal_port: true
+ client_cert: /dgraph/tls/client.dgraphuser.crt
+ client_key: /dgraph/tls/client.dgraphuser.key
+```
diff --git a/dgraph/reference/deploy/decrypt.mdx b/dgraph/reference/deploy/decrypt.mdx
new file mode 100644
index 00000000..85676992
--- /dev/null
+++ b/dgraph/reference/deploy/decrypt.mdx
@@ -0,0 +1,76 @@
+---
+title: Data Decryption
+---
+
+You might need to decrypt data from an encrypted Dgraph cluster for a variety of
+reasons, including:
+
+- Migration of data from an encrypted cluster to a non-encrypted cluster
+- Changing your data or schema by directly editing an RDF file or schema file
+
+To support these scenarios, Dgraph includes a `decrypt` command that decrypts
+encrypted RDF and schema files. To learn how to export RDF and schema files from
+Dgraph, see:
+[Dgraph Administration: Export database](./dgraph-administration#export-database).
+
+The `decrypt` command supports a variety of symmetric key lengths, which
+determine the AES cypher used for encryption and decryption, as follows:
+
+| Symmetric key length | AES encryption cypher |
+| -------------------- | --------------------- |
+| 128 bits (16-bytes) | AES-128 |
+| 192 bits (24-bytes) | AES-192 |
+| 256 bits (32-bytes) | AES-256 |
+
+The `decrypt` command also supports the use of
+[Hashicorp Vault](https://www.vaultproject.io/) to store secrets, including
+support for Vault's
+[AppRole authentication](https://www.vaultproject.io/docs/auth/approle.html).
+
+## Decryption options
+
+The following decryption options (or _flags_) are available for the `decrypt`
+command:
+
+| Flag or Superflag | Superflag Option | Notes |
+| ----------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------ |
+| `--encryption` | `key-file` | Encryption key filename |
+| `-f`, `--file` | | Path to file for the encrypted RDF or schema **.gz** file |
+| `-h`, `--help` | | Help for the decrypt command |
+| `-o`, `--out` | | Path to file for the decrypted **.gz** file that decrypt creates |
+| `--vault` | `addr` | Vault server address, in **http://<_ip-address_>:<_port_>** format (default: `http://localhost:8200` ) |
+| | `enc-field` | Name of the Vault server's key/value store field that holds the Base64 encryption key |
+| | `enc-format` | Vault server field format; can be `raw` or `base64` (default: `base64`) |
+| | `path` | Vault server key/value store path (default: `secret/data/dgraph`) |
+| | `role-id-file` | File containing the [Vault](https://www.vaultproject.io/) `role_id` used for AppRole authentication |
+| | `secret-id-file` | File containing the [Vault](https://www.vaultproject.io/) `secret_id` used for AppRole authentication |
+
+To learn more about the `--vault` superflag and its options that have replaced
+the `--vault_*` options in release v20.11 and earlier, see
+[Dgraph CLI Command Reference](./deploy/cli-command-reference).
+
+## Data decryption examples
+
+For example, you could use the following command with an encrypted RDF file
+(**encrypted.rdf.gz**) and an encryption key file (**enc_key_file**), to create
+a decrypted RDF file:
+
+```bash
+# Encryption Key from the file path
+dgraph decrypt --file "encrypted.rdf.gz" --out "decrypted_rdf.gz" --encryption key-file="enc-key-file"
+
+# Encryption Key from HashiCorp Vault
+dgraph decrypt --file "encrypted.rdf.gz" --out "decrypted_rdf.gz" \
+ --vault addr="http://localhost:8200";enc-field="enc_key";enc-format="raw";path="secret/data/dgraph/alpha";role-id-file="./role_id";secret-id-file="./secret_id"
+```
+
+You can use similar syntax to create a decrypted schema file:
+
+```bash
+# Encryption Key from the file path
+dgraph decrypt --file "encrypted.schema.gz" --out "decrypted_schema.gz" --encryption key-file="enc-key-file"
+
+# Encryption Key from HashiCorp Vault
+dgraph decrypt --file "encrypted.schema.gz" --out "decrypted_schema.gz" \
+ --vault addr="http://localhost:8200";enc-field="enc_key";enc-format="raw";path="secret/data/dgraph/alpha";role-id-file="./role_id";secret-id-file="./secret_id"
+```
diff --git a/dgraph/reference/deploy/dgraph-alpha.mdx b/dgraph/reference/deploy/dgraph-alpha.mdx
new file mode 100644
index 00000000..5e306e91
--- /dev/null
+++ b/dgraph/reference/deploy/dgraph-alpha.mdx
@@ -0,0 +1,89 @@
+---
+title: More about Dgraph Alpha
+---
+
+Dgraph Alpha provides several HTTP endpoints for administrators, as follows:
+
+- `/health?all` returns information about the health of all the servers in the
+ cluster.
+- `/admin/shutdown` initiates a proper
+ [shutdown](./dgraph-administration#shutting-down-database) of the Alpha.
+
+By default the Alpha listens on `localhost` for admin actions (the loopback
+address only accessible from the same machine). The `--bindall=true` option
+binds to `0.0.0.0` and thus allows external connections.
+
+
+ Set max file descriptors to a high value like 10000 if you are going to load a
+ lot of data.
+
+
+## Querying Health
+
+You can query the `/admin` graphql endpoint with a query like the one below to
+get a JSON consisting of basic information about health of all the servers in
+the cluster.
+
+```graphql
+query {
+ health {
+ instance
+ address
+ version
+ status
+ lastEcho
+ group
+ uptime
+ ongoing
+ indexing
+ }
+}
+```
+
+Here’s an example of JSON returned from the above query:
+
+```json
+{
+ "data": {
+ "health": [
+ {
+ "instance": "zero",
+ "address": "localhost:5080",
+ "version": "v2.0.0-rc1",
+ "status": "healthy",
+ "lastEcho": 1582827418,
+ "group": "0",
+ "uptime": 1504
+ },
+ {
+ "instance": "alpha",
+ "address": "localhost:7080",
+ "version": "v2.0.0-rc1",
+ "status": "healthy",
+ "lastEcho": 1582827418,
+ "group": "1",
+ "uptime": 1505,
+ "ongoing": ["opIndexing"],
+ "indexing": ["name", "age"]
+ }
+ ]
+ }
+}
+```
+
+- `instance`: Name of the instance. Either `alpha` or `zero`.
+- `status`: Health status of the instance. Either `healthy` or `unhealthy`.
+- `version`: Version of Dgraph running the Alpha or Zero server.
+- `uptime`: Time in nanoseconds since the Alpha or Zero server has been up and
+ running.
+- `address`: IP_ADDRESS:PORT of the instance.
+- `group`: Group assigned based on the replication factor. Read more
+ [here](/deploy/cluster-setup).
+- `lastEcho`: Last time, in Unix epoch, when the instance was contacted by
+ another Alpha or Zero server.
+- `ongoing`: List of ongoing operations in the background.
+- `indexing`: List of predicates for which indexes are built in the background.
+ Read more [here](./dql-schema#indexes-in-background).
+
+The same information (except `ongoing` and `indexing`) is available from the
+`/health` and `/health?all` endpoints of Alpha server.
diff --git a/dgraph/reference/deploy/dgraph-zero.mdx b/dgraph/reference/deploy/dgraph-zero.mdx
new file mode 100644
index 00000000..a7a6b0ef
--- /dev/null
+++ b/dgraph/reference/deploy/dgraph-zero.mdx
@@ -0,0 +1,279 @@
+---
+title: More about Dgraph Zero
+---
+
+Dgraph Zero controls the Dgraph cluster, and stores information about it. It
+automatically moves data between different Dgraph Alpha instances based on the
+size of the data served by each Alpha instance.
+
+Before you can run `dgraph alpha`, you must run at least one `dgraph zero` node.
+You can see the options available for `dgraph zero` by using the following
+command:
+
+```bash
+dgraph zero --help
+```
+
+The `--replicas` option controls the replication factor: the number of replicas
+per data shard, including the original shard. For consensus, the replication
+factor must be set to an odd number, and the following error will occur if it is
+set to an even number (for example, `2`):
+
+```nix
+ERROR: Number of replicas must be odd for consensus. Found: 2
+```
+
+When a new Alpha joins the cluster, it is assigned to a group based on the
+replication factor. If the replication factor is set to `1`, then each Alpha
+node will serve a different group. If the replication factor is set to `3` and
+you then launch six Alpha nodes, the first three Alpha nodes will serve group 1
+and next three nodes will serve group 2. Zero monitors the space occupied by
+predicates in each group and moves predicates between groups as-needed to
+rebalance the cluster.
+
+## Endpoints
+
+Like Alpha, Zero also exposes HTTP on port 6080 (plus any ports specified by
+`--port_offset`). You can query this port using a **GET** request to access the
+following endpoints:
+
+- `/state` returns information about the nodes that are part of the cluster.
+ This includes information about the size of predicates and which groups they
+ belong to.
+- `/assign?what=uids&num=100` allocates a range of UIDs specified by the `num`
+ argument, and returns a JSON map containing the `startId` and `endId` that
+ defines the range of UIDs (inclusive). This UID range can be safely assigned
+ externally to new nodes during data ingestion.
+- `/assign?what=timestamps&num=100` requests timestamps from Zero. This is
+ useful to "fast forward" the state of the Zero node when starting from a
+ postings directory that already has commits higher than Zero's leased
+ timestamp.
+- `/removeNode?id=3&group=2` removes a dead Zero or Alpha node. When a replica
+ node goes offline and can't be recovered, you can remove it and add a new node
+ to the quorum. To remove dead Zero nodes, pass `group=0` and the id of the Zero
+ node to this endpoint.
+
+
+ Before using this API, ensure that the node is down and that it never comes
+ back up again. Do not reuse the `idx` of a node that was removed earlier.
+
+
+- `/moveTablet?tablet=name&group=2` Moves a tablet to a group. Zero already
+ rebalances shards every 8 mins, but this endpoint can be used to force move a
+ tablet.
+
+You can also use the following **POST** endpoint on HTTP port 6080:
+
+- `/enterpriseLicense` applies an enterprise license to the cluster by supplying
+ it as part of the body.
+
+### More About the /state Endpoint
+
+The `/state` endpoint of Dgraph Zero returns a JSON document of the current
+group membership info, which includes the following:
+
+- Instances which are part of the cluster.
+- Number of instances in the Zero group and in each Alpha group.
+- Current leader of each group.
+- Predicates that belong to a group.
+- Estimated size in bytes of each predicate.
+- Enterprise license information.
+- Max Leased transaction ID.
+- Max Leased UID.
+- CID (Cluster ID).
+
+Here’s an example of JSON for a cluster with three Alpha nodes and three Zero
+nodes returned from the `/state` endpoint:
+
+```json
+{
+ "counter": "22",
+ "groups": {
+ "1": {
+ "members": {
+ "1": {
+ "id": "1",
+ "groupId": 1,
+ "addr": "alpha2:7082",
+ "leader": true,
+ "amDead": false,
+ "lastUpdate": "1603350485",
+ "clusterInfoOnly": false,
+ "forceGroupId": false
+ },
+ "2": {
+ "id": "2",
+ "groupId": 1,
+ "addr": "alpha1:7080",
+ "leader": false,
+ "amDead": false,
+ "lastUpdate": "0",
+ "clusterInfoOnly": false,
+ "forceGroupId": false
+ },
+ "3": {
+ "id": "3",
+ "groupId": 1,
+ "addr": "alpha3:7083",
+ "leader": false,
+ "amDead": false,
+ "lastUpdate": "0",
+ "clusterInfoOnly": false,
+ "forceGroupId": false
+ }
+ },
+ "tablets": {
+ "dgraph.cors": {
+ "groupId": 1,
+ "predicate": "dgraph.cors",
+ "force": false,
+ "space": "0",
+ "remove": false,
+ "readOnly": false,
+ "moveTs": "0"
+ },
+ "dgraph.graphql.schema": {
+ "groupId": 1,
+ "predicate": "dgraph.graphql.schema",
+ "force": false,
+ "space": "0",
+ "remove": false,
+ "readOnly": false,
+ "moveTs": "0"
+ },
+ "dgraph.graphql.schema_created_at": {
+ "groupId": 1,
+ "predicate": "dgraph.graphql.schema_created_at",
+ "force": false,
+ "space": "0",
+ "remove": false,
+ "readOnly": false,
+ "moveTs": "0"
+ },
+ "dgraph.graphql.schema_history": {
+ "groupId": 1,
+ "predicate": "dgraph.graphql.schema_history",
+ "force": false,
+ "space": "0",
+ "remove": false,
+ "readOnly": false,
+ "moveTs": "0"
+ },
+ "dgraph.graphql.xid": {
+ "groupId": 1,
+ "predicate": "dgraph.graphql.xid",
+ "force": false,
+ "space": "0",
+ "remove": false,
+ "readOnly": false,
+ "moveTs": "0"
+ },
+ "dgraph.type": {
+ "groupId": 1,
+ "predicate": "dgraph.type",
+ "force": false,
+ "space": "0",
+ "remove": false,
+ "readOnly": false,
+ "moveTs": "0"
+ }
+ },
+ "snapshotTs": "22",
+ "checksum": "18099480229465877561"
+ }
+ },
+ "zeros": {
+ "1": {
+ "id": "1",
+ "groupId": 0,
+ "addr": "zero1:5080",
+ "leader": true,
+ "amDead": false,
+ "lastUpdate": "0",
+ "clusterInfoOnly": false,
+ "forceGroupId": false
+ },
+ "2": {
+ "id": "2",
+ "groupId": 0,
+ "addr": "zero2:5082",
+ "leader": false,
+ "amDead": false,
+ "lastUpdate": "0",
+ "clusterInfoOnly": false,
+ "forceGroupId": false
+ },
+ "3": {
+ "id": "3",
+ "groupId": 0,
+ "addr": "zero3:5083",
+ "leader": false,
+ "amDead": false,
+ "lastUpdate": "0",
+ "clusterInfoOnly": false,
+ "forceGroupId": false
+ }
+ },
+ "maxUID": "10000",
+ "maxTxnTs": "10000",
+ "maxRaftId": "3",
+ "removed": [],
+ "cid": "2571d268-b574-41fa-ae5e-a6f8da175d6d",
+ "license": {
+ "user": "",
+ "maxNodes": "18446744073709551615",
+ "expiryTs": "1605942487",
+ "enabled": true
+ }
+}
+```
+
+This JSON provides information that includes the following, with node members
+shown with their node name and HTTP port number:
+
+- Group 1 members:
+ - alpha2:7082, id: 1, leader
+ - alpha1:7080, id: 2
+ - alpha3:7083, id: 3
+- Group 0 members (Dgraph Zero nodes)
+ - zero1:5080, id: 1, leader
+ - zero2:5082, id: 2
+ - zero3:5083, id: 3
+- `maxUID`
+ - The current maximum lease of UIDs used for blank node UID assignment.
+ - This increments in batches of 10,000 IDs. Once the maximum lease is reached,
+ another 10,000 IDs are leased. In the event that the Zero leader is lost,
+ the new leader starts a new lease from `maxUID`+1. Any UIDs lost between
+ these leases will never be used for blank-node UID assignment.
+ - An admin can use the Zero endpoint HTTP GET `/assign?what=uids&num=1000` to
+ reserve a range of UIDs (in this case, 1000) to use externally. Zero will
+ **never** use these UIDs for blank node UID assignment, so the user can use
+ the range to assign UIDs manually to their own data sets.
+- `maxTxnTs`
+ - The current maximum lease of transaction timestamps used to hand out start
+ timestamps and commit timestamps. This increments in batches of 10,000 IDs.
+ After the max lease is reached, another 10,000 IDs are leased. If the Zero
+ leader is lost, then the new leader starts a new lease from `maxTxnTs`+1 .
+ Any lost transaction IDs between these leases will never be used.
+ - An admin can use the Zero endpoint HTTP GET
+ `/assign?what=timestamps&num=1000` to increase the current transaction
+ timestamp (in this case, by 1000). This is mainly useful in special-case
+ scenarios; for example, using an existing `-p directory` to create a fresh
+ cluster to be able to query the latest data in the DB.
+- `maxRaftId`
+ - The number of Zeros available to serve as a leader node. Used by the
+ [RAFT](/design-concepts/raft/) consensus algorithm.
+- `CID`
+ - This is a unique UUID representing the _cluster-ID_ for this cluster. It is
+ generated during the initial DB startup and is retained across restarts.
+- Enterprise license
+ - Enabled
+ - `maxNodes`: unlimited
+ - License expiration, shown in seconds since the Unix epoch.
+
+
+ The terms "tablet", "predicate", and "edge" are currently synonymous. In
+ future, Dgraph might improve data scalability to shard a predicate into
+ separate tablets that can be assigned to different groups.
+
diff --git a/dgraph/reference/deploy/index.mdx b/dgraph/reference/deploy/index.mdx
new file mode 100644
index 00000000..a8ef2a27
--- /dev/null
+++ b/dgraph/reference/deploy/index.mdx
@@ -0,0 +1,32 @@
+---
+title: Self-Managed Cluster
+---
+
+You can deploy and manage Dgraph database in a variety of self-managed
+deployment scenarios, including:
+
+- Running Dgraph on your on-premises infrastructure (bare-metal physical
+ servers)
+- Running Dgraph on your cloud infrastructure (AWS, GCP and Azure)
+
+This section focuses exclusively on deployment and management for these
+self-managed scenarios. To learn about fully-managed options that let you focus
+on building apps and websites, rather than managing infrastructure, see the
+[Dgraph cloud services docs](https://dgraph.io/docs/cloud/), or
+[Try Dgraph Cloud](https://cloud.dgraph.io/).
+
+A Dgraph cluster consists of the following:
+
+- **Dgraph Alpha database server nodes**: The Dgraph Alpha server nodes in your
+ deployment host and serve data. These nodes also host an `/admin` HTTP and
+ GRPC endpoint that can be used for data and node administration tasks such as
+ backup, export, draining, and shutdown.
+- **Dgraph Zero management server nodes**: The Dgraph Zero nodes in your
+ deployment control the nodes in your Dgraph cluster. Dgraph Zero automatically
+ moves data between different Dgraph Alpha instances based on the volume of
+ data served by each Alpha instance.
+
+You need at least one node of each type to run Dgraph. You need three nodes of
+each type to run Dgraph in a high-availability (HA) cluster configuration. To
+learn more about 2-node and 6-node deployment options, see the
+[Production Checklist](./installation/production-checklist).
diff --git a/dgraph/reference/deploy/installation/download.mdx b/dgraph/reference/deploy/installation/download.mdx
new file mode 100644
index 00000000..99b3b55a
--- /dev/null
+++ b/dgraph/reference/deploy/installation/download.mdx
@@ -0,0 +1,83 @@
+---
+title: Download
+description:
+ Download the images and source files to build and install for a
+ production-ready Dgraph cluster
+---
+
+You can obtain the Dgraph binary for the latest version, as well as previous
+releases, using the automatic install script, manual download, Docker images, or
+by building the binary from the open source code.
+
+
+
+
+
+1. Install Docker.
+
+1. Pull the latest Dgraph image using docker:
+
+ ```sh
+ docker pull dgraph/dgraph:latest
+ ```
+
+ To set up a [learning environment](./single-host-setup), you may pull the
+ [dgraph standalone](https://hub.docker.com/r/dgraph/standalone) image:
+
+ ```sh
+ docker pull dgraph/standalone:latest
+ ```
+
+1. Verify that the image is downloaded:
+
+ ```sh
+ docker images
+ ```
+
+
+
+
+
+On Linux systems, you can get the binary using the automatic install script:
+
+1. Download the Dgraph installation script to install Dgraph automatically:
+
+ ```sh
+ curl https://get.dgraph.io -sSf | bash
+ ```
+
+1. Verify that it works, by running `dgraph version`. For more
+ information about the various installation scripts that you can use, see
+ [install scripts](https://github.com/dgraph-io/Install-Dgraph).
+
+
+
+
+
+On Linux systems, you can download a tar file and install manually. Download the
+appropriate tar for your platform from
+**[Dgraph releases](https://github.com/dgraph-io/dgraph/releases)**. After
+downloading the tar for your platform from Github, extract the binary to
+`/usr/local/bin` like so.
+
+1. Extract the downloaded file:
+
+ ```sh
+ sudo tar -C /usr/local/bin -xzf dgraph-linux-amd64-VERSION.tar.gz
+ ```
+
+1. Verify that it works, by running `dgraph version`.
+
+
+
+
+
+You can also build **Dgraph** and **Ratel UI** from the source code by following
+the instructions from
+[Contributing to Dgraph](https://github.com/dgraph-io/dgraph/blob/master/CONTRIBUTING.md)
+or
+[Building and running ratel](https://github.com/dgraph-io/ratel/blob/master/INSTRUCTIONS.md).
+
+
+
+
diff --git a/dgraph/reference/deploy/installation/index.mdx b/dgraph/reference/deploy/installation/index.mdx
new file mode 100644
index 00000000..6607c2de
--- /dev/null
+++ b/dgraph/reference/deploy/installation/index.mdx
@@ -0,0 +1,7 @@
+---
+title: Installation
+---
+
+This section is about installing Dgraph in dev or hobbyist environment as well
+as production environments with HA and horizontal scalability using multiple
+Alpha nodes in a cluster.
diff --git a/dgraph/reference/deploy/installation/kubernetes/cluster-types.mdx b/dgraph/reference/deploy/installation/kubernetes/cluster-types.mdx
new file mode 100644
index 00000000..943ac6e8
--- /dev/null
+++ b/dgraph/reference/deploy/installation/kubernetes/cluster-types.mdx
@@ -0,0 +1,175 @@
+---
+title: Cluster Types
+---
+
+### Terminology
+
+An **N-node cluster** is a Dgraph cluster that contains N number of Dgraph
+instances. For example, a 6-node cluster means six Dgraph instances. The
+**replication setting** specifies the number of Dgraph Alpha replicas that are
+in each group. If this is higher than 1, each alpha in a group will hold a full
+copy of that group's data. The replication setting is a configuration flag
+(`--replicas`) on Dgraph Zero. Sharding is done (typically for databases near
+1TB in size) by creating multiple **Dgraph Alpha groups**. Every Dgraph Alpha
+group is automatically assigned a set of distinct predicates to store and serve,
+thus dividing up the data.
+
+Examples of different cluster settings:
+
+- No sharding
+ - 2-node cluster: 1 Zero, 1 Alpha (one group).
+ - HA equivalent: x3 = 6-node cluster.
+- With 2-way sharding:
+ - 3-node cluster: 1 Zero, 2 Alphas (two groups).
+ - HA equivalent: x3 = 9-node cluster.
+
+In the following examples we outline the two most common cluster configurations:
+a 2-node cluster and a 6-node cluster.
+
+### Basic setup: 2-node cluster
+
+We provide sample configs for both
+[Docker Compose](https://github.com/dgraph-io/dgraph/blob/main/contrib/config/docker/docker-compose.yml)
+and
+[Kubernetes](https://github.com/dgraph-io/dgraph/tree/main/contrib/config/kubernetes/dgraph-single)
+for a 2-node cluster. You can also run Dgraph directly on your host machines.
+
+
+
+Configuration can be set either as command-line flags, environment variables, or
+in a config file (see [Config](./deploy/config)).
+
+Dgraph Zero:
+
+- The `--my` flag should be set to the address:port (the internal-gRPC port)
+ that will be accessible to the Dgraph Alpha (default: `localhost:5080`).
+- The `--raft` superflag's `idx` option should be set to a unique Raft ID within
+ the Dgraph Zero group (default: `1`).
+- The `--wal` flag should be set to the directory path to store write-ahead-log
+ entries on disk (default: `zw`).
+- The `--bindall` flag should be set to true for machine-to-machine
+ communication (default: `true`).
+- Recommended: For better issue diagnostics, set the log level verbosity to 2
+ with the option `--v=2`.
+
+Dgraph Alpha:
+
+- The `--my` flag should be set to the address:port (the internal-gRPC port)
+ that will be accessible to the Dgraph Zero (default: `localhost:7080`).
+- The `--zero` flag should be set to the corresponding Zero address set for
+ Dgraph Zero's `--my` flag.
+- The `--postings` flag should be set to the directory path for data storage
+ (default: `p`).
+- The `--wal` flag should be set to the directory path for write-ahead-log
+ entries (default: `w`)
+- The `--bindall` flag should be set to true for machine-to-machine
+ communication (default: `true`).
+- Recommended: For better issue diagnostics, set the log level verbosity to 2
+ `--v=2`.
+
+### HA setup: 6-node cluster
+
+We provide sample configs for both
+[Docker Compose](https://github.com/dgraph-io/dgraph/blob/main/contrib/config/docker/docker-compose-ha.yml)
+and
+[Kubernetes](https://github.com/dgraph-io/dgraph/tree/main/contrib/config/kubernetes/dgraph-ha)
+for a 6-node cluster with 3 Alpha replicas per group. You can also run Dgraph
+directly on your host machines.
+
+A Dgraph cluster can be configured in a high-availability setup with Dgraph Zero
+and Dgraph Alpha each set up with peers. These peers are part of Raft consensus
+groups, which elect a single leader among themselves. The non-leader peers are
+called followers. In the event that the peers cannot communicate with the leader
+(e.g., a network partition or a machine shuts down), the group automatically
+elects a new leader to continue.
+
+Configuration can be set either as command-line flags, environment variables, or
+in a config file (see [Config](./deploy/config)).
+
+In this setup, we assume the following hostnames are set:
+
+- `zero1`
+- `zero2`
+- `zero3`
+- `alpha1`
+- `alpha2`
+- `alpha3`
+
+We will configure the cluster with 3 Alpha replicas per group. The cluster
+group-membership topology will look like the following:
+
+
+
+#### Set up Dgraph Zero group
+
+In the Dgraph Zero group you must set unique Raft IDs (`--raft` superflag's
+`idx` option) per Dgraph Zero. Dgraph will not auto-assign Raft IDs to Dgraph
+Zero instances.
+
+The first Dgraph Zero that starts will initiate the database cluster. Any
+following Dgraph Zero instances must connect to the cluster via the `--peer`
+flag to join. If the `--peer` flag is omitted from the peers, then the Dgraph
+Zero will create its own independent Dgraph cluster.
+
+**First Dgraph Zero** example:
+`dgraph zero --replicas=3 --raft idx=1 --my=zero1:5080`
+
+The `--my` flag must be set to the address:port of this instance that peers will
+connect to. The `--raft` superflag's `idx` option sets its Raft ID to `1`.
+
+**Second Dgraph Zero** example:
+`dgraph zero --replicas=3 --raft idx=2 --my=zero2:5080 --peer=zero1:5080`
+
+The `--my` flag must be set to the address:port of this instance that peers will
+connect to. The `--raft` superflag's `idx` option sets its Raft ID to 2, and the
+`--peer` flag specifies a request to connect to the Dgraph cluster of zero1
+instead of initializing a new one.
+
+**Third Dgraph Zero** example:
+`dgraph zero --replicas=3 --raft idx=3 --my=zero3:5080 --peer=zero1:5080`
+
+The `--my` flag must be set to the address:port of this instance that peers will
+connect to. The `--raft` superflag's `idx` option sets its Raft ID to 3, and the
+`--peer` flag specifies a request to connect to the Dgraph cluster of zero1
+instead of initializing a new one.
+
+Dgraph Zero configuration options:
+
+- The `--my` flag should be set to the address:port (the internal-gRPC port)
+ that will be accessible to Dgraph Alpha (default: `localhost:5080`).
+- The `--raft` superflag's `idx` option should be set to a unique Raft ID within
+ the Dgraph Zero group (default: `1`).
+- The `--wal` flag should be set to the directory path to store write-ahead-log
+ entries on disk (default: `zw`).
+- The `--bindall` flag should be set to true for machine-to-machine
+ communication (default: `true`).
+- Recommended: For more informative log info, set the log level verbosity to 2
+ with the option `--v=2`.
+
+#### Set up Dgraph Alpha group
+
+The number of replica members per Alpha group depends on the setting of Dgraph
+Zero's `--replicas` flag. Above, it is set to 3. So when Dgraph Alphas join the
+cluster, Dgraph Zero will assign it to an Alpha group to fill in its members up
+to the limit per group set by the `--replicas` flag.
+
+First Alpha example: `dgraph alpha --my=alpha1:7080 --zero=zero1:5080`
+
+Second Alpha example: `dgraph alpha --my=alpha2:7080 --zero=zero1:5080`
+
+Third Alpha example: `dgraph alpha --my=alpha3:7080 --zero=zero1:5080`
+
+Dgraph Alpha configuration options:
+
+- The `--my` flag should be set to the address:port (the internal-gRPC port)
+ that will be accessible to the Dgraph Zero (default: `localhost:7080`).
+- The `--zero` flag should be set to the corresponding Zero address set for
+  Dgraph Zero's `--my` flag.
+- The `--postings` flag should be set to the directory path for data storage
+ (default: `p`).
+- The `--wal` flag should be set to the directory path for write-ahead-log
+ entries (default: `w`)
+- The `--bindall` flag should be set to true for machine-to-machine
+ communication (default: `true`).
+- Recommended: For more informative log info, set the log level verbosity to 2
+ `--v=2`.
diff --git a/dgraph/reference/deploy/installation/kubernetes/ha-cluster.mdx b/dgraph/reference/deploy/installation/kubernetes/ha-cluster.mdx
new file mode 100644
index 00000000..5b667d14
--- /dev/null
+++ b/dgraph/reference/deploy/installation/kubernetes/ha-cluster.mdx
@@ -0,0 +1,418 @@
+---
+title: Highly Available Cluster Setup
+---
+
+You can run three Dgraph Alpha servers and three Dgraph Zero servers in a highly
+available cluster setup. For a highly available setup, start the Dgraph Zero
+server with `--replicas 3` flag, so that all data is replicated on three Alpha
+servers and forms one Alpha group. You can install a highly available cluster
+using:
+
+- [dgraph-ha.yaml](https://github.com/dgraph-io/dgraph/blob/main/contrib/config/kubernetes/dgraph-ha/dgraph-ha.yaml)
+ file
+- Helm charts.
+
+### Install a highly available Dgraph cluster using YAML or Helm
+
+
+
+
+
+#### Before you begin:
+
+- Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/).
+- Ensure that you have a production-ready Kubernetes cluster with at least three
+ worker nodes running in a cloud provider of your choice.
+- (Optional) To run Dgraph Alpha with TLS, see
+ [TLS Configuration](/tls-configuration).
+
+#### Installing a highly available Dgraph cluster
+
+1. Verify that you are able to access the nodes in the Kubernetes cluster:
+
+ ```bash
+ kubectl get nodes
+ ```
+
+ An output similar to this appears:
+
+ ```bash
+ NAME STATUS ROLES AGE VERSION
+ ..compute.internal Ready 1m v1.15.11-eks-af3caf
+ ..compute.internal Ready 1m v1.15.11-eks-af3caf
+ ..compute.internal Ready 1m v1.15.11-eks-af3caf
+ ```
+
+ After your Kubernetes cluster is up, you can use
+ [dgraph-ha.yaml](https://github.com/dgraph-io/dgraph/blob/main/contrib/config/kubernetes/dgraph-ha/dgraph-ha.yaml)
+ to start the cluster.
+
+1. Start a StatefulSet that creates Pods with `Zero`, `Alpha`, and `Ratel UI`:
+
+ ```bash
+ kubectl create --filename https://raw.githubusercontent.com/dgraph-io/dgraph/main/contrib/config/kubernetes/dgraph-ha/dgraph-ha.yaml
+ ```
+
+ An output similar to this appears:
+
+ ```bash
+ service/dgraph-zero-public created
+ service/dgraph-alpha-public created
+ service/dgraph-ratel-public created
+ service/dgraph-zero created
+ service/dgraph-alpha created
+ statefulset.apps/dgraph-zero created
+ statefulset.apps/dgraph-alpha created
+ deployment.apps/dgraph-ratel created
+ ```
+
+1. Confirm that the Pods were created successfully.
+
+ ```bash
+ kubectl get pods
+ ```
+
+ An output similar to this appears:
+
+ ```bash
+ NAME READY STATUS RESTARTS AGE
+ dgraph-alpha-0 1/1 Running 0 6m24s
+ dgraph-alpha-1 1/1 Running 0 5m42s
+ dgraph-alpha-2 1/1 Running 0 5m2s
+ dgraph-ratel- 1/1 Running 0 6m23s
+ dgraph-zero-0 1/1 Running 0 6m24s
+ dgraph-zero-1 1/1 Running 0 5m41s
+ dgraph-zero-2 1/1 Running 0 5m6s
+ ```
+
+ You can check the logs for the Pod using
+   `kubectl logs --follow <pod-name>`.
+
+1. Port forward from your local machine to the Pod:
+
+ ```bash
+ kubectl port-forward service/dgraph-alpha-public 8080:8080
+ kubectl port-forward service/dgraph-ratel-public 8000:8000
+ ```
+
+1. Go to `http://localhost:8000` to access Dgraph using the Ratel UI.
+
+ You can also access the service on its External IP address.
+
+#### Deleting highly available Dgraph resources
+
+Delete all the resources using:
+
+```sh
+kubectl delete --filename https://raw.githubusercontent.com/dgraph-io/dgraph/main/contrib/config/kubernetes/dgraph-ha/dgraph-ha.yaml
+kubectl delete persistentvolumeclaims --selector app=dgraph-zero
+kubectl delete persistentvolumeclaims --selector app=dgraph-alpha
+```
+
+
+
+
+
+#### Before you begin
+
+- Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/).
+- Ensure that you have a production-ready Kubernetes cluster with at least three
+  worker nodes running in a cloud provider of your choice.
+- Install [Helm](https://helm.sh/docs/intro/install/).
+- (Optional) To run Dgraph Alpha with TLS, see
+ [TLS Configuration](/tls-configuration).
+
+#### Installing a highly available Dgraph cluster using Helm
+
+1. Verify that you are able to access the nodes in the Kubernetes cluster:
+
+ ```bash
+ kubectl get nodes
+ ```
+
+ An output similar to this appears:
+
+ ```bash
+ NAME STATUS ROLES AGE VERSION
+ ..compute.internal Ready 1m v1.15.11-eks-af3caf
+ ..compute.internal Ready 1m v1.15.11-eks-af3caf
+ ..compute.internal Ready 1m v1.15.11-eks-af3caf
+ ```
+
+   After your Kubernetes cluster is up and running, you can use the
+   [Dgraph Helm chart](https://github.com/dgraph-io/charts/) to install a
+   highly available Dgraph cluster.
+
+1. Add the Dgraph Helm repository:
+
+ ```bash
+ helm repo add dgraph https://charts.dgraph.io
+ ```
+
+1. Install the chart with ``:
+
+ ```bash
+ helm install dgraph/dgraph
+ ```
+
+ You can also specify the version using:
+
+ ```bash
+ helm install dgraph/dgraph --set image.tag="[version]"
+ ```
+
+ When configuring the Dgraph image tag, be careful not to use `latest` or
+ `main` in a production environment. These tags may have the Dgraph version
+ change, causing a mixed-version Dgraph cluster that can lead to an outage
+ and potential data loss.
+
+ An output similar to this appears:
+
+ ```bash
+ NAME:
+ LAST DEPLOYED: Wed Feb 1 21:26:32 2023
+ NAMESPACE: default
+ STATUS: deployed
+ REVISION: 1
+ TEST SUITE: None
+ NOTES:
+ 1. You have just deployed Dgraph, version 'v21.12.0'.
+
+ For further information:
+ * Documentation: https://dgraph.io/docs/
+ * Community and Issues: https://discuss.dgraph.io/
+ 2. Get the Dgraph Alpha HTTP/S endpoint by running these commands.
+ export ALPHA_POD_NAME=$(kubectl get pods --namespace default --selector "statefulset.kubernetes.io/pod-name=-dgraph-alpha-0,release=-dgraph" --output jsonpath="{.items[0].metadata.name}")
+ echo "Access Alpha HTTP/S using http://localhost:8080"
+ kubectl --namespace default port-forward $ALPHA_POD_NAME 8080:8080
+
+ NOTE: Change "http://" to "https://" if TLS was added to the Ingress, Load Balancer, or Dgraph Alpha service.
+ ```
+
+1. Get the name of the Pods in the cluster using `kubectl get pods`:
+
+ ```bash
+ NAME READY STATUS RESTARTS AGE
+ -dgraph-alpha-0 1/1 Running 0 4m48s
+ -dgraph-alpha-1 1/1 Running 0 4m2s
+ -dgraph-alpha-2 1/1 Running 0 3m31s
+ -dgraph-zero-0 1/1 Running 0 4m48s
+ -dgraph-zero-1 1/1 Running 0 4m10s
+ -dgraph-zero-2 1/1 Running 0 3m50s
+
+ ```
+
+1. Get the Dgraph Alpha HTTP/S endpoint by running these commands:
+ ```bash
+ export ALPHA_POD_NAME=$(kubectl get pods --namespace default --selector "statefulset.kubernetes.io/pod-name=-dgraph-alpha-0,release=-dgraph" --output jsonpath="{.items[0].metadata.name}")
+ echo "Access Alpha HTTP/S using http://localhost:8080"
+ kubectl --namespace default port-forward $ALPHA_POD_NAME 8080:8080
+ ```
+
+#### Deleting the resources from the cluster
+
+1. Delete the Helm deployment using:
+
+ ```sh
+ helm delete my-release
+ ```
+
+2. Delete associated Persistent Volume Claims:
+
+ ```sh
+ kubectl delete pvc --selector release=my-release
+ ```
+
+
+
+
+
+### Dgraph configuration files
+
+You can create a Dgraph [Config](./deploy/config) files for Alpha server and
+Zero server with Helm chart configuration values, ``. For more
+information about the values, see the latest
+[configuration settings](https://github.com/dgraph-io/charts/blob/master/charts/dgraph/README.md#configuration).
+
+1. Open an editor of your choice and create a config file named
+ `.yaml`:
+
+```yaml
+# .yaml
+alpha:
+ configFile:
+ config.yaml: |
+ alsologtostderr: true
+ badger:
+ compression_level: 3
+ tables: mmap
+ vlog: mmap
+ postings: /dgraph/data/p
+ wal: /dgraph/data/w
+zero:
+ configFile:
+ config.yaml: |
+ alsologtostderr: true
+ wal: /dgraph/data/zw
+```
+
+2. Change to the directory in which you created the `.yaml` file and
+   then install with Alpha and Zero configuration using:
+
+```sh
+helm install dgraph/dgraph --values .yaml
+```
+
+### Exposing Alpha and Ratel Services
+
+By default Zero and Alpha services are exposed only within the Kubernetes
+cluster as Kubernetes service type `ClusterIP`.
+
+In order to expose the Alpha service and Ratel service publicly you can use
+Kubernetes service type `LoadBalancer` or an Ingress resource.
+
+
+
+
+
+##### Public Internet
+
+To use an external load balancer, set the service type to `LoadBalancer`.
+
+
+ For security purposes we recommend limiting access to any public endpoints,
+ such as using a white list.
+
+
+1. To expose Alpha service to the Internet use:
+
+```sh
+helm install dgraph/dgraph --set alpha.service.type="LoadBalancer"
+```
+
+2. To expose Alpha and Ratel services to the Internet use:
+
+```sh
+helm install dgraph/dgraph --set alpha.service.type="LoadBalancer" --set ratel.service.type="LoadBalancer"
+```
+
+##### Private Internal Network
+
+An external load balancer can be configured to face internally to a private
+subnet rather than the public Internet. This way it can be accessed securely by
+clients on the same network, through a VPN, or from a jump server. In
+Kubernetes, this is often configured through service annotations by the
+provider. Here's a small list of annotations from cloud providers:
+
+| Provider | Documentation Reference | Annotation |
+| ------------ | -------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- |
+| AWS | [Amazon EKS: Load Balancing](https://docs.aws.amazon.com/eks/latest/userguide/load-balancing.html) | `service.beta.kubernetes.io/aws-load-balancer-internal: "true"` |
+| Azure | [AKS: Internal Load Balancer](https://docs.microsoft.com/azure/aks/internal-lb) | `service.beta.kubernetes.io/azure-load-balancer-internal: "true"` |
+| Google Cloud | [GKE: Internal Load Balancing](https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing) | `cloud.google.com/load-balancer-type: "Internal"` |
+
+As an example, using Amazon [EKS](https://aws.amazon.com/eks/) as the provider.
+
+1. Create a Helm chart configuration values file ``.yaml file:
+
+```yaml
+# .yaml
+alpha:
+ service:
+ type: LoadBalancer
+ annotations:
+ service.beta.kubernetes.io/aws-load-balancer-internal: "true"
+ratel:
+ service:
+ type: LoadBalancer
+ annotations:
+ service.beta.kubernetes.io/aws-load-balancer-internal: "true"
+```
+
+1. To expose Alpha and Ratel services privately, use:
+
+```sh
+helm install dgraph/dgraph --values .yaml
+```
+
+
+
+
+
+You can expose Alpha and Ratel using an
+[ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/)
+resource that can route traffic to service resources. Before using this option
+you may need to install an
+[ingress controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/)
+first, as is the case with [AKS](https://docs.microsoft.com/azure/aks/) and
+[EKS](https://aws.amazon.com/eks/), while in the case of
+[GKE](https://cloud.google.com/kubernetes-engine), this comes bundled with a
+default ingress controller. When routing traffic based on the `hostname`, you
+may want to integrate an addon like
+[ExternalDNS](https://github.com/kubernetes-sigs/external-dns) so that DNS
+records can be registered automatically when deploying Dgraph.
+
+As an example, you can configure a single ingress resource that uses
+[ingress-nginx](https://github.com/kubernetes/ingress-nginx) for Alpha and Ratel
+services.
+
+1. Create a Helm chart configuration values file, ``.yaml
+ file:
+
+```yaml
+# .yaml
+global:
+ ingress:
+ enabled: false
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ ratel_hostname: "ratel."
+ alpha_hostname: "alpha."
+```
+
+2. To expose Alpha and Ratel services through an ingress:
+
+```sh
+helm install dgraph/dgraph --values .yaml
+```
+
+You can run `kubectl get ingress` to see the status and access these through
+their hostname, such as `http://alpha.` and
+`http://ratel.`.
+
+
+ Ingress controllers will likely have an option to configure access for private
+ internal networks. Consult documentation from the ingress controller provider
+ for further information.
+
+
+
+
+
+
+### Upgrading the Helm chart
+
+You can update your cluster configuration by updating the configuration of the
+Helm chart. Dgraph is a stateful database that requires some attention on
+upgrading the configuration carefully in order to update your cluster to your
+desired configuration.
+
+In general, you can use [`helm upgrade`][helm-upgrade] to update the
+configuration values of the cluster. Depending on your change, you may need to
+upgrade the configuration in multiple steps.
+
+[helm-upgrade]: https://helm.sh/docs/helm/helm_upgrade/
+
+To upgrade to an [HA cluster setup](./#ha-cluster-setup-using-kubernetes):
+
+1. Ensure that the shard replication setting is more than one and
+ `zero.shardReplicaCount`. For example, set the shard replica flag on the Zero
+ node group to 3,`zero.shardReplicaCount=3`.
+2. Run the Helm upgrade command to restart the Zero node group:
+ ```sh
+ helm upgrade dgraph/dgraph [options]
+ ```
+3. Set the Alpha replica count flag. For example: `alpha.replicaCount=3`.
+4. Run the Helm upgrade command again:
+ ```sh
+ helm upgrade dgraph/dgraph [options]
+ ```
diff --git a/dgraph/reference/deploy/installation/kubernetes/index.mdx b/dgraph/reference/deploy/installation/kubernetes/index.mdx
new file mode 100644
index 00000000..6a64f2a5
--- /dev/null
+++ b/dgraph/reference/deploy/installation/kubernetes/index.mdx
@@ -0,0 +1,3 @@
+---
+title: Production Environment
+---
diff --git a/dgraph/reference/deploy/installation/kubernetes/monitoring-cluster.mdx b/dgraph/reference/deploy/installation/kubernetes/monitoring-cluster.mdx
new file mode 100644
index 00000000..fc231e92
--- /dev/null
+++ b/dgraph/reference/deploy/installation/kubernetes/monitoring-cluster.mdx
@@ -0,0 +1,343 @@
+---
+title: Monitoring the Cluster
+---
+
+## Monitoring the Kubernetes Cluster
+
+Dgraph exposes Prometheus metrics to monitor the state of various components
+involved in the cluster, including Dgraph Alpha and Zero nodes. You can set up
+Prometheus monitoring for your cluster.
+
+You can use Helm to install
+[kube-prometheus-stack](https://github.com/prometheus-operator/kube-prometheus)
+chart. This Helm chart is a collection of Kubernetes manifests,
+[Grafana](http://grafana.com/) dashboards,
+[Prometheus rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/)
+combined with scripts to provide monitoring with
+[Prometheus](https://prometheus.io/) using the
+[Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator).
+This Helm chart also installs [Grafana](http://grafana.com/),
+[node_exporter](https://github.com/prometheus/node_exporter),
+[kube-state-metrics](https://github.com/kubernetes/kube-state-metrics).
+
+### Before you begin:
+
+- Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/).
+- Ensure that you have a production-ready Kubernetes cluster with at least three
+ worker nodes running in a cloud provider of your choice.
+- Install [Helm](https://helm.sh/docs/intro/install/)
+
+### Install using Helm Chart
+
+1. Create a `YAML` file named `dgraph-prometheus-operator.yaml` and edit the
+ values as appropriate for adding endpoints, adding alert rules, adjusting
+ alert manager configuration, adding Grafana dashboard, and others. For more
+ information see,
+ [Dgraph helm chart values](https://github.com/dgraph-io/dgraph/tree/main/contrib/config/monitoring/prometheus/chart-values).
+
+ ```yaml
+ prometheusOperator:
+ createCustomResource: true
+
+ grafana:
+ enabled: true
+ persistence:
+ enabled: true
+ accessModes: ["ReadWriteOnce"]
+ size: 5Gi
+ defaultDashboardsEnabled: true
+ service:
+ type: ClusterIP
+
+ alertmanager:
+ service:
+ labels:
+ app: dgraph-io
+ alertmanagerSpec:
+ storage:
+ volumeClaimTemplate:
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: 5Gi
+ replicas: 1
+ logLevel: debug
+ config:
+ global:
+ resolve_timeout: 2m
+ route:
+ group_by: ['job']
+ group_wait: 30s
+ group_interval: 5m
+ repeat_interval: 12h
+ receiver: 'null'
+ routes:
+ - match:
+ alertname: Watchdog
+ receiver: 'null'
+ receivers:
+ - name: 'null'
+
+ prometheus:
+ service:
+ type: ClusterIP
+ serviceAccount:
+ create: true
+ name: prometheus-dgraph-io
+
+ prometheusSpec:
+ storageSpec:
+ volumeClaimTemplate:
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: 25Gi
+ resources:
+ requests:
+ memory: 400Mi
+ enableAdminAPI: false
+
+ additionalServiceMonitors:
+ - name: zero-dgraph-io
+ endpoints:
+ - port: http-zero
+ path: /debug/prometheus_metrics
+ namespaceSelector:
+ any: true
+ selector:
+ matchLabels:
+ monitor: zero-dgraph-io
+ - name: alpha-dgraph-io
+ endpoints:
+ - port: http-alpha
+ path: /debug/prometheus_metrics
+ namespaceSelector:
+ any: true
+ selector:
+ matchLabels:
+ monitor: alpha-dgraph-io
+ ```
+
+1. Create a `YAML` file named `secrets.yaml` that has the credentials for
+ Grafana.
+
+ ```yaml
+ grafana:
+ adminPassword:
+ ```
+
+1. Add the `prometheus-operator` Helm chart:
+
+ ```bash
+ helm repo add stable https://charts.helm.sh/stable
+ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+ helm repo update
+ ```
+
+1. Install
+ [kube-prometheus-stack](https://github.com/prometheus-operator/kube-prometheus)
+   with the `` in the namespace named `monitoring`:
+
+ ```bash
+ helm install \
+ --values dgraph-prometheus-operator.yaml \
+ --values secrets.yaml \
+ prometheus-community/kube-prometheus-stack --namespace monitoring
+ ```
+
+ An output similar to the following appears:
+
+ ```bash
+ NAME: dgraph-prometheus-release
+ LAST DEPLOYED: Sun Feb 5 21:35:45 2023
+ NAMESPACE: monitoring
+ STATUS: deployed
+ REVISION: 1
+ NOTES:
+ kube-prometheus-stack has been installed. Check its status by running:
+ kubectl --namespace monitoring get pods -l "release=dgraph-prometheus-release"
+
+ Visit https://github.com/prometheus-operator/kube-prometheus instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.
+ ```
+
+1. Check the list of services in the `monitoring` namespace using
+ `kubectl get svc -n monitoring`:
+
+ ```bash
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ alertmanager-operated ClusterIP None 9093/TCP,9094/TCP,9094/UDP 29s
+ dgraph-prometheus-release-alertmanager ClusterIP 10.128.239.240 9093/TCP 32s
+ dgraph-prometheus-release-grafana ClusterIP 10.128.213.70 80/TCP 32s
+ dgraph-prometheus-release-kube-state-metrics ClusterIP 10.128.139.145 8080/TCP 32s
+ dgraph-prometheus-release-operator ClusterIP 10.128.6.5 443/TCP 32s
+ dgraph-prometheus-release-prometheus ClusterIP 10.128.255.88 9090/TCP 32s
+ dgraph-prometheus-release-prometheus-node-exporter ClusterIP 10.128.103.131 9100/TCP 32s
+ prometheus-operated ClusterIP None 9090/TCP 29s
+
+ ```
+
+1. Use
+ `kubectl port-forward svc/dgraph-prometheus-release-prometheus -n monitoring 9090`
+ to access Prometheus at `localhost:9090`.
+1. Use `kubectl --namespace monitoring port-forward svc/grafana 3000:80` to
+ access Grafana at `localhost:3000`.
+1. Log in to Grafana using the password that you had set in the `secrets.yaml`
+   file.
+1. In the **Dashboards** menu of Grafana, select **Import**.
+1. In the **Dashboards/Import dashboard** page copy the contents of the
+ [dgraph-kubernetes-grafana-dashboard.json](https://github.com/dgraph-io/dgraph/blob/main/contrib/config/monitoring/grafana/dgraph-kubernetes-grafana-dashboard.json)
+ file in **Import via panel json** and click **Load**.
+
+   You can visualize all Dgraph Alpha and Zero Kubernetes Pods, using the regex
+   pattern `"/dgraph-.*-[0-9]*$/"`. You can change this in the dashboard
+ configuration and select the variable Pod. For example, if you have multiple
+ releases, and only want to visualize the current release named
+ `my-release-3`, change the regex pattern to
+ `"/my-release-3.*dgraph-.*-[0-9]*$/"` in the Pod variable of the dashboard
+ configuration. By default, the Prometheus that you installed is configured as
+ the `Datasource` in Grafana.
+
+## Kubernetes Storage
+
+The Kubernetes configurations in the previous sections were configured to run
+Dgraph with any storage type (`storage-class: anything`). On common cloud
+environments like AWS, GCP, and Azure, the default storage types are slow disks
+like hard disks or low IOPS SSDs. We highly recommend using faster disks for
+ideal performance when running Dgraph.
+
+### Local storage
+
+The AWS storage-optimized i-class instances provide locally attached NVMe-based
+SSD storage which provide consistent very high IOPS. The Dgraph team uses
+i3.large instances on AWS to test Dgraph.
+
+You can create a Kubernetes `StorageClass` object to provision a specific type
+of storage volume which you can then attach to your Dgraph Pods. You can set up
+your cluster with local SSDs by using
+[Local Persistent Volumes](https://kubernetes.io/blog/2018/04/13/local-persistent-volumes-beta/).
+This Kubernetes feature is in beta at the time of this writing (Kubernetes
+v1.13.1). You can first set up an EC2 instance with locally attached storage.
+Once it is formatted and mounted properly, then you can create a StorageClass to
+access it:
+
+```yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name:
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: WaitForFirstConsumer
+```
+
+Currently, Kubernetes does not allow automatic provisioning of local storage. So
+a PersistentVolume with a specific mount path should be created:
+
+```yaml
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name:
+spec:
+ capacity:
+ storage: 475Gi
+ volumeMode: Filesystem
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Delete
+ storageClassName:
+ local:
+ path: /data
+ nodeAffinity:
+ required:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/hostname
+ operator: In
+ values:
+ -
+```
+
+Then, in the StatefulSet configuration you can claim this local storage in
+`.spec.volumeClaimTemplates`:
+
+```yaml
+kind: StatefulSet
+---
+volumeClaimTemplates:
+ - metadata:
+ name: datadir
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ storageClassName:
+ resources:
+ requests:
+ storage: 500Gi
+```
+
+You can repeat these steps for each instance that's configured with local node
+storage.
+
+### Non-local persistent disks
+
+EBS volumes on AWS and PDs on GCP are persistent disks that can be configured
+with Dgraph. The disk performance is much lower than locally attached storage
+but can be sufficient for your workload such as testing environments.
+
+When using EBS volumes on AWS, we recommend using Provisioned IOPS SSD EBS
+volumes (the io1 disk type) which provide consistent IOPS. The available IOPS
+for AWS EBS volumes is based on the total disk size. With Kubernetes, you can
+request io1 disks to be provisioned with this config with 50 IOPS/GB using the
+`iopsPerGB` parameter:
+
+```
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ name:
+provisioner: kubernetes.io/aws-ebs
+parameters:
+ type: io1
+ iopsPerGB: "50"
+ fsType: ext4
+```
+
+Example: Requesting a disk size of 250Gi with this storage class would provide
+12.5K IOPS.
+
+## Removing a Dgraph Pod
+
+In the event that you need to completely remove a Pod (e.g., its disk got
+corrupted and data cannot be recovered), you can use the `/removeNode` API to
+remove the node from the cluster. With a Kubernetes StatefulSet, you'll need to
+remove the node in this order:
+
+1. On the Zero leader, call `/removeNode` to remove the Dgraph instance from the
+   cluster (see [More about Dgraph Zero](./deploy/dgraph-zero)). The removed
+ instance will immediately stop running. Any further attempts to join the
+ cluster will fail for that instance since it has been removed.
+2. Remove the PersistentVolumeClaim associated with the Pod to delete its data.
+ This prepares the Pod to join with a clean state.
+3. Restart the Pod. This will create a new PersistentVolumeClaim to create new
+ data directories.
+
+When an Alpha Pod restarts in a replicated cluster, it will join as a new member
+of the cluster, be assigned a group and an unused index from Zero, and receive
+the latest snapshot from the Alpha leader of the group.
+
+When a Zero Pod restarts, it must join the existing group with an unused index
+ID. You set the index ID with the `--raft` superflag's `idx` option. This might
+require you to update the StatefulSet configuration.
+
+## Kubernetes and Bulk Loader
+
+You may want to initialize a new cluster with an existing data set such as data
+from the [Dgraph Bulk Loader](./bulk-loader). You can use
+[Init Containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/)
+to copy the data to the Pod volume before the Alpha process runs.
+
+See the `initContainers` configuration in
+[dgraph-ha.yaml](https://github.com/dgraph-io/dgraph/blob/main/contrib/config/kubernetes/dgraph-ha/dgraph-ha.yaml)
+to learn more.
diff --git a/dgraph/reference/deploy/installation/kubernetes/single-server-cluster.mdx b/dgraph/reference/deploy/installation/kubernetes/single-server-cluster.mdx
new file mode 100644
index 00000000..51c61eda
--- /dev/null
+++ b/dgraph/reference/deploy/installation/kubernetes/single-server-cluster.mdx
@@ -0,0 +1,95 @@
+---
+title: Single Server Cluster Setup
+---
+
+You can install a single server Dgraph cluster in Kubernetes.
+
+## Before you begin
+
+- Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/).
+- Ensure that you have a production-ready Kubernetes cluster running in a cloud
+ provider of your choice.
+- (Optional) To run Dgraph Alpha with TLS, see
+ [TLS Configuration](/tls-configuration).
+
+## Installing a single server Dgraph cluster
+
+1. Verify that you are able to access the nodes in the Kubernetes cluster:
+
+ ```bash
+ kubectl get nodes
+ ```
+
+ An output similar to this appears:
+
+ ```bash
+ NAME STATUS ROLES AGE VERSION
+ ..compute.internal Ready 1m v1.15.11-eks-af3caf
+ ..compute.internal Ready 1m v1.15.11-eks-af3caf
+ ```
+
+ After your Kubernetes cluster is up, you can use
+ [dgraph-single.yaml](https://github.com/dgraph-io/dgraph/blob/main/contrib/config/kubernetes/dgraph-single/dgraph-single.yaml)
+   to start the Zero, Alpha, and Ratel UI services.
+
+1. Start a StatefulSet that creates a single Pod with `Zero`, `Alpha`, and
+ `Ratel UI`:
+
+ ```bash
+ kubectl create --filename https://raw.githubusercontent.com/dgraph-io/dgraph/main/contrib/config/kubernetes/dgraph-single/dgraph-single.yaml
+ ```
+
+ An output similar to this appears:
+
+ ```bash
+ service/dgraph-public created
+ statefulset.apps/dgraph created
+ ```
+
+1. Confirm that the Pod was created successfully.
+
+ ```bash
+ kubectl get pods
+ ```
+
+ An output similar to this appears:
+
+ ```bash
+ NAME READY STATUS RESTARTS AGE
+ dgraph-0 3/3 Running 0 1m
+ ```
+
+1. List the containers running in the Pod `dgraph-0`:
+
+ ```bash
+ kubectl get pods dgraph-0 -o jsonpath='{range .spec.containers[*]}{.name}{"\n"}{end}'
+ ```
+
+ An output similar to this appears:
+
+ ```bash
+ ratel
+ zero
+ alpha
+ ```
+
+ You can check the logs for the containers in the pod using
+   `kubectl logs --follow dgraph-0 <container-name>`.
+
+1. Port forward from your local machine to the Pod:
+
+ ```bash
+ kubectl port-forward pod/dgraph-0 8080:8080
+ kubectl port-forward pod/dgraph-0 8000:8000
+ ```
+
+1. Go to `http://localhost:8000` to access Dgraph using the Ratel UI.
+
+## Deleting Dgraph single server resources
+
+Delete all the resources using:
+
+```sh
+kubectl delete --filename https://raw.githubusercontent.com/dgraph-io/dgraph/main/contrib/config/kubernetes/dgraph-single/dgraph-single.yaml
+kubectl delete persistentvolumeclaims --selector app=dgraph
+```
diff --git a/dgraph/reference/deploy/installation/lambda-server.mdx b/dgraph/reference/deploy/installation/lambda-server.mdx
new file mode 100644
index 00000000..3a9fa4a7
--- /dev/null
+++ b/dgraph/reference/deploy/installation/lambda-server.mdx
@@ -0,0 +1,110 @@
+---
+title: Lambda Server
+description:
+ Setup a Dgraph database with a lambda server. Dgraph Lambda is a serverless
+ platform for running JavaScript on Dgraph and Dgraph Cloud
+---
+
+In this article you'll learn how to setup a Dgraph database with a lambda
+server.
+
+## Dgraph Lambda
+
+[Dgraph Lambda](https://github.com/dgraph-io/dgraph-lambda) is a serverless
+platform for running JavaScript on Dgraph and
+[Dgraph Cloud](https://dgraph.io/cloud).
+
+You can
+[download the latest version](https://github.com/dgraph-io/dgraph-lambda/releases/latest)
+or review the implementation in our
+[open-source repository](https://github.com/dgraph-io/dgraph-lambda).
+
+### Running with Docker
+
+To run a Dgraph Lambda server with Docker:
+
+```bash
+docker run -it --rm -p 8686:8686 -v /path/to/script.js:/app/script/script.js -e DGRAPH_URL=http://host.docker.internal:8080 dgraph/dgraph-lambda
+```
+
+
+ `host.docker.internal` doesn't work on older versions of Docker on Linux. You
+ can use `DGRAPH_URL=http://172.17.0.1:8080` instead.
+
+
+### Adding libraries
+
+If you would like to add libraries to Dgraph Lambda, use
+`webpack --target=webworker` to compile your script.
+
+### Working with TypeScript
+
+You can import `@slash-graphql/lambda-types` to get types for
+`addGraphQLResolver` and `addGraphQLMultiParentResolver`.
+
+## Dgraph Alpha
+
+To set up Dgraph Alpha, you need to define the `--graphql` superflag's
+`lambda-url` option, which is used to set the URL of the lambda server. All the
+`@lambda` fields will be resolved through the lambda functions implemented on
+the given lambda server.
+
+For example:
+
+```bash
+dgraph alpha --graphql lambda-url=http://localhost:8686/graphql-worker
+```
+
+Then test it out with the following `curl` command:
+
+```bash
+curl localhost:8686/graphql-worker -H "Content-Type: application/json" -d '{"resolver":"MyType.customField","parent":[{"customField":"Dgraph Labs"}]}'
+```
+
+### Docker settings
+
+If you're using Docker, you need to add the `--graphql` superflag's `lambda-url`
+option to your Alpha configuration. For example:
+
+```yml
+command:
+ /gobin/dgraph alpha --zero=zero1:5180 -o 100 --expose_trace --trace ratio=1.0
+ --profile_mode block --block_rate 10 --logtostderr -v=2 --security
+ whitelist=10.0.0.0/8,172.16.0.0/12,192.168.0.0/16 --my=alpha1:7180 --graphql
+ lambda-url=http://lambda:8686/graphql-worker
+```
+
+Next, you need to add the Dgraph Lambda server configuration, and map the
+JavaScript file that contains the code for lambda functions to the
+`/app/script/script.js` file. Remember to set the `DGRAPH_URL` environment
+variable to your Alpha server.
+
+Here's a complete Docker example that uses the base Dgraph image and adds Lambda
+server support:
+
+```yml
+services:
+ dgraph:
+ image: dgraph/standalone:latest
+ environment:
+ DGRAPH_ALPHA_GRAPHQL: "lambda-url=http://dgraph_lambda:8686/graphql-worker"
+ ports:
+ - "8080:8080"
+ - "9080:9080"
+ - "8000:8000"
+ volumes:
+ - dgraph:/dgraph
+
+ dgraph_lambda:
+ image: dgraph/dgraph-lambda:latest
+
+ ports:
+ - "8686:8686"
+ environment:
+ DGRAPH_URL: http://dgraph:8080
+ volumes:
+ - ./gql/script.js:/app/script/script.js:ro
+
+volumes:
+ dgraph: {}
+```
diff --git a/dgraph/reference/deploy/installation/production-checklist.mdx b/dgraph/reference/deploy/installation/production-checklist.mdx
new file mode 100644
index 00000000..e4154c33
--- /dev/null
+++ b/dgraph/reference/deploy/installation/production-checklist.mdx
@@ -0,0 +1,207 @@
+---
+title: Production Checklist
+description: Requirements to install Dgraph in a production environment
+---
+
+This guide describes important setup recommendations for a production-ready
+Dgraph cluster, ensuring high availability with external persistent storage,
+automatic recovery of failed services, automatic recovery of failed systems such
+as virtual machines, and disaster recovery such as backup/restore or
+export/import with automation.
+
+
+ In this guide, a node refers to a Dgraph instance unless specified otherwise.
+
+
+A **Dgraph cluster** is comprised of multiple **Dgraph instances** or nodes
+connected together to form a single distributed database. A Dgraph instance is
+either a **Dgraph Zero** or **Dgraph Alpha**, each of which serves a different
+role in the cluster.
+
+Once installed you may also install or use a **Dgraph client** to communicate
+with the database and perform queries, mutations, alter schema operations and so
+on. Pure HTTP calls from curl, Postman, or another program are also possible
+without a specific client, but there are a range of clients that provide
+higher-level language bindings, and which use optimized gRPC for communications
+to the database. Any standards-compliant GraphQL client will work with Dgraph to
+run GraphQL operations. To run DQL and other Dgraph-specific operations, use a
+Dgraph client.
+
+Dgraph provides official clients for Go, Java, Python, JavaScript, and C#, and
+the JavaScript client supports both gRPC and HTTP to run more easily in a
+browser. Community-developed Dgraph clients for other languages are also
+available. The full list of clients can be found on the [Clients](./clients)
+page.
+One particular client, Dgraph Ratel, is a more sophisticated UI tool used to
+visualize queries, run mutations, and manage schemas in both GraphQL and DQL.
+Note that clients are not part of a database cluster, and simply connect to one
+or more Dgraph Alpha instances.
+
+### Cluster Requirements
+
+A minimum of one Dgraph Zero and one Dgraph Alpha is needed for a working
+cluster.
+
+There can be multiple Dgraph Zeros and Dgraph Alphas running in a single
+cluster.
+
+### Machine Requirements
+
+To ensure predictable performance characteristics, Dgraph instances should
+**not** run on "burstable" or throttled machines that limit resources. That
+includes t2 class machines on AWS.
+
+To ensure that Dgraph is highly-available, we recommend each Dgraph instance be
+deployed to a different underlying host machine, and ideally that machines are
+in different availability zones or racks. In the event of an underlying machine
+failure, it is critical that only one Dgraph alpha and one Dgraph zero be
+offline so that 2 of the 3 instances in each group maintain a quorum. Also when
+using VMs or Docker/K8s, ensure machines are not over-subscribed and ideally not
+co-resident with other processes that will interrupt and delay Dgraph
+processing.
+
+If you'd like to run Dgraph with fewer machines, then the recommended
+configuration is to run a single Dgraph Zero and a single Dgraph Alpha per
+machine. In a high availability setup, that allows the cluster to lose a single
+machine (simultaneously losing a Dgraph Zero and a Dgraph Alpha) with continued
+availability of the database.
+
+Do not run multiple Dgraph Zeros or Dgraph Alpha processes on a single machine.
+This can affect performance due to shared resource issues and reduce
+availability in the event of machine failures.
+
+### Operating System
+
+Dgraph is designed to run on Linux. To run Dgraph on Windows and macOS, use the
+[standalone Docker image](./dgraph-overview#to-run-dgraph-using-the-standalone-docker-image).
+
+### CPU and Memory
+
+We recommend 8 vCPUs or cores on each of three HA alpha instances for production
+loads, with 16 GiB+ memory per node.
+
+You'll want to ensure that your CPU and memory resources are sufficient for your
+production workload. A common configuration for Dgraph is 16 CPUs and 32 GiB of
+memory per machine. Dgraph is designed with concurrency in mind, so more cores
+means quicker processing and higher throughput of requests.
+
+You may find you'll need more CPU cores and memory for your specific use case.
+
+In addition, we highly recommend that your CPU clock rate is equal or above
+3.4GHz.
+
+### Disk
+
+Dgraph instances make heavy use of disks, so storage with high IOPS is highly
+recommended to ensure reliable performance. Specifically SSDs, not HDDs.
+
+Regarding disk IOPS, the recommendation is:
+
+- 1000 IOPS minimum
+- 3000 IOPS for medium and large datasets
+
+Instances such as c5d.4xlarge have locally-attached NVMe SSDs with high IOPS.
+You can also use EBS volumes with provisioned IOPS (io1). If you are not running
+performance-critical workloads, you can also choose to use cheaper gp2 EBS
+volumes. Typically, AWS
+[gp3](https://aws.amazon.com/about-aws/whats-new/2020/12/introducing-new-amazon-ebs-general-purpose-volumes-gp3/?nc1=h_ls)
+disks are a good option and have 3000 Baseline IOPS at any disk size.
+
+Recommended disk sizes for Dgraph Zero and Dgraph Alpha:
+
+- Dgraph Zero: 200 GB to 300 GB. Dgraph Zero stores cluster metadata information
+ and maintains a write-ahead log for cluster operations.
+- Dgraph Alpha: 250 GB to 750 GB. Dgraph Alpha stores database data, including
+ the schema, indices, and the data values. It maintains a write-ahead log of
+ changes to the database. Your cloud provider may provide better disk
+ performance based on the volume size.
+- If you plan to store over 1.1TB per Dgraph Alpha instance, you must increase
+ either the MaxLevels or TableSizeMultiplier.
+
+Additional recommendations:
+
+- The recommended Linux filesystem is ext4.
+- Avoid using shared storage such as NFS, CIFS, and CEPH storage.
+
+### Firewall Rules
+
+Dgraph instances communicate over several ports. Firewall rules should be
+configured appropriately for the ports documented in
+[Ports Usage](./ports-usage).
+
+Internal ports must be accessible by all Zero and Alpha peers for proper
+cluster-internal communication. Database clients must be able to connect to
+Dgraph Alpha external ports either directly or through a load balancer.
+
+Dgraph Zeros can be set up in a private network where communication is only with
+Dgraph Alphas, database administrators, internal services (such as Prometheus or
+Jaeger), and possibly developers (see note below). Dgraph Zero's 6080 external
+port is only necessary for database administrators. For example, it can be used
+to inspect the cluster metadata (/state), allocate UIDs or set txn timestamps
+(/assign), move data shards (/moveTablet), or remove cluster nodes
+(/removeNode). The full docs about Zero's administrative tasks are in
+[More About Dgraph Zero](./deploy/dgraph-zero).
+
+
+ Developers using Dgraph Live Loader or Dgraph Bulk Loader require access to
+ both Dgraph Zero port 5080 and Dgraph Alpha port 9080. When using those tools,
+ consider using them within your environment that has network access to both
+ ports of the cluster.
+
+
+### Operating System Tuning
+
+The OS should be configured with the recommended settings to ensure that Dgraph
+runs properly.
+
+#### File Descriptors Limit
+
+Dgraph can use a large number of open file descriptors. Most operating systems
+set a default limit that is lower than what is required.
+
+It is recommended to set the file descriptors limit to unlimited. If that is not
+possible, set it to at least a million (1,048,576) which is recommended to
+account for cluster growth over time.
+
+### Deployment
+
+A Dgraph instance is run as a single process from a single static binary. It
+does not require any additional dependencies or separate services in order to
+run (see the [Supplementary Services](./#supplementary-services) section for
+third-party services that work alongside Dgraph). A Dgraph cluster is set up by
+running multiple Dgraph processes networked together.
+
+### Backup Policy
+
+A backup policy is a predefined, set schedule used to schedule backups of
+information from business applications. A backup policy helps to ensure data
+recoverability in the event of accidental data deletion, data corruption, or a
+system outage.
+
+For Dgraph, backups are created using the
+[backups enterprise feature](./enterprise-features/binary-backups). You can
+also create full exports of your data and schema using
+[data exports](./dgraph-administration#exporting-database) available as an
+open source feature.
+
+We **strongly** recommend that you have a backup policy in place before moving
+your application to the production phase, and we also suggest that you have a
+backup policy even for pre-production apps supported by Dgraph database
+instances running in development, staging, QA or pre-production clusters.
+
+We suggest that your policy include frequent full and incremental backups.
+Accordingly, we suggest the following backup policy for your production apps:
+
+- [full backup](https://dgraph.io/docs/enterprise-features/binary-backups/#forcing-a-full-backup)
+ every 24hrs
+- incremental backup every 2/4hrs
+
+### Supplementary Services
+
+These services are not required for a Dgraph cluster to function but are
+recommended for better insight when operating a Dgraph cluster.
+
+- [Metrics][] and [monitoring][] with Prometheus and Grafana.
+- [Distributed tracing][] with Jaeger.
+
+[metrics]: ./metrics
+[monitoring]: ./deploy/monitoring
+[distributed tracing]: ./tracing
diff --git a/dgraph/reference/deploy/installation/single-host-setup.mdx b/dgraph/reference/deploy/installation/single-host-setup.mdx
new file mode 100644
index 00000000..5067e6ac
--- /dev/null
+++ b/dgraph/reference/deploy/installation/single-host-setup.mdx
@@ -0,0 +1,229 @@
+---
+title: Learning Environment
+---
+
+To learn about Dgraph and the components, you can install and run Dgraph cluster
+on a single host using Docker, Docker Compose, or Dgraph command line.
+
+
+
+
+
+Dgraph cluster can be setup running as containers on a single host.
+
+
+ To evaluate Dgraph on Windows and macOS use the [standalone Docker
+ image](./dgraph-overview#to-run-dgraph-using-the-standalone-docker-image).
+
+
+#### Before you begin
+
+Ensure that you have installed:
+
+- Docker [Desktop](https://docs.docker.com/desktop/) (required for windows or
+ mac)
+- Docker [Engine](https://docs.docker.com/engine/install/)
+
+#### Launch a Dgraph standalone cluster using Docker
+
+1. Select a name `<container-name>` for your Docker container and create a
+   directory `<local-path>` that will hold the Dgraph data on your local
+   file system.
+1. Run a container with the dgraph/standalone image:
+   ```sh
+   docker run --name <container-name> -d -p "8080:8080" -p "9080:9080" -v <local-path>:/dgraph dgraph/standalone:latest
+   ```
+1. Optionally launch [Ratel UI](./ratel/overview) using the dgraph/ratel Docker
+   image:
+   ```sh
+   docker run --name ratel -d -p "8000:8000" dgraph/ratel:latest
+   ```
+   You can now use Ratel UI in your browser at localhost:8000 and connect to
+   your Dgraph cluster at localhost:8080.
+
+#### Setup a Dgraph cluster on a single host using Docker
+
+1. Get the `<ip-address>` of the host using:
+ ```sh
+ ip addr # On Arch Linux
+ ifconfig # On Ubuntu/Mac
+ ```
+1. Pull the latest Dgraph image using docker:
+ ```sh
+ docker pull dgraph/dgraph:latest
+ ```
+1. Verify that the image is downloaded:
+
+ ```sh
+ docker images
+ ```
+
+1. Create a Docker network `<network-name>` using:
+   ```sh
+   docker network create <network-name>
+   ```
+1. Create a directory `<zero-data-dir>` to store data for Dgraph Zero and run
+   the container:
+
+   ```sh
+   mkdir ~/<zero-data-dir> # Or any other directory where data should be stored.
+
+   docker run -it -p 5080:5080 --network <network-name> -p 6080:6080 -v ~/<zero-data-dir>:/dgraph dgraph/dgraph:latest dgraph zero --my=<ip-address>:5080
+   ```
+
+1. Create a directory `<alpha-data-dir>` to store data for Dgraph Alpha and run
+   the container:
+
+   ```sh
+   mkdir ~/<alpha-data-dir> # Or any other directory where data should be stored.
+
+   docker run -it -p 7080:7080 --network <network-name> -p 8080:8080 -p 9080:9080 -v ~/<alpha-data-dir>:/dgraph dgraph/dgraph:latest dgraph alpha --zero=<ip-address>:5080 --my=<ip-address>:7080
+   ```
+
+1. Create a directory `<alpha-data-dir-2>` to store data for the second Dgraph
+   Alpha and run the container:
+
+   ```sh
+   mkdir ~/<alpha-data-dir-2> # Or any other directory where data should be stored.
+
+   docker run -it -p 7081:7081 --network <network-name> -p 8081:8081 -p 9081:9081 -v ~/<alpha-data-dir-2>:/dgraph dgraph/dgraph:latest dgraph alpha --zero=<ip-address>:5080 --my=<ip-address>:7081 -o=1
+   ```
+
+   To override the default ports for the second Alpha use `-o`.
+
+1. Connect to the Dgraph cluster that is running using https://play.dgraph.io/.
+ For information about connecting, see [Ratel UI](/ratel/connection).
+
+
+
+
+
+You can run Dgraph directly on a single Linux host.
+
+#### Before you begin
+
+Ensure that you have:
+
+- Installed [Dgraph](./download) on the Linux host.
+- Made a note of the `<ip-address>` of the host.
+
+#### Using Dgraph Command Line
+
+You can start Dgraph on a single host using the dgraph command line.
+
+1. Run Dgraph zero
+
+ ```sh
+   dgraph zero --my=<ip-address>:5080
+ ```
+
+ The `--my` flag is the connection that Dgraph alphas dial to talk to zero.
+ So, the port `5080` and the IP address must be visible to all the Dgraph
+ alphas. For all other various flags, run `dgraph zero --help`.
+
+1. Run two Dgraph alpha nodes:
+
+ ```sh
+   dgraph alpha --my=<ip-address>:7080 --zero=localhost:5080
+   dgraph alpha --my=<ip-address>:7081 --zero=localhost:5080 -o=1
+ ```
+
+ Dgraph alpha nodes use two directories to persist data and
+ [WAL logs](/consistency-model), and these directories must be different for
+ each alpha if they are running on the same host. You can use `-p` and `-w` to
+   change the location of the data and WAL directories. To learn more about other
+ flags, run `dgraph alpha --help`.
+
+1. Connect to the Dgraph cluster that is running using https://play.dgraph.io/.
+ For information about connecting, see [Ratel UI](/ratel/connection).
+
+
+
+
+
+You can install Dgraph using Docker Compose on a system hosted on any cloud
+provider.
+
+#### Before you begin
+
+- Ensure that you have installed Docker
+ [Compose](https://docs.docker.com/compose/).
+- IP address of the system on the cloud: `<cloud-ip>`.
+- IP address of the local host: `<local-ip>`.
+
+#### Using Docker Compose
+
+1. Download the Dgraph `docker-compose.yml` file:
+
+ wget https://github.com/dgraph-io/dgraph/raw/main/contrib/config/docker/docker-compose.yml
+
+ By default only the localhost IP 127.0.0.1 is allowed. When you run Dgraph
+ on Docker, the containers are assigned IPs and those IPs need to be added to
+ the allowed list.
+
+1. Add a list of IPs allowed for Dgraph so that you can create the schema. Use
+   an editor of your choice and add the `<local-ip>` of the local host in the
+   `docker-compose.yml` file:
+
+ ```txt
+ # This Docker Compose file can be used to quickly boot up Dgraph Zero
+ # and Alpha in different Docker containers.
+ # It mounts /tmp/data on the host machine to /dgraph within the
+ # container. You will need to change /tmp/data to a more appropriate location.
+ # Run `docker-compose up` to start Dgraph.
+ version: "3.2"
+ services:
+ zero:
+ image: dgraph/dgraph:latest
+ volumes:
+ - /tmp/data:/dgraph
+ ports:
+ - 5080:5080
+ - 6080:6080
+ restart: on-failure
+ command: dgraph zero --my=zero:5080
+ alpha:
+ image: dgraph/dgraph:latest
+ volumes:
+ - /tmp/data:/dgraph
+ ports:
+ - 8080:8080
+ - 9080:9080
+ restart: on-failure
+       command: dgraph alpha --my=alpha:7080 --zero=zero:5080 --security whitelist=<local-ip>
+ ratel:
+ image: dgraph/ratel:latest
+ ports:
+ - 8000:8000
+
+ ```
+
+1. Run the `docker-compose` command to start the Dgraph services in the docker
+ container:
+
+ sudo docker-compose up
+
+ After Dgraph is installed on Docker, you can view the images and the
+ containers running in Docker for Dgraph.
+
+1. View the containers running for Dgraph using:
+
+ sudo docker ps -a
+
+ An output similar to the following appears:
+
+ ```bash
+ CONTAINER ID IMAGE COMMAND CREATED
+ 4b67157933b6 dgraph/dgraph:latest "dgraph zero --my=ze…" 2 days ago
+ 3faf9bba3a5b dgraph/ratel:latest "/usr/local/bin/dgra…" 2 days ago
+ a6b5823b668d dgraph/dgraph:latest "dgraph alpha --my=a…" 2 days ago
+ ```
+
+1. To access the Ratel UI for queries, mutations, and altering schema, open
+   your web browser and navigate to `http://<cloud-ip>:8000`.
+1. Click **Launch Latest** to access the latest stable release of Ratel UI.
+1. In the **Dgraph Server Connection** dialog, set the **Dgraph server
+   URL** to `http://<cloud-ip>:8080`.
+1. Click **Connect**. The connection health appears green.
+1. Click **Continue** to query or run mutations.
+
+
+
+
diff --git a/dgraph/reference/deploy/monitoring.mdx b/dgraph/reference/deploy/monitoring.mdx
new file mode 100644
index 00000000..f8b0a1c4
--- /dev/null
+++ b/dgraph/reference/deploy/monitoring.mdx
@@ -0,0 +1,159 @@
+---
+title: Monitoring
+---
+
+Dgraph exposes metrics via the `/debug/vars` endpoint in json format and the
+`/debug/prometheus_metrics` endpoint in Prometheus's text-based format. Dgraph
+doesn't store the metrics and only exposes the value of the metrics at that
+instant. You can either poll this endpoint to get the data in your monitoring
+systems or install
+**[Prometheus](https://prometheus.io/docs/introduction/install/)**. Replace
+targets in the below config file with the ip of your Dgraph instances and run
+prometheus using the command `prometheus --config.file my_config.yaml`.
+
+```sh
+scrape_configs:
+ - job_name: "dgraph"
+ metrics_path: "/debug/prometheus_metrics"
+ scrape_interval: "2s"
+ static_configs:
+ - targets:
+ - 172.31.9.133:6080 # For Dgraph zero, 6080 is the http endpoint exposing metrics.
+ - 172.31.15.230:8080 # For Dgraph alpha, 8080 is the http endpoint exposing metrics.
+ - 172.31.0.170:8080
+ - 172.31.8.118:8080
+```
+
+
+ Raw data exported by Prometheus is available via `/debug/prometheus_metrics`
+ endpoint on Dgraph alphas.
+
+
+Install **[Grafana](http://docs.grafana.org/installation/)** to plot the
+metrics. Grafana runs at port 3000 in default settings. Create a prometheus
+datasource by following these
+**[steps](https://prometheus.io/docs/visualization/grafana/#creating-a-prometheus-data-source)**.
+Import
+**[grafana_dashboard.json](https://github.com/dgraph-io/benchmarks/blob/master/scripts/grafana_dashboard.json)**
+by following this
+**[link](http://docs.grafana.org/reference/export_import/#importing-a-dashboard)**.
+
+## CloudWatch
+
+Route53's health checks can be leveraged to create standard CloudWatch alarms to
+notify on change in the status of the `/health` endpoints of Alpha and Zero.
+
+Considering that the endpoints to monitor are publicly accessible and you have
+the AWS credentials and [awscli](https://aws.amazon.com/cli/) setup, we’ll go
+through an example of setting up a simple CloudWatch alarm configured to alert
+via email for the Alpha endpoint `alpha.acme.org:8080/health`. Dgraph Zero's
+`/health` endpoint can also be monitored in a similar way.
+
+### Create the Route53 Health Check
+
+```sh
+aws route53 create-health-check \
+ --caller-reference $(date "+%Y%m%d%H%M%S") \
+ --health-check-config file:///tmp/create-healthcheck.json \
+ --query 'HealthCheck.Id'
+```
+
+The file `/tmp/create-healthcheck.json` would need to have the values for the
+parameters required to create the health check as such:
+
+```sh
+{
+ "Type": "HTTPS",
+ "ResourcePath": "/health",
+ "FullyQualifiedDomainName": "alpha.acme.org",
+ "Port": 8080,
+ "RequestInterval": 30,
+ "FailureThreshold": 3
+}
+```
+
+The reference for the values one can specify while creating or updating a health
+check can be found on the AWS
+[documentation](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/health-checks-creating-values.html).
+
+The response to the above command would be the ID of the created health check.
+
+```sh
+"29bdeaaa-f5b5-417e-a5ce-7dba1k5f131b"
+```
+
+Make a note of the health check ID. This will be used to integrate CloudWatch
+alarms with the health check.
+
+
+ Currently, Route53 metrics are only
+  [available](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/monitoring-health-checks.html)
+ in the **US East (N. Virginia)** region. The Cloudwatch Alarm (and the SNS
+ Topic) should therefore be created in `us-east-1`.
+
+
+### [Optional] Creating an SNS Topic
+
+SNS topics are used to create message delivery channels. If you do not have any
+SNS topics configured, one can be created by running the following command:
+
+```sh
+aws sns create-topic --region=us-east-1 --name ops --query 'TopicArn'
+```
+
+The response to the above command would be as follows:
+
+```sh
+"arn:aws:sns:us-east-1:123456789012:ops"
+```
+
+Be sure to make a note of the topic ARN. This would be used to configure the
+CloudWatch alarm's action parameter.
+
+Run the following command to subscribe your email to the SNS topic:
+
+```sh
+aws sns subscribe \
+ --topic-arn arn:aws:sns:us-east-1:123456789012:ops \
+ --protocol email \
+ --notification-endpoint ops@acme.org
+```
+
+The subscription will need to be confirmed through _AWS Notification -
+Subscription Confirmation_ sent through email. Once the subscription is
+confirmed, CloudWatch can be configured to use the SNS topic to trigger the
+alarm notification.
+
+### Creating a CloudWatch Alarm
+
+The following command creates a CloudWatch alarm with `--alarm-actions` set to
+the ARN of the SNS topic and the `--dimensions` of the alarm set to the health
+check ID.
+
+```sh
+aws cloudwatch put-metric-alarm \
+ --region=us-east-1 \
+ --alarm-name dgraph-alpha \
+ --alarm-description "Alarm for when Alpha is down" \
+ --metric-name HealthCheckStatus \
+ --dimensions "Name=HealthCheckId,Value=29bdeaaa-f5b5-417e-a5ce-7dba1k5f131b" \
+ --namespace AWS/Route53 \
+ --statistic Minimum \
+ --period 60 \
+ --threshold 1 \
+ --comparison-operator LessThanThreshold \
+ --evaluation-periods 1 \
+ --treat-missing-data breaching \
+ --alarm-actions arn:aws:sns:us-east-1:123456789012:ops
+```
+
+One can verify the alarm status from the CloudWatch or Route53 consoles.
+
+#### Internal Endpoints
+
+If the Alpha endpoint is internal to the VPC network - one would need to create
+a Lambda function that would periodically (triggered using CloudWatch Event
+Rules) request the `/health` path and create CloudWatch metrics which could then
+be used to create the required CloudWatch alarms. The architecture and the
+CloudFormation template to achieve the same can be found
+[here](https://aws.amazon.com/blogs/networking-and-content-delivery/performing-route-53-health-checks-on-private-resources-in-a-vpc-with-aws-lambda-and-amazon-cloudwatch/).
diff --git a/dgraph/reference/deploy/security/index.mdx b/dgraph/reference/deploy/security/index.mdx
new file mode 100644
index 00000000..77350687
--- /dev/null
+++ b/dgraph/reference/deploy/security/index.mdx
@@ -0,0 +1,3 @@
+---
+title: Security
+---
diff --git a/dgraph/reference/deploy/security/ports-usage.mdx b/dgraph/reference/deploy/security/ports-usage.mdx
new file mode 100644
index 00000000..8d2d9eac
--- /dev/null
+++ b/dgraph/reference/deploy/security/ports-usage.mdx
@@ -0,0 +1,117 @@
+---
+title: Ports Usage
+---
+
+Dgraph cluster nodes use a range of ports to communicate over gRPC and HTTP.
+Choose these ports carefully based on your topology and mode of deployment, as
+this will impact the access security rules or firewall configurations required
+for each port.
+
+## Types of ports
+
+Dgraph Alpha and Dgraph Zero nodes use a variety of gRPC and HTTP ports, as
+follows:
+
+- **gRPC-internal-private**: Used between the cluster nodes for internal
+ communication and message exchange. Communication using these ports is
+ TLS-encrypted.
+- **gRPC-external-private**: Used by Dgraph Live Loader and Dgraph Bulk loader
+ to access APIs over gRPC.
+- **gRPC-external-public**: Used by Dgraph clients to access APIs in a session
+ that can persist after a query.
+- **HTTP-external-private**: Used for monitoring and administrative tasks.
+- **HTTP-external-public:** Used by clients to access APIs over HTTP.
+
+## Default ports used by different nodes
+
+| Dgraph Node Type | gRPC-internal-private | gRPC-external-private | gRPC-external-public | HTTP-external-private | HTTP-external-public |
+| ---------------- | --------------------- | --------------------- | -------------------- | --------------------- | -------------------- |
+| zero             | 5080<sup>1</sup>      | 5080<sup>1</sup>      |                      | 6080<sup>2</sup>      |                      |
+| alpha            | 7080                  |                       | 9080                 |                       | 8080                 |
+| ratel            |                       |                       |                      |                       | 8000                 |
+
+<sup>1</sup>: Dgraph Zero uses port 5080 for internal communication within the
+cluster, and to support the [data import](./about_import) tools: Dgraph Live
+Loader and Dgraph Bulk Loader.
+
+<sup>2</sup>: Dgraph Zero uses port 6080 for
+[administrative](./deploy/dgraph-zero) operations. Dgraph clients cannot access
+this port.
+
+Users must modify security rules or open firewall ports depending upon their
+underlying network to allow communication between cluster nodes, between the
+Dgraph instances, and between Dgraph clients. In general, you should configure
+the gRPC and HTTP `external-public` ports for open access by Dgraph clients, and
+configure the gRPC-internal ports for open access by the cluster nodes.
+
+**Ratel UI** accesses Dgraph Alpha on the `HTTP-external-public port` (which
+defaults to localhost:8080) and can be configured to talk to a remote Dgraph
+cluster. This way you can run Ratel on your local machine and point to a remote
+cluster. But, if you are deploying Ratel along with Dgraph cluster, then you may
+have to expose port 8000 to the public.
+
+**Port Offset** To make it easier for users to set up a cluster, Dgraph has
+default values for the ports used by Dgraph nodes. To support multiple nodes
+running on a single machine or VM, you can set a node to use different ports
+using an offset (using the command option `--port_offset`). This command
+increments the actual ports used by the node by the offset value provided. You
+can also use port offsets when starting multiple Dgraph Zero nodes in a
+development environment.
+
+For example, when a user runs Dgraph Alpha with the `--port_offset 2` setting,
+then the Alpha node binds to port 7082 (`gRPC-internal-private`), 8082
+(`HTTP-external-public`) and 9082 (`gRPC-external-public`), respectively.
+
+**Ratel UI** by default listens on port 8000. You can use the `-port` flag to
+configure it to listen on any other port.
+
+## High Availability (HA) cluster configuration
+
+In a HA cluster configuration, you should run three or five replicas for the
+Zero node, and three or five replicas for the Alpha node. A Dgraph cluster is
+divided into Raft groups, where Dgraph Zero is group 0 and each shard of Dgraph
+Alpha is a subsequent numbered group (group 1, group 2, etc.). The number of
+replicas in each Raft group must be an odd number for the group to have
+consensus, which will exist when the majority of nodes in a group are available.
+
+
+ If the number of replicas in a Raft group is **2N + 1**, up to **N** nodes can
+ go offline without any impact on reads or writes. So, if there are five
+ replicas, three must be online to avoid an impact to reads or writes.
+
+
+### Dgraph Zero
+
+Run three Dgraph Zero instances, assigning a unique integer ID to each using the
+`--raft` superflag's `idx` option, and passing the address of any healthy Dgraph
+Zero instance using the `--peer` flag.
+
+To run three replicas for the Alpha nodes, set `--replicas=3`. Each time a new
+Alpha node is added, the Zero node will check the existing groups and assign
+them as appropriate.
+
+### Dgraph Alpha
+
+You can run as many Dgraph Alpha nodes as you want. You can manually set the
+`--raft` superflag's `idx` option, or you can leave that flag empty, and the
+Zero node will auto-assign an id to the Alpha node. This id persists in the
+write-ahead log, so be careful not to delete it.
+
+The new Alpha nodes will automatically detect each other by communicating with
+Dgraph Zero and establish connections to each other. If you don't have a proxy
+or load balancer for the Zero nodes, you can provide a list of Zero node
+addresses for Alpha nodes to use at startup with the `--zero` flag. The Alpha
+node will try to connect to one of the Zero nodes starting from the first Zero
+node address in the list. For example: `--zero=zero1,zero2,zero3` where `zero1`
+is the `host:port` of a zero instance.
+
+Typically, a Zero node would first attempt to replicate a group, by assigning a
+new Alpha node to run the same group previously assigned to another. After the
+group has been replicated per the `--replicas` flag, Dgraph Zero creates a new
+group.
+
+Over time, the data will be evenly split across all of the groups. So, it's
+important to ensure that the number of Alpha nodes is a multiple of the
+replication setting. For example, if you set `--replicas=3` for a Zero node and
+then run three Alpha nodes, the data is not sharded but has 3x replication. Or,
+if you run six Alpha nodes, the data is sharded into two groups, with 3x
+replication.
diff --git a/dgraph/reference/deploy/security/tls-configuration.mdx b/dgraph/reference/deploy/security/tls-configuration.mdx
new file mode 100644
index 00000000..733d7a6d
--- /dev/null
+++ b/dgraph/reference/deploy/security/tls-configuration.mdx
@@ -0,0 +1,424 @@
+---
+title: TLS Configuration
+---
+
+Connections between Dgraph database and its clients can be secured using TLS. In
+addition, Dgraph can now secure gRPC communications between Dgraph Alpha and
+Dgraph Zero server nodes using mutual TLS (mTLS). Dgraph can now also secure
+communications over the Dgraph Zero `gRPC-external-private` port used by
+Dgraph's Live Loader and Bulk Loader clients. To learn more about the HTTP and
+gRPC ports used by Dgraph Alpha and Dgraph Zero, see
+[Ports Usage](./ports-usage). Password-protected private keys are **not
+supported**.
+
+To further improve TLS security, only TLS v1.2 cipher suites that use 128-bit or
+greater RSA or AES encryption are supported.
+
+
+ If you're generating encrypted private keys with `openssl`, be sure to specify
+ the encryption algorithm explicitly (like `-aes256`). This will force
+ `openssl` to include `DEK-Info` header in private key, which is required to
+ decrypt the key by Dgraph. When default encryption is used, `openssl` doesn't
+ write that header and key can't be decrypted.
+
+
+## Dgraph Certificate Management Tool
+
+
+ This section refers to the `dgraph cert` command which was introduced in
+ v1.0.9. For previous releases, see the previous [TLS configuration
+ documentation](https://github.com/dgraph-io/dgraph/blob/release/v1.0.7/wiki/content/deploy/index.md#tls-configuration).
+
+
+The `dgraph cert` program creates and manages CA-signed certificates and private
+keys using a generated Dgraph Root CA. There are three types of certificate/key
+pairs:
+
+1. Root CA certificate/key pair: This is used to sign and verify node and client
+ certificates. If the root CA certificate is changed then you must regenerate
+ all certificates, and this certificate must be accessible to the Alpha nodes.
+2. Node certificate/key pair: This is shared by the Dgraph Alpha nodes and used
+ for accepting TLS connections.
+3. Client certificate/key pair: This is used by the clients (like live loader
+ and Ratel) to communicate with Dgraph Alpha server nodes where client
+ authentication with mTLS is required.
+
+```sh
+# To see the available flags.
+$ dgraph cert --help
+
+# Create Dgraph Root CA, used to sign all other certificates.
+$ dgraph cert
+
+# Create node certificate and private key
+$ dgraph cert -n localhost
+
+# Create client certificate and private key for mTLS (mutual TLS)
+$ dgraph cert -c dgraphuser
+
+# Combine all in one command
+$ dgraph cert -n localhost -c dgraphuser
+
+# List all your certificates and keys
+$ dgraph cert ls
+```
+
+The default location where the _cert_ command stores certificates (and keys) is
+`tls` under the Dgraph working directory. The default directory path can be
+overridden using the `--dir` option. For example:
+
+```sh
+$ dgraph cert --dir ~/mycerts
+```
+
+### File naming conventions
+
+The following file naming conventions are used by Dgraph for proper TLS setup.
+
+| File name | Description | Use |
+| ----------------- | -------------------------- | ------------------------------------------------- |
+| ca.crt | Dgraph Root CA certificate | Verify all certificates |
+| ca.key | Dgraph CA private key | Validate CA certificate |
+| node.crt | Dgraph node certificate | Shared by all nodes for accepting TLS connections |
+| node.key | Dgraph node private key | Validate node certificate |
+| client._name_.crt | Dgraph client certificate | Authenticate a client _name_ |
+| client._name_.key | Dgraph client private key | Validate _name_ client certificate |
+
+For client authentication, each client must have their own certificate and key.
+These are then used to connect to the Dgraph server nodes.
+
+The node certificate `node.crt` can support multiple node names using multiple
+host names and/or IP address. Just separate the names with commas when
+generating the certificate.
+
+```sh
+$ dgraph cert -n localhost,104.25.165.23,dgraph.io,2400:cb00:2048:1::6819:a417
+```
+
+
+ You must delete the old node cert and key before you can generate a new pair.
+
+
+
+ When using host names for node certificates, including _localhost_, your
+ clients must connect to the matching host name -- such as _localhost_ not
+ 127.0.0.1. If you need to use IP addresses, then add them to the node
+ certificate.
+
+
+### Certificate inspection
+
+The command `dgraph cert ls` lists all certificates and keys in the `--dir`
+directory (default `dgraph-tls`), along with details to inspect and validate
+cert/key pairs.
+
+Example of command output:
+
+```sh
+-rw-r--r-- ca.crt - Dgraph Root CA certificate
+ Issuer: Dgraph Labs, Inc.
+ S/N: 043c4d8fdd347f06
+ Expiration: 02 Apr 29 16:56 UTC
+SHA-256 Digest: 4A2B0F0F 716BF5B6 C603E01A 6229D681 0B2AFDC5 CADF5A0D 17D59299 116119E5
+
+-r-------- ca.key - Dgraph Root CA key
+SHA-256 Digest: 4A2B0F0F 716BF5B6 C603E01A 6229D681 0B2AFDC5 CADF5A0D 17D59299 116119E5
+
+-rw-r--r-- client.admin.crt - Dgraph client certificate: admin
+ Issuer: Dgraph Labs, Inc.
+ CA Verify: PASSED
+ S/N: 297e4cb4f97c71f9
+ Expiration: 03 Apr 24 17:29 UTC
+SHA-256 Digest: D23EFB61 DE03C735 EB07B318 DB70D471 D3FE8556 B15D084C 62675857 788DF26C
+
+-rw------- client.admin.key - Dgraph Client key
+SHA-256 Digest: D23EFB61 DE03C735 EB07B318 DB70D471 D3FE8556 B15D084C 62675857 788DF26C
+
+-rw-r--r-- node.crt - Dgraph Node certificate
+ Issuer: Dgraph Labs, Inc.
+ CA Verify: PASSED
+ S/N: 795ff0e0146fdb2d
+ Expiration: 03 Apr 24 17:00 UTC
+ Hosts: 104.25.165.23, 2400:cb00:2048:1::6819:a417, localhost, dgraph.io
+SHA-256 Digest: 7E243ED5 3286AE71 B9B4E26C 5B2293DA D3E7F336 1B1AFFA7 885E8767 B1A84D28
+
+-rw------- node.key - Dgraph Node key
+SHA-256 Digest: 7E243ED5 3286AE71 B9B4E26C 5B2293DA D3E7F336 1B1AFFA7 885E8767 B1A84D28
+```
+
+Important points:
+
+- The cert/key pairs should always have matching SHA-256 digests. Otherwise, the
+ cert(s) must be regenerated. If the Root CA pair differ, all cert/key must be
+ regenerated; the flag `--force` can help.
+- All certificates must pass Dgraph CA verification.
+- All key files should have the least access permissions, especially the
+ `ca.key`, but be readable.
+- Key files won't be overwritten if they have limited access, even with
+ `--force`.
+- Node certificates are only valid for the hosts listed.
+- Client certificates are only valid for the named client/user.
+
+## TLS options
+
+Starting in release v21.03, pre-existing TLS configuration options have been
+replaced by the `--tls` [superflag](./deploy/cli-command-reference) and its
+options. The following `--tls` configuration options are available for Dgraph
+Alpha and Dgraph Zero nodes:
+
+- `ca-cert <path>` - Path and filename of the Dgraph Root CA (for example,
+  `ca.crt`)
+- `server-cert <path>` - Path and filename of the node certificate (for example,
+  `node.crt`)
+- `server-key <path>` - Path and filename of the node certificate private key
+  (for example, `node.key`)
+- `use-system-ca` - Include System CA with Dgraph Root CA.
+- `client-auth-type <type>` - TLS client authentication used to validate
+  client connections from external ports. To learn more, see
+  [Client Authentication Options](#client-authentication-options).
+
+
+ Dgraph now allows you to specify the path and filename of the CA root
+ certificate, the node certificate, and the node certificate private key. So,
+ these files do not need to have specific filenames or exist in the same
+ directory, as in previous Dgraph versions that used the `--tls_dir` flag.
+
+
+You can configure Dgraph Live Loader with the following `--tls` options:
+
+- `ca-cert <path>` - Dgraph root CA, such as `./tls/ca.crt`
+- `use-system-ca` - Include System CA with Dgraph Root CA.
+- `client-cert` - User cert file provided by the client to Alpha
+- `client-key` - User private key file provided by the client to Alpha
+- `server-name <name>` - Server name, used for validating the server's TLS
+  host name.
+
+### Using TLS with only external ports encrypted
+
+To encrypt communication between Dgraph server nodes and clients over external
+ports, you can configure certificates and run Dgraph Alpha and Dgraph Zero using
+the following commands:
+
+Dgraph Alpha:
+
+```sh
+# First, create the root CA, Alpha node certificate and private keys, if not already created.
+# Note that you must specify in node.crt the host name or IP addresses that clients use to connect:
+$ dgraph cert -n localhost,104.25.165.23,104.25.165.25,104.25.165.27
+# Set up Dgraph Alpha nodes using the following default command (after generating certificates and private keys)
+$ dgraph alpha --tls "ca-cert=/dgraph-tls/ca.crt; server-cert=/dgraph-tls/node.crt; server-key=/dgraph-tls/node.key"
+```
+
+Dgraph Zero:
+
+```sh
+# First, copy the root CA, node certificates and private keys used to set up Dgraph Alpha (above) to the Dgraph Zero node.
+# Optionally, you can generate and use a separate Zero node certificate, where you specify the host name or IP addresses used by Live Loader and Bulk Loader to connect to Dgraph Zero.
+# Next, set up Dgraph Zero nodes using the following default command:
+$ dgraph zero --tls "ca-cert=/dgraph-tls/ca.crt; server-cert=/dgraph-tls/node.crt; server-key=/dgraph-tls/node.key"
+```
+
+You can then run Dgraph Live Loader on a Dgraph Alpha node using the following
+command:
+
+```sh
+# Now, connect to server using TLS
+$ dgraph live --tls "ca-cert=./dgraph-tls/ca.crt; server-name=localhost" -s 21million.schema -f 21million.rdf.gz
+```
+
+### Using TLS with internal and external ports encrypted
+
+If you require client authentication (mutual TLS, or mTLS), you can configure
+certificates and run Dgraph Alpha and Dgraph Zero with settings that encrypt
+both internal ports (those used within the cluster) as well as external ports
+(those used by clients that connect to the cluster, including Bulk Loader and
+Live Loader).
+
+The following example shows how to encrypt both internal and external ports:
+
+Dgraph Alpha:
+
+```sh
+# First create the root CA, node certificates and private keys, if not already created.
+# Note that you must specify the host name or IP address for other nodes that will share node.crt.
+$ dgraph cert -n localhost,104.25.165.23,104.25.165.25,104.25.165.27
+# Set up Dgraph Alpha nodes using the following default command (after generating certificates and private keys)
+$ dgraph alpha
+ --tls "ca-cert=/dgraph-tls/ca.crt; server-cert=/dgraph-tls/node.crt; server-key=/dgraph-tls/node.key;
+internal-port=true; client-cert=/dgraph-tls/client.alpha1.crt; client-key=/dgraph-tls/client.alpha1.key"
+```
+
+Dgraph Zero:
+
+```sh
+# First, copy the certificates and private keys used to set up Dgraph Alpha (above) to the Dgraph Zero node.
+# Next, set up Dgraph Zero nodes using the following default command:
+$ dgraph zero
+ --tls "ca-cert=/dgraph-tls/ca.crt; server-cert=/dgraph-tls/node.crt; server-key=/dgraph-tls/node.key; internal-port=true; client-cert=/dgraph-tls/client.zero1.crt; client-key=/dgraph-tls/client.zero1.key"
+```
+
+You can then run Dgraph Live Loader using the following:
+
+```sh
+# Now, connect to server using mTLS (mutual TLS)
+$ dgraph live
+ --tls "ca-cert=./tls/ca.crt; client-cert=./tls/client.dgraphuser.crt; client-key=./tls/client.dgraphuser.key; server-name=localhost; internal-port=true" \
+ -s 21million.schema \
+ -f 21million.rdf.gz
+```
+
+### Client Authentication Options
+
+The server will always **request** client authentication. There are four
+different values for the `client-auth-type` option that change the security
+policy of the client certificate.
+
+| Value | Client Cert/Key | Client Certificate Verified |
+| ------------------ | --------------- | -------------------------------------------------------------- |
+| `REQUEST` | optional | Client certificate is not VERIFIED if provided. (least secure) |
+| `REQUIREANY` | required | Client certificate is never VERIFIED |
+| `VERIFYIFGIVEN` | optional | Client certificate is VERIFIED if provided (default) |
+| `REQUIREANDVERIFY` | required | Client certificate is always VERIFIED (most secure) |
+
+`REQUIREANDVERIFY` is the most secure but also the most difficult to configure
+for clients. When using this value, the value of `server-name` is matched
+against the certificate SANs values and the connection host.
+
+
+ If mTLS is enabled using `internal-port=true`, internal ports (by default,
+ 5080 and 7080) use the `REQUIREANDVERIFY` setting. Unless otherwise
+ configured, external ports (by default, 9080, 8080 and 6080) use the
+ `VERIFYIFGIVEN` setting. Changing the `client-auth-type` option to another
+ setting only affects client authentication on external ports.
+
+
+## Using Ratel UI with Client authentication
+
+Ratel UI (and any other JavaScript clients built on top of `dgraph-js-http`)
+connect to Dgraph servers via HTTP. When TLS is enabled, servers expect HTTPS
+requests only.
+If you haven't already created the CA certificate and the node certificate for
+alpha servers from the earlier instructions (see
+[Dgraph Certificate Management Tool](#dgraph-certificate-management-tool)), the
+first step is to generate these certificates. You can do so with the following
+command:
+
+```sh
+# Create rootCA and node certificates/keys
+$ dgraph cert -n localhost
+```
+
+If Dgraph Alpha's `client-auth-type` option is set to `REQUEST` or
+`VERIFYIFGIVEN` (default), then client certificate is not mandatory. The steps
+after generating CA/node certificate are as follows:
+
+### Step 1. Install Dgraph Root CA into System CA
+
+##### Linux (Debian/Ubuntu)
+
+```sh
+# Copy the generated CA to the ca-certificates directory
+$ cp /path/to/ca.crt /usr/local/share/ca-certificates/ca.crt
+# Update the CA store
+$ sudo update-ca-certificates
+```
+
+### Step 2. Install Dgraph Root CA into Web Browsers Trusted CA List
+
+##### Firefox
+
+- Choose Preferences -> Privacy & Security -> View Certificates -> Authorities
+- Click on Import and import the `ca.crt`
+
+##### Chrome
+
+- Choose Settings -> Privacy and Security -> Security -> Manage Certificates ->
+ Authorities
+- Click on Import and import the `ca.crt`
+
+### Step 3. Point Ratel to the `https://` endpoint of alpha server.
+
+- Change the Dgraph Alpha server address to `https://` instead of `http://`, for
+ example `https://localhost:8080`.
+
+For `REQUIREANY` and `REQUIREANDVERIFY` as `client-auth-type` option, you need
+to follow the steps above and you also need to install client certificate on
+your browser:
+
+1. Generate a client certificate: `dgraph cert -c laptopuser`.
+2. Convert it to a `.p12` file:
+
+ ```sh
+ openssl pkcs12 -export \
+ -out laptopuser.p12 \
+ -in tls/client.laptopuser.crt \
+ -inkey tls/client.laptopuser.key
+ ```
+
+ Use any password you like for export, it is used to encrypt the p12 file.
+
+3. Import the client certificate to your browser. It can be done in Chrome as
+ follows:
+ - Choose Settings -> Privacy and Security -> Security -> Manage Certificates
+ -> Your Certificates
+ - Click on Import and import the `laptopuser.p12`.
+
+
+ Mutual TLS may not work in Firefox because Firefox is unable to send
+ privately-signed client certificates, this issue is filed
+ [here](https://bugzilla.mozilla.org/show_bug.cgi?id=1662607).
+
+
+Next time you use Ratel to connect to an alpha with Client authentication
+enabled the browser will prompt you for a client certificate to use. Select the
+client's certificate you've imported in the step above and queries/mutations
+will succeed.
+
+## Using Curl with Client authentication
+
+When TLS is enabled, `curl` requests to Dgraph will need some specific options
+to work. For instance (for changing draining mode):
+
+```
+curl --silent https://localhost:8080/admin/draining
+```
+
+If you are using `curl` with
+[Client Authentication](#client-authentication-options) set to `REQUIREANY` or
+`REQUIREANDVERIFY`, you will need to provide the client certificate and private
+key. For instance (for an export request):
+
+```
+curl --silent --cacert ./tls/ca.crt --cert ./tls/client.dgraphuser.crt --key ./tls/client.dgraphuser.key https://localhost:8080/admin/draining
+```
+
+Refer to the `curl` documentation for further information on its TLS options.
+
+## Access Data Using a Client
+
+Some examples of connecting via a [Client](/clients) when TLS is in use can be
+found below:
+
+- [dgraph4j](https://github.com/dgraph-io/dgraph4j#creating-a-secure-client-using-tls)
+- [dgraph-js](https://github.com/dgraph-io/dgraph-js/tree/master/examples/tls)
+- [dgo](https://github.com/dgraph-io/dgraph/blob/main/tlstest/acl/acl_over_tls_test.go)
+- [pydgraph](https://github.com/dgraph-io/pydgraph/tree/master/examples/tls)
+
+## Troubleshooting Ratel's Client authentication
+
+If you are getting errors in Ratel when TLS is enabled, try opening your Dgraph
+Alpha URL as a web page.
+
+Assuming you are running Dgraph on your local machine, opening
+`https://localhost:8080/` in the browser should produce a message
+`Dgraph browser is available for running separately using the dgraph-ratel binary`.
+
+In case you are getting a connection error, try not passing the
+`client-auth-type` flag when starting an alpha. If you are still getting an
+error, check that your hostname is correct and the port is open; then make sure
+that "Dgraph Root CA" certificate is installed and trusted correctly.
+
+After that, if things work without passing `client-auth-type` but stop working
+when `REQUIREANY` and `REQUIREANDVERIFY` are set, make sure the `.p12` file is
+installed correctly.
diff --git a/dgraph/reference/deploy/troubleshooting.mdx b/dgraph/reference/deploy/troubleshooting.mdx
new file mode 100644
index 00000000..8407d0b2
--- /dev/null
+++ b/dgraph/reference/deploy/troubleshooting.mdx
@@ -0,0 +1,61 @@
+---
+title: Troubleshooting
+---
+
+This page provides tips on how to troubleshoot issues with running Dgraph.
+
+### Running out of memory (OOM)
+
+When you [bulk load](./bulk-loader) or
+[backup](.//enterprise-features/binary-backups) your data, Dgraph can consume
+more memory than usual due to a high volume of writes. This can cause OOM
+crashes.
+
+You can take the following steps to help avoid OOM crashes:
+
+- **Increase the amount of memory available**: If you run Dgraph with
+ insufficient memory, that can result in OOM crashes. The recommended minimum
+ RAM to run Dgraph on desktops and laptops (single-host deployment) is 16GB.
+ For servers in a cluster deployment, the recommended minimum is 8GB per
+ server. This applies to EC2 and GCE instances, as well as on-premises servers.
+- **Reduce the number of Go routines**: You can troubleshoot OOM issues by
+ reducing the number of Go routines (`goroutines`) used by Dgraph from the
+ default value of eight. For example, you can reduce the `goroutines` that
+ Dgraph uses to four by calling the `dgraph alpha` command with the following
+ option:
+
+ `--badger "goroutines=4"`
+
+### "Too many open files" errors
+
+If Dgraph logs "too many open files" errors, you should increase the per-process
+open file descriptor limit to permit more open files. During normal operations,
+Dgraph must be able to open many files. Your operating system may have an open
+file descriptor limit with a low default value that isn't adequate for a
+database like Dgraph. If so, you might need to increase this limit.
+
+On Linux and Mac, you can get file descriptor limit settings with the `ulimit`
+command, as follows:
+
+- Get hard limit: `ulimit -n -H`
+- Get soft limit: `ulimit -n -S`
+
+A soft limit of `1048576` open files is the recommended minimum to use Dgraph in
+production, but you can try increasing this soft limit if you continue to see
+this error. To learn more, see the `ulimit` documentation for your operating
+system.
+
+
+ Depending on your OS, your shell session limits might not be the same as the
+ Dgraph process limits.
+
+
+For example, to properly set up the `ulimit` values on Ubuntu 20.04 systems:
+
+```sh
+sudo sed -i 's/#DefaultLimitNOFILE=/DefaultLimitNOFILE=1048576/' /etc/systemd/system.conf
+sudo sed -i 's/#DefaultLimitNOFILE=/DefaultLimitNOFILE=1048576/' /etc/systemd/user.conf
+```
+
+This affects the base limits for all processes. After a reboot, your OS will
+pick up the new values.
diff --git a/dgraph/reference/design-concepts/acl-concept.mdx b/dgraph/reference/design-concepts/acl-concept.mdx
new file mode 100644
index 00000000..b66a6b15
--- /dev/null
+++ b/dgraph/reference/design-concepts/acl-concept.mdx
@@ -0,0 +1,20 @@
+---
+title: ACLs
+---
+
+ACLs are a typical mechanism to list who can access what, specifying either
+users or roles and what they can access. ACLs help determine who is "authorized"
+to access what.
+
+Dgraph Access Control Lists (ACLs) are sets of permissions for which
+`Relationships` a user may access. Recall that Dgraph is "predicate based" so
+all data is stored in and is implicit in relationships. This allows
+relationship-based controls to be very powerful in restricting a graph based on
+roles (RBAC).
+
+Note that the Dgraph multi-tenancy feature relies on ACLs to ensure each tenant
+can only see their own data in one server.
+
+Using ACLs requires a client to authenticate (log in) differently and specify
+credentials that will drive which relationships are visible in their view of the
+graph database.
diff --git a/dgraph/reference/design-concepts/badger-concept.mdx b/dgraph/reference/design-concepts/badger-concept.mdx
new file mode 100644
index 00000000..b863f6ae
--- /dev/null
+++ b/dgraph/reference/design-concepts/badger-concept.mdx
@@ -0,0 +1,15 @@
+---
+title: Badger
+---
+
+[Badger](https://github.com/dgraph-io/badger) is a key-value store developed and
+maintained by Dgraph. It is also open source, and it is the backing store for
+Dgraph data.
+
+It is largely transparent to users that Dgraph uses Badger to store data
+internally. Badger is packaged into the Dgraph binary, and is the persistence
+layer. However, various configuration settings and log messages may reference
+Badger, such as cache sizes.
+
+Badger values are `Posting Lists` and indexes. Badger keys are formed by
+concatenating `<predicate> + <uid>`.
diff --git a/dgraph/reference/design-concepts/clients-concept.mdx b/dgraph/reference/design-concepts/clients-concept.mdx
new file mode 100644
index 00000000..e5ec6313
--- /dev/null
+++ b/dgraph/reference/design-concepts/clients-concept.mdx
@@ -0,0 +1,26 @@
+---
+title: Dgraph Clients
+---
+
+A client is a program that calls Dgraph. Broadly, there are standalone clients
+such as Ratel, which is a graphical web-based application, and programmatic
+client libraries which are embedded in larger programs to efficiently and
+idiomatically call Dgraph.
+
+GraphQL is an open standard with many clients (graphical and libraries) also,
+and GraphQL clients work with Dgraph.
+
+Dgraph provides [client libraries](./clients) for many languages. These clients
+send DQL queries, and perform useful functions such as logging in, in idiomatic
+ways in each language.
+
+Note that Dgraph does not force or insist on any particular GraphQL client. Any
+GraphQL client, GUI, tool, or library will work well with Dgraph, and it is the
+users' choice which to choose. Dgraph only provides clients for the proprietary
+DQL query language. GraphQL clients are available for free from many
+organizations.
+
+However, Dgraph's cloud console does support basic GraphQL querying, so it can
+serve as a simple tool. We recommend using a dedicated GraphQL console instead,
+as such tools are more mature; Dgraph's GraphQL GUI is provided for quick starts
+and convenience.
diff --git a/dgraph/reference/design-concepts/consistency-model.mdx b/dgraph/reference/design-concepts/consistency-model.mdx
new file mode 100644
index 00000000..b18013b0
--- /dev/null
+++ b/dgraph/reference/design-concepts/consistency-model.mdx
@@ -0,0 +1,91 @@
+---
+title: Consistency Model
+---
+
+### Dgraph supports MVCC, Read Snapshots and Distributed ACID transactions
+
+Multi-version concurrency control (MVCC) is a technique where many versions of
+data are written (but never modified) on disk, so many versions exist. This
+helps control concurrency because the database is queried at a particular
+"timestamp" for the duration of one query to provide snapshot isolation and
+ensure data is consistent for that transaction. (Note that MVCC is loosely
+related to LSM trees - in LSM parlance, data is "logged" to write-only files,
+which are later merged via Log Compaction.)
+
+Writes are faster with MVCC because data is always written by flushing a larger
+in-memory buffer (a memtable) to new, contiguous files (SST files), and newer
+data obscures or replaces older data. Consistent updates from each transaction
+share a logical commit timestamp (a 64 bit, increasing number loosely correlated
+to wall clock time), and all reads occur "at a point in time" meaning any read
+accesses a known, stable set of committed data using these same commit
+timestamps. New or in-process commits are associated with a later timestamp so
+they do not affect running queries at earlier timestamps. This allows pure
+queries (reads) to execute without any locks.
+
+One special set of structures are "memtables" which are also referred to as
+being Level 0 of the LSM tree. These are buffers for fast writes, which later
+are flushed to on-disk files called SSTs.
+
+### Dgraph transactions are cluster-wide (not key-only, or any other non-ACID version of transactions)
+
+Dgraph uses the RAFT protocol to synchronize updates and ensure updates are
+durably written to a majority of alpha nodes in a cluster before the transaction
+is considered successful. RAFT ensures true, distributed, cluster wide
+transactions across multiple nodes, keys, edges, indexes and facets. Dgraph
+provides true ACID transactions, and does not impose limitations on what can be
+in a transaction: a transaction can involve multiple predicates, multiple nodes,
+multiple keys and even multiple shards.
+
+### Transactions are lockless
+
+Dgraph transactions do not use locks, allowing fast, distributed transactions.
+
+For reads, queries execute at a particular timestamp based on snapshot
+isolation, which isolates reads from any concurrent write activity. All reads
+access snapshots across the entire cluster, seeing all previously committed
+transactions in full, regardless of which alpha node received earlier queries.
+
+Writes use optimistic lock semantics, where a transaction will be aborted if
+another (concurrent) transaction updates exactly the same data (same edge on the
+same node) first. This will be reported as an "aborted" transaction to the
+caller.
+
+Dgraph ensures monotonically increasing transaction timestamps to sequence all
+updates in the database. This provides serializability: if any transaction Tx1
+commits before Tx2 starts, then Ts_commit(Tx1) < Ts_start(Tx2), and in turn a
+read at any point in time can never see Tx2's changes without also seeing Tx1's
+changes.
+
+Dgraph also ensures proper read-after-write semantics. Any commit at timestamp
+Tc is guaranteed to be seen by a read at timestamp Tr by any client, if Tr >=
+Tc.
+
+### Terminology
+
+- **Snapshot isolation:** all reads see a consistent view of the database at the
+ point in time when the read was submitted
+- **Oracle:** a logical process that tracks timestamps and which data (keys,
+ predicates, etc.) has been committed or is being modified. The oracle hands
+ out timestamps and aborts transactions if another transaction has modified its
+ data.
+- **RAFT:** a well-known consistency algorithm to ensure distributed processes
+ durably store data
+- **Write-Ahead Log:** Also WAL. A fast log of updates on each alpha that
+ ensures buffered in-memory structures are persisted.
+- **Proposal:** A process within the RAFT algorithm to track possible updates
+ during the consensus process.
+- **SST:** Persistent files comprising the LSM tree, together with memtables.
+- **Memtable:** An in-memory version of an SST, supporting fast updates.
+ Memtables are mutable, and SSTs are immutable.
+- **Log Compaction:** The process of combining SSTs into newer SSTs while
+  eliminating obsolete data and reclaiming disk space.
+- **Timestamp:** Or point in time. A numeric counter representing the sequential
+ order of all transactions, and indicating when a transaction became valid and
+ query-able.
+- **Optimistic Lock:** a logical process whereby all transactions execute
+ without blocking on other transactions, and are aborted if there is a
+ conflict. Aborted transactions should typically be retried if they occur.
+- **Pessimistic Lock:** a process, not used in Dgraph, where all concurrent
+ transactions mutating the same data except one block and wait for each other
+ to complete.
+- **ACID:** An acronym representing attributes of true transactions: Atomic,
+  Consistent, Isolated, and Durable.
diff --git a/dgraph/reference/design-concepts/discovery-concept.mdx b/dgraph/reference/design-concepts/discovery-concept.mdx
new file mode 100644
index 00000000..b812849c
--- /dev/null
+++ b/dgraph/reference/design-concepts/discovery-concept.mdx
@@ -0,0 +1,9 @@
+---
+title: Discovery
+---
+
+### New Servers and Discovery
+
+Dgraph clusters will detect new machines allocated to the
+[cluster](./deploy/cluster-setup), establish connections, and transfer data to
+the new server based on the group the new machine is in.
diff --git a/dgraph/reference/design-concepts/dql-concept.mdx b/dgraph/reference/design-concepts/dql-concept.mdx
new file mode 100644
index 00000000..361645b2
--- /dev/null
+++ b/dgraph/reference/design-concepts/dql-concept.mdx
@@ -0,0 +1,11 @@
+---
+title: DQL
+---
+
+DQL is the "Dgraph Query Language" and is based on GraphQL. It is neither a
+superset nor subset of GraphQL, but is generally more powerful than GraphQL. DQL
+coexists nicely with GraphQL so many users perform most access using GraphQL and
+only "drop down" into DQL when there is a particular query mechanism needed that
+is not supported in the GraphQL spec. E.g. @recurse query operations are only in
+DQL. Other customers simply use DQL. DQL supports both queries and mutations, as
+well as hybrid "upsert" operations.
diff --git a/dgraph/reference/design-concepts/dql-graphql-layering-concept.mdx b/dgraph/reference/design-concepts/dql-graphql-layering-concept.mdx
new file mode 100644
index 00000000..2650e617
--- /dev/null
+++ b/dgraph/reference/design-concepts/dql-graphql-layering-concept.mdx
@@ -0,0 +1,27 @@
+---
+title: DQL and GraphQL
+---
+
+## Dgraph Schemas
+
+Dgraph natively supports GraphQL, including `GraphQL Schema`s. GraphQL schemas
+"sit on top of" DQL schemas, in the sense that when a GraphQL schema is added to
+Dgraph, a corresponding `DQL Schema` is automatically created.
+
+Refer to [GraphQL-DQL interoperability](./graphql-dql) section for details.
+
+## Dgraph Queries, Mutations and Upserts
+
+Similarly, GraphQL mutations are implemented on top of DQL in the sense that a
+GraphQL query is converted internally into a DQL query, which is then executed.
+This translation is not particularly complex, since DQL is based on GraphQL,
+with some syntax changes and some extensions.
+
+This is generally transparent to all callers; however, users should be aware
+that
+
+1. Anything done in GraphQL can also be done in DQL if needed. Some small
+ exceptions include the enforcement of non-null constraints and other checks
+ done before Dgraph transpiles GraphQL to DQL and executes it.
+2. Some logging including Request Logging and OpenTrace (Jaeger) tracing may
+ show DQL converted from the GraphQL.
diff --git a/dgraph/reference/design-concepts/facets-concept.mdx b/dgraph/reference/design-concepts/facets-concept.mdx
new file mode 100644
index 00000000..e85aba9f
--- /dev/null
+++ b/dgraph/reference/design-concepts/facets-concept.mdx
@@ -0,0 +1,15 @@
+---
+title: Facets
+---
+
+Dgraph allows a set of properties to be associated with any `Relationship`. E.g.
+if there is a "worksFor" relationships between Node "Bob" and Node "Google",
+this relationship may have facet values of "since": 2002-05-05 and "position":
+"Engineer".
+
+Facets can always be replaced by adding a new Node representing the relationship
+and storing the facet data as attributes of the new Node.
+
+The term "facet" is also common in database and search engine technology, and
+indicates a dimension or classification of data. One way to use facets is to
+indicate a relationship type.
diff --git a/dgraph/reference/design-concepts/graphql-concept.mdx b/dgraph/reference/design-concepts/graphql-concept.mdx
new file mode 100644
index 00000000..add2c652
--- /dev/null
+++ b/dgraph/reference/design-concepts/graphql-concept.mdx
@@ -0,0 +1,17 @@
+---
+title: GraphQL
+---
+
+`GraphQL` is a query and update standard defined at
+[GraphQL.org](https://graphql.org/). `GraphQL` is natively supported by Dgraph,
+without requiring additional servers, data mappings or resolvers. Typically,
+"resolving" a data field in GraphQL simply corresponds to walking that
+relationship in Dgraph.
+
+Dgraph also auto-generates access functions for any `GraphQL Schema`, allowing
+users to get up and running in minutes with Dgraph + a GraphQL schema. The APIs
+are auto-generated.
+
+GraphQL is internally converted to the (similar-but-different) `DQL` query
+language before being executed. We can think of GraphQL as "sitting on top" of
+DQL.
diff --git a/dgraph/reference/design-concepts/group-concept.mdx b/dgraph/reference/design-concepts/group-concept.mdx
new file mode 100644
index 00000000..782e6adc
--- /dev/null
+++ b/dgraph/reference/design-concepts/group-concept.mdx
@@ -0,0 +1,39 @@
+---
+title: Group
+---
+
+A group is a set of 1 or 3 or more servers that work together and have a single
+`leader` in the sense defined by the RAFT protocol.
+
+## Alpha Group
+
+An Alpha `Group` in Dgraph is a shard of data, and may or may not be
+highly-available (HA). An HA group typically has three Dgraph instances (servers
+or K8s pods), and a non-HA group is a single instance. Every Alpha instance
+belongs to one group, and each group is responsible for serving a particular set
+of tablets (relations). In an HA configuration, the three or more instances in a
+single group replicate the same data to every instance to ensure redundancy of
+data.
+
+In a sharded Dgraph cluster, tablets are automatically assigned to each group,
+and dynamically relocated as sizes change to keep the groups balanced.
+Predicates can also be moved manually if desired.
+
+In a future version, if a tablet gets too big, it will be split among two
+groups, but currently data is balanced by moving each tablet to one group only.
+
+To avoid confusion, remember that you may have many Dgraph alpha instances due
+to either sharding, or due to HA configuration. If you have both sharding and
+HA, you will have 3\*N alphas for N groups:
+
+| config | Non-HA | HA |
+| ----------- | ----------------- | ------------------------ |
+| Non-sharded | 1 alpha total | 3 alphas total |
+| Sharded | 1 alpha per group | 3\*N alphas for N groups |
+
+## Zero Group
+
+Group Zero is a lightweight server or group of servers which helps control the
+overall cluster. It manages timestamps and UIDs, determines when data should be
+rebalanced among shards, and other functions. The servers in this group are
+generally called "Zeros."
diff --git a/dgraph/reference/design-concepts/index-tokenize-concept.mdx b/dgraph/reference/design-concepts/index-tokenize-concept.mdx
new file mode 100644
index 00000000..b4d4ab30
--- /dev/null
+++ b/dgraph/reference/design-concepts/index-tokenize-concept.mdx
@@ -0,0 +1,27 @@
+---
+title: Index and Tokenizer
+---
+
+### Indexing
+
+An index is an optimized data structure, stored on disk and loaded into memory,
+that speeds or optimizes query processing. It is created and stored in addition
+to the primary data. E.g. a "hasName" property or relation is the primary
+storage structure for a graph in Dgraph, but may also have an additional index
+structure configured.
+
+Typically, Dgraph query access is optimized for forward access. When other
+access is needed, an index may speed up queries. Indexes are large structures
+that hold all values for some Relation (vs `Posting Lists`, which are typically
+smaller, per-Node structures).
+
+### Tokenizers
+
+Tokenizers are simply small algorithms that create indexed values from some Node
+property. E.g. if a Book Node has a Title attribute, and you add a "term" index,
+each word (term) in the text will be indexed. The word "Tokenizer" derives its
+name from tokenizing operations to create this index type.
+
+Similarly, if the Book has a publicationDateTime you can add a day or year index.
+The "tokenizer" here extracts the value to be indexed, which may be the day or
+hour of the dateTime, or only the year.
diff --git a/dgraph/reference/design-concepts/index.mdx b/dgraph/reference/design-concepts/index.mdx
new file mode 100644
index 00000000..23044b15
--- /dev/null
+++ b/dgraph/reference/design-concepts/index.mdx
@@ -0,0 +1,8 @@
+---
+title: Design Concepts
+---
+
+This section of the documentation covers various concepts that are relevant to
+the Dgraph system.
+
+### In this section
diff --git a/dgraph/reference/design-concepts/lambda-concept.mdx b/dgraph/reference/design-concepts/lambda-concept.mdx
new file mode 100644
index 00000000..285c8a11
--- /dev/null
+++ b/dgraph/reference/design-concepts/lambda-concept.mdx
@@ -0,0 +1,8 @@
+---
+title: Lambdas
+---
+
+Dgraph Lambdas are JavaScript functions that can be used during query or
+mutation processing to extend GraphQL or DQL queries and mutations. Lambdas are
+not related at all to AWS Lambdas. They are functions that run in an (optional)
+node.js server that is included in the Dgraph Cloud offering.
diff --git a/dgraph/reference/design-concepts/minimizing-network-calls.mdx b/dgraph/reference/design-concepts/minimizing-network-calls.mdx
new file mode 100644
index 00000000..e2fc1b68
--- /dev/null
+++ b/dgraph/reference/design-concepts/minimizing-network-calls.mdx
@@ -0,0 +1,116 @@
+---
+title: Minimal Network Calls
+---
+
+### Predicate-based storage and sharding
+
+Dgraph is unique in its use of predicate-based sharding, which allows complex
+and deep distributed queries to run without incurring high network overhead and
+associated delays.
+
+Rather than store and shard by putting different _nodes_ (aka entities\*) on
+different servers, Dgraph stores predicates or triples of the form
+`<subject> <predicate> <object>`. The nodes are therefore implicit in the
+predicate storage, rather than vice versa.
+
+This makes querying much different and particularly allows network optimizations
+in a distributed database.
+
+### Example
+
+To explain how this works, let's use an example query:
+
+`Find all posts liked by friends of friends of mine over the last year, written by a popular author A.`
+
+### SQL/NoSQL
+
+In a distributed SQL database or (non-graph) NoSQL database, this query requires
+retrieval of a lot of data. Consider two approaches:
+
+Approach 1:
+
+- Find all the friends (~ 338
+ [friends](https://www.pewresearch.org/fact-tank/2014/02/03/what-people-like-dislike-about-facebook/)).
+- Find all their friends (~ 338 \* 338 = 40,000 people).
+- Find all the posts liked by these people over the last year (resulting set in
+ the millions).
+- Intersect these posts with posts authored by person A.
+
+Approach 2:
+
+- Find all posts written by popular author A over the last year (possibly
+ thousands).
+- Find all people who liked those posts (easily millions) (call this
+ `result set 1`).
+- Find all your friends.
+- Find all their friends (call this `result set 2`).
+- Intersect `result set 1` with `result set 2`.
+
+Both approaches would result in a lot of data moving back and forth between
+database and application; would be slow to execute, and may require running an
+offline job.
+
+### Dgraph Approach
+
+This is how it would run in Dgraph:
+
+Sharding assumptions (which predicates live where):
+
+- Assume Server X contains the predicate `friends` representing all friend
+ relations.
+- Assume Server Y contains the predicate `posts_liked` representing who likes
+ each post.
+- Assume Server Z contains the predicate `author` representing all who authored
+ each post.
+- Assume Server W contains the predicate `title` representing the uid->string
+ title property of posts.
+
+Algorithm:
+
+- Server X
+ - If the request was not sent to Server X, route it to Server X where the
+ friends predicate lives. **(1 RPC)**.
+ - Seek to my uid within predicate (tablet) `friends` and retrieve a list of my
+ friends as a list of uids.
+ - Still on Server X, use the friends predicate again to get friends for all of
+ those uids, generating a list of my friends of friends. Call this
+ `result set myFOF`.
+- Server Y
+ - Send result set myFOF to Server Y, which holds the posts_liked predicate
+ **(1 RPC)**.
+ - Retrieve all posts liked by my friends-of-friends. Call this
+ `result set postsMyFOFLiked`.
+- Server Z
+ - Send postsMyFOFLiked result set to Server Z **(1 RPC)**.
+ - Retrieve all posts authored by A. Call this `result set authoredByA`.
+ - Still on Server Z, intersect the two sorted lists to get posts that are both
+ liked and authored by A: `result set postsMyFOFLiked` intersect
+ `result set authoredByA`. Call this `result set postsMyFOFLikedByA`
+ - at this point we have done the hard work, but have the uids of the posts,
+ instead of the post titles.
+- Server W
+ - Send `result set postsMyFOFLikedByA` to Server W which holds the title
+ predicate **(1 RPC)**.
+ - Convert uids to names by looking up the title for each uid.
+ `result set postUidsAndTitles`
+- Respond to caller with `result set postUidsAndTitles`.
+
+## Net Result - predictable distributed graph scaling
+
+In at most 4 RPCs, we have figured out all the posts liked by friends of
+friends, written by popular author X, with titles. Typically, all four
+predicates will not live on four different Servers, so this is a worst-case
+scenario. Dgraph network activity is limited to the level of query join depth,
+rather than increasing arbitrarily according to the number of nodes in the
+graph, and how they are broken up across servers.
+
+There is no way we are aware of that a node-based sharding database can avoid
+high network RPC counts during arbitrary queries because "node-hopping" does not
+mix well with a graph that is segmented across servers.
+
+---
+
+\* _Throughout this note, we call entities in a graph "nodes" which
+is a standard terminology when talking about nodes and predicates. These may be
+confused with RAFT or Kubernetes nodes in some contexts, but generally we mean
+nodes in a graph_.
diff --git a/dgraph/reference/design-concepts/namespace-tenant-concept.mdx b/dgraph/reference/design-concepts/namespace-tenant-concept.mdx
new file mode 100644
index 00000000..754b2781
--- /dev/null
+++ b/dgraph/reference/design-concepts/namespace-tenant-concept.mdx
@@ -0,0 +1,14 @@
+---
+title: Namespace and Tenant
+---
+
+A Dgraph `Namespace` (aka Tenant) is a logically separate database within a
+Dgraph cluster. A Dgraph cluster can host many Namespaces (and this is how the
+Dgraph "shared" cloud offering works). Each user must then log into their own
+namespace using their own namespace-specific credentials, and sees only their
+own data. Note that this usually requires an extra or specific login.
+
+There is no mechanism to query in a way that combines data from two namespaces,
+which simplifies and enforces security in use cases where this is the
+requirement. An API layer or client would have to pull data from multiple
+namespaces using different authenticated queries if data needed to be combined.
diff --git a/dgraph/reference/design-concepts/network-call-minimization-concept.mdx b/dgraph/reference/design-concepts/network-call-minimization-concept.mdx
new file mode 100644
index 00000000..ec8a1bdb
--- /dev/null
+++ b/dgraph/reference/design-concepts/network-call-minimization-concept.mdx
@@ -0,0 +1,11 @@
+---
+title: Network Call Minimization
+---
+
+Compared to RAM or SSD access, network calls are slow, so Dgraph is built from
+the ground up to minimize them. For graph databases which store sub-graphs on
+different shards, this is difficult or impossible, but predicate-based
+(relationship-based) sharding allows fast distributed query with Dgraph.
+
+See [How Dgraph Minimizes Network Calls](./minimizing-network-calls) for more
+details.
diff --git a/dgraph/reference/design-concepts/posting-list-concept.mdx b/dgraph/reference/design-concepts/posting-list-concept.mdx
new file mode 100644
index 00000000..66e0d4d7
--- /dev/null
+++ b/dgraph/reference/design-concepts/posting-list-concept.mdx
@@ -0,0 +1,124 @@
+---
+title: Posting List and Tablet
+---
+
+Posting lists and tablets are internal storage mechanisms and are generally
+hidden from users or developers, but logs, core product code, blog posts and
+discussions about Dgraph may use the terms "posting list" and "tablet."
+
+Posting lists are a form of inverted index. Posting lists correspond closely to
+the RDF concept of a graph, where the entire graph is a collection of triples,
+`