From 1b75f011c62b348471c9eb50c7c31277dd5e6f0b Mon Sep 17 00:00:00 2001 From: liangbowen Date: Fri, 13 Jan 2023 16:48:06 +0800 Subject: [PATCH 01/10] correction of grammar and spelling mistakes --- docs/deployment/settings.md | 222 ++++++++--------- .../org/apache/kyuubi/config/KyuubiConf.scala | 230 +++++++++--------- .../scala/org/apache/kyuubi/ctl/CtlConf.scala | 6 +- .../kyuubi/ha/HighAvailabilityConf.scala | 34 +-- .../metadata/jdbc/JDBCMetadataStoreConf.scala | 21 +- .../config/AllKyuubiConfiguration.scala | 8 +- .../kyuubi/zookeeper/ZookeeperConf.scala | 20 +- 7 files changed, 275 insertions(+), 266 deletions(-) diff --git a/docs/deployment/settings.md b/docs/deployment/settings.md index 539ca823f6b..5075621d8d0 100644 --- a/docs/deployment/settings.md +++ b/docs/deployment/settings.md @@ -98,7 +98,7 @@ You can configure the environment variables in `$KYUUBI_HOME/conf/kyuubi-env.sh` # export KYUUBI_BEELINE_OPTS="-Xmx2g -XX:+UnlockDiagnosticVMOptions -XX:ParGCCardsPerStrideChunk=4096 -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:+CMSConcurrentMTEnabled -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:+CMSClassUnloadingEnabled -XX:+CMSParallelRemarkEnabled -XX:+UseCondCardMark" ``` -For the environment variables that only needed to be transferred into engine side, you can set it with a Kyuubi configuration item formatted `kyuubi.engineEnv.VAR_NAME`. For example, with `kyuubi.engineEnv.SPARK_DRIVER_MEMORY=4g`, the environment variable `SPARK_DRIVER_MEMORY` with value `4g` would be transferred into engine side. With `kyuubi.engineEnv.SPARK_CONF_DIR=/apache/confs/spark/conf`, the value of `SPARK_CONF_DIR` in engine side is set to `/apache/confs/spark/conf`. +For the environment variables that only needed to be transferred into engine side, you can set it with a Kyuubi configuration item formatted `kyuubi.engineEnv.VAR_NAME`. 
For example, with `kyuubi.engineEnv.SPARK_DRIVER_MEMORY=4g`, the environment variable `SPARK_DRIVER_MEMORY` with value `4g` would be transferred into engine side. With `kyuubi.engineEnv.SPARK_CONF_DIR=/apache/confs/spark/conf`, the value of `SPARK_CONF_DIR` on the engine side is set to `/apache/confs/spark/conf`. ## Kyuubi Configurations @@ -136,7 +136,7 @@ You can configure the Kyuubi properties in `$KYUUBI_HOME/conf/kyuubi-defaults.co Key | Default | Meaning | Type | Since --- | --- | --- | --- | --- -kyuubi.authentication|NONE|A comma separated list of client authentication types. Note that: For KERBEROS, it is SASL/GSSAPI mechanism, and for NONE, CUSTOM and LDAP, they are all SASL/PLAIN mechanism. If only NOSASL is specified, the authentication will be NOSASL. For SASL authentication, KERBEROS and PLAIN auth type are supported at the same time, and only the first specified PLAIN auth type is valid.|seq|1.0.0 +kyuubi.authentication|NONE|A comma separated list of client authentication types. Note that: For KERBEROS, it is SASL/GSSAPI mechanism, and for NONE, CUSTOM and LDAP, they are all SASL/PLAIN mechanisms. If only NOSASL is specified, the authentication will be NOSASL. 
For SASL authentication, KERBEROS and PLAIN auth types are supported at the same time, and only the first specified PLAIN auth type is valid.|seq|1.0.0
kyuubi.authentication.custom.class|&lt;undefined&gt;|User-defined authentication implementation of org.apache.kyuubi.service.authentication.PasswdAuthenticationProvider|string|1.3.0
kyuubi.authentication.jdbc.driver.class|&lt;undefined&gt;|Driver class name for JDBC Authentication Provider.|string|1.6.0
kyuubi.authentication.jdbc.password|&lt;undefined&gt;|Database password for JDBC Authentication Provider.|string|1.6.0
@@ -158,8 +158,8 @@ kyuubi.backend.engine.exec.pool.keepalive.time|PT1M|Time(ms) that an idle async
kyuubi.backend.engine.exec.pool.shutdown.timeout|PT10S|Timeout(ms) for the operation execution thread pool to terminate in SQL engine applications|duration|1.0.0
kyuubi.backend.engine.exec.pool.size|100|Number of threads in the operation execution thread pool of SQL engine applications|int|1.0.0
kyuubi.backend.engine.exec.pool.wait.queue.size|100|Size of the wait queue for the operation execution thread pool in SQL engine applications|int|1.0.0
-kyuubi.backend.server.event.json.log.path|file:///tmp/kyuubi/events|The location of server events go for the builtin JSON logger|string|1.4.0
-kyuubi.backend.server.event.loggers||A comma separated list of server history loggers, where session/operation etc events go. Note that: Kyuubi supports custom event handlers with the Java SPI. To register a custom event handler, user need to implement a class which is a child of org.apache.kyuubi.events.handler.CustomEventHandlerProvider which has zero-arg constructor.|seq|1.4.0
+kyuubi.backend.server.event.json.log.path|file:///tmp/kyuubi/events|The location where server events go for the built-in JSON logger|string|1.4.0
+kyuubi.backend.server.event.loggers||A comma separated list of server history loggers, where session/operation etc events go. Note that: Kyuubi supports custom event handlers with the Java SPI. 
To register a custom event handler, the user needs to implement a class which is a child of org.apache.kyuubi.events.handler.CustomEventHandlerProvider which has a zero-arg constructor.|seq|1.4.0 kyuubi.backend.server.exec.pool.keepalive.time|PT1M|Time(ms) that an idle async thread of the operation execution thread pool will wait for a new task to arrive before terminating in Kyuubi server|duration|1.0.0 kyuubi.backend.server.exec.pool.shutdown.timeout|PT10S|Timeout(ms) for the operation execution thread pool to terminate in Kyuubi server|duration|1.0.0 kyuubi.backend.server.exec.pool.size|100|Number of threads in the operation execution thread pool of Kyuubi server|int|1.0.0 @@ -172,7 +172,7 @@ Key | Default | Meaning | Type | Since --- | --- | --- | --- | --- kyuubi.batch.application.check.interval|PT5S|The interval to check batch job application information.|duration|1.6.0 kyuubi.batch.application.starvation.timeout|PT3M|Threshold above which to warn batch application may be starved.|duration|1.7.0 -kyuubi.batch.conf.ignore.list||A comma separated list of ignored keys for batch conf. If the batch conf contains any of them, the key and the corresponding value will be removed silently during batch job submission. Note that this rule is for server-side protection defined via administrators to prevent some essential configs from tampering. You can also pre-define some config for batch job submission with prefix: kyuubi.batchConf.[batchType]. For example, you can pre-define `spark.master` for spark batch job with key `kyuubi.batchConf.spark.spark.master`.|seq|1.6.0 +kyuubi.batch.conf.ignore.list||A comma separated list of ignored keys for batch conf. If the batch conf contains any of them, the key and the corresponding value will be removed silently during batch job submission. Note that this rule is for server-side protection defined via administrators to prevent some essential configs from tampering. 
You can also pre-define some config for batch job submission with the prefix: kyuubi.batchConf.[batchType]. For example, you can pre-define `spark.master` for the Spark batch job with key `kyuubi.batchConf.spark.spark.master`.|seq|1.6.0 kyuubi.batch.session.idle.timeout|PT6H|Batch session idle timeout, it will be closed when it's not accessed for this duration|duration|1.6.2 @@ -187,7 +187,7 @@ kyuubi.credentials.hive.enabled|true|Whether to renew Hive metastore delegation kyuubi.credentials.idle.timeout|PT6H|inactive users' credentials will be expired after a configured timeout|duration|1.6.0 kyuubi.credentials.renewal.interval|PT1H|How often Kyuubi renews one user's delegation tokens|duration|1.4.0 kyuubi.credentials.renewal.retry.wait|PT1M|How long to wait before retrying to fetch new credentials after a failure.|duration|1.4.0 -kyuubi.credentials.update.wait.timeout|PT1M|How long to wait until credentials are ready.|duration|1.5.0 +kyuubi.credentials.update.wait.timeout|PT1M|How long to wait until the credentials are ready.|duration|1.5.0 ### Ctl @@ -197,11 +197,11 @@ Key | Default | Meaning | Type | Since kyuubi.ctl.batch.log.on.failure.timeout|PT10S|The timeout for fetching remaining batch logs if the batch failed.|duration|1.6.1 kyuubi.ctl.batch.log.query.interval|PT3S|The interval for fetching batch logs.|duration|1.6.0 kyuubi.ctl.rest.auth.schema|basic|The authentication schema. 
Valid values are: basic, spnego.|string|1.6.0 -kyuubi.ctl.rest.base.url|<undefined>|The REST API base URL, which contains the scheme (http:// or https://), host name, port number|string|1.6.0 -kyuubi.ctl.rest.connect.timeout|PT30S|The timeout[ms] for establishing the connection with the kyuubi server.A timeout value of zero is interpreted as an infinite timeout.|duration|1.6.0 +kyuubi.ctl.rest.base.url|<undefined>|The REST API base URL, which contains the scheme (http:// or https://), hostname, port number|string|1.6.0 +kyuubi.ctl.rest.connect.timeout|PT30S|The timeout[ms] for establishing the connection with the kyuubi server. A timeout value of zero is interpreted as an infinite timeout.|duration|1.6.0 kyuubi.ctl.rest.request.attempt.wait|PT3S|How long to wait between attempts of ctl rest request.|duration|1.6.0 kyuubi.ctl.rest.request.max.attempts|3|The max attempts number for ctl rest request.|int|1.6.0 -kyuubi.ctl.rest.socket.timeout|PT2M|The timeout[ms] for waiting for data packets after connection is established.A timeout value of zero is interpreted as an infinite timeout.|duration|1.6.0 +kyuubi.ctl.rest.socket.timeout|PT2M|The timeout[ms] for waiting for data packets after connection is established. A timeout value of zero is interpreted as an infinite timeout.|duration|1.6.0 kyuubi.ctl.rest.spnego.host|<undefined>|When auth schema is spnego, need to config spnego host.|string|1.6.0 @@ -219,57 +219,57 @@ kyuubi.delegation.token.renew.interval|PT168H|unused yet|duration|1.0.0 Key | Default | Meaning | Type | Since --- | --- | --- | --- | --- -kyuubi.engine.connection.url.use.hostname|true|(deprecated) When true, engine register with hostname to zookeeper. When spark run on k8s with cluster mode, set to false to ensure that server can connect to engine|boolean|1.3.0 +kyuubi.engine.connection.url.use.hostname|true|(deprecated) When true, the engine registers with hostname to zookeeper. 
When Spark runs on k8s with cluster mode, set to false to ensure that server can connect to engine|boolean|1.3.0 kyuubi.engine.deregister.exception.classes||A comma separated list of exception classes. If there is any exception thrown, whose class matches the specified classes, the engine would deregister itself.|seq|1.2.0 kyuubi.engine.deregister.exception.messages||A comma separated list of exception messages. If there is any exception thrown, whose message or stacktrace matches the specified message list, the engine would deregister itself.|seq|1.2.0 kyuubi.engine.deregister.exception.ttl|PT30M|Time to live(TTL) for exceptions pattern specified in kyuubi.engine.deregister.exception.classes and kyuubi.engine.deregister.exception.messages to deregister engines. Once the total error count hits the kyuubi.engine.deregister.job.max.failures within the TTL, an engine will deregister itself and wait for self-terminated. Otherwise, we suppose that the engine has recovered from temporary failures.|duration|1.2.0 kyuubi.engine.deregister.job.max.failures|4|Number of failures of job before deregistering the engine.|int|1.2.0 -kyuubi.engine.event.json.log.path|file:///tmp/kyuubi/events|The location of all the engine events go for the builtin JSON logger.|string|1.3.0 -kyuubi.engine.event.loggers|SPARK|A comma separated list of engine history loggers, where engine/session/operation etc events go. Note that: Kyuubi supports custom event handlers with the Java SPI. 
To register a custom event handler, user need to implement a class which is a child of org.apache.kyuubi.events.handler.CustomEventHandlerProvider which has zero-arg constructor.|seq|1.3.0 -kyuubi.engine.flink.extra.classpath|<undefined>|The extra classpath for the flink sql engine, for configuring location of hadoop client jars, etc|string|1.6.0 -kyuubi.engine.flink.java.options|<undefined>|The extra java options for the flink sql engine|string|1.6.0 -kyuubi.engine.flink.memory|1g|The heap memory for the flink sql engine|string|1.6.0 +kyuubi.engine.event.json.log.path|file:///tmp/kyuubi/events|The location where all the engine events go for the built-in JSON logger.|string|1.3.0 +kyuubi.engine.event.loggers|SPARK|A comma separated list of engine history loggers, where engine/session/operation etc events go. Note that: Kyuubi supports custom event handlers with the Java SPI. To register a custom event handler, the user needs to implement a subclass of org.apache.kyuubi.events.handler.CustomEventHandlerProvider which has a zero-arg constructor.|seq|1.3.0 +kyuubi.engine.flink.extra.classpath|<undefined>|The extra classpath for the Flink SQL engine, for configuring the location of hadoop client jars, etc|string|1.6.0 +kyuubi.engine.flink.java.options|<undefined>|The extra java options for the Flink SQL engine|string|1.6.0 +kyuubi.engine.flink.memory|1g|The heap memory for the Flink SQL engine|string|1.6.0 kyuubi.engine.hive.event.loggers|JSON|A comma separated list of engine history loggers, where engine/session/operation etc events go.|seq|1.7.0 -kyuubi.engine.hive.extra.classpath|<undefined>|The extra classpath for the hive query engine, for configuring location of hadoop client jars, etc|string|1.6.0 +kyuubi.engine.hive.extra.classpath|<undefined>|The extra classpath for the hive query engine, for configuring location of the hadoop client jars and etc.|string|1.6.0 kyuubi.engine.hive.java.options|<undefined>|The extra java options for the hive query 
engine|string|1.6.0 kyuubi.engine.hive.memory|1g|The heap memory for the hive query engine|string|1.6.0 kyuubi.engine.initialize.sql|SHOW DATABASES|SemiColon-separated list of SQL statements to be initialized in the newly created engine before queries. i.e. use `SHOW DATABASES` to eagerly active HiveClient. This configuration can not be used in JDBC url due to the limitation of Beeline/JDBC driver.|seq|1.2.0 kyuubi.engine.jdbc.connection.password|<undefined>|The password is used for connecting to server|string|1.6.0 kyuubi.engine.jdbc.connection.properties||The additional properties are used for connecting to server|seq|1.6.0 -kyuubi.engine.jdbc.connection.provider|<undefined>|The connection provider is used for getting a connection from server|string|1.6.0 +kyuubi.engine.jdbc.connection.provider|<undefined>|The connection provider is used for getting a connection from the server|string|1.6.0 kyuubi.engine.jdbc.connection.url|<undefined>|The server url that engine will connect to|string|1.6.0 kyuubi.engine.jdbc.connection.user|<undefined>|The user is used for connecting to server|string|1.6.0 -kyuubi.engine.jdbc.driver.class|<undefined>|The driver class for jdbc engine connection|string|1.6.0 -kyuubi.engine.jdbc.extra.classpath|<undefined>|The extra classpath for the jdbc query engine, for configuring location of jdbc driver, etc|string|1.6.0 -kyuubi.engine.jdbc.java.options|<undefined>|The extra java options for the jdbc query engine|string|1.6.0 -kyuubi.engine.jdbc.memory|1g|The heap memory for the jdbc query engine|string|1.6.0 -kyuubi.engine.jdbc.type|<undefined>|The short name of jdbc type|string|1.6.0 +kyuubi.engine.jdbc.driver.class|<undefined>|The driver class for JDBC engine connection|string|1.6.0 +kyuubi.engine.jdbc.extra.classpath|<undefined>|The extra classpath for the JDBC query engine, for configuring the location of the JDBC driver and etc.|string|1.6.0 +kyuubi.engine.jdbc.java.options|<undefined>|The extra java options for the JDBC query 
engine|string|1.6.0 +kyuubi.engine.jdbc.memory|1g|The heap memory for the JDBC query engine|string|1.6.0 +kyuubi.engine.jdbc.type|<undefined>|The short name of JDBC type|string|1.6.0 kyuubi.engine.operation.convert.catalog.database.enabled|true|When set to true, The engine converts the JDBC methods of set/get Catalog and set/get Schema to the implementation of different engines|boolean|1.6.0 kyuubi.engine.operation.log.dir.root|engine_operation_logs|Root directory for query operation log at engine-side.|string|1.4.0 -kyuubi.engine.pool.name|engine-pool|The name of engine pool.|string|1.5.0 +kyuubi.engine.pool.name|engine-pool|The name of the engine pool.|string|1.5.0 kyuubi.engine.pool.selectPolicy|RANDOM|The select policy of an engine from the corresponding engine pool engine for a session. |string|1.7.0 -kyuubi.engine.pool.size|-1|The size of engine pool. Note that, if the size is less than 1, the engine pool will not be enabled; otherwise, the size of the engine pool will be min(this, kyuubi.engine.pool.size.threshold).|int|1.4.0 -kyuubi.engine.pool.size.threshold|9|This parameter is introduced as a server-side parameter, and controls the upper limit of the engine pool.|int|1.4.0 +kyuubi.engine.pool.size|-1|The size of the engine pool. Note that, if the size is less than 1, the engine pool will not be enabled; otherwise, the size of the engine pool will be min(this, kyuubi.engine.pool.size.threshold).|int|1.4.0 +kyuubi.engine.pool.size.threshold|9|This parameter is introduced as a server-side parameter controlling the upper limit of the engine pool.|int|1.4.0 kyuubi.engine.session.initialize.sql||SemiColon-separated list of SQL statements to be initialized in the newly created engine session before queries. 
This configuration can not be used in JDBC url due to the limitation of Beeline/JDBC driver.|seq|1.3.0 -kyuubi.engine.share.level|USER|Engines will be shared in different levels, available configs are: |string|1.2.0 +kyuubi.engine.share.level|USER|Engines will be shared in different levels, available configs are: |string|1.2.0 kyuubi.engine.share.level.sub.domain|<undefined>|(deprecated) - Using kyuubi.engine.share.level.subdomain instead|string|1.2.0 -kyuubi.engine.share.level.subdomain|<undefined>|Allow end-users to create a subdomain for the share level of an engine. A subdomain is a case-insensitive string values that must be a valid zookeeper sub path. For example, for `USER` share level, an end-user can share a certain engine within a subdomain, not for all of its clients. End-users are free to create multiple engines in the `USER` share level. When disable engine pool, use 'default' if absent.|string|1.4.0 +kyuubi.engine.share.level.subdomain|<undefined>|Allow end-users to create a subdomain for the share level of an engine. A subdomain is a case-insensitive string values that must be a valid zookeeper subpath. For example, for the `USER` share level, an end-user can share a certain engine within a subdomain, not for all of its clients. End-users are free to create multiple engines in the `USER` share level. When disable engine pool, use 'default' if absent.|string|1.4.0 kyuubi.engine.single.spark.session|false|When set to true, this engine is running in a single session mode. 
All the JDBC/ODBC connections share the temporary views, function registries, SQL configuration and the current database.|boolean|1.3.0 -kyuubi.engine.spark.event.loggers|SPARK|A comma separated list of engine loggers, where engine/session/operation etc events go.|seq|1.7.0 +kyuubi.engine.spark.event.loggers|SPARK|A comma separated list of engine loggers, where engine/session/operation etc events go.|seq|1.7.0 kyuubi.engine.spark.python.env.archive|<undefined>|Portable python env archive used for Spark engine python language mode.|string|1.7.0 kyuubi.engine.spark.python.env.archive.exec.path|bin/python|The python exec path under the python env archive.|string|1.7.0 kyuubi.engine.spark.python.home.archive|<undefined>|Spark archive containing $SPARK_HOME/python directory, which is used to init session python worker for python language mode.|string|1.7.0 kyuubi.engine.trino.event.loggers|JSON|A comma separated list of engine history loggers, where engine/session/operation etc events go.|seq|1.7.0 -kyuubi.engine.trino.extra.classpath|<undefined>|The extra classpath for the trino query engine, for configuring other libs which may need by the trino engine |string|1.6.0 -kyuubi.engine.trino.java.options|<undefined>|The extra java options for the trino query engine|string|1.6.0 -kyuubi.engine.trino.memory|1g|The heap memory for the trino query engine|string|1.6.0 -kyuubi.engine.type|SPARK_SQL|Specify the detailed engine that supported by the Kyuubi. The engine type bindings to SESSION scope. This configuration is experimental. 
Currently, available configs are: |string|1.4.0 +kyuubi.engine.trino.extra.classpath|<undefined>|The extra classpath for the Trino query engine, for configuring other libs which may need by the Trino engine |string|1.6.0 +kyuubi.engine.trino.java.options|<undefined>|The extra java options for the Trino query engine|string|1.6.0 +kyuubi.engine.trino.memory|1g|The heap memory for the Trino query engine|string|1.6.0 +kyuubi.engine.type|SPARK_SQL|Specify the detailed engine supported by Kyuubi. The engine type bindings to SESSION scope. This configuration is experimental. Currently, available configs are: |string|1.4.0 kyuubi.engine.ui.retainedSessions|200|The number of SQL client sessions kept in the Kyuubi Query Engine web UI.|int|1.4.0 kyuubi.engine.ui.retainedStatements|200|The number of statements kept in the Kyuubi Query Engine web UI.|int|1.4.0 kyuubi.engine.ui.stop.enabled|true|When true, allows Kyuubi engine to be killed from the Spark Web UI.|boolean|1.3.0 -kyuubi.engine.user.isolated.spark.session|true|When set to false, if the engine is running in a group or server share level, all the JDBC/ODBC connections will be isolated against the user. Including: the temporary views, function registries, SQL configuration and the current database. Note that, it does not affect if the share level is connection or user.|boolean|1.6.0 -kyuubi.engine.user.isolated.spark.session.idle.interval|PT1M|The interval to check if the user isolated spark session is timeout.|duration|1.6.0 -kyuubi.engine.user.isolated.spark.session.idle.timeout|PT6H|If kyuubi.engine.user.isolated.spark.session is false, we will release the spark session if its corresponding user is inactive after this configured timeout.|duration|1.6.0 +kyuubi.engine.user.isolated.spark.session|true|When set to false, if the engine is running in a group or server share level, all the JDBC/ODBC connections will be isolated against the user. 
Including the temporary views, function registries, SQL configuration, and the current database. Note that, it does not affect if the share level is connection or user.|boolean|1.6.0 +kyuubi.engine.user.isolated.spark.session.idle.interval|PT1M|The interval to check if the user-isolated Spark session is timeout.|duration|1.6.0 +kyuubi.engine.user.isolated.spark.session.idle.timeout|PT6H|If kyuubi.engine.user.isolated.spark.session is false, we will release the Spark session if its corresponding user is inactive after this configured timeout.|duration|1.6.0 ### Event @@ -287,12 +287,12 @@ Key | Default | Meaning | Type | Since --- | --- | --- | --- | --- kyuubi.frontend.backoff.slot.length|PT0.1S|(deprecated) Time to back off during login to the thrift frontend service.|duration|1.0.0 kyuubi.frontend.bind.host|<undefined>|Hostname or IP of the machine on which to run the frontend services.|string|1.0.0 -kyuubi.frontend.bind.port|10009|(deprecated) Port of the machine on which to run the thrift frontend service via binary protocol.|int|1.0.0 -kyuubi.frontend.connection.url.use.hostname|true|When true, frontend services prefer hostname, otherwise, ip address. Note that, the default value is set to `false` when engine running on Kubernetes to prevent potential network issue.|boolean|1.5.0 +kyuubi.frontend.bind.port|10009|(deprecated) Port of the machine on which to run the thrift frontend service via the binary protocol.|int|1.0.0 +kyuubi.frontend.connection.url.use.hostname|true|When true, frontend services prefer hostname, otherwise, ip address. 
Note that, the default value is set to `false` when engine running on Kubernetes to prevent potential network issues.|boolean|1.5.0 kyuubi.frontend.login.timeout|PT20S|(deprecated) Timeout for Thrift clients during login to the thrift frontend service.|duration|1.0.0 kyuubi.frontend.max.message.size|104857600|(deprecated) Maximum message size in bytes a Kyuubi server will accept.|int|1.0.0 -kyuubi.frontend.max.worker.threads|999|(deprecated) Maximum number of threads in the of frontend worker thread pool for the thrift frontend service|int|1.0.0 -kyuubi.frontend.min.worker.threads|9|(deprecated) Minimum number of threads in the of frontend worker thread pool for the thrift frontend service|int|1.0.0 +kyuubi.frontend.max.worker.threads|999|(deprecated) Maximum number of threads in the frontend worker thread pool for the thrift frontend service|int|1.0.0 +kyuubi.frontend.min.worker.threads|9|(deprecated) Minimum number of threads in the frontend worker thread pool for the thrift frontend service|int|1.0.0 kyuubi.frontend.mysql.bind.host|<undefined>|Hostname or IP of the machine on which to run the MySQL frontend service.|string|1.4.0 kyuubi.frontend.mysql.bind.port|3309|Port of the machine on which to run the MySQL frontend service.|int|1.4.0 kyuubi.frontend.mysql.max.worker.threads|999|Maximum number of threads in the command execution thread pool for the MySQL frontend service|int|1.4.0 @@ -300,17 +300,17 @@ kyuubi.frontend.mysql.min.worker.threads|9|Minimum number of threads in the comm kyuubi.frontend.mysql.netty.worker.threads|<undefined>|Number of thread in the netty worker event loop of MySQL frontend service. 
Use min(cpu_cores, 8) in default.|int|1.4.0 kyuubi.frontend.mysql.worker.keepalive.time|PT1M|Time(ms) that an idle async thread of the command execution thread pool will wait for a new task to arrive before terminating in MySQL frontend service|duration|1.4.0 kyuubi.frontend.protocols|THRIFT_BINARY|A comma separated list for all frontend protocols |seq|1.4.0 -kyuubi.frontend.proxy.http.client.ip.header|X-Real-IP|The http header to record the real client ip address. If your server is behind a load balancer or other proxy, the server will see this load balancer or proxy IP address as the client IP address, to get around this common issue, most load balancers or proxies offer the ability to record the real remote IP address in an HTTP header that will be added to the request for other devices to use. Note that, because the header value can be specified to any ip address, so it will not be used for authentication.|string|1.6.0 +kyuubi.frontend.proxy.http.client.ip.header|X-Real-IP|The HTTP header to record the real client IP address. If your server is behind a load balancer or other proxy, the server will see this load balancer or proxy IP address as the client IP address, to get around this common issue, most load balancers or proxies offer the ability to record the real remote IP address in an HTTP header that will be added to the request for other devices to use. 
Note that, because the header value can be specified to any IP address, so it will not be used for authentication.|string|1.6.0 kyuubi.frontend.rest.bind.host|<undefined>|Hostname or IP of the machine on which to run the REST frontend service.|string|1.4.0 kyuubi.frontend.rest.bind.port|10099|Port of the machine on which to run the REST frontend service.|int|1.4.0 -kyuubi.frontend.rest.max.worker.threads|999|Maximum number of threads in the of frontend worker thread pool for the rest frontend service|int|1.6.2 +kyuubi.frontend.rest.max.worker.threads|999|Maximum number of threads in the frontend worker thread pool for the rest frontend service|int|1.6.2 kyuubi.frontend.ssl.keystore.algorithm|<undefined>|SSL certificate keystore algorithm.|string|1.7.0 kyuubi.frontend.ssl.keystore.password|<undefined>|SSL certificate keystore password.|string|1.7.0 kyuubi.frontend.ssl.keystore.path|<undefined>|SSL certificate keystore location.|string|1.7.0 kyuubi.frontend.ssl.keystore.type|<undefined>|SSL certificate keystore type.|string|1.7.0 kyuubi.frontend.thrift.backoff.slot.length|PT0.1S|Time to back off during login to the thrift frontend service.|duration|1.4.0 -kyuubi.frontend.thrift.binary.bind.host|<undefined>|Hostname or IP of the machine on which to run the thrift frontend service via binary protocol.|string|1.4.0 -kyuubi.frontend.thrift.binary.bind.port|10009|Port of the machine on which to run the thrift frontend service via binary protocol.|int|1.4.0 +kyuubi.frontend.thrift.binary.bind.host|<undefined>|Hostname or IP of the machine on which to run the thrift frontend service via the binary protocol.|string|1.4.0 +kyuubi.frontend.thrift.binary.bind.port|10009|Port of the machine on which to run the thrift frontend service via the binary protocol.|int|1.4.0 kyuubi.frontend.thrift.binary.ssl.disallowed.protocols|SSLv2,SSLv3|SSL versions to disallow for Kyuubi thrift binary frontend.|seq|1.7.0 kyuubi.frontend.thrift.binary.ssl.enabled|false|Set this to true for using 
SSL encryption in thrift binary frontend server.|boolean|1.7.0 kyuubi.frontend.thrift.binary.ssl.include.ciphersuites||A comma separated list of include SSL cipher suite names for thrift binary frontend.|seq|1.7.0 @@ -318,7 +318,7 @@ kyuubi.frontend.thrift.http.allow.user.substitution|true|Allow alternate user to kyuubi.frontend.thrift.http.bind.host|<undefined>|Hostname or IP of the machine on which to run the thrift frontend service via http protocol.|string|1.6.0 kyuubi.frontend.thrift.http.bind.port|10010|Port of the machine on which to run the thrift frontend service via http protocol.|int|1.6.0 kyuubi.frontend.thrift.http.compression.enabled|true|Enable thrift http compression via Jetty compression support|boolean|1.6.0 -kyuubi.frontend.thrift.http.cookie.auth.enabled|true|When true, Kyuubi in HTTP transport mode, will use cookie based authentication mechanism|boolean|1.6.0 +kyuubi.frontend.thrift.http.cookie.auth.enabled|true|When true, Kyuubi in HTTP transport mode, will use cookie-based authentication mechanism|boolean|1.6.0 kyuubi.frontend.thrift.http.cookie.domain|<undefined>|Domain for the Kyuubi generated cookies|string|1.6.0 kyuubi.frontend.thrift.http.cookie.is.httponly|true|HttpOnly attribute of the Kyuubi generated cookie.|boolean|1.6.0 kyuubi.frontend.thrift.http.cookie.max.age|86400|Maximum age in seconds for server side cookie used by Kyuubi in HTTP mode.|int|1.6.0 @@ -332,15 +332,15 @@ kyuubi.frontend.thrift.http.ssl.keystore.password|<undefined>|SSL certific kyuubi.frontend.thrift.http.ssl.keystore.path|<undefined>|SSL certificate keystore location.|string|1.6.0 kyuubi.frontend.thrift.http.ssl.protocol.blacklist|SSLv2,SSLv3|SSL Versions to disable when using HTTP transport mode.|seq|1.6.0 kyuubi.frontend.thrift.http.use.SSL|false|Set this to true for using SSL encryption in http mode.|boolean|1.6.0 -kyuubi.frontend.thrift.http.xsrf.filter.enabled|false|If enabled, Kyuubi will block any requests made to it over http if an X-XSRF-HEADER header 
is not present|boolean|1.6.0 +kyuubi.frontend.thrift.http.xsrf.filter.enabled|false|If enabled, Kyuubi will block any requests made to it over HTTP if an X-XSRF-HEADER header is not present|boolean|1.6.0 kyuubi.frontend.thrift.login.timeout|PT20S|Timeout for Thrift clients during login to the thrift frontend service.|duration|1.4.0 kyuubi.frontend.thrift.max.message.size|104857600|Maximum message size in bytes a Kyuubi server will accept.|int|1.4.0 -kyuubi.frontend.thrift.max.worker.threads|999|Maximum number of threads in the of frontend worker thread pool for the thrift frontend service|int|1.4.0 -kyuubi.frontend.thrift.min.worker.threads|9|Minimum number of threads in the of frontend worker thread pool for the thrift frontend service|int|1.4.0 +kyuubi.frontend.thrift.max.worker.threads|999|Maximum number of threads in the frontend worker thread pool for the thrift frontend service|int|1.4.0 +kyuubi.frontend.thrift.min.worker.threads|9|Minimum number of threads in the frontend worker thread pool for the thrift frontend service|int|1.4.0 kyuubi.frontend.thrift.worker.keepalive.time|PT1M|Keep-alive time (in milliseconds) for an idle worker thread|duration|1.4.0 kyuubi.frontend.trino.bind.host|<undefined>|Hostname or IP of the machine on which to run the TRINO frontend service.|string|1.7.0 kyuubi.frontend.trino.bind.port|10999|Port of the machine on which to run the TRINO frontend service.|int|1.7.0 -kyuubi.frontend.trino.max.worker.threads|999|Maximum number of threads in the of frontend worker thread pool for the trino frontend service|int|1.7.0 +kyuubi.frontend.trino.max.worker.threads|999|Maximum number of threads in the frontend worker thread pool for the Trino frontend service|int|1.7.0 kyuubi.frontend.worker.keepalive.time|PT1M|(deprecated) Keep-alive time (in milliseconds) for an idle worker thread|duration|1.0.0 @@ -350,27 +350,27 @@ Key | Default | Meaning | Type | Since --- | --- | --- | --- | --- kyuubi.ha.addresses||The connection string for the 
discovery ensemble|string|1.6.0 kyuubi.ha.client.class|org.apache.kyuubi.ha.client.zookeeper.ZookeeperDiscoveryClient|Class name for service discovery client.|string|1.6.0 -kyuubi.ha.etcd.lease.timeout|PT10S|Timeout for etcd keep alive lease. The kyuubi server will known unexpected loss of engine after up to this seconds.|duration|1.6.0 +kyuubi.ha.etcd.lease.timeout|PT10S|Timeout for etcd keep alive lease. The kyuubi server will know the unexpected loss of engine after up to this seconds.|duration|1.6.0 kyuubi.ha.etcd.ssl.ca.path|<undefined>|Where the etcd CA certificate file is stored.|string|1.6.0 kyuubi.ha.etcd.ssl.client.certificate.path|<undefined>|Where the etcd SSL certificate file is stored.|string|1.6.0 kyuubi.ha.etcd.ssl.client.key.path|<undefined>|Where the etcd SSL key file is stored.|string|1.6.0 -kyuubi.ha.etcd.ssl.enabled|false|When set to true, will build a ssl secured etcd client.|boolean|1.6.0 +kyuubi.ha.etcd.ssl.enabled|false|When set to true, will build an SSL secured etcd client.|boolean|1.6.0 kyuubi.ha.namespace|kyuubi|The root directory for the service to deploy its instance uri|string|1.6.0 -kyuubi.ha.zookeeper.acl.enabled|false|Set to true if the zookeeper ensemble is kerberized|boolean|1.0.0 -kyuubi.ha.zookeeper.auth.digest|<undefined>|The digest auth string is used for zookeeper authentication, like: username:password.|string|1.3.2 -kyuubi.ha.zookeeper.auth.keytab|<undefined>|Location of Kyuubi server's keytab is used for zookeeper authentication.|string|1.3.2 -kyuubi.ha.zookeeper.auth.principal|<undefined>|Name of the Kerberos principal is used for zookeeper authentication.|string|1.3.2 -kyuubi.ha.zookeeper.auth.type|NONE|The type of zookeeper authentication, all candidates are |string|1.3.2 -kyuubi.ha.zookeeper.connection.base.retry.wait|1000|Initial amount of time to wait between retries to the zookeeper ensemble|int|1.0.0 -kyuubi.ha.zookeeper.connection.max.retries|3|Max retry times for connecting to the zookeeper ensemble|int|1.0.0 
+kyuubi.ha.zookeeper.acl.enabled|false|Set to true if the ZooKeeper ensemble is kerberized|boolean|1.0.0 +kyuubi.ha.zookeeper.auth.digest|<undefined>|The digest auth string is used for ZooKeeper authentication, like: username:password.|string|1.3.2 +kyuubi.ha.zookeeper.auth.keytab|<undefined>|Location of the Kyuubi server's keytab is used for ZooKeeper authentication.|string|1.3.2 +kyuubi.ha.zookeeper.auth.principal|<undefined>|Name of the Kerberos principal is used for ZooKeeper authentication.|string|1.3.2 +kyuubi.ha.zookeeper.auth.type|NONE|The type of ZooKeeper authentication, all candidates are |string|1.3.2 +kyuubi.ha.zookeeper.connection.base.retry.wait|1000|Initial amount of time to wait between retries to the ZooKeeper ensemble|int|1.0.0 +kyuubi.ha.zookeeper.connection.max.retries|3|Max retry times for connecting to the ZooKeeper ensemble|int|1.0.0 kyuubi.ha.zookeeper.connection.max.retry.wait|30000|Max amount of time to wait between retries for BOUNDED_EXPONENTIAL_BACKOFF policy can reach, or max time until elapsed for UNTIL_ELAPSED policy to connect the zookeeper ensemble|int|1.0.0 -kyuubi.ha.zookeeper.connection.retry.policy|EXPONENTIAL_BACKOFF|The retry policy for connecting to the zookeeper ensemble, all candidates are: |string|1.0.0 -kyuubi.ha.zookeeper.connection.timeout|15000|The timeout(ms) of creating the connection to the zookeeper ensemble|int|1.0.0 -kyuubi.ha.zookeeper.engine.auth.type|NONE|The type of zookeeper authentication for engine, all candidates are |string|1.3.2 +kyuubi.ha.zookeeper.connection.retry.policy|EXPONENTIAL_BACKOFF|The retry policy for connecting to the ZooKeeper ensemble, all candidates are: |string|1.0.0 +kyuubi.ha.zookeeper.connection.timeout|15000|The timeout(ms) of creating the connection to the ZooKeeper ensemble|int|1.0.0 +kyuubi.ha.zookeeper.engine.auth.type|NONE|The type of ZooKeeper authentication for the engine, all candidates are |string|1.3.2 kyuubi.ha.zookeeper.namespace|kyuubi|(deprecated) The root directory 
for the service to deploy its instance uri|string|1.0.0 -kyuubi.ha.zookeeper.node.creation.timeout|PT2M|Timeout for creating zookeeper node|duration|1.2.0 -kyuubi.ha.zookeeper.publish.configs|false|When set to true, publish Kerberos configs to Zookeeper.Note that the Hive driver needs to be greater than 1.3 or 2.0 or apply HIVE-11581 patch.|boolean|1.4.0 -kyuubi.ha.zookeeper.quorum||(deprecated) The connection string for the zookeeper ensemble|string|1.0.0 +kyuubi.ha.zookeeper.node.creation.timeout|PT2M|Timeout for creating ZooKeeper node|duration|1.2.0 +kyuubi.ha.zookeeper.publish.configs|false|When set to true, publish Kerberos configs to Zookeeper. Note that the Hive driver needs to be greater than 1.3 or 2.0 or apply HIVE-11581 patch.|boolean|1.4.0 +kyuubi.ha.zookeeper.quorum||(deprecated) The connection string for the ZooKeeper ensemble|string|1.0.0 kyuubi.ha.zookeeper.session.timeout|60000|The timeout(ms) of a connected session to be idled|int|1.0.0 @@ -378,7 +378,7 @@ kyuubi.ha.zookeeper.session.timeout|60000|The timeout(ms) of a connected session Key | Default | Meaning | Type | Since --- | --- | --- | --- | --- -kyuubi.kinit.interval|PT1H|How often will Kyuubi server run `kinit -kt [keytab] [principal]` to renew the local Kerberos credentials cache|duration|1.0.0 +kyuubi.kinit.interval|PT1H|How often will the Kyuubi server run `kinit -kt [keytab] [principal]` to renew the local Kerberos credentials cache|duration|1.0.0 kyuubi.kinit.keytab|<undefined>|Location of Kyuubi server's keytab.|string|1.0.0 kyuubi.kinit.max.attempts|10|How many times will `kinit` process retry|int|1.0.0 kyuubi.kinit.principal|<undefined>|Name of the Kerberos principal.|string|1.0.0 @@ -391,7 +391,7 @@ Key | Default | Meaning | Type | Since kyuubi.kubernetes.authenticate.caCertFile|<undefined>|Path to the CA cert file for connecting to the Kubernetes API server over TLS from the kyuubi. Specify this as a path as opposed to a URI (i.e. 
do not provide a scheme)|string|1.7.0 kyuubi.kubernetes.authenticate.clientCertFile|<undefined>|Path to the client cert file for connecting to the Kubernetes API server over TLS from the kyuubi. Specify this as a path as opposed to a URI (i.e. do not provide a scheme)|string|1.7.0 kyuubi.kubernetes.authenticate.clientKeyFile|<undefined>|Path to the client key file for connecting to the Kubernetes API server over TLS from the kyuubi. Specify this as a path as opposed to a URI (i.e. do not provide a scheme)|string|1.7.0 -kyuubi.kubernetes.authenticate.oauthToken|<undefined>|The OAuth token to use when authenticating against the Kubernetes API server. Note that unlike the other authentication options, this must be the exact string value of the token to use for the authentication.|string|1.7.0 +kyuubi.kubernetes.authenticate.oauthToken|<undefined>|The OAuth token to use when authenticating against the Kubernetes API server. Note that, unlike the other authentication options, this must be the exact string value of the token to use for the authentication.|string|1.7.0 kyuubi.kubernetes.authenticate.oauthTokenFile|<undefined>|Path to the file containing the OAuth token to use when authenticating against the Kubernetes API server. Specify this as a path as opposed to a URI (i.e. do not provide a scheme)|string|1.7.0 kyuubi.kubernetes.context|<undefined>|The desired context from your kubernetes config file used to configure the K8S client for interacting with the cluster.|string|1.6.0 kyuubi.kubernetes.master.address|<undefined>|The internal Kubernetes master (API server) address to be used for kyuubi.|string|1.7.0 @@ -403,20 +403,20 @@ kyuubi.kubernetes.trust.certificates|false|If set to true then client can submit Key | Default | Meaning | Type | Since --- | --- | --- | --- | --- -kyuubi.metadata.cleaner.enabled|true|Whether to clean the metadata periodically. 
If it is enabled, Kyuubi will clean the metadata that is in terminate state with max age limitation.|boolean|1.6.0 +kyuubi.metadata.cleaner.enabled|true|Whether to clean the metadata periodically. If it is enabled, Kyuubi will clean the metadata that is in the terminate state with max age limitation.|boolean|1.6.0 kyuubi.metadata.cleaner.interval|PT30M|The interval to check and clean expired metadata.|duration|1.6.0 -kyuubi.metadata.max.age|PT72H|The maximum age of metadata, the metadata that exceeds the age will be cleaned.|duration|1.6.0 -kyuubi.metadata.recovery.threads|10|The number of threads for recovery from metadata store when Kyuubi server restarting.|int|1.6.0 +kyuubi.metadata.max.age|PT72H|The maximum age of metadata, the metadata exceeding the age will be cleaned.|duration|1.6.0 +kyuubi.metadata.recovery.threads|10|The number of threads for recovery from the metadata store when the Kyuubi server restarts.|int|1.6.0 kyuubi.metadata.request.retry.interval|PT5S|The interval to check and trigger the metadata request retry tasks.|duration|1.6.0 kyuubi.metadata.request.retry.queue.size|65536|The maximum queue size for buffering metadata requests in memory when the external metadata storage is down. Requests will be dropped if the queue exceeds.|int|1.6.0 -kyuubi.metadata.request.retry.threads|10|Number of threads in the metadata request retry manager thread pool. The metadata store might be unavailable sometimes and the requests will fail, to tolerant for this case and unblock the main thread, we support to retry the failed requests in async way.|int|1.6.0 +kyuubi.metadata.request.retry.threads|10|Number of threads in the metadata request retry manager thread pool. 
The metadata store might be unavailable sometimes and the requests will fail, tolerant for this case and unblock the main thread, we support retrying the failed requests in an async way.|int|1.6.0 kyuubi.metadata.store.class|org.apache.kyuubi.server.metadata.jdbc.JDBCMetadataStore|Fully qualified class name for server metadata store.|string|1.6.0 -kyuubi.metadata.store.jdbc.database.schema.init|true|Whether to init the jdbc metadata store database schema.|boolean|1.6.0 -kyuubi.metadata.store.jdbc.database.type|DERBY|The database type for server jdbc metadata store.") @@ -1633,7 +1640,7 @@ object KyuubiConf { .fallbackConf(LEGACY_ENGINE_SHARE_LEVEL) val ENGINE_TYPE: ConfigEntry[String] = buildConf("kyuubi.engine.type") - .doc("Specify the detailed engine that supported by the Kyuubi. The engine type bindings to" + + .doc("Specify the detailed engine supported by Kyuubi. The engine type bindings to" + " SESSION scope. This configuration is experimental. Currently, available configs are: ") .version("1.4.0") .stringConf @@ -1662,22 +1669,22 @@ object KyuubiConf { .createWithDefault(false) val ENGINE_POOL_NAME: ConfigEntry[String] = buildConf("kyuubi.engine.pool.name") - .doc("The name of engine pool.") + .doc("The name of the engine pool.") .version("1.5.0") .stringConf .checkValue(validZookeeperSubPath.matcher(_).matches(), "must be valid zookeeper sub path.") .createWithDefault("engine-pool") val ENGINE_POOL_SIZE_THRESHOLD: ConfigEntry[Int] = buildConf("kyuubi.engine.pool.size.threshold") - .doc("This parameter is introduced as a server-side parameter, " + - "and controls the upper limit of the engine pool.") + .doc("This parameter is introduced as a server-side parameter " + + "controlling the upper limit of the engine pool.") .version("1.4.0") .intConf .checkValue(s => s > 0 && s < 33, "Invalid engine pool threshold, it should be in [1, 32]") .createWithDefault(9) val ENGINE_POOL_SIZE: ConfigEntry[Int] = buildConf("kyuubi.engine.pool.size") - .doc("The size of 
engine pool. Note that, " + + .doc("The size of the engine pool. Note that, " + "if the size is less than 1, the engine pool will not be enabled; " + "otherwise, the size of the engine pool will be " + s"min(this, ${ENGINE_POOL_SIZE_THRESHOLD.key}).") @@ -1760,8 +1767,8 @@ object KyuubiConf { val OPERATION_SCHEDULER_POOL: OptionalConfigEntry[String] = buildConf("kyuubi.operation.scheduler.pool") - .doc("The scheduler pool of job. Note that, this config should be used after change Spark " + - "config spark.scheduler.mode=FAIR.") + .doc("The scheduler pool of job. Note that, this config should be used after changing " + + "Spark config spark.scheduler.mode=FAIR.") .version("1.1.1") .stringConf .createOptional @@ -1778,8 +1785,8 @@ object KyuubiConf { val ENGINE_USER_ISOLATED_SPARK_SESSION: ConfigEntry[Boolean] = buildConf("kyuubi.engine.user.isolated.spark.session") .doc("When set to false, if the engine is running in a group or server share level, " + - "all the JDBC/ODBC connections will be isolated against the user. Including: " + - "the temporary views, function registries, SQL configuration and the current database. " + + "all the JDBC/ODBC connections will be isolated against the user. Including " + + "the temporary views, function registries, SQL configuration, and the current database. 
" + "Note that, it does not affect if the share level is connection or user.") .version("1.6.0") .booleanConf @@ -1788,21 +1795,21 @@ object KyuubiConf { val ENGINE_USER_ISOLATED_SPARK_SESSION_IDLE_TIMEOUT: ConfigEntry[Long] = buildConf("kyuubi.engine.user.isolated.spark.session.idle.timeout") .doc(s"If ${ENGINE_USER_ISOLATED_SPARK_SESSION.key} is false, we will release the " + - s"spark session if its corresponding user is inactive after this configured timeout.") + s"Spark session if its corresponding user is inactive after this configured timeout.") .version("1.6.0") .timeConf .createWithDefault(Duration.ofHours(6).toMillis) val ENGINE_USER_ISOLATED_SPARK_SESSION_IDLE_INTERVAL: ConfigEntry[Long] = buildConf("kyuubi.engine.user.isolated.spark.session.idle.interval") - .doc(s"The interval to check if the user isolated spark session is timeout.") + .doc(s"The interval to check if the user-isolated Spark session is timeout.") .version("1.6.0") .timeConf .createWithDefault(Duration.ofMinutes(1).toMillis) val SERVER_EVENT_JSON_LOG_PATH: ConfigEntry[String] = buildConf("kyuubi.backend.server.event.json.log.path") - .doc("The location of server events go for the builtin JSON logger") + .doc("The location of server events go for the built-in JSON logger") .version("1.4.0") .serverOnly .stringConf @@ -1810,7 +1817,7 @@ object KyuubiConf { val ENGINE_EVENT_JSON_LOG_PATH: ConfigEntry[String] = buildConf("kyuubi.engine.event.json.log.path") - .doc("The location of all the engine events go for the builtin JSON logger." + " Note that: Kyuubi supports custom event handlers with the Java SPI." 
+ " To register a custom event handler," + - " user need to implement a class" + + " the user needs to implement a class" + " which is a child of org.apache.kyuubi.events.handler.CustomEventHandlerProvider" + - " which has zero-arg constructor.") + " which has a zero-arg constructor.") .version("1.4.0") .serverOnly .stringConf @@ -1843,16 +1850,16 @@ object KyuubiConf { buildConf("kyuubi.engine.event.loggers") .doc("A comma separated list of engine history loggers, where engine/session/operation etc" + " events go." + " Note that: Kyuubi supports custom event handlers with the Java SPI." + " To register a custom event handler," + - " user need to implement a class" + - " which is a child of org.apache.kyuubi.events.handler.CustomEventHandlerProvider" + - " which has zero-arg constructor.") + " the user needs to implement a subclass" + + " of org.apache.kyuubi.events.handler.CustomEventHandlerProvider" + + " which has a zero-arg constructor.") .version("1.3.0") .stringConf .transform(_.toUpperCase(Locale.ROOT)) @@ -1956,8 +1963,8 @@ object KyuubiConf { val SESSION_NAME: OptionalConfigEntry[String] = buildConf("kyuubi.session.name") - .doc("A human readable name of session and we use empty string by default. " + - "This name will be recorded in event. Note that, we only apply this value from " + + .doc("A human readable name of the session and we use empty string by default. " + + "This name will be recorded in the event. Note that, we only apply this value from " + "session conf.") .version("1.4.0") .stringConf @@ -1991,8 +1998,9 @@ object KyuubiConf { val OPERATION_PLAN_ONLY_OUT_STYLE: ConfigEntry[String] = buildConf("kyuubi.operation.plan.only.output.style") - .doc("Configures the planOnly output style, The value can be 'plain' and 'json', default " + - "value is 'plain', this configuration supports only the output styles of the Spark engine") + .doc("Configures the planOnly output style. The value can be 'plain' or 'json', and " + + "the default value is 'plain'. 
This configuration supports only the output styles " + + "of the Spark engine") .version("1.7.0") .stringConf .transform(_.toUpperCase(Locale.ROOT)) @@ -2006,7 +2014,7 @@ object KyuubiConf { buildConf("kyuubi.operation.plan.only.excludes") .doc("Comma-separated list of query plan names, in the form of simple class names, i.e, " + "for `set abc=xyz`, the value will be `SetCommand`. For those auxiliary plans, such as " + - "`switch databases`, `set properties`, or `create temporary view` e.t.c, " + + "`switch databases`, `set properties`, or `create temporary view` etc., " + "which are used for setup evaluating environments for analyzing actual queries, " + "we can use this config to exclude them and let them take effect. " + s"See also ${OPERATION_PLAN_ONLY_MODE.key}.") @@ -2049,9 +2057,9 @@ object KyuubiConf { val SESSION_CONF_ADVISOR: OptionalConfigEntry[String] = buildConf("kyuubi.session.conf.advisor") .doc("A config advisor plugin for Kyuubi Server. This plugin can provide some custom " + - "configs for different user or session configs and overwrite the session configs before " + - "open a new session. This config value should be a class which is a child of " + - "'org.apache.kyuubi.plugin.SessionConfAdvisor' which has zero-arg constructor.") + "configs for different users or session configs and overwrite the session configs before " + + "open a new session. This config value should be a subclass of " + + "'org.apache.kyuubi.plugin.SessionConfAdvisor' which has a zero-arg constructor.") .version("1.5.0") .stringConf .createOptional @@ -2059,9 +2067,9 @@ object KyuubiConf { val GROUP_PROVIDER: ConfigEntry[String] = buildConf("kyuubi.session.group.provider") .doc("A group provider plugin for Kyuubi Server. This plugin can provide primary group " + - "and groups information for different user or session configs. This config value " + - "should be a class which is a child of 'org.apache.kyuubi.plugin.GroupProvider' which " + - "has zero-arg constructor. 
Kyuubi provides the following built-in implementations: " + + "and groups information for different users or session configs. This config value " + + "should be a subclass of 'org.apache.kyuubi.plugin.GroupProvider' which " + + "has a zero-arg constructor. Kyuubi provides the following built-in implementations: " + "
  • hadoop: delegate the user group mapping to hadoop UserGroupInformation.
  • ") .version("1.7.0") .stringConf @@ -2091,7 +2099,7 @@ object KyuubiConf { val ENGINE_SPARK_SHOW_PROGRESS: ConfigEntry[Boolean] = buildConf("kyuubi.session.engine.spark.showProgress") - .doc("When true, show the progress bar in the spark engine log.") + .doc("When true, show the progress bar in the Spark engine log.") .version("1.6.0") .booleanConf .createWithDefault(false) @@ -2113,22 +2121,22 @@ object KyuubiConf { val ENGINE_TRINO_MEMORY: ConfigEntry[String] = buildConf("kyuubi.engine.trino.memory") - .doc("The heap memory for the trino query engine") + .doc("The heap memory for the Trino query engine") .version("1.6.0") .stringConf .createWithDefault("1g") val ENGINE_TRINO_JAVA_OPTIONS: OptionalConfigEntry[String] = buildConf("kyuubi.engine.trino.java.options") - .doc("The extra java options for the trino query engine") + .doc("The extra java options for the Trino query engine") .version("1.6.0") .stringConf .createOptional val ENGINE_TRINO_EXTRA_CLASSPATH: OptionalConfigEntry[String] = buildConf("kyuubi.engine.trino.extra.classpath") - .doc("The extra classpath for the trino query engine, " + - "for configuring other libs which may need by the trino engine ") + .doc("The extra classpath for the Trino query engine, " + + "for configuring other libs which may be needed by the Trino engine ") .version("1.6.0") .stringConf .createOptional @@ -2150,28 +2158,28 @@ object KyuubiConf { val ENGINE_HIVE_EXTRA_CLASSPATH: OptionalConfigEntry[String] = buildConf("kyuubi.engine.hive.extra.classpath") .doc("The extra classpath for the hive query engine, for configuring location" + - " of hadoop client jars, etc") + " of the hadoop client jars, etc.") .version("1.6.0") .stringConf .createOptional val ENGINE_FLINK_MEMORY: ConfigEntry[String] = buildConf("kyuubi.engine.flink.memory") - .doc("The heap memory for the flink sql engine") + .doc("The heap memory for the Flink SQL engine") .version("1.6.0") .stringConf .createWithDefault("1g") val ENGINE_FLINK_JAVA_OPTIONS: 
OptionalConfigEntry[String] = buildConf("kyuubi.engine.flink.java.options") - .doc("The extra java options for the flink sql engine") + .doc("The extra java options for the Flink SQL engine") .version("1.6.0") .stringConf .createOptional val ENGINE_FLINK_EXTRA_CLASSPATH: OptionalConfigEntry[String] = buildConf("kyuubi.engine.flink.extra.classpath") - .doc("The extra classpath for the flink sql engine, for configuring location" + + .doc("The extra classpath for the Flink SQL engine, for configuring the location" + " of hadoop client jars, etc") .version("1.6.0") .stringConf @@ -2258,15 +2266,15 @@ object KyuubiConf { val OPERATION_SPARK_LISTENER_ENABLED: ConfigEntry[Boolean] = buildConf("kyuubi.operation.spark.listener.enabled") - .doc("When set to true, Spark engine registers a SQLOperationListener before executing " + - "the statement, logs a few summary statistics when each stage completes.") + .doc("When set to true, Spark engine registers an SQLOperationListener before executing " + + "the statement, logging a few summary statistics when each stage completes.") .version("1.6.0") .booleanConf .createWithDefault(true) val ENGINE_JDBC_DRIVER_CLASS: OptionalConfigEntry[String] = buildConf("kyuubi.engine.jdbc.driver.class") - .doc("The driver class for jdbc engine connection") + .doc("The driver class for JDBC engine connection") .version("1.6.0") .stringConf .createOptional @@ -2302,14 +2310,14 @@ object KyuubiConf { val ENGINE_JDBC_CONNECTION_PROVIDER: OptionalConfigEntry[String] = buildConf("kyuubi.engine.jdbc.connection.provider") - .doc("The connection provider is used for getting a connection from server") + .doc("The connection provider is used for getting a connection from the server") .version("1.6.0") .stringConf .createOptional val ENGINE_JDBC_SHORT_NAME: OptionalConfigEntry[String] = buildConf("kyuubi.engine.jdbc.type") - .doc("The short name of jdbc type") + .doc("The short name of JDBC type") .version("1.6.0") .stringConf .createOptional @@ -2395,22 
+2403,22 @@ object KyuubiConf { val ENGINE_JDBC_MEMORY: ConfigEntry[String] = buildConf("kyuubi.engine.jdbc.memory") - .doc("The heap memory for the jdbc query engine") + .doc("The heap memory for the JDBC query engine") .version("1.6.0") .stringConf .createWithDefault("1g") val ENGINE_JDBC_JAVA_OPTIONS: OptionalConfigEntry[String] = buildConf("kyuubi.engine.jdbc.java.options") - .doc("The extra java options for the jdbc query engine") + .doc("The extra java options for the JDBC query engine") .version("1.6.0") .stringConf .createOptional val ENGINE_JDBC_EXTRA_CLASSPATH: OptionalConfigEntry[String] = buildConf("kyuubi.engine.jdbc.extra.classpath") - .doc("The extra classpath for the jdbc query engine, for configuring location" + - " of jdbc driver, etc") + .doc("The extra classpath for the JDBC query engine, for configuring the location" + + " of the JDBC driver, etc.") .version("1.6.0") .stringConf .createOptional @@ -2419,7 +2427,7 @@ object KyuubiConf { buildConf("kyuubi.engine.spark.event.loggers") .doc("A comma separated list of engine loggers, where engine/session/operation etc" + " events go.