<?xml version="1.0" encoding="UTF-8"?>
<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
<property>
<name>hive.execution.engine</name>
<value>tez</value>
</property>
<property>
<name>hive.tez.container.size</name>
<value>3072</value>
</property>
<property>
<name>atlas.hook.hive.maxThreads</name>
<value>1</value>
</property>
<property>
<name>atlas.hook.hive.minThreads</name>
<value>1</value>
</property>
<property>
<name>datanucleus.autoCreateSchema</name>
<value>false</value>
</property>
<property>
<name>datanucleus.cache.level2.type</name>
<value>none</value>
</property>
<property>
<name>datanucleus.fixedDatastore</name>
<value>true</value>
</property>
<property>
<name>dfs.client.mmap.enabled</name>
<value>false</value>
</property>
<property>
<name>dfs.short.circuit.shared.memory.watcher.interrupt.check.ms</name>
<value>0</value>
</property>
<property>
<name>hive.auto.convert.join</name>
<value>true</value>
</property>
<property>
<name>hive.auto.convert.join.noconditionaltask</name>
<value>true</value>
</property>
<property>
<name>hive.auto.convert.sortmerge.join</name>
<value>true</value>
</property>
<property>
<name>hive.auto.convert.sortmerge.join.to.mapjoin</name>
<value>true</value>
</property>
<property>
<name>hive.cbo.enable</name>
<value>true</value>
</property>
<property>
<name>hive.cli.print.header</name>
<value>false</value>
</property>
<property>
<name>hive.compactor.abortedtxn.threshold</name>
<value>1000</value>
</property>
<property>
<name>hive.compactor.check.interval</name>
<!-- Time value: "L" is not a valid unit suffix for Hive's time parser; use an
     explicit unit (seconds) as with hive.compactor.worker.timeout below. -->
<value>300s</value>
</property>
<property>
<name>hive.compactor.delta.num.threshold</name>
<value>10</value>
</property>
<property>
<name>hive.compactor.delta.pct.threshold</name>
<value>0.1f</value>
</property>
<property>
<name>hive.compactor.initiator.on</name>
<value>true</value>
</property>
<property>
<name>hive.compactor.worker.threads</name>
<value>1</value>
</property>
<property>
<name>hive.compactor.worker.timeout</name>
<value>86400s</value>
</property>
<property>
<name>hive.compute.query.using.stats</name>
<value>true</value>
</property>
<!-- NOTE: hive.conf.restricted.list is defined again near the end of this file
     with a different value; within one Hadoop Configuration resource the last
     occurrence wins, so the value below is overridden. Keep only one entry. -->
<property>
<name>hive.conf.restricted.list</name>
<value>hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role</value>
</property>
<property>
<name>hive.convert.join.bucket.mapjoin.tez</name>
<value>true</value>
</property>
<property>
<name>hive.default.fileformat</name>
<value>orc</value>
</property>
<property>
<name>hive.default.fileformat.managed</name>
<value>orc</value>
</property>
<property>
<name>hive.driver.parallel.compilation</name>
<value>true</value>
</property>
<property>
<name>hive.enforce.sortmergebucketmapjoin</name>
<value>true</value>
</property>
<property>
<name>hive.exec.compress.intermediate</name>
<value>true</value>
</property>
<property>
<name>hive.exec.compress.output</name>
<value>false</value>
</property>
<property>
<name>hive.exec.dynamic.partition</name>
<value>true</value>
</property>
<property>
<name>hive.exec.dynamic.partition.mode</name>
<value>nonstrict</value>
</property>
<property>
<name>hive.exec.failure.hooks</name>
<value></value>
</property>
<property>
<name>hive.exec.max.created.files</name>
<value>100000</value>
</property>
<property>
<name>hive.exec.max.dynamic.partitions</name>
<value>5000</value>
</property>
<property>
<name>hive.exec.max.dynamic.partitions.pernode</name>
<value>2000</value>
</property>
<property>
<name>hive.exec.orc.compression.strategy</name>
<value>SPEED</value>
</property>
<property>
<name>hive.exec.orc.default.compress</name>
<value>SNAPPY</value>
</property>
<property>
<name>hive.exec.orc.default.stripe.size</name>
<value>67108864</value>
</property>
<property>
<name>hive.exec.orc.encoding.strategy</name>
<value>SPEED</value>
</property>
<property>
<name>hive.exec.orc.split.strategy</name>
<value>HYBRID</value>
</property>
<property>
<name>hive.exec.parallel</name>
<value>false</value>
</property>
<property>
<name>hive.exec.parallel.thread.number</name>
<value>32</value>
</property>
<property>
<name>hive.exec.post.hooks</name>
<value></value>
</property>
<property>
<name>hive.exec.pre.hooks</name>
<value></value>
</property>
<property>
<name>hive.exec.reducers.bytes.per.reducer</name>
<value>67108864</value>
</property>
<property>
<name>hive.exec.reducers.max</name>
<value>1009</value>
</property>
<property>
<name>hive.exec.scratchdir</name>
<value>/tmp/hive4</value>
</property>
<property>
<name>hive.fetch.task.aggr</name>
<value>false</value>
</property>
<property>
<name>hive.fetch.task.conversion</name>
<value>more</value>
</property>
<property>
<name>hive.fetch.task.conversion.threshold</name>
<value>1073741824</value>
</property>
<property>
<name>hive.limit.optimize.enable</name>
<value>true</value>
</property>
<property>
<name>hive.limit.pushdown.memory.usage</name>
<value>0.04</value>
</property>
<property>
<name>hive.llap.auto.allow.uber</name>
<value>false</value>
</property>
<property>
<name>hive.llap.enable.grace.join.in.llap</name>
<value>false</value>
</property>
<property>
<name>hive.llap.io.memory.mode</name>
<value>cache</value>
</property>
<property>
<name>hive.llap.io.use.lrfu</name>
<value>true</value>
</property>
<property>
<name>hive.llap.management.rpc.port</name>
<value>15004</value>
</property>
<property>
<name>hive.llap.object.cache.enabled</name>
<value>true</value>
</property>
<property>
<name>hive.llap.task.scheduler.locality.delay</name>
<value>-1</value>
</property>
<property>
<name>hive.map.aggr</name>
<value>true</value>
</property>
<property>
<name>hive.map.aggr.hash.force.flush.memory.threshold</name>
<value>0.9</value>
</property>
<property>
<name>hive.map.aggr.hash.min.reduction</name>
<value>0.99</value>
</property>
<property>
<name>hive.map.aggr.hash.percentmemory</name>
<value>0.5</value>
</property>
<property>
<name>hive.mapjoin.bucket.cache.size</name>
<value>10000</value>
</property>
<property>
<name>hive.mapjoin.hybridgrace.hashtable</name>
<value>false</value>
</property>
<property>
<name>hive.mapjoin.optimized.hashtable</name>
<value>true</value>
</property>
<property>
<name>hive.merge.mapfiles</name>
<value>true</value>
</property>
<property>
<name>hive.merge.mapredfiles</name>
<value>false</value>
</property>
<property>
<name>hive.merge.nway.joins</name>
<value>true</value>
<description>
Set it to false if necessary. Cf. HIVE-21189
</description>
</property>
<property>
<name>hive.merge.orcfile.stripe.level</name>
<value>true</value>
</property>
<property>
<name>hive.merge.rcfile.block.level</name>
<value>true</value>
</property>
<property>
<name>hive.merge.size.per.task</name>
<value>256000000</value>
</property>
<property>
<name>hive.merge.smallfiles.avgsize</name>
<value>16000000</value>
</property>
<property>
<name>hive.merge.tezfiles</name>
<value>true</value>
</property>
<property>
<name>hive.metastore.authorization.storage.checks</name>
<value>false</value>
</property>
<property>
<name>hive.metastore.cache.pinobjtypes</name>
<value>Table,Database,Type,FieldSchema,Order</value>
</property>
<property>
<name>hive.metastore.client.connect.retry.delay</name>
<value>5s</value>
</property>
<property>
<name>hive.metastore.client.socket.timeout</name>
<value>1800s</value>
</property>
<property>
<name>hive.metastore.connect.retries</name>
<value>24</value>
</property>
<property>
<name>hive.metastore.event.listeners</name>
<value>org.apache.hive.hcatalog.listener.DbNotificationListener</value>
</property>
<property>
<name>hive.metastore.execute.setugi</name>
<value>true</value>
</property>
<property>
<name>hive.metastore.failure.retries</name>
<value>24</value>
</property>
<property>
<name>hive.metastore.pre.event.listeners</name>
<value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
</property>
<property>
<name>hive.metastore.server.max.threads</name>
<value>100000</value>
</property>
<property>
<name>metastore.stats.fetch.bitvector</name>
<value>true</value>
</property>
<property>
<name>hive.optimize.bucketmapjoin</name>
<value>true</value>
</property>
<property>
<name>hive.optimize.bucketmapjoin.sortedmerge</name>
<value>true</value>
</property>
<property>
<name>hive.optimize.constant.propagation</name>
<value>true</value>
</property>
<property>
<name>hive.optimize.index.filter</name>
<value>true</value>
</property>
<property>
<name>hive.optimize.metadataonly</name>
<value>true</value>
</property>
<property>
<name>hive.optimize.null.scan</name>
<value>true</value>
</property>
<property>
<name>hive.optimize.reducededuplication</name>
<value>true</value>
</property>
<property>
<name>hive.optimize.reducededuplication.min.reducer</name>
<value>4</value>
</property>
<property>
<name>hive.orc.compute.splits.num.threads</name>
<value>20</value>
</property>
<property>
<name>hive.orc.splits.include.file.footer</name>
<value>false</value>
</property>
<property>
<name>hive.security.metastore.authenticator.manager</name>
<value>org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator</value>
</property>
<property>
<name>hive.security.metastore.authorization.auth.reads</name>
<value>true</value>
</property>
<property>
<name>hive.security.metastore.authorization.manager</name>
<value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
</property>
<property>
<name>hive.server2.allow.user.substitution</name>
<value>true</value>
</property>
<property>
<name>hive.server2.authentication.spnego.keytab</name>
<value></value>
</property>
<property>
<name>hive.server2.authentication.spnego.principal</name>
<value></value>
</property>
<property>
<name>hive.server2.logging.operation.log.location</name>
<value>/tmp/hive4/operation_logs</value>
</property>
<property>
<name>hive.server2.max.start.attempts</name>
<value>5</value>
</property>
<property>
<name>hive.server2.table.type.mapping</name>
<value>CLASSIC</value>
</property>
<property>
<name>hive.server2.thrift.http.path</name>
<value>cliservice</value>
</property>
<property>
<name>hive.server2.thrift.max.worker.threads</name>
<value>500</value>
</property>
<property>
<name>hive.server2.transport.mode</name>
<value>binary</value>
</property>
<property>
<name>hive.server2.use.SSL</name>
<value>false</value>
</property>
<property>
<name>hive.server2.webui.port</name>
<value>10502</value>
</property>
<property>
<name>hive.server2.webui.use.ssl</name>
<value>false</value>
</property>
<property>
<name>hive.smbjoin.cache.rows</name>
<value>10000</value>
</property>
<property>
<name>hive.start.cleanup.scratchdir</name>
<value>false</value>
</property>
<property>
<name>hive.stats.autogather</name>
<value>true</value>
</property>
<property>
<name>hive.stats.dbclass</name>
<value>fs</value>
</property>
<property>
<name>hive.stats.fetch.column.stats</name>
<value>true</value>
</property>
<property>
<name>hive.support.concurrency</name>
<value>true</value>
</property>
<property>
<name>hive.tez.auto.reducer.parallelism</name>
<value>true</value>
</property>
<property>
<name>hive.tez.bucket.pruning</name>
<value>true</value>
</property>
<property>
<name>hive.tez.cartesian-product.enabled</name>
<value>false</value>
</property>
<property>
<name>hive.tez.dynamic.partition.pruning</name>
<value>true</value>
</property>
<property>
<name>hive.tez.dynamic.partition.pruning.max.data.size</name>
<value>104857600</value>
</property>
<property>
<name>hive.tez.dynamic.partition.pruning.max.event.size</name>
<value>1048576</value>
</property>
<property>
<name>hive.tez.input.format</name>
<value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
</property>
<property>
<name>hive.tez.input.generate.consistent.splits</name>
<value>true</value>
</property>
<property>
<name>hive.tez.max.partition.factor</name>
<value>2.0</value>
</property>
<property>
<name>hive.tez.min.partition.factor</name>
<value>0.25</value>
</property>
<property>
<name>hive.tez.smb.number.waves</name>
<value>0.5</value>
</property>
<property>
<name>hive.txn.manager</name>
<value>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager</value>
</property>
<property>
<name>hive.txn.max.open.batch</name>
<value>1000</value>
</property>
<property>
<name>hive.txn.timeout</name>
<value>300</value>
</property>
<property>
<name>hive.user.install.directory</name>
<value>/user/</value>
</property>
<property>
<name>hive.vectorized.execution.enabled</name>
<value>true</value>
</property>
<property>
<name>hive.vectorized.execution.mapjoin.minmax.enabled</name>
<value>true</value>
</property>
<property>
<name>hive.vectorized.execution.mapjoin.native.enabled</name>
<value>true</value>
</property>
<property>
<name>hive.vectorized.execution.mapjoin.native.fast.hashtable.enabled</name>
<value>true</value>
</property>
<property>
<name>hive.vectorized.execution.reduce.enabled</name>
<value>true</value>
</property>
<property>
<name>hive.vectorized.groupby.checkinterval</name>
<value>4096</value>
</property>
<property>
<name>hive.vectorized.groupby.flush.percent</name>
<value>0.1</value>
</property>
<property>
<name>hive.vectorized.groupby.maxentries</name>
<value>1000000</value>
</property>
<property>
<name>hive.vectorized.adaptor.usage.mode</name>
<value>all</value>
<description>
Set to chosen for stability or to avoid vectorizing UDFs that do not have native vectorized versions available. Cf. HIVE-21935
</description>
</property>
<!--metastore-->
<property>
<name>hive.metastore.db.type</name>
<value>MYSQL</value>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<!-- com.mysql.jdbc.Driver is the legacy (Connector/J 5.x) class name; with
     Connector/J 8.x the class is com.mysql.cj.jdbc.Driver (the old name is
     kept only as a deprecated alias). Verify which connector jar is deployed. -->
<value>com.mysql.jdbc.Driver</value>
</property>
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://xxxxx/hive4</value>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>hive</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>xxxx</value>
</property>
<!--
<property>
<name>metastore.create.as.acid</name>
<value>false</value>
</property>
-->
<!-- Correctness -->
<property>
<name>hive.optimize.shared.work</name>
<value>true</value>
</property>
<property>
<name>hive.optimize.shared.work.extended</name>
<value>true</value>
</property>
<!-- set to false by default in HIVE-24812 -->
<property>
<name>hive.optimize.shared.work.semijoin</name>
<value>false</value>
</property>
<property>
<name>hive.optimize.shared.work.dppunion</name>
<value>false</value>
</property>
<property>
<name>hive.optimize.shared.work.dppunion.merge.eventops</name>
<value>false</value>
</property>
<property>
<name>hive.optimize.shared.work.downstream.merge</name>
<value>false</value>
</property>
<property>
<name>hive.optimize.shared.work.parallel.edge.support</name>
<value>false</value>
</property>
<property>
<name>hive.optimize.shared.work.merge.ts.schema</name>
<value>false</value>
</property>
<property>
<name>hive.optimize.cte.materialize.threshold</name>
<value>-1</value>
</property>
<property>
<name>hive.tez.bloom.filter.merge.threads</name>
<value>1</value>
</property>
<property>
<name>hive.auto.convert.anti.join</name>
<value>true</value>
</property>
<!-- Iceberg -->
<property>
<name>iceberg.catalog</name>
<value>iceberg</value>
</property>
<property>
<name>iceberg.catalog.iceberg.type</name>
<value>hive</value>
</property>
<property>
<name>iceberg.catalog.iceberg.clients</name>
<value>10</value>
</property>
<property>
<name>iceberg.catalog.iceberg.uri</name>
<value>thrift://xxx:9083</value>
</property>
<property>
<name>iceberg.catalog.iceberg.warehouse</name>
<value>/hive4/warehouse/tablespace/managed/hive</value>
</property>
<property>
<name>write.format.default</name>
<value>orc</value>
</property>
<property>
<name>iceberg.mr.split.size</name>
<value>16777216</value>
</property>
<!--LDAP-->
<property>
<name>hive.server2.authentication</name>
<value>LDAP</value>
</property>
<property>
<name>hive.server2.authentication.ldap.baseDN</name>
<value>ou=people,dc=hadoop,dc=apache,dc=org</value>
</property>
<property>
<name>hive.server2.authentication.ldap.url</name>
<value>ldap://xxx:33389</value>
</property>
<!--need confirm-->
<property>
<name>hive.cluster.delegation.token.store.zookeeper.znode</name>
<value>/hive4/cluster/delegation</value>
</property>
<!-- NOTE: duplicate of the hive.exec.scratchdir entry defined earlier in this
     file (same value, /tmp/hive4); one of the two entries can be removed. -->
<property>
<name>hive.exec.scratchdir</name>
<value>/tmp/hive4</value>
</property>
<property>
<name>hive.hook.proto.base-directory</name>
<value>/hive4/warehouse/tablespace/external/hive/sys.db/query_data/</value>
</property>
<!-- Security -->
<property>
<name>hive.server2.enable.doAs</name>
<value>true</value>
</property>
<property>
<name>hive.security.authorization.enabled</name>
<value>false</value>
</property>
<property>
<name>hive.security.authenticator.manager</name>
<value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
</property>
<property>
<name>hive.security.authorization.manager</name>
<value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory</value>
</property>
<!--
<property>
<name>hive.security.authorization.sqlstd.confwhitelist.append</name>
<value>fs\..*|hive\..*|tez\..*|mr3\..*</value>
</property>
-->
<!-- Apache Ranger -->
<!-- set hive.server2.enable.doAs to true -->
<!-- set hive.security.authorization.enabled to true -->
<!--
<property>
<name>hive.conf.restricted.list</name>
<value>hive.security.authorization.enabled,hive.security.authorization.manager,hive.security.authenticator.manager</value>
</property>
<property>
<name>hive.security.authenticator.manager</name>
<value>org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator</value>
</property>
<property>
<name>hive.security.authorization.manager</name>
<value>org.apache.ranger.authorization.hive.authorizer.RangerHiveAuthorizerFactory</value>
</property>
-->
<!--
<property>
<name>hive.security.authorization.enabled</name>
<value>true</value>
</property>
<property>
<name>hive.security.authorization.manager</name>
<value>org.apache.ranger.authorization.hive.authorizer.RangerHiveAuthorizerFactory</value>
</property>
<property>
<name>hive.security.authenticator.manager</name>
<value>org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator</value>
</property>
-->
<property>
<name>hive.conf.restricted.list</name>
<value>hive.security.authorization.enabled,hive.security.authorization.manager,hive.security.authenticator.manager</value>
</property>
<property>
<name>hive.security.authorization.sqlstd.confwhitelist</name>
<value>.*</value>
</property>
<property>
<name>hive.security.authorization.sqlstd.confwhitelist.append</name>
<value>.*</value>
</property>
<!-- Hive (configurable) -->
<!--<property>
<name>hive.auto.convert.join.noconditionaltask.size</name>
<value>4000000000</value>
<value>572662306</value>
</property>-->
<property>
<name>hive.optimize.dynamic.partition.hashjoin</name>
<value>true</value>
</property>
<property>
<name>hive.async.log.enabled</name>
<value>true</value>
</property>
<property>
<name>metastore.aggregate.stats.cache.enabled</name>
<value>true</value>
</property>
<property>
<name>hive.metastore.aggregate.stats.cache.enabled</name>
<value>true</value>
</property>
<property>
<name>hive.query.reexecution.stats.persist.scope</name>
<value>query</value>
</property>
<property>
<name>hive.query.results.cache.enabled</name>
<value>true</value>
</property>
<property>
<name>hive.server2.idle.operation.timeout</name>
<value>4h</value>
</property>
<property>
<name>hive.server2.idle.session.timeout</name>
<value>4h</value>
</property>
<!-- HiveServer2 -->
<property>
<name>hive.users.in.admin.role</name>
<value>hive</value>
</property>
<!--
<property>
<name>hive.server2.authentication</name>
<value>${hive.server2.authentication.mode}</value>
</property>
<property>
<name>hive.server2.authentication.kerberos.keytab</name>
<value>${hive.server2.keytab.file}</value>
</property>
<property>
<name>hive.server2.authentication.kerberos.principal</name>
<value>${hive.server2.principal}</value>
</property>
-->
<property>
<name>hive.server2.logging.operation.enabled</name>
<value>true</value>
</property>
<property>
<name>hive.server2.thrift.http.port</name>
<value>10001</value>
</property>
<property>
<name>hive.server2.thrift.bind.host</name>
<value>xxx</value>
</property>
<property>
<name>hive.server2.thrift.port</name>
<value>10000</value>
</property>
<property>
<name>hive.server2.thrift.sasl.qop</name>
<!--value>auth</value-->
<value>auth-conf</value>
</property>
<property>
<name>hive.cluster.delegation.token.renew-interval</name>
<value>1</value>
<description>
The unit is days, not milli-seconds.
hive-site.xml is the best place for setting hive.cluster.delegation.token.renew-interval to a non-default value.
If not set here, it is automatically set to the default value of 1 day when any of the following is added: HdfsConfiguration, YarnConfiguration, TezConfiguration.
</description>
</property>
<!--basic-->
<property>
<name>hive.metastore.runworker.in</name>
<value>metastore</value>
</property>
<property>
<name>hive.metastore.uris</name>
<value>thrift://xxx:9083</value>
</property>
<property>
<name>hive.metastore.warehouse.dir</name>
<value>/hive4/warehouse/tablespace/managed/hive</value>
</property>
<property>
<name>hive.metastore.warehouse.external.dir</name>
<value>/hive4/warehouse/tablespace/external/hive</value>
</property>
<property>
<name>hive.zookeeper.killquery.enable</name>
<value>false</value>
</property>
<property>
<name>hive.llap.io.proactive.eviction.enabled</name>
<value>false</value>
</property>
</configuration>