# Installing Hive
1. Upload the tarball to /home/hadoop/software and unpack Hive:
[hadoop@master software]$ tar -xzvf apache-hive-2.3.3-bin.tar.gz  ## unpack
[hadoop@master software]$ ln -s apache-hive-2.3.3-bin hive  ## create a symlink
2. Set the Hive environment variables.
Edit the .bash_profile file and add the following:
[hadoop@master ~]$ vi .bash_profile
~~~
export HIVE_HOME=/home/hadoop/software/hive
export PATH=$PATH:$HIVE_HOME/bin
~~~
Source the file so the variables take effect:
[hadoop@master ~]$ . .bash_profile
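To confirm the variables took effect, a quick sanity check in the same shell (not part of the original steps):
~~~
echo $HIVE_HOME   # should print /home/hadoop/software/hive
which hive        # should resolve to $HIVE_HOME/bin/hive
~~~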
3. Configure Hive.
Rename the template configuration files:
[hadoop@master ~]$ cd /home/hadoop/software/hive/conf
~~~
cp hive-env.sh.template hive-env.sh
cp hive-log4j2.properties.template hive-log4j2.properties
cp hive-exec-log4j2.properties.template hive-exec-log4j2.properties
~~~
Edit the configuration files:
`vi hive-env.sh`
Append the following at the bottom:
~~~
export JAVA_HOME=/usr/java/jdk1.8.0_131
export HADOOP_HOME=/home/hadoop/software/hadoop-2.7.3
export HIVE_HOME=/home/hadoop/software/hive
export HIVE_CONF_DIR=/home/hadoop/software/hive/conf
~~~
Create the hive-site.xml file:
`vi hive-site.xml`
~~~
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>hive.exec.scratchdir</name>
    <value>/home/hadoop/software/hive/iotmpdir</value>
  </property>
  <property>
    <name>hive.exec.local.scratchdir</name>
    <value>/home/hadoop/software/hive/iotmpdir</value>
  </property>
  <property>
    <name>hive.downloaded.resources.dir</name>
    <value>/home/hadoop/software/hive/iotmpdir</value>
    <description>Temporary local directory for added resources in the remote file system.</description>
  </property>
  <property>
    <name>hive.querylog.location</name>
    <value>/home/hadoop/software/hive/iotmpdir</value>
    <description>Location of Hive run time structured log file</description>
  </property>
  <property>
    <name>hive.server2.logging.operation.log.location</name>
    <value>/home/hadoop/software/hive/iotmpdir/operation_logs</value>
    <description>Top level directory where operation logs are stored if logging functionality is enabled</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://192.168.6.252:3306/hive?createDatabaseIfNotExist=true</value>
    <description>JDBC connect string for a JDBC metastore</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
    <description>Driver class name for a JDBC metastore</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>hive</value>
    <description>username to use against metastore database</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>123456</value>
    <description>password to use against metastore database</description>
  </property>
  <property>
    <name>hive.metastore.warehouse.dir</name>
    <!-- base HDFS path -->
    <value>/user/hive/warehouse</value>
    <description>location of default database for the warehouse</description>
  </property>
  <property>
    <name>hive.metastore.uris</name>
    <value>thrift://slave1:9083</value>
  </property>
</configuration>
~~~
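All the temporary and log paths above point at a local iotmpdir under the Hive home. Hive usually creates these directories on first use, but pre-creating them avoids permission surprises; an optional sketch:
~~~
# Pre-create the local scratch/log directories referenced in hive-site.xml
mkdir -p /home/hadoop/software/hive/iotmpdir/operation_logs
~~~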
Copy the JDBC driver:
Upload the MySQL JDBC driver to /home/hadoop/software/hive/lib/:
[hadoop@master ~]$ cp /mnt/hgfs/<shared-folder-name>/mysql-connector-java-5.1.46.jar /home/hadoop/software/hive/lib
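To verify that the driver actually landed in Hive's lib directory:
~~~
ls /home/hadoop/software/hive/lib | grep mysql-connector
~~~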
Copy the Hive installation to the slaves:
[hadoop@master software]$ scp -r apache-hive-2.3.3-bin slave1:~/software/
[hadoop@master software]$ scp -r apache-hive-2.3.3-bin slave2:~/software/
[hadoop@master ~]$ scp .bash_profile slave1:~/
[hadoop@master ~]$ scp .bash_profile slave2:~/
Create the hive symlink on both slave1 and slave2:
[hadoop@slave1 software]$ ln -s apache-hive-2.3.3-bin hive
[hadoop@slave2 software]$ ln -s apache-hive-2.3.3-bin hive
#### Install MySQL (on slave2)
~~~
yum install epel-release -y
wget http://dev.mysql.com/get/mysql57-community-release-el7-8.noarch.rpm
rpm -ivh mysql57-community-release-el7-8.noarch.rpm
yum -y install mysql-community-server
systemctl daemon-reload
systemctl start mysqld
# Get the temporary root password:
grep password /var/log/mysqld.log
# Log in with that password, then inside the mysql client:
set global validate_password_policy=0;   -- lower the password policy level
set global validate_password_length=4;   -- lower the minimum password length
alter user 'root'@'localhost' identified by '123456';   -- change the local root password
~~~
Enter the database with `mysql -u root -p` (using the password set above):
~~~
create database hive;
create user hive identified by '123456';
grant all privileges on *.* to hive@'%' identified by '123456';
flush privileges;
~~~
![](https://box.kancloud.cn/e1527df2502fe79f3274029b4361ab14_782x426.jpg)
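Before initializing the schema it is worth checking that the hive user can reach MySQL from another node, since the metastore connects remotely. A quick test, assuming slave2 resolves to the MySQL host (192.168.6.252 in the JDBC URL above):
~~~
mysql -h slave2 -u hive -p123456 -e "show databases;"
~~~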
#### On slave1: initialize the metastore database and generate the metadata schema
[hadoop@slave1 ~]$ cd /home/hadoop/software/hive/bin/
[hadoop@slave1 bin]$ ./schematool -initSchema -dbType mysql
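If initialization succeeded, schematool can report the schema version it created, using the same connection settings:
~~~
[hadoop@slave1 bin]$ ./schematool -dbType mysql -info
~~~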
### Start Hive (make sure the Hadoop cluster is running)
Create the hive.metastore.warehouse.dir directory on HDFS and set its ownership:
[hadoop@master ~]$ hadoop fs -mkdir -p /user/hive/warehouse
[hadoop@master ~]$ hadoop fs -chown -R hive:hive /user/hive
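A quick check that the warehouse directory exists with the expected owner:
~~~
[hadoop@master ~]$ hadoop fs -ls /user/hive
~~~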
#### Start the metastore on slave1
[hadoop@slave1 ~]$ hive --service metastore
![](https://box.kancloud.cn/fdece6e57b10864b48bb5df419e34589_678x193.png)
Open another session to slave1 and run the command below to check whether the metastore started. If grep returns nothing, wait a moment for the port to come up and run netstat again:
[hadoop@slave1 ~]$ netstat -nlpt | grep 9083
![](https://box.kancloud.cn/03e9a09d9c6e279a53994ad879d1a670_704x84.png)
If the port is listening, the metastore started successfully.
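Running the metastore in the foreground ties up the session. A common alternative (not in the original steps) is to run it in the background with nohup, so a second connection is unnecessary:
~~~
[hadoop@slave1 ~]$ nohup hive --service metastore > ~/metastore.log 2>&1 &
~~~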
#### Start HiveServer2 on master
[hadoop@master ~]$ hive --service hiveserver2
![](https://box.kancloud.cn/2df7636c39a6f88216e1adb2221c4d6e_680x183.png)
Open another Xshell session to master and run the following command:
[hadoop@master ~]$ netstat -nlpt | grep 10000
![](https://box.kancloud.cn/c393c9f333a76b41fe66e20d8707395a_743x96.png)
If port 10000 is listening, HiveServer2 started successfully.
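With HiveServer2 listening, you can also test an end-to-end connection with Beeline, which ships with Hive (the connecting user hadoop is an assumption here):
~~~
[hadoop@master ~]$ beeline -u jdbc:hive2://master:10000 -n hadoop
~~~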
On the client (slave2), simply run the `hive` command.
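As a final smoke test, a one-off query from the client should list the default database:
~~~
[hadoop@slave2 ~]$ hive -e "show databases;"
~~~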