zk安装

经过这几天的学习,对大数据有了一定的了解。这次在我自己的本机上安装了zookeeper、HDFS、HBase集群。并把安装时的过程记录了下来。

#1.创建用户
groupadd ngboss
useradd -d /home/zk -g ngboss zk
useradd -d /home/hdfs -g ngboss hdfs
useradd -d /home/hbase -g ngboss hbase
#2.root下添加hosts信息
vi /etc/hosts
192.168.71.132 centos7-node01
192.168.71.131 centos7-node02
192.168.71.133 centos7-node03
#3.拓展
IPv4的IP是32bit的,/xx 表示从右往左多少位是掩码(不变的),余下的是可变的
192.168.0.0/16 表示 192.168.0.0 -> 192.168.255.255
192.168.0.0/24 表示 192.168.0.0 -> 192.168.0.255
#4.jdk配置
vi ~/.bash_profile
# .bash_profile
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# +-------------------------------------+
# | J2EE'S PROFILE, DON'T MODIFY! |
# +-------------------------------------+
alias grep='grep --colour=auto'
alias vi='vim'
alias ll='ls -l'
alias ls='ls --color=auto'
alias mv='mv -i'
alias rm='rm -i'
export PS1="\[\033[01;32m\]\u@\h\[\033[01;34m\] \w \$\[\033[00m\] "
export TERM=linux
export EDITOR=vim
export PATH=${HOME}/bin:${HOME}/support/jdk/bin:${HOME}/support/ant/bin:${HOME}/support/python/bin:$PATH
export LANG=zh_CN.utf8
export PYTHONUNBUFFERED=1
export TMOUT=3000
export HISTSIZE=1000
#5.验证jdk安装是否成功
. ~/.bash_profile
which java
#6.zoo.cfg文件配置
vi ~/etc/zoo.cfg
server.1=192.168.71.131:28880:38880
server.2=192.168.71.132:28880:38880
server.3=192.168.71.133:28880:38880
#7.acl.conf文件配置
vi ~/etc/acl.conf
192.168.0.0/16
#8.myid文件配置
vi ~/data/myid
#9.zk启动
zk@centos7-node02 ~/bin $ ./zkServer.sh start
#10.端口验证
netstat -tnlp | grep 2181
#11.结构
zk@centos7-node01 ~ $ ls
bin data etc lib logs sbin support

hdfs安装

#1.解压hdfs和jdk
...
#2.生成密钥
ssh-keygen -t rsa
#3.拷贝公钥到其他主机
ssh-copy-id -i hdfs@192.168.71.131
#4.给用户创建密码
echo '1q1w1e1r' | passwd --stdin hdfs
#5.修改slaves文件
修改:vi /home/hdfs/etc/hadoop/slaves
centos7-node02
centos7-node03
#6.修改core-site.xml
修改:/home/hdfs/etc/hadoop/core-site.xml
hdfs:// 这一行改成第一台主机的IP,端口啥的都不变
#7.修改hdfs-site.xml
vi /home/hdfs/etc/hadoop/hdfs-site.xml
<property>
<name>dfs.name.dir</name>
<value>/home/hdfs/data/name</value>
</property>
<property>
<name>dfs.data.dir</name>
<value>/home/hdfs/data/data</value>
....
#8.初始化hdfs:
cd /home/hdfs/bin
./hadoop namenode -format
#9.启动hdfs
cd ~/sbin
./start-dfs.sh
#10.hdfs目录
hdfs@centos7-node03 ~ $ ls
bin data etc include lib libexec logs sbin share support temp

hbase安装

#1.解压hbase和jdk
#2.创建公钥
拷贝公钥到其他主机
#3.创建logs目录
只要logs目录,没有data目录,hbase的数据存到hdfs上去了
#4.在hdfs第一台服务器上执行以下命令
hdfs@centos7-node01 ~ $ cd /home/hdfs/bin
hdfs@centos7-node01 ~/bin $ ./hadoop fs -mkdir /hbase
hdfs@centos7-node01 ~/bin $ ./hadoop fs -chmod 777 /hbase
hdfs@centos7-node01 ~/bin $ ./hadoop fs -ls /
Found 1 items
drwxrwxrwx - hdfs supergroup 0 2018-02-01 00:14 /hbase
#5.配置 hbase,vi /home/hbase/conf/hbase-site.xml
<value>centos7-node01,centos7-node02,centos7-node03</value>
#6.配置regionservers文件
hbase@centos7-node01 ~/bin $ vi ~/conf/regionservers
centos7-node02
centos7-node03
find . -type f | xargs grep 'bst'
#7.启动hbase
cd bin
./start-hbase.sh