Docker下安裝zookeeper(單機和集群)

您的位置:首頁技術文章
文章詳情頁

Docker下安裝zookeeper(單機和集群)

瀏覽:58日期:2024-11-19 17:02:35

啟動Docker后,先看一下我們有哪些選擇。

Docker下安裝zookeeper(單機和集群)

有官方的當然選擇官方啦~

下載:

[root@localhost admin]# docker pull zookeeperUsing default tag: latestTrying to pull repository docker.io/library/zookeeper ...latest: Pulling from docker.io/library/zookeeper1ab2bdfe9778: Already exists7aaf9a088d61: Pull complete80a55c9c9fe8: Pull completea0086b0e6eec: Pull complete4165e7457cad: Pull completebcba13bcf3a1: Pull complete41c03a109e47: Pull complete4d5281c6b0d4: Pull completeDigest: sha256:175d6bb1471e1e37a48bfa41a9da047c80fade60fd585eae3a0e08a4ce1d39edStatus: Downloaded newer image for docker.io/zookeeper:latest

查看鏡像詳情

[root@localhost admin]# docker imagesREPOSITORY TAG IMAGE ID CREATED SIZE192.168.192.128:443/hello-2 latest 0c24558dd388 42 hours ago 660 MB192.168.192.128:443/hello latest a3ba3d430bed 42 hours ago 660 MBdocker.io/nginxlatest 5a3221f0137b 13 days ago 126 MBdocker.io/zookeeper latest 3487af26dee9 13 days ago 225 MBdocker.io/registry latest f32a97de94e1 5 months ago 25.8 MBdocker.io/mongolatest 8bf72137439e 12 months ago 380 MBdocker.io/influxdb latest 34de2bdc2d7f 12 months ago 213 MBdocker.io/centos latest 5182e96772bf 12 months ago 200 MBdocker.io/grafana/grafana latest 3e16e05be9a3 13 months ago 245 MBdocker.io/hello-world latest 2cb0d9787c4d 13 months ago 1.85 kBdocker.io/javalatest d23bdf5b1b1b 2 years ago 643 MB[root@localhost admin]# docker inspect 3487af26dee9[ { 'Id': 'sha256:3487af26dee9ef9eacee9a97521bc4f0243bef0b285247258c32f4a03cab92c5', 'RepoTags': [ 'docker.io/zookeeper:latest' ], 'RepoDigests': [ 'docker.io/zookeeper@sha256:175d6bb1471e1e37a48bfa41a9da047c80fade60fd585eae3a0e08a4ce1d39ed' ], 'Parent': '', 'Comment': '', 'Created': '2019-08-15T06:10:50.178554969Z', 'Container': '9a38467115f1952161d6075135d5c5287967282b834cfe68183339c810f9652b', 'ContainerConfig': { 'Hostname': '9a38467115f1', 'Domainname': '', 'User': '', 'AttachStdin': false, 'AttachStdout': false, 'AttachStderr': false, 'ExposedPorts': {'2181/tcp': {},'2888/tcp': {},'3888/tcp': {},'8080/tcp': {} }, 'Tty': false, 'OpenStdin': false, 'StdinOnce': false, 'Env': 
['PATH=/usr/local/openjdk-8/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/apache-zookeeper-3.5.5-bin/bin','LANG=C.UTF-8','JAVA_HOME=/usr/local/openjdk-8','JAVA_VERSION=8u222','JAVA_BASE_URL=https://github.com/AdoptOpenJDK/openjdk8-upstream-binaries/releases/download/jdk8u222-b10/OpenJDK8U-jre_','JAVA_URL_VERSION=8u222b10','ZOO_CONF_DIR=/conf','ZOO_DATA_DIR=/data','ZOO_DATA_LOG_DIR=/datalog','ZOO_LOG_DIR=/logs','ZOO_TICK_TIME=2000','ZOO_INIT_LIMIT=5','ZOO_SYNC_LIMIT=2','ZOO_AUTOPURGE_PURGEINTERVAL=0','ZOO_AUTOPURGE_SNAPRETAINCOUNT=3','ZOO_MAX_CLIENT_CNXNS=60','ZOO_STANDALONE_ENABLED=true','ZOO_ADMINSERVER_ENABLED=true','ZOOCFGDIR=/conf' ], 'Cmd': ['/bin/sh','-c','#(nop) ','CMD ['zkServer.sh' 'start-foreground']' ], 'ArgsEscaped': true, 'Image': 'sha256:20bf3cc1bd5b5766b79da5265e94007d0802ce241df1636d0f63e211a79a0e3e', 'Volumes': {'/data': {},'/datalog': {},'/logs': {} }, 'WorkingDir': '/apache-zookeeper-3.5.5-bin', 'Entrypoint': ['/docker-entrypoint.sh' ], 'OnBuild': null, 'Labels': {} }, 'DockerVersion': '18.06.1-ce', 'Author': '', 'Config': { 'Hostname': '', 'Domainname': '', 'User': '', 'AttachStdin': false, 'AttachStdout': false, 'AttachStderr': false, 'ExposedPorts': {'2181/tcp': {},'2888/tcp': {},'3888/tcp': {},'8080/tcp': {} }, 'Tty': false, 'OpenStdin': false, 'StdinOnce': false, 'Env': ['PATH=/usr/local/openjdk-8/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/apache-zookeeper-3.5.5-bin/bin','LANG=C.UTF-8','JAVA_HOME=/usr/local/openjdk-8','JAVA_VERSION=8u222','JAVA_BASE_URL=https://github.com/AdoptOpenJDK/openjdk8-upstream-binaries/releases/download/jdk8u222-b10/OpenJDK8U-jre_','JAVA_URL_VERSION=8u222b10','ZOO_CONF_DIR=/conf','ZOO_DATA_DIR=/data','ZOO_DATA_LOG_DIR=/datalog','ZOO_LOG_DIR=/logs','ZOO_TICK_TIME=2000','ZOO_INIT_LIMIT=5','ZOO_SYNC_LIMIT=2','ZOO_AUTOPURGE_PURGEINTERVAL=0','ZOO_AUTOPURGE_SNAPRETAINCOUNT=3','ZOO_MAX_CLIENT_CNXNS=60','ZOO_STANDALONE_ENABLED=true','ZOO_ADMINSERVER_ENABLED=true','ZOOCFGDIR=/conf' 
], 'Cmd': ['zkServer.sh','start-foreground' ], 'ArgsEscaped': true, 'Image': 'sha256:20bf3cc1bd5b5766b79da5265e94007d0802ce241df1636d0f63e211a79a0e3e', 'Volumes': {'/data': {},'/datalog': {},'/logs': {} }, 'WorkingDir': '/apache-zookeeper-3.5.5-bin', 'Entrypoint': ['/docker-entrypoint.sh' ], 'OnBuild': null, 'Labels': null }, 'Architecture': 'amd64', 'Os': 'linux', 'Size': 225126346, 'VirtualSize': 225126346, 'GraphDriver': { 'Name': 'overlay2', 'Data': {'LowerDir': '/var/lib/docker/overlay2/92185ebf7638a7b34180cfb87795dd758405cbad4fd0139b92a227d1a4b61847/diff:/var/lib/docker/overlay2/8787e91f5c03a7c03cee072019eca49a0402a0a0902be39ed0b5d651a79cce35/diff:/var/lib/docker/overlay2/ce5864ddfa4d1478047aa9fcaa03744e8a4078ebe43b41e7836c96c54c724044/diff:/var/lib/docker/overlay2/fc99437bcfbabb9e8234c06c90d1c60e58c34ac053aff1adc368b7ad3a50c158/diff:/var/lib/docker/overlay2/1779297a8980830229bd4bf58bd741730956d6797332fd07b863a1b48dcb6fa2/diff:/var/lib/docker/overlay2/ee735aa3608d890ac4751dd93581a67cb54a5dd4714081e9d09d0ebd9dbc3501/diff:/var/lib/docker/overlay2/cf6b3cbc42f3c8d1fb09b29db0dafbb4dceb120925970ab8a3871eaa8562414c/diff','MergedDir': '/var/lib/docker/overlay2/a7fcc1b78c472cde943f20d1d4495f145308507b5fe3da8800c33dc4ce426156/merged','UpperDir': '/var/lib/docker/overlay2/a7fcc1b78c472cde943f20d1d4495f145308507b5fe3da8800c33dc4ce426156/diff','WorkDir': '/var/lib/docker/overlay2/a7fcc1b78c472cde943f20d1d4495f145308507b5fe3da8800c33dc4ce426156/work' } }, 'RootFS': { 'Type': 'layers', 'Layers': 
['sha256:1c95c77433e8d7bf0f519c9d8c9ca967e2603f0defbf379130d9a841cca2e28e','sha256:2bf534399acac9c6b09a0b1d931223808000b04400a749f08187ed9ee435738d','sha256:eb25e0278d41b9ac637d8cb2e391457cf44ce8d2bfe0646d0c9faefc96413f91','sha256:e54bd3566d9ef3e1309a5af6caf8682f32c6ac4d6adfcbd3e601cfee4e2e0e85','sha256:c79435051d529a7b86f5f9fc32e7e2ec401929434e5596f02a2af731f55c9f28','sha256:76e0d7b2d700e6d17924b985703c7b5b84fb39ddcc0a1181b41217c2a11dffc4','sha256:eecdc37df6afd77091641588f9639f63b65e8eb141e56529e00da44419c5bd04','sha256:36e788f2d91a89375df5901f31cca33776f887c00ddfd3cf9f2466fa4cb794d6' ] } }]

默認拉取最新的是3.5.X版本,如果你需要3.4.X版本的,要指定標簽

Docker下安裝zookeeper(單機和集群)

單機

# 最后那個是鏡像的ID[root@localhost admin]# docker run -d -p 2181:2181 --name some-zookeeper --restart always 3487af26dee9d5c6f857cd88c342acf63dd58e838a4cdf912daa6c8c0115091147136e819307[root@localhost admin]# docker psCONTAINER ID IMAGECOMMAND CREATED STATUS PORTS NAMESd5c6f857cd88 3487af26dee9 '/docker-entrypoin...' 4 seconds ago Up 3 seconds 2888/tcp, 3888/tcp, 0.0.0.0:2181->2181/tcp, 8080/tcp some-zookeeper[root@localhost admin]# docker exec -it d5c6f857cd88 bashroot@d5c6f857cd88:/apache-zookeeper-3.5.5-bin# ./bin/zkCli.shConnecting to localhost:21812019-08-29 07:15:21,623 [myid:] - INFO [main:Environment@109] - Client environment:zookeeper.version=3.5.5-390fe37ea45dee01bf87dc1c042b5e3dcce88653, built on 05/03/2019 12:07 GMT2019-08-29 07:15:21,679 [myid:] - INFO [main:Environment@109] - Client environment:host.name=d5c6f857cd882019-08-29 07:15:21,680 [myid:] - INFO [main:Environment@109] - Client environment:java.version=1.8.0_2222019-08-29 07:15:21,717 [myid:] - INFO [main:Environment@109] - Client environment:java.vendor=Oracle Corporation2019-08-29 07:15:21,718 [myid:] - INFO [main:Environment@109] - Client environment:java.home=/usr/local/openjdk-82019-08-29 07:15:21,725 [myid:] - INFO [main:Environment@109] - Client 
environment:java.class.path=/apache-zookeeper-3.5.5-bin/bin/../zookeeper-server/target/classes:/apache-zookeeper-3.5.5-bin/bin/../build/classes:/apache-zookeeper-3.5.5-bin/bin/../zookeeper-server/target/lib/*.jar:/apache-zookeeper-3.5.5-bin/bin/../build/lib/*.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/zookeeper-jute-3.5.5.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/zookeeper-3.5.5.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/slf4j-log4j12-1.7.25.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/slf4j-api-1.7.25.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/netty-all-4.1.29.Final.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/log4j-1.2.17.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/json-simple-1.1.1.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/jline-2.11.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/jetty-util-9.4.17.v20190418.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/jetty-servlet-9.4.17.v20190418.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/jetty-server-9.4.17.v20190418.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/jetty-security-9.4.17.v20190418.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/jetty-io-9.4.17.v20190418.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/jetty-http-9.4.17.v20190418.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/javax.servlet-api-3.1.0.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/jackson-databind-2.9.8.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/jackson-core-2.9.8.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/jackson-annotations-2.9.0.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/commons-cli-1.2.jar:/apache-zookeeper-3.5.5-bin/bin/../lib/audience-annotations-0.5.0.jar:/apache-zookeeper-3.5.5-bin/bin/../zookeeper-*.jar:/apache-zookeeper-3.5.5-bin/bin/../zookeeper-server/src/main/resources/lib/*.jar:/conf:2019-08-29 07:15:22,108 [myid:] - INFO [main:Environment@109] - Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib2019-08-29 07:15:22,109 [myid:] - INFO [main:Environment@109] - Client environment:java.io.tmpdir=/tmp2019-08-29 07:15:22,109 
[myid:] - INFO [main:Environment@109] - Client environment:java.compiler=<NA>2019-08-29 07:15:22,109 [myid:] - INFO [main:Environment@109] - Client environment:os.name=Linux2019-08-29 07:15:22,109 [myid:] - INFO [main:Environment@109] - Client environment:os.arch=amd642019-08-29 07:15:22,110 [myid:] - INFO [main:Environment@109] - Client environment:os.version=3.10.0-862.9.1.el7.x86_642019-08-29 07:15:22,110 [myid:] - INFO [main:Environment@109] - Client environment:user.name=root2019-08-29 07:15:22,110 [myid:] - INFO [main:Environment@109] - Client environment:user.home=/root2019-08-29 07:15:22,110 [myid:] - INFO [main:Environment@109] - Client environment:user.dir=/apache-zookeeper-3.5.5-bin2019-08-29 07:15:22,118 [myid:] - INFO [main:Environment@109] - Client environment:os.memory.free=11MB2019-08-29 07:15:22,148 [myid:] - INFO [main:Environment@109] - Client environment:os.memory.max=247MB2019-08-29 07:15:22,148 [myid:] - INFO [main:Environment@109] - Client environment:os.memory.total=15MB2019-08-29 07:15:22,206 [myid:] - INFO [main:ZooKeeper@868] - Initiating client connection, connectString=localhost:2181 sessionTimeout=30000 watcher=org.apache.zookeeper.ZooKeeperMain$MyWatcher@3b95a09c2019-08-29 07:15:22,239 [myid:] - INFO [main:X509Util@79] - Setting -D jdk.tls.rejectClientInitiatedRenegotiation=true to disable client-initiated TLS renegotiation2019-08-29 07:15:22,285 [myid:] - INFO [main:ClientCnxnSocket@237] - jute.maxbuffer value is 4194304 Bytes2019-08-29 07:15:22,366 [myid:] - INFO [main:ClientCnxn@1653] - zookeeper.request.timeout value is 0. feature enabled=Welcome to ZooKeeper!JLine support is enabled2019-08-29 07:15:22,563 [myid:localhost:2181] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@1112] - Opening socket connection to server localhost/0:0:0:0:0:0:0:1:2181. 
Will not attempt to authenticate using SASL (unknown error)2019-08-29 07:15:23,443 [myid:localhost:2181] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@959] - Socket connection established, initiating session, client: /0:0:0:0:0:0:0:1:37198, server: localhost/0:0:0:0:0:0:0:1:21812019-08-29 07:15:23,520 [myid:localhost:2181] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@1394] - Session establishment complete on server localhost/0:0:0:0:0:0:0:1:2181, sessionid = 0x10001216d990000, negotiated timeout = 30000 WATCHER:: WatchedEvent state:SyncConnected type:None path:null[zk: localhost:2181(CONNECTED) 0] ls /[zookeeper][zk: localhost:2181(CONNECTED) 1] quit WATCHER:: WatchedEvent state:Closed type:None path:null2019-08-29 07:15:37,042 [myid:] - INFO [main:ZooKeeper@1422] - Session: 0x10001216d990000 closed2019-08-29 07:15:37,043 [myid:] - INFO [main-EventThread:ClientCnxn$EventThread@524] - EventThread shut down for session: 0x10001216d990000root@d5c6f857cd88:/apache-zookeeper-3.5.5-bin# exitexit[root@localhost admin]#

在外部訪問(192.168.192.128:2181)

Docker下安裝zookeeper(單機和集群)

集群

環境:單臺宿主機(192.168.192.128),啟動三個zookeeper容器。

這里涉及一個問題,就是Docker容器之間通信的問題,這個很重要!

Docker下安裝zookeeper(單機和集群)

Docker有三種網絡模式,bridge、host、none,在你創建容器的時候,不指定--network默認是bridge。

bridge:為每一個容器分配IP,并將容器連接到一個docker0虛擬網橋,通過docker0網橋與宿主機通信。也就是說,此模式下,你不能用宿主機的IP+容器映射端口來進行Docker容器之間的通信。

host:容器不會虛擬自己的網卡,配置自己的IP,而是使用宿主機的IP和端口。這樣一來,Docker容器之間的通信就可以用宿主機的IP+容器映射端口

none:無網絡。

=====================================================

先在本地創建目錄:

[root@localhost admin]# mkdir /usr/local/zookeeper-cluster
[root@localhost admin]# mkdir /usr/local/zookeeper-cluster/node1
[root@localhost admin]# mkdir /usr/local/zookeeper-cluster/node2
[root@localhost admin]# mkdir /usr/local/zookeeper-cluster/node3
[root@localhost admin]# ll /usr/local/zookeeper-cluster/
total 0
drwxr-xr-x. 2 root root 6 Aug 28 23:02 node1
drwxr-xr-x. 2 root root 6 Aug 28 23:02 node2
drwxr-xr-x. 2 root root 6 Aug 28 23:02 node3

然后執行命令啟動

docker run -d -p 2181:2181 -p 2888:2888 -p 3888:3888 --name zookeeper_node1 --privileged --restart always \
  -v /usr/local/zookeeper-cluster/node1/volumes/data:/data \
  -v /usr/local/zookeeper-cluster/node1/volumes/datalog:/datalog \
  -v /usr/local/zookeeper-cluster/node1/volumes/logs:/logs \
  -e ZOO_MY_ID=1 \
  -e 'ZOO_SERVERS=server.1=192.168.192.128:2888:3888;2181 server.2=192.168.192.128:2889:3889;2182 server.3=192.168.192.128:2890:3890;2183' 3487af26dee9

docker run -d -p 2182:2181 -p 2889:2888 -p 3889:3888 --name zookeeper_node2 --privileged --restart always \
  -v /usr/local/zookeeper-cluster/node2/volumes/data:/data \
  -v /usr/local/zookeeper-cluster/node2/volumes/datalog:/datalog \
  -v /usr/local/zookeeper-cluster/node2/volumes/logs:/logs \
  -e ZOO_MY_ID=2 \
  -e 'ZOO_SERVERS=server.1=192.168.192.128:2888:3888;2181 server.2=192.168.192.128:2889:3889;2182 server.3=192.168.192.128:2890:3890;2183' 3487af26dee9

docker run -d -p 2183:2181 -p 2890:2888 -p 3890:3888 --name zookeeper_node3 --privileged --restart always \
  -v /usr/local/zookeeper-cluster/node3/volumes/data:/data \
  -v /usr/local/zookeeper-cluster/node3/volumes/datalog:/datalog \
  -v /usr/local/zookeeper-cluster/node3/volumes/logs:/logs \
  -e ZOO_MY_ID=3 \
  -e 'ZOO_SERVERS=server.1=192.168.192.128:2888:3888;2181 server.2=192.168.192.128:2889:3889;2182 server.3=192.168.192.128:2890:3890;2183' 3487af26dee9

【坑】

乍一看,沒什么問題啊,首先映射端口到宿主機,然后三個zookeeper之間的訪問地址則是宿主機IP:映射端口,沒毛病啊;

看我前面講的網絡模式就能看出問題,ZOO_SERVERS里面的IP有問題,犯這個錯誤都是不了解Docker的網絡模式的。什么錯誤往下看。

關于ZOO_SERVERS

Docker下安裝zookeeper(單機和集群)

什么意思呢,3.5.0開始,不應該再使用clientPort和clientPortAddress配置參數。相反,這些信息現在是server關鍵字規范的一部分。

端口映射三個容器不一樣,比如2181/2182/2183,因為是一臺宿主機嘛,端口不能沖突,如果你不在同一臺機器,就不用修改端口。

最后的那個參數是鏡像ID,也可以是鏡像名稱:TAG。

--privileged=true參數是為了解決【chown: changing ownership of '/data': Permission denied】,也可以省略true

執行結果:

[root@localhost admin]# docker run -d -p 2181:2181 -p 2888:2888 -p 3888:3888 --name zookeeper_node1 --privileged --restart always > -v /usr/local/zookeeper-cluster/node1/volumes/data:/data > -v /usr/local/zookeeper-cluster/node1/volumes/datalog:/datalog > -v /usr/local/zookeeper-cluster/node1/volumes/logs:/logs > -e ZOO_MY_ID=1 > -e 'ZOO_SERVERS=server.1=192.168.192.128:2888:3888;2181 server.2=192.168.192.128:2889:3889;2182 server.3=192.168.192.128:2890:3890;2183' 3487af26dee94bfa6bbeb936037e178a577e5efbd06d4a963e91d67274413b933fd189917776[root@localhost admin]# docker run -d -p 2182:2181 -p 2889:2888 -p 3889:3888 --name zookeeper_node2 --privileged --restart always > -v /usr/local/zookeeper-cluster/node2/volumes/data:/data > -v /usr/local/zookeeper-cluster/node2/volumes/datalog:/datalog > -v /usr/local/zookeeper-cluster/node2/volumes/logs:/logs > -e ZOO_MY_ID=2 > -e 'ZOO_SERVERS=server.1=192.168.192.128:2888:3888;2181 server.2=192.168.192.128:2889:3889;2182 server.3=192.168.192.128:2890:3890;2183' 3487af26dee9dbb7f1f323a09869d043152a4995e73bad5f615fd81bf11143fd1c28180f9869[root@localhost admin]# docker run -d -p 2183:2181 -p 2890:2888 -p 3890:3888 --name zookeeper_node3 --privileged --restart always > -v /usr/local/zookeeper-cluster/node3/volumes/data:/data > -v /usr/local/zookeeper-cluster/node3/volumes/datalog:/datalog > -v /usr/local/zookeeper-cluster/node3/volumes/logs:/logs > -e ZOO_MY_ID=3 > -e 'ZOO_SERVERS=server.1=192.168.192.128:2888:3888;2181 server.2=192.168.192.128:2889:3889;2182 server.3=192.168.192.128:2890:3890;2183' 3487af26dee96dabae1d92f0e861cc7515c014c293f80075c2762b254fc56312a6d3b450a919[root@localhost admin]#

查看啟動的容器

[root@localhost admin]# docker psCONTAINER ID IMAGECOMMAND CREATED STATUS PORTS NAMES6dabae1d92f0 3487af26dee9 '/docker-entrypoin...' 31 seconds ago Up 29 seconds 8080/tcp, 0.0.0.0:2183->2181/tcp, 0.0.0.0:2890->2888/tcp, 0.0.0.0:3890->3888/tcp zookeeper_node3dbb7f1f323a0 3487af26dee9 '/docker-entrypoin...' 36 seconds ago Up 35 seconds 8080/tcp, 0.0.0.0:2182->2181/tcp, 0.0.0.0:2889->2888/tcp, 0.0.0.0:3889->3888/tcp zookeeper_node24bfa6bbeb936 3487af26dee9 '/docker-entrypoin...' 46 seconds ago Up 45 seconds 0.0.0.0:2181->2181/tcp, 0.0.0.0:2888->2888/tcp, 0.0.0.0:3888->3888/tcp, 8080/tcp zookeeper_node1[root@localhost admin]#

不是說有錯誤嗎?怎么還啟動成功了??我們來看下節點1的啟動日志

[root@localhost admin]# docker logs -f 4bfa6bbeb936ZooKeeper JMX enabled by default...2019-08-29 09:20:22,665 [myid:1] - WARN [WorkerSender[myid=1]:QuorumCnxManager@677] - Cannot open channel to 2 at election address /192.168.192.128:3889java.net.ConnectException: Connection refused (Connection refused) at java.net.PlainSocketImpl.socketConnect(Native Method) at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:350) at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:206) at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:188) at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392) at java.net.Socket.connect(Socket.java:589) at org.apache.zookeeper.server.quorum.QuorumCnxManager.connectOne(QuorumCnxManager.java:648) at org.apache.zookeeper.server.quorum.QuorumCnxManager.connectOne(QuorumCnxManager.java:705) at org.apache.zookeeper.server.quorum.QuorumCnxManager.toSend(QuorumCnxManager.java:618) at org.apache.zookeeper.server.quorum.FastLeaderElection$Messenger$WorkerSender.process(FastLeaderElection.java:477) at org.apache.zookeeper.server.quorum.FastLeaderElection$Messenger$WorkerSender.run(FastLeaderElection.java:456) at java.lang.Thread.run(Thread.java:748)2019-08-29 09:20:22,666 [myid:1] - WARN [WorkerSender[myid=1]:QuorumCnxManager@677] - Cannot open channel to 3 at election address /192.168.192.128:3890java.net.ConnectException: Connection refused (Connection refused) at java.net.PlainSocketImpl.socketConnect(Native Method) at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:350) at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:206) at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:188) at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392) at java.net.Socket.connect(Socket.java:589) at org.apache.zookeeper.server.quorum.QuorumCnxManager.connectOne(QuorumCnxManager.java:648) at 
org.apache.zookeeper.server.quorum.QuorumCnxManager.connectOne(QuorumCnxManager.java:705) at org.apache.zookeeper.server.quorum.QuorumCnxManager.toSend(QuorumCnxManager.java:618) at org.apache.zookeeper.server.quorum.FastLeaderElection$Messenger$WorkerSender.process(FastLeaderElection.java:477) at org.apache.zookeeper.server.quorum.FastLeaderElection$Messenger$WorkerSender.run(FastLeaderElection.java:456) at java.lang.Thread.run(Thread.java:748)

連接不上2 和 3,為什么呢,因為在默認的Docker網絡模式下,通過宿主機的IP+映射端口,根本找不到啊!他們有自己的IP啊!如下:

[root@localhost admin]# docker psCONTAINER ID IMAGECOMMAND CREATED STATUS PORTS NAMES6dabae1d92f0 3487af26dee9 '/docker-entrypoin...' 5 minutes ago Up 5 minutes 8080/tcp, 0.0.0.0:2183->2181/tcp, 0.0.0.0:2890->2888/tcp, 0.0.0.0:3890->3888/tcp zookeeper_node3dbb7f1f323a0 3487af26dee9 '/docker-entrypoin...' 6 minutes ago Up 6 minutes 8080/tcp, 0.0.0.0:2182->2181/tcp, 0.0.0.0:2889->2888/tcp, 0.0.0.0:3889->3888/tcp zookeeper_node24bfa6bbeb936 3487af26dee9 '/docker-entrypoin...' 6 minutes ago Up 6 minutes 0.0.0.0:2181->2181/tcp, 0.0.0.0:2888->2888/tcp, 0.0.0.0:3888->3888/tcp, 8080/tcp zookeeper_node1[root@localhost admin]# docker inspect 4bfa6bbeb936 'Networks': {'bridge': { 'IPAMConfig': null, 'Links': null, 'Aliases': null, 'NetworkID': '5fc1ce4362afe3d34fdf260ab0174c36fe4b7daf2189702eae48101a755079f3', 'EndpointID': '368237e4c903cc663111f1fe33ac4626a9100fb5a22aec85f5eccbc6968a1631', 'Gateway': '172.17.0.1', 'IPAddress': '172.17.0.2', 'IPPrefixLen': 16, 'IPv6Gateway': '', 'GlobalIPv6Address': '', 'GlobalIPv6PrefixLen': 0, 'MacAddress': '02:42:ac:11:00:02'} } } }][root@localhost admin]# docker inspect dbb7f1f323a0 'Networks': {'bridge': { 'IPAMConfig': null, 'Links': null, 'Aliases': null, 'NetworkID': '5fc1ce4362afe3d34fdf260ab0174c36fe4b7daf2189702eae48101a755079f3', 'EndpointID': '8a9734044a566d5ddcd7cbbf6661abb2730742f7c73bd8733ede9ed8ef106659', 'Gateway': '172.17.0.1', 'IPAddress': '172.17.0.3', 'IPPrefixLen': 16, 'IPv6Gateway': '', 'GlobalIPv6Address': '', 'GlobalIPv6PrefixLen': 0, 'MacAddress': '02:42:ac:11:00:03'} } } }][root@localhost admin]# docker inspect 6dabae1d92f0 'Networks': {'bridge': { 'IPAMConfig': null, 'Links': null, 'Aliases': null, 'NetworkID': '5fc1ce4362afe3d34fdf260ab0174c36fe4b7daf2189702eae48101a755079f3', 'EndpointID': 'b10329b9940a07aacb016d8d136511ec388de02bf3bd0e0b50f7f4cbb7f138ec', 'Gateway': '172.17.0.1', 'IPAddress': '172.17.0.4', 'IPPrefixLen': 16, 'IPv6Gateway': '', 'GlobalIPv6Address': '', 'GlobalIPv6PrefixLen': 0, 'MacAddress': 
'02:42:ac:11:00:04'} } } }]

node1 --- 172.17.0.2
node2 --- 172.17.0.3
node3 --- 172.17.0.4

既然我們知道了它有自己的IP,那又出現另一個問題了,就是它的ip是動態的,啟動之前我們無法得知。有個解決辦法就是創建自己的bridge網絡,然后創建容器的時候指定ip。

【正確方式開始】

[root@localhost admin]# docker network create --driver bridge --subnet=172.18.0.0/16 --gateway=172.18.0.1 zoonet8257c501652a214d27efdf5ef71ff38bfe222c3a2a7898be24b8df9db1fb3b13[root@localhost admin]# docker network lsNETWORK ID NAMEDRIVER SCOPE5fc1ce4362af bridge bridge local6aa33e21444e hosthostlocal20e563b93ce9 nonenulllocal8257c501652a zoonet bridge local[root@localhost admin]# docker network inspect 8257c501652a[ { 'Name': 'zoonet', 'Id': '8257c501652a214d27efdf5ef71ff38bfe222c3a2a7898be24b8df9db1fb3b13', 'Created': '2019-08-29T06:08:01.442601483-04:00', 'Scope': 'local', 'Driver': 'bridge', 'EnableIPv6': false, 'IPAM': { 'Driver': 'default', 'Options': {}, 'Config': [{ 'Subnet': '172.18.0.0/16', 'Gateway': '172.18.0.1'} ] }, 'Internal': false, 'Attachable': false, 'Containers': {}, 'Options': {}, 'Labels': {} }]

然后我們修改一下zookeeper容器的創建命令。

docker run -d -p 2181:2181 --name zookeeper_node1 --privileged --restart always --network zoonet --ip 172.18.0.2 \
  -v /usr/local/zookeeper-cluster/node1/volumes/data:/data \
  -v /usr/local/zookeeper-cluster/node1/volumes/datalog:/datalog \
  -v /usr/local/zookeeper-cluster/node1/volumes/logs:/logs \
  -e ZOO_MY_ID=1 \
  -e 'ZOO_SERVERS=server.1=172.18.0.2:2888:3888;2181 server.2=172.18.0.3:2888:3888;2181 server.3=172.18.0.4:2888:3888;2181' 3487af26dee9

docker run -d -p 2182:2181 --name zookeeper_node2 --privileged --restart always --network zoonet --ip 172.18.0.3 \
  -v /usr/local/zookeeper-cluster/node2/volumes/data:/data \
  -v /usr/local/zookeeper-cluster/node2/volumes/datalog:/datalog \
  -v /usr/local/zookeeper-cluster/node2/volumes/logs:/logs \
  -e ZOO_MY_ID=2 \
  -e 'ZOO_SERVERS=server.1=172.18.0.2:2888:3888;2181 server.2=172.18.0.3:2888:3888;2181 server.3=172.18.0.4:2888:3888;2181' 3487af26dee9

docker run -d -p 2183:2181 --name zookeeper_node3 --privileged --restart always --network zoonet --ip 172.18.0.4 \
  -v /usr/local/zookeeper-cluster/node3/volumes/data:/data \
  -v /usr/local/zookeeper-cluster/node3/volumes/datalog:/datalog \
  -v /usr/local/zookeeper-cluster/node3/volumes/logs:/logs \
  -e ZOO_MY_ID=3 \
  -e 'ZOO_SERVERS=server.1=172.18.0.2:2888:3888;2181 server.2=172.18.0.3:2888:3888;2181 server.3=172.18.0.4:2888:3888;2181' 3487af26dee9

1. 由于2888 、3888不需要暴露,就不映射了;

2. 指定自己的網絡,并指定IP;

3. 每個容器之間環境是隔離的,所以容器內所用的端口一樣:2181/2888/3888

Docker下安裝zookeeper(單機和集群)

運行結果:

[root@localhost admin]# docker run -d -p 2181:2181 --name zookeeper_node1 --privileged --restart always --network zoonet --ip 172.18.0.2 > -v /usr/local/zookeeper-cluster/node1/volumes/data:/data > -v /usr/local/zookeeper-cluster/node1/volumes/datalog:/datalog > -v /usr/local/zookeeper-cluster/node1/volumes/logs:/logs > -e ZOO_MY_ID=1 > -e 'ZOO_SERVERS=server.1=172.18.0.2:2888:3888;2181 server.2=172.18.0.3:2888:3888;2181 server.3=172.18.0.4:2888:3888;2181' 3487af26dee950c07cf11fab2d3b4da6d8ce48d8ed4a7beaab7d51dd542b8309f781e9920c36[root@localhost admin]# docker run -d -p 2182:2181 --name zookeeper_node2 --privileged --restart always --network zoonet --ip 172.18.0.3 > -v /usr/local/zookeeper-cluster/node2/volumes/data:/data > -v /usr/local/zookeeper-cluster/node2/volumes/datalog:/datalog > -v /usr/local/zookeeper-cluster/node2/volumes/logs:/logs > -e ZOO_MY_ID=2 > -e 'ZOO_SERVERS=server.1=172.18.0.2:2888:3888;2181 server.2=172.18.0.3:2888:3888;2181 server.3=172.18.0.4:2888:3888;2181' 3487af26dee9649a4dbfb694504acfe4b8e11b990877964477bb41f8a230bd191cba7d20996f[root@localhost admin]# docker run -d -p 2183:2181 --name zookeeper_node3 --privileged --restart always --network zoonet --ip 172.18.0.4 > -v /usr/local/zookeeper-cluster/node3/volumes/data:/data > -v /usr/local/zookeeper-cluster/node3/volumes/datalog:/datalog > -v /usr/local/zookeeper-cluster/node3/volumes/logs:/logs > -e ZOO_MY_ID=3 > -e 'ZOO_SERVERS=server.1=172.18.0.2:2888:3888;2181 server.2=172.18.0.3:2888:3888;2181 server.3=172.18.0.4:2888:3888;2181' 3487af26dee9c8bc1b9ae9adf86e9c7f6a3264f883206c6d0e4f6093db3200de80ef39f57160[root@localhost admin]# docker psCONTAINER ID IMAGECOMMAND CREATED STATUS PORTS NAMESc8bc1b9ae9ad 3487af26dee9 '/docker-entrypoin...' 17 seconds ago Up 16 seconds 2888/tcp, 3888/tcp, 8080/tcp, 0.0.0.0:2183->2181/tcp zookeeper_node3649a4dbfb694 3487af26dee9 '/docker-entrypoin...' 
22 seconds ago Up 21 seconds 2888/tcp, 3888/tcp, 8080/tcp, 0.0.0.0:2182->2181/tcp zookeeper_node250c07cf11fab 3487af26dee9 '/docker-entrypoin...' 33 seconds ago Up 32 seconds 2888/tcp, 3888/tcp, 0.0.0.0:2181->2181/tcp, 8080/tcp zookeeper_node1[root@localhost admin]#

進入容器內部驗證一下:

[root@localhost admin]# docker exec -it 50c07cf11fab bashroot@50c07cf11fab:/apache-zookeeper-3.5.5-bin# ./bin/zkServer.sh statusZooKeeper JMX enabled by defaultUsing config: /conf/zoo.cfgClient port found: 2181. Client address: localhost.Mode: followerroot@50c07cf11fab:/apache-zookeeper-3.5.5-bin# exitexit[root@localhost admin]# docker exec -it 649a4dbfb694 bashroot@649a4dbfb694:/apache-zookeeper-3.5.5-bin# ./bin/zkServer.sh statusZooKeeper JMX enabled by defaultUsing config: /conf/zoo.cfgClient port found: 2181. Client address: localhost.Mode: leaderroot@649a4dbfb694:/apache-zookeeper-3.5.5-bin# exitexit[root@localhost admin]# docker exec -it c8bc1b9ae9ad bashroot@c8bc1b9ae9ad:/apache-zookeeper-3.5.5-bin# ./bin/zkServer.sh statusZooKeeper JMX enabled by defaultUsing config: /conf/zoo.cfgClient port found: 2181. Client address: localhost.Mode: followerroot@c8bc1b9ae9ad:/apache-zookeeper-3.5.5-bin# exitexit[root@localhost admin]#

再驗證一下創建節點

Docker下安裝zookeeper(單機和集群)

開啟防火墻,以供外部訪問

firewall-cmd --zone=public --add-port=2181/tcp --permanent
firewall-cmd --zone=public --add-port=2182/tcp --permanent
firewall-cmd --zone=public --add-port=2183/tcp --permanent
systemctl restart firewalld
firewall-cmd --list-all

在本地,我用zookeeper的客戶端連接虛擬機上的集群:

Docker下安裝zookeeper(單機和集群)

可以看到連接成功!

Docker下安裝zookeeper(單機和集群)

集群安裝方式二:通過docker stack deploy或docker-compose安裝

這里用docker-compose。先安裝docker-compose

[root@localhost admin]# curl -L "https://github.com/docker/compose/releases/download/1.24.1/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100   617    0   617    0     0    145      0 --:--:--  0:00:04 --:--:--   145
100 15.4M  100 15.4M    0     0   131k      0  0:02:00  0:02:00 --:--:--  136k
[root@localhost admin]# chmod +x /usr/local/bin/docker-compose

(注意:URL要用雙引號,否則 $(uname -s) 和 $(uname -m) 不會被Shell展開)

檢查版本(驗證是否安裝成功)

[root@localhost admin]# docker-compose --versiondocker-compose version 1.24.1, build 4667896b

卸載的話

rm /usr/local/bin/docker-compose

開始配置,新建三個掛載目錄

[root@localhost admin]# mkdir /usr/local/zookeeper-cluster/node4
[root@localhost admin]# mkdir /usr/local/zookeeper-cluster/node5
[root@localhost admin]# mkdir /usr/local/zookeeper-cluster/node6

新建任意目錄,然后在里面新建一個文件

[root@localhost admin]# mkdir DockerComposeFolder
[root@localhost admin]# cd DockerComposeFolder/
[root@localhost DockerComposeFolder]# vim docker-compose.yml

文件內容如下:(自定義網絡見上面)

version: '3.1'

services:
  zoo1:
    image: zookeeper
    restart: always
    privileged: true
    hostname: zoo1
    ports:
      - 2181:2181
    volumes: # 掛載數據
      - /usr/local/zookeeper-cluster/node4/data:/data
      - /usr/local/zookeeper-cluster/node4/datalog:/datalog
    environment:
      ZOO_MY_ID: 4
      ZOO_SERVERS: server.4=0.0.0.0:2888:3888;2181 server.5=zoo2:2888:3888;2181 server.6=zoo3:2888:3888;2181
    networks:
      default:
        ipv4_address: 172.18.0.14
  zoo2:
    image: zookeeper
    restart: always
    privileged: true
    hostname: zoo2
    ports:
      - 2182:2181
    volumes: # 掛載數據
      - /usr/local/zookeeper-cluster/node5/data:/data
      - /usr/local/zookeeper-cluster/node5/datalog:/datalog
    environment:
      ZOO_MY_ID: 5
      ZOO_SERVERS: server.4=zoo1:2888:3888;2181 server.5=0.0.0.0:2888:3888;2181 server.6=zoo3:2888:3888;2181
    networks:
      default:
        ipv4_address: 172.18.0.15
  zoo3:
    image: zookeeper
    restart: always
    privileged: true
    hostname: zoo3
    ports:
      - 2183:2181
    volumes: # 掛載數據
      - /usr/local/zookeeper-cluster/node6/data:/data
      - /usr/local/zookeeper-cluster/node6/datalog:/datalog
    environment:
      ZOO_MY_ID: 6
      ZOO_SERVERS: server.4=zoo1:2888:3888;2181 server.5=zoo2:2888:3888;2181 server.6=0.0.0.0:2888:3888;2181
    networks:
      default:
        ipv4_address: 172.18.0.16

networks: # 自定義網絡(見上面)
  default:
    external:
      name: zoonet

注意yaml文件里不能有tab,只能有空格。

關于version與Docker版本的關系如下:

Docker下安裝zookeeper(單機和集群)

然后執行(-d后臺啟動)

docker-compose -f docker-compose.yml up -d

Docker下安裝zookeeper(單機和集群)

查看已啟動的容器

[root@localhost DockerComposeFolder]# docker psCONTAINER ID IMAGECOMMAND CREATED STATUS PORTS NAMESa2c14814037d zookeeper '/docker-entrypoin...' 6 minutes ago Up About a minute 2888/tcp, 3888/tcp, 8080/tcp, 0.0.0.0:2183->2181/tcp dockercomposefolder_zoo3_150310229b216 zookeeper '/docker-entrypoin...' 6 minutes ago Up About a minute 2888/tcp, 3888/tcp, 0.0.0.0:2181->2181/tcp, 8080/tcp dockercomposefolder_zoo1_1475d8a9e2d08 zookeeper '/docker-entrypoin...' 6 minutes ago Up About a minute 2888/tcp, 3888/tcp, 8080/tcp, 0.0.0.0:2182->2181/tcp dockercomposefolder_zoo2_1

進入一個容器

[root@localhost DockerComposeFolder]# docker exec -it a2c14814037d bashroot@zoo3:/apache-zookeeper-3.5.5-bin# ./bin/zkCli.shConnecting to localhost:2181....WatchedEvent state:SyncConnected type:None path:null[zk: localhost:2181(CONNECTED) 0] [zk: localhost:2181(CONNECTED) 1] ls /[zookeeper][zk: localhost:2181(CONNECTED) 2] create /hiCreated /hi[zk: localhost:2181(CONNECTED) 3] ls /[hi, zookeeper]

進入另一個容器

[root@localhost DockerComposeFolder]# docker exec -it 50310229b216 bashroot@zoo1:/apache-zookeeper-3.5.5-bin# ./bin/zkCli.shConnecting to localhost:2181...WatchedEvent state:SyncConnected type:None path:null[zk: localhost:2181(CONNECTED) 0] ls /[hi, zookeeper]

本地客戶端連接集群:

zkCli.cmd -server 192.168.192.128:2181,192.168.192.128:2182,192.168.192.128:2183

Docker下安裝zookeeper(單機和集群)

查看

Docker下安裝zookeeper(單機和集群)

停止所有活動容器

Docker下安裝zookeeper(單機和集群)

刪除所有已停止的容器

Docker下安裝zookeeper(單機和集群)

更多docker-compose的命令:

[root@localhost DockerComposeFolder]# docker-compose --helpDefine and run multi-container applications with Docker.Usage: docker-compose [-f <arg>...] [options] [COMMAND] [ARGS...] docker-compose -h|--helpOptions: -f, --file FILE Specify an alternate compose file (default: docker-compose.yml) -p, --project-name NAME Specify an alternate project name (default: directory name) --verbose Show more output --log-level LEVEL Set log level (DEBUG, INFO, WARNING, ERROR, CRITICAL) --no-ansi Do not print ANSI control characters -v, --versionPrint version and exit -H, --host HOST Daemon socket to connect to --tls Use TLS; implied by --tlsverify --tlscacert CA_PATH Trust certs signed only by this CA --tlscert CLIENT_CERT_PATH Path to TLS certificate file --tlskey TLS_KEY_PATH Path to TLS key file --tlsverify Use TLS and verify the remote --skip-hostname-check Don’t check the daemon’s hostname against the name specified in the client certificate --project-directory PATH Specify an alternate working directory (default: the path of the Compose file) --compatibility If set, Compose will attempt to convert keys in v3 files to their non-Swarm equivalentCommands: build Build or rebuild services bundle Generate a Docker bundle from the Compose file config Validate and view the Compose file create Create services downStop and remove containers, networks, images, and volumes events Receive real time events from containers execExecute a command in a running container helpGet help on a command images List images killKill containers logsView output from containers pause Pause services portPrint the public port for a port binding ps List containers pullPull service images pushPush service images restart Restart services rm Remove stopped containers runRun a one-off command scale Set number of containers for a service start Start services stopStop services topDisplay the running processes unpause Unpause services up Create and start containers version Show the Docker-Compose version 
information

到此這篇關于Docker下安裝zookeeper(單機和集群)的文章就介紹到這了,更多相關Docker安裝zookeeper內容請搜索好吧啦網以前的文章或繼續瀏覽下面的相關文章希望大家以后多多支持好吧啦網!

標簽: Docker
相關文章:
日本不卡不码高清免费观看,久久国产精品久久w女人spa,黄色aa久久,三上悠亚国产精品一区二区三区
激情偷拍久久| 久久xxxx| 国产日韩高清一区二区三区在线| 狠狠干综合网| 亚洲精品午夜av福利久久蜜桃| 精品欠久久久中文字幕加勒比| 精品一区二区三区免费看| 美女久久精品| 韩国久久久久久| 亚洲香蕉网站| 亚洲久久视频| 欧美偷窥清纯综合图区| 国产精品高潮呻吟久久久久| 久久久久伊人| 亚洲永久av| 欧美福利一区| 免费日韩一区二区| 天堂va在线高清一区| 日本精品在线播放| 日本不良网站在线观看| 国产传媒av在线| 欧美日韩亚洲在线观看| 视频一区视频二区中文| 日韩综合一区二区三区| 国产精久久一区二区| 国语对白精品一区二区| 四虎4545www国产精品| 99成人在线视频| 亚洲一区久久| 欧美精品中文字幕亚洲专区| 久久高清免费| 中文字幕亚洲精品乱码| 国产精品久久久久av蜜臀| 国产精品久久久久av电视剧| 九一国产精品| 亚洲视频二区| 久久精品理论片| 狠狠久久婷婷| 国产视频一区二| 成人久久一区| 日本精品久久| 日本精品影院| 日韩av电影一区| 日韩精品专区| 亚洲精品裸体| 97精品国产| 蜜桃av一区二区| 精品久久久久中文字幕小说| 免费精品国产| 国产精区一区二区| 日韩欧美网址| 亚洲精品美女91| 国产资源在线观看入口av| 日韩精品一级中文字幕精品视频免费观看 | 欧美日韩视频网站| 欧美激情日韩| 久久久久久久久久久9不雅视频| 亚洲永久字幕| 麻豆国产一区| 美女少妇全过程你懂的久久| 欧美亚洲综合视频| 久久国产精品成人免费观看的软件| 日韩av电影一区| 香蕉久久精品| 日韩激情网站| 欧美69视频| 久久精品国产网站| 免费精品视频在线| 欧美日韩视频免费观看| 青草av.久久免费一区| 久久精品91| 日韩毛片网站| 999久久久国产精品| 嫩草伊人久久精品少妇av杨幂| 亚洲激情社区| 日本欧美国产| 日韩av一区二区三区四区| 亚洲天堂日韩在线| 欧美亚洲国产精品久久| 久久av免费看| 亚洲tv在线| 午夜久久影院| 亚洲成人不卡| 精品资源在线| 亚洲欧美久久| 亚洲天堂久久| 久久精品九色| 国产精品欧美大片| 蜜臀av一区二区在线免费观看| 欧产日产国产精品视频| 久久久精品国产**网站| 亚洲精品观看| 亚洲一区二区三区四区五区午夜 | 亚洲成a人片| 精品视频一区二区三区在线观看| 亚洲精品乱码| 亚洲影视一区| 亚洲欧美日韩国产一区| 亚洲成人二区| 粉嫩av一区二区三区四区五区 | 精品三级久久| 麻豆国产精品一区二区三区| 91成人福利| 一区二区高清| 首页亚洲欧美制服丝腿| 亚洲天堂日韩在线| 在线一区免费| 欧美久久精品一级c片| 久久蜜桃精品| 国产一区二区视频在线看| 老牛国内精品亚洲成av人片| 国产日韩1区| 国产欧美91| 亚洲精品美女| 久热精品在线| 亚洲欧美日韩专区| 国产亚洲精品v| 欧美一区二区三区高清视频| 日韩成人亚洲| 99免费精品| 亚洲激情国产| 美女网站久久| 色综合视频一区二区三区日韩| 蜜桃免费网站一区二区三区| 精品1区2区3区4区| 在线一区免费| 9国产精品视频| 91精品婷婷色在线观看| 香蕉国产精品| 久久av一区二区三区| 中文字幕av一区二区三区四区| 在线免费观看亚洲| 少妇精品在线| 国产精品三p一区二区| 国产高清精品二区| 精品一区二区三区在线观看视频| 国产伦久视频在线观看| 成人羞羞视频播放网站| 午夜国产欧美理论在线播放 | 日韩精品导航| 国产亚洲一区二区三区不卡| 麻豆一区二区三区| 成人影视亚洲图片在线| 久久久精品午夜少妇| 日韩一级网站| 亚洲日本免费电影| 国产精品1区在线| 欧美日韩国产v| 日韩亚洲在线| 国产日产精品一区二区三区四区的观看方式| 国产精品视频3p| yellow在线观看网址| 欧美精品一区二区久久| 亚洲+小说+欧美+激情+另类| 国产日韩高清一区二区三区在线| 国际精品欧美精品| 久久亚洲国产| 深夜福利一区| 精品国产成人| 99国产精品久久久久久久成人热| 亚洲最大av| 麻豆高清免费国产一区| 久久夜夜操妹子| 久久午夜精品| 99久久夜色精品国产亚洲狼| 在线精品视频一区| 国产精品一区二区av交换| 欧美好骚综合网| 蜜臀久久久久久久| 精品午夜视频| 久久99伊人| 欧美国产另类| 99视频精品| 国产精品亲子伦av一区二区三区| 欧美精品日日操| 日韩精品亚洲专区| 日韩综合一区| 亚洲综合图色| 成人小电影网站| 亚洲精一区二区三区| 久久精品天堂| 亚洲一区二区成人| 欧美日韩国产免费观看 | 久久久精品国产**网站| 婷婷精品视频| 欧美一区不卡| 免费欧美一区| 国产乱论精品| 夜夜精品视频| 
美女毛片一区二区三区四区最新中文字幕亚洲 | 国产精品一区二区三区四区在线观看 | 蜜桃视频在线观看一区二区| 欧美一级鲁丝片| 欧美亚洲二区| 国产一区二区三区国产精品| 成人高清一区| 视频一区二区欧美| 欧美成人一二区| 黄色在线一区| 久久精品日韩欧美|