# Add the Solr cluster hostname mappings to /etc/hosts on every node
# (the original instructions edited the file manually with vim).
cat >> /etc/hosts <<'EOF'
172.16.213.8  solrmaster s27
172.16.213.12 solrslave1 ss1
172.16.213.13 solrslave2 ss2
172.16.213.9  solrslave3 ss3
172.16.213.10 solrslave4 ss4
172.16.213.14 solrslave5 ss5
172.16.213.11 solrslave6 ss6
EOF
# Create the Solr collection with 6 shards and a replication factor of 3.
# -n names the config set; -d points at the local configset directory to upload.
# NOTE: the original snippet was split across lines without '\' continuations
# and would not run as pasted.
./solr create_collection -c collection_coupon -n collection_coupon \
  -shards 6 -replicationFactor 3 \
  -d /opt/lucidworks-hdpsearch/solr/server/solr/configsets/coupon_schema_configs/conf
能夠經過scp把舊集羣的配置文件拷貝到新集羣,保證配置一致,以避免出現莫名的錯誤。
# Register an HBase indexer that streams HBase mutations into Solr.
# solr.zk must point at the ZooKeeper ensemble of the NEW Solr cluster
# (including the /solr chroot); solr.collection selects the target collection.
# NOTE: the original snippet lacked '\' continuations and would not run as pasted.
./hbase-indexer add-indexer -n indexer_coupon_solr \
  -c /opt/lucidworks-hdpsearch/hbase-indexer/demo/coupon_indexer_mapper.xml \
  -cp solr.zk=solrslave1:2181,solrslave2:2181,solrslave3:2181,solrslave4:2181,solrslave5:2181,solrslave6:2181/solr \
  -cp solr.collection=collection_coupon
這裏要注意solr.zk 的設置,必定要設置爲新加入solr集羣的zk。
# Verify the indexer registration; the original page pasted the expected
# output onto the same line as the command, which is not runnable.
./hbase-indexer list-indexers
# Expected output (example):
#   remote_indexer_coupon
#     + Lifecycle state: ACTIVE
#     + Incremental indexing state: SUBSCRIBE_AND_CONSUME
#     + Batch indexing state: INACTIVE
#     + SEP subscription ID: Indexer_remote_indexer_coupon
#     + SEP subscription timestamp: 2018-05-07T18:04:28.434+08:00
#     + Connection type: solr
#     + Connection params:
#       + solr.zk = solrslave1:2181,solrslave2:2181,solrslave3:2181,solrslave4:2181,solrslave5:2181,solrslave6:2181/solr
#       + solr.collection = collection_coupon
#     + Indexer config:
#       940 bytes, use -dump to see content
#     + Indexer component factory: com.ngdata.hbaseindexer.conf.DefaultIndexerComponentFactory
#     + Additional batch index CLI arguments: (none)
#     + Default additional batch index CLI arguments: (none)
#     + Processes
#       + 1 running processes
#       + 0 failed processes
# NOTE(review): the output shows an indexer named 'remote_indexer_coupon' while
# the add-indexer step above registered 'indexer_coupon_solr' — the names in the
# original tutorial are inconsistent; confirm which one applies to your setup.
在Processes中,關鍵是看是否有1個任務正在運行,若是沒有或者運行失敗,需要檢查修復。
# Run the batch (MapReduce) indexing job as the 'hdfs' user.
# NOTE(review): the original 'su hdfs hadoop jar ...' does not pass the command
# to su at all; the command must be given via 'su <user> -c' (or 'sudo -u').
# The original lines also lacked '\' continuations.
su hdfs -c "hadoop jar /opt/lucidworks-hdpsearch/hbase-indexer/tools/hbase-indexer-mr-1.6-SNAPSHOT-job.jar \
  --conf /opt/lucidworks-hdpsearch/hbase-indexer/conf/hbase-site.xml \
  --hbase-indexer-zk localhost:2181 \
  --hbase-indexer-name indexer_coupon_solr \
  --reducers 0"
注意觀察日誌信息,若是沒有錯誤,繼續下面的操作。
在操做過程當中也能夠參考我之前的文章app