First set up the nodes — coordinator, historical, overlord, and middleManager — and start their services.
Prerequisites: MySQL must be prepared (http://my.oschina.net/u/2460844/blog/637334 describes the MySQL configuration), along with an HDFS cluster and ZooKeeper (a standalone instance is sufficient).
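Before starting any node, the MySQL metadata database and account referenced in the _common configuration below (database druid, user druid, password diurd1234) have to exist. A minimal sketch, assuming a root account on druid01 and MySQL 5.x — adjust names and passwords to your environment:

# Create the metadata database and user that the _common configuration below expects
mysql -h druid01 -u root -p -e "CREATE DATABASE druid DEFAULT CHARACTER SET utf8; GRANT ALL PRIVILEGES ON druid.* TO 'druid'@'%' IDENTIFIED BY 'diurd1234';"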
1. _common configuration:
druid.extensions.coordinates=["io.druid.extensions:druid-examples","io.druid.extensions:druid-kafka-eight","io.druid.extensions:mysql-metadata-storage","io.druid.extensions:druid-hdfs-storage"]
druid.extensions.localRepository=extensions-repo
druid.zk.service.host=druid01:2181
druid.metadata.storage.type=mysql
druid.metadata.storage.connector.connectURI=jdbc:mysql://druid01:3306/druid
druid.metadata.storage.connector.user=druid
druid.metadata.storage.connector.password=diurd1234
druid.storage.type=hdfs
druid.storage.storageDirectory=hdfs://vm1.cci/tmp/druid/localStorage
druid.cache.type=local
druid.cache.sizeInBytes=10000000
druid.selectors.indexing.serviceName=overlord
druid.selectors.coordinator.serviceName=coordinator
druid.emitter=logging
2. coordinator configuration:
druid.host=druid01
druid.port=8081
druid.service=coordinator
druid.coordinator.startDelay=PT5M
3. historical configuration:
druid.host=druid02
druid.port=8082
druid.service=druid/historical
druid.historical.cache.useCache=true
druid.historical.cache.populateCache=true
druid.processing.buffer.sizeBytes=100000000
druid.processing.numThreads=3
druid.server.http.numThreads=5
druid.server.maxSize=300000000000
druid.segmentCache.locations=[{"path": "/tmp/druid/indexCache", "maxSize": 300000000000}]
druid.monitoring.monitors=["io.druid.server.metrics.HistoricalMetricsMonitor", "com.metamx.metrics.JvmMonitor"]
4. overlord configuration:
druid.host=druid03
druid.port=8090
druid.service=overlord
druid.indexer.autoscale.doAutoscale=true
druid.indexer.autoscale.strategy=ec2
druid.indexer.autoscale.workerIdleTimeout=PT90m
druid.indexer.autoscale.terminatePeriod=PT5M
druid.indexer.autoscale.workerVersion=0
druid.indexer.logs.type=local
druid.indexer.logs.directory=/tmp/druid/indexlog
druid.indexer.runner.type=remote
druid.indexer.runner.minWorkerVersion=0
# Store all task state in the metadata storage
druid.indexer.storage.type=metadata
#druid.indexer.fork.property.druid.processing.numThreads=1
#druid.indexer.fork.property.druid.computation.buffer.size=100000000
5. middleManager configuration:
druid.host=druid04
druid.port=8091
druid.service=druid/middlemanager
druid.indexer.logs.type=local
druid.indexer.logs.directory=/tmp/druid/indexlog
druid.indexer.fork.property.druid.processing.numThreads=5
druid.indexer.fork.property.druid.computation.buffer.size=100000000
# Resources for peons
druid.indexer.runner.javaOpts=-server -Xmx3g
druid.indexer.task.baseTaskDir=/tmp/persistent/task/
6. Start each node in turn. If a node fails to start, the cause is most often insufficient memory; adjust the JVM parameters accordingly (see the sketch after this step).
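For reference, a Druid 0.8.x node is typically launched with a command like the sketch below. The heap sizes are placeholders to tune per node, and the classpath assumes the default config/<node> layout shipped with the distribution:

# Example launch command for the coordinator, run from the Druid install directory.
# -Xms/-Xmx are the values to raise or lower when a node fails to start because of memory.
java -Xms1g -Xmx2g -Duser.timezone=UTC -Dfile.encoding=UTF-8 \
  -classpath config/_common:config/coordinator:lib/* \
  io.druid.cli.Main server coordinator

# The other nodes are started the same way, swapping the config directory and the
# server type: historical, overlord, middleManager.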
7. Data to be imported: wikipedia_data.csv and wikipedia_data.json
---wikipedia_data.json:
{"timestamp": "2013-08-31T01:02:33Z", "page": "Gypsy Danger", "language" : "en", "user" : "nuclear", "unpatrolled" : "true", "newPage" : "true", "robot": "false", "anonymous": "false", "namespace":"article", "continent":"North America", "country":"United States", "region":"Bay Area", "city":"San Francisco", "added": 57, "deleted": 200, "delta": -143} {"timestamp": "2013-08-31T03:32:45Z", "page": "Striker Eureka", "language" : "en", "user" : "speed", "unpatrolled" : "false", "newPage" : "true", "robot": "true", "anonymous": "false", "namespace":"wikipedia", "continent":"Australia", "country":"Australia", "region":"Cantebury", "city":"Syndey", "added": 459, "deleted": 129, "delta": 330} {"timestamp": "2013-08-31T07:11:21Z", "page": "Cherno Alpha", "language" : "ru", "user" : "masterYi", "unpatrolled" : "false", "newPage" : "true", "robot": "true", "anonymous": "false", "namespace":"article", "continent":"Asia", "country":"Russia", "region":"Oblast", "city":"Moscow", "added": 123, "deleted": 12, "delta": 111} {"timestamp": "2013-08-31T11:58:39Z", "page": "Crimson Typhoon", "language" : "zh", "user" : "triplets", "unpatrolled" : "true", "newPage" : "false", "robot": "true", "anonymous": "false", "namespace":"wikipedia", "continent":"Asia", "country":"China", "region":"Shanxi", "city":"Taiyuan", "added": 905, "deleted": 5, "delta": 900} {"timestamp": "2013-08-31T12:41:27Z", "page": "Coyote Tango", "language" : "ja", "user" : "cancer", "unpatrolled" : "true", "newPage" : "false", "robot": "true", "anonymous": "false", "namespace":"wikipedia", "continent":"Asia", "country":"Japan", "region":"Kanto", "city":"Tokyo", "added": 1, "deleted": 10, "delta": -9}
---wikipedia_data.csv:
2013-08-31T01:02:33Z, Gypsy Danger, en, nuclear, true, true, false, false, article, North America, United States, Bay Area, San Francisco, 57, 200, -143
2013-08-31T01:02:33Z, Gypsy Danger, en, nuclear, true, true, false, false, article, North America, United States, Bay Area, San Francisc, 57, 200, -143
2013-08-31T01:02:33Z, Gypsy Danger, en, nuclear, true, true, false, false, article, North America, United States, Bay Area, San Francis, 57, 200, -143
2013-08-31T01:02:33Z, Gypsy Danger, en, nuclear, true, true, false, false, article, North America, United States, Bay Area, San Franci, 57, 200, -143
2013-08-31T01:02:33Z, Gypsy Danger, en, nuclear, true, true, false, false, article, North America, United States, Bay Area, San Franc, 57, 200, -143
2013-08-31T01:02:33Z, Gypsy Danger, en, nuclear, true, true, false, false, article, North America, United States, Bay Area, San Fran, 57, 200, -143
2013-08-31T01:02:33Z, Gypsy Danger, en, nuclear, true, true, false, false, article, North America, United States, Bay Area, San Fra, 57, 200, -143
2013-08-31T01:02:33Z, Gypsy Danger, en, nuclear, true, true, false, false, article, North America, United States, Bay Area, San Fr, 57, 200, -143
2013-08-31T01:02:33Z, Gypsy Danger, en, nuclear, true, true, false, false, article, North America, United States, Bay Area, San F, 57, 200, -143
2013-08-31T01:02:33Z, Gypsy Danger, en, nuclear, true, true, false, false, article, North America, United States, Bay Area, Sa , 57, 200, -143
8. Note: if the data is imported from local disk, the data file must reside on the middleManager node, otherwise the task will not be able to find it after submission. If importing from HDFS, the file only needs to be put into the HDFS filesystem first. The overlord node here is druid03 (you can substitute its IP). See the sketch below for both options.
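A minimal sketch of the two options, assuming the middleManager is druid04, the Druid directory is /root/druid-0.8.3, and the HDFS path matches the one used in the task specs further down — adjust hosts and paths to your environment:

# Option 1: local import — copy the data file onto the middleManager host
scp wikipedia_data.json root@druid04:/root/druid-0.8.3/

# Option 2: HDFS import — put the file into HDFS and note the path for the task spec
hdfs dfs -mkdir -p /tmp/druid/datasource
hdfs dfs -put wikipedia_data.json /tmp/druid/datasource/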
9. On any node that can reach druid03, create a JSON index task:
--9.1 How to write the task JSON for importing a locally stored JSON file.
First save the data file wikipedia_data.json in the Druid directory on the middleManager node (for example /root/druid-0.8.3).
Name the task file wikipedia_index_local_json_task.json:
{ "type" : "index_hadoop", "spec" : { "dataSchema" : { "dataSource" : "wikipedia", "parser" : { "type" : "string", "parseSpec" : { "format" : "json", "timestampSpec" : { "column" : "timestamp", "format" : "auto" }, "dimensionsSpec" : { "dimensions": ["page","language","user","unpatrolled","newPage","robot","anonymous","namespace","continent","country","region","city"], "dimensionExclusions" : [], "spatialDimensions" : [] } } }, "metricsSpec" : [ { "type" : "count", "name" : "count" }, { "type" : "doubleSum", "name" : "added", "fieldName" : "added" }, { "type" : "doubleSum", "name" : "deleted", "fieldName" : "deleted" }, { "type" : "doubleSum", "name" : "delta", "fieldName" : "delta" } ], "granularitySpec" : { "type" : "uniform", "segmentGranularity" : "DAY", "queryGranularity" : "NONE", "intervals" : [ "2013-08-31/2013-09-01" ] } }, "ioConfig": { "type": "index", "firehose": { "type": "local", "baseDir": "./", "filter": "wikipedia_data.json" } }, "tuningConfig": { "type": "index", "targetPartitionSize": 0, "rowFlushBoundary": 0 } } }
9.2 Submit the task. As noted earlier, the overlord node is druid03, so submit the task to druid03:
curl -X 'POST' -H 'Content-Type:application/json' -d @wikipedia_index_local_json_task.json druid03:8090/druid/indexer/v1/task
The task's progress can be followed in the overlord node's log; output like the following indicates the task succeeded:
2016-03-29T17:35:11,385 INFO [forking-task-runner-1] io.druid.indexing.overlord.ForkingTaskRunner - Logging task index_hadoop_NN_2016-03-29T17:35:11.510+08:00 output to: /tmp/persistent/task/index_hadoop_NN_2016-03-29T17:35:11.510+08:00/log
2016-03-29T17:42:15,263 INFO [forking-task-runner-1] io.druid.indexing.overlord.ForkingTaskRunner - Process exited with status[0] for task: index_hadoop_NN_2016-03-29T17:35:11.510+08:00
2016-03-29T17:42:15,265 INFO [forking-task-runner-1] io.druid.indexing.common.tasklogs.FileTaskLogs - Wrote task log to: /tmp/druid/indexlog/index_hadoop_NN_2016-03-29T17:35:11.510+08:00.log
2016-03-29T17:42:15,267 INFO [forking-task-runner-1] io.druid.indexing.overlord.ForkingTaskRunner - Removing task directory: /tmp/persistent/task/index_hadoop_NN_2016-03-29T17:35:11.510+08:00
2016-03-29T17:42:15,284 INFO [WorkerTaskMonitor-1] io.druid.indexing.worker.WorkerTaskMonitor - Job's finished. Completed [index_hadoop_NN_2016-03-29T17:35:11.510+08:00] with status [SUCCESS]
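Task state can also be checked through the overlord's HTTP API instead of tailing the log. A sketch, using the overlord address from this setup and the task id returned by the submission above:

# List the tasks the overlord is currently running
curl druid03:8090/druid/indexer/v1/runningTasks

# Check the status of a specific task (replace <taskId> with the id returned on submission,
# e.g. index_hadoop_NN_2016-03-29T17:35:11.510+08:00)
curl druid03:8090/druid/indexer/v1/task/<taskId>/status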
9.3 Example task file for importing CSV data from local disk. wikipedia_data.csv must first be saved in the Druid directory on the middleManager node (for example /root/druid-0.8.3).
{ "type": "index_hadoop", "spec": { "dataSchema": { "dataSource": "wikipedia", "parser": { "type": "string", "parseSpec": { "format" : "csv", "timestampSpec" : { "column" : "timestamp" }, "columns" : ["timestamp","page","language","user","unpatrolled","newPage","robot","anonymous","namespace","continent","country","region","city","added","deleted","delta"], "dimensionsSpec" : { "dimensions" : ["page","language","user","unpatrolled","newPage","robot","anonymous","namespace","continent","country","region","city"] } } }, "metricsSpec": [ { "type": "count", "name": "count" }, { "type": "doubleSum", "name": "added", "fieldName": "added" }, { "type": "doubleSum", "name": "deleted", "fieldName": "deleted" }, { "type": "doubleSum", "name": "delta", "fieldName": "delta" } ], "granularitySpec": { "type": "uniform", "segmentGranularity": "DAY", "queryGranularity": "NONE", "intervals": ["2013-08-31/2013-09-01"] } }, "ioConfig": { "type": "index", "firehose": { "type": "local", "baseDir": "./", "filter": "wikipedia_data.csv" } }, "tuningConfig": { "type": "index", "targetPartitionSize": 0, "rowFlushBoundary": 0 } } }
9.4 Importing a JSON file from HDFS. First put wikipedia_data.json into HDFS, remember the directory, and then reference that path in the task file. The HDFS path must include the namenode's hostname or IP; here vm1.cci stands in for the namenode. Note the differences from the local-import task file — they determine whether the import succeeds.
{ "type" : "index_hadoop", "spec" : { "dataSchema" : { "dataSource" : "wikipedia", "parser" : { "type" : "string", "parseSpec" : { "format" : "json", "timestampSpec" : { "column" : "timestamp", "format" : "auto" }, "dimensionsSpec" : { "dimensions": ["page","language","user","unpatrolled","newPage","robot","anonymous","namespace","continent","country","region","city"], "dimensionExclusions" : [], "spatialDimensions" : [] } } }, "metricsSpec" : [ { "type" : "count", "name" : "count" }, { "type" : "doubleSum", "name" : "added", "fieldName" : "added" }, { "type" : "doubleSum", "name" : "deleted", "fieldName" : "deleted" }, { "type" : "doubleSum", "name" : "delta", "fieldName" : "delta" } ], "granularitySpec" : { "type" : "uniform", "segmentGranularity" : "DAY", "queryGranularity" : "NONE", "intervals" : [ "2013-08-31/2013-09-01" ] } }, "ioConfig" : { "type" : "hadoop", "inputSpec" : { "type" : "static", "paths" : "hdfs://vm1.cci/tmp/druid/datasource/wikipedia_data.json" } }, "tuningConfig" : { "type": "hadoop" } } }
9.5 Importing a CSV file from HDFS. The task file is as follows:
{ "type": "index_hadoop", "spec": { "dataSchema": { "dataSource": "wikipedia", "parser": { "type": "string", "parseSpec": { "format" : "csv", "timestampSpec" : { "column" : "timestamp" }, "columns" : ["timestamp","page","language","user","unpatrolled","newPage","robot","anonymous","namespace","continent","country","region","city","added","deleted","delta"], "dimensionsSpec" : { "dimensions" : ["page","language","user","unpatrolled","newPage","robot","anonymous","namespace","continent","country","region","city"] } } }, "metricsSpec": [ { "type": "count", "name": "count" }, { "type": "doubleSum", "name": "added", "fieldName": "added" }, { "type": "doubleSum", "name": "deleted", "fieldName": "deleted" }, { "type": "doubleSum", "name": "delta", "fieldName": "delta" } ], "granularitySpec": { "type": "uniform", "segmentGranularity": "DAY", "queryGranularity": "NONE", "intervals": ["2013-08-31/2013-09-01"] } }, "ioConfig" : { "type" : "hadoop", "inputSpec" : { "type" : "static", "paths" : "hdfs://vm1.cci/tmp/druid/datasource/wikipedia_data.csv" } }, "tuningConfig" : { "type": "hadoop" } } }
Summary: druid.io has a huge number of configuration options, and an oversight in any one of them can cause a task to fail. The four examples above are worth comparing carefully for their differences; beginners almost inevitably stumble here.