Install ZooKeeper. Edit /usr/local/zookeeper-3.4.7/conf/zoo.cfg:
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/usr/local/zookeeper-3.4.7/data
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
# log dir
dataLogDir=/usr/local/zookeeper-3.4.7/log
Create the myid file (it identifies this node within a ZooKeeper ensemble):
cd /usr/local/zookeeper-3.4.7/data
echo 1 > myid
Start ZooKeeper:
cd /usr/local/zookeeper-3.4.7/bin
./zkServer.sh start
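Optionally, connectivity to the new server can be checked from the ZooKeeper Java client before moving on. This is only a minimal smoke-test sketch: the class name ZkSmokeTest is made up here, and it assumes the org.apache.zookeeper client jar shipped with ZooKeeper 3.4.7 is on the classpath.

import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZkSmokeTest {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);

        // Connect to the standalone server configured above (clientPort=2181).
        ZooKeeper zk = new ZooKeeper("192.168.1.63:2181", 30000, event -> {
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        });

        if (!connected.await(10, TimeUnit.SECONDS)) {
            System.err.println("no connection within 10s - is ZooKeeper running?");
            zk.close();
            return;
        }

        // Listing the root znode is enough to prove the server answers requests.
        List<String> children = zk.getChildren("/", false);
        System.out.println("znodes under /: " + children);
        zk.close();
    }
}

On a fresh server this should print at least the built-in /zookeeper znode.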
Install Storm. Edit /usr/local/storm-0.10.0/conf/storm.yaml:
########### These MUST be filled in for a storm configuration
storm.zookeeper.servers:
    - "192.168.1.63"
#     - "server2"
#
nimbus.host: "192.168.1.63"
#
#
# ##### These may optionally be filled in:
#
## List of custom serializations
# topology.kryo.register:
#     - org.mycompany.MyType
#     - org.mycompany.MyType2: org.mycompany.MyType2Serializer
#
## List of custom kryo decorators
# topology.kryo.decorators:
#     - org.mycompany.MyDecorator
#
## Locations of the drpc servers
# drpc.servers:
#     - "server1"
#     - "server2"
## Metrics Consumers
# topology.metrics.consumer.register:
#   - class: "backtype.storm.metric.LoggingMetricsConsumer"
#     parallelism.hint: 1
#   - class: "org.mycompany.MyMetricsConsumer"
#     parallelism.hint: 1
#     argument:
#       - endpoint: "metrics-collector.mycompany.org"
Start the Storm daemons (Nimbus, a Supervisor, and the UI; the UI listens on port 8080 by default):
cd /usr/local/storm-0.10.0/bin
./storm nimbus >/dev/null 2>&1 &
./storm supervisor >/dev/null 2>&1 &
./storm ui >/dev/null 2>&1 &
Deploy the topology:
./storm jar /home/jsecode/topology/storm-starter-0.10.0.jar storm.ykd.test.SegmentTopologyMain /home/jsecode/topology/testdata/gpsdata
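storm jar uploads the given jar, runs the named main class with the remaining arguments (here the GPS test data path), and submits the topology to the Nimbus configured as nimbus.host in storm.yaml. The source of storm.ykd.test.SegmentTopologyMain is not included in the post, so the following is only a minimal sketch of what such a class typically looks like with the Storm 0.10 backtype.storm API; the spout/bolt names, the topology name, and the placeholder logic are assumptions.

package storm.ykd.test;

import java.util.Map;

import backtype.storm.Config;
import backtype.storm.StormSubmitter;
import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;
import backtype.storm.utils.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class SegmentTopologyMain {

    // Placeholder spout: a real implementation would read records from the GPS data
    // file whose path is passed on the command line.
    public static class GpsSpout extends BaseRichSpout {
        private final String dataPath;
        private SpoutOutputCollector collector;

        public GpsSpout(String dataPath) {
            this.dataPath = dataPath;
        }

        @Override
        public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
            this.collector = collector;
        }

        @Override
        public void nextTuple() {
            Utils.sleep(1000);
            collector.emit(new Values("dummy-gps-record"));
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("record"));
        }
    }

    // Placeholder bolt: any class under the storm.ykd.test package logs through the
    // "storm.ykd.test" logger configured in worker.xml below.
    public static class SegmentBolt extends BaseBasicBolt {
        private static final Logger LOG = LoggerFactory.getLogger(SegmentBolt.class);

        @Override
        public void execute(Tuple input, BasicOutputCollector collector) {
            LOG.debug("processing {}", input.getStringByField("record"));
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            // terminal bolt: nothing emitted downstream
        }
    }

    public static void main(String[] args) throws Exception {
        // e.g. /home/jsecode/topology/testdata/gpsdata
        String gpsDataPath = args.length > 0 ? args[0] : "";

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("gps-spout", new GpsSpout(gpsDataPath), 1);
        builder.setBolt("segment-bolt", new SegmentBolt(), 2).shuffleGrouping("gps-spout");

        Config conf = new Config();
        conf.setNumWorkers(2);

        // Submit to the cluster; storm jar has already put this jar on the classpath
        // and pointed the client at nimbus.host from storm.yaml.
        StormSubmitter.submitTopology("gps-segment-topology", conf, builder.createTopology());
    }
}

For a quick test on a single machine, backtype.storm.LocalCluster#submitTopology can be used in place of StormSubmitter before deploying to the cluster.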
Worker logging configuration (log4j2/worker.xml under the Storm installation), extended with two custom appenders: classes under storm.ykd.test log to gpsSegmentw.log (SegmentLog), the SavePosdataTrackSegmentBolt logger logs to rs.log (RsLog), and everything else keeps the stock A1/STDOUT/STDERR/syslog appenders.
<configuration monitorInterval="60">
    <properties>
        <property name="pattern">%d{yyyy-MM-dd HH:mm:ss.SSS} %c{1.} [%p] %msg%n</property>
        <property name="patternNoTime">%msg%n</property>
    </properties>
    <appenders>
        <RollingFile name="SegmentLog"
                     fileName="${sys:storm.log.dir}/gpsSegmentw.log"
                     filePattern="${sys:storm.log.dir}/gpsSegmentw.%i.log">
            <PatternLayout>
                <pattern>${pattern}</pattern>
            </PatternLayout>
            <Policies>
                <SizeBasedTriggeringPolicy size="100 MB"/> <!-- Or every 100 MB -->
            </Policies>
            <DefaultRolloverStrategy max="9"/>
        </RollingFile>
        <RollingFile name="RsLog"
                     fileName="${sys:storm.log.dir}/rs.log"
                     filePattern="${sys:storm.log.dir}/rs.%i.log">
            <PatternLayout>
                <pattern>${pattern}</pattern>
            </PatternLayout>
            <Policies>
                <SizeBasedTriggeringPolicy size="100 MB"/> <!-- Or every 100 MB -->
            </Policies>
            <DefaultRolloverStrategy max="9"/>
        </RollingFile>
        <RollingFile name="A1"
                     fileName="${sys:storm.log.dir}/${sys:logfile.name}"
                     filePattern="${sys:storm.log.dir}/${sys:logfile.name}.%i.gz">
            <PatternLayout>
                <pattern>${pattern}</pattern>
            </PatternLayout>
            <Policies>
                <SizeBasedTriggeringPolicy size="100 MB"/> <!-- Or every 100 MB -->
            </Policies>
            <DefaultRolloverStrategy max="9"/>
        </RollingFile>
        <RollingFile name="STDOUT"
                     fileName="${sys:storm.log.dir}/${sys:logfile.name}.out"
                     filePattern="${sys:storm.log.dir}/${sys:logfile.name}.out.%i.gz">
            <PatternLayout>
                <pattern>${patternNoTime}</pattern>
            </PatternLayout>
            <Policies>
                <SizeBasedTriggeringPolicy size="100 MB"/> <!-- Or every 100 MB -->
            </Policies>
            <DefaultRolloverStrategy max="4"/>
        </RollingFile>
        <RollingFile name="STDERR"
                     fileName="${sys:storm.log.dir}/${sys:logfile.name}.err"
                     filePattern="${sys:storm.log.dir}/${sys:logfile.name}.err.%i.gz">
            <PatternLayout>
                <pattern>${patternNoTime}</pattern>
            </PatternLayout>
            <Policies>
                <SizeBasedTriggeringPolicy size="100 MB"/> <!-- Or every 100 MB -->
            </Policies>
            <DefaultRolloverStrategy max="4"/>
        </RollingFile>
        <Syslog name="syslog" format="RFC5424" host="localhost" port="514"
                protocol="UDP" appName="[${sys:storm.id}:${sys:worker.port}]" mdcId="mdc" includeMDC="true"
                facility="LOCAL5" enterpriseNumber="18060" newLine="true" exceptionPattern="%rEx{full}"
                messageId="[${sys:user.name}:${sys:logging.sensitivity}]" id="storm"/>
    </appenders>
    <loggers>
        <Logger name="storm.ykd.test.bolt.SavePosdataTrackSegmentBolt" level="debug">
            <AppenderRef ref="RsLog"/>
        </Logger>
        <Logger name="storm.ykd.test" level="debug">
            <AppenderRef ref="SegmentLog"/>
        </Logger>
        <root level="info"> <!-- We log everything -->
            <appender-ref ref="A1"/>
            <appender-ref ref="syslog"/>
        </root>
        <Logger name="STDERR" level="INFO">
            <appender-ref ref="STDERR"/>
            <appender-ref ref="syslog"/>
        </Logger>
        <Logger name="STDOUT" level="INFO">
            <appender-ref ref="STDOUT"/>
            <appender-ref ref="syslog"/>
        </Logger>
    </loggers>
</configuration>

(Original post: http://www.cnblogs.com/myibm/p/5939560.html)
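One note on the configuration above: the Logger names are matched against fully qualified Java class names, so the RsLog rule applies to whatever name the bolt class hands to the logging framework. A minimal, hypothetical skeleton (the real SavePosdataTrackSegmentBolt is not shown in the post) of how that logger is typically obtained through slf4j, which Storm 0.10 routes to log4j2:

package storm.ykd.test.bolt;

import java.util.Map;

import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Tuple;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical skeleton only: the logger name equals this class's fully qualified name,
// so the <Logger name="storm.ykd.test.bolt.SavePosdataTrackSegmentBolt" level="debug">
// rule above sends its output to rs.log via the RsLog appender; any other class under
// storm.ykd.test is caught by the SegmentLog rule instead.
public class SavePosdataTrackSegmentBolt extends BaseRichBolt {
    private static final Logger LOG =
            LoggerFactory.getLogger(SavePosdataTrackSegmentBolt.class);

    private OutputCollector collector;

    @Override
    public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
        this.collector = collector;
    }

    @Override
    public void execute(Tuple input) {
        LOG.debug("received tuple: {}", input);  // written to ${sys:storm.log.dir}/rs.log
        collector.ack(input);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // terminal bolt: nothing emitted downstream
    }
}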