# Tail application logs and forward them to another Flume agent
agent1.channels = ch1
# Define a memory channel called ch1 on agent1
agent1.channels.ch1.type = memory
agent1.channels.ch1.capacity = 100000
agent1.channels.ch1.transactionCapacity = 1000
# Define an Exec Source called exec1
agent1.sources = exec1
agent1.sources.exec1.type = exec
agent1.sources.exec1.command = tail -F /usr/local/tomcat/logs/api/api.log
agent1.sources.exec1.interceptors = ts
agent1.sources.exec1.interceptors.ts.type = timestamp
agent1.sources.exec1.channels = ch1
# Define an Avro sink called avro-sink1 that forwards events to the collector
agent1.sinks = avro-sink1
agent1.sinks.avro-sink1.type = avro
agent1.sinks.avro-sink1.channel = ch1
agent1.sinks.avro-sink1.hostname = 1xx.xxx.111.01
agent1.sinks.avro-sink1.port = 41414
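To run this sender agent from $FLUME_HOME, something like the following should work (a sketch; it assumes the config above is saved as conf/flume.conf):
$ bin/flume-ng agent --conf conf --conf-file conf/flume.conf --name agent1 -Dflume.root.logger=INFO,console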
Hi, I'm an architecture engineer and a native Korean speaker.
I live in Seoul, Korea.
I have been developing the Abilists tool.
Please feel free to visit abilists.com.
Friday, June 28, 2013
Flume - flume.conf for Flume NG 1.3.1
# Save log data to HDFS.
agent1.channels = ch1
# Define a memory channel called ch1 on agent1
agent1.channels.ch1.type = memory
agent1.channels.ch1.capacity = 100000
agent1.channels.ch1.transactionCapacity = 1000
# Define an Avro source called avro-source1 on agent1 and tell it
# to bind to xxx.xxx.xxx.xxx:41414. Connect it to channel ch1.
agent1.sources = avro-source1
agent1.sources.avro-source1.channels = ch1
agent1.sources.avro-source1.type = avro
agent1.sources.avro-source1.bind = 1xx.xxx.111.01
agent1.sources.avro-source1.port = 41414
agent1.sinks = hdfs-sink1
agent1.sinks.hdfs-sink1.type = hdfs
agent1.sinks.hdfs-sink1.channel = ch1
agent1.sinks.hdfs-sink1.hdfs.path = hdfs://xxx.xxx.xxx.xxx:9000/home/hadoop/data/flume/%Y%m%d/%H
agent1.sinks.hdfs-sink1.hdfs.filePrefix = ch1
agent1.sinks.hdfs-sink1.hdfs.inUseSuffix = .txt
agent1.sinks.hdfs-sink1.hdfs.fileType = DataStream
agent1.sinks.hdfs-sink1.hdfs.rollInterval = 1200
agent1.sinks.hdfs-sink1.hdfs.rollSize = 0
agent1.sinks.hdfs-sink1.hdfs.rollCount = 1000000
agent1.sinks.hdfs-sink1.hdfs.writeFormat = text
agent1.sinks.hdfs-sink1.hdfs.batchSize = 10
agent1.sinks.hdfs-sink1.hdfs.threadsPoolSize = 10
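The collector agent is started the same way; afterwards you can check that files are landing under the hdfs.path configured above (a sketch, same assumptions as before):
$ bin/flume-ng agent --conf conf --conf-file conf/flume.conf --name agent1
$ hadoop fs -ls /home/hadoop/data/flume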
----------------------------------------------------
http://www.nextree.co.kr/p2704/
Tuesday, June 25, 2013
Install - Memcached
1. libevent
wget http://www.monkey.org/~provos/libevent-1.3a.tar.gz
tar xvfz libevent-1.3a.tar.gz
cd libevent-1.3a
./configure --prefix=/usr/local/libevent
make
make install
2. memcached
wget http://www.danga.com/memcached/dist/memcached-1.2.1.tar.gz
tar xvfz memcached-1.2.1.tar.gz
cd memcached-1.2.1
./configure --prefix=/usr/local/memcached-1.2.1 --with-libevent=/usr/local/libevent
make
make install
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/libevent/lib
echo "/usr/local/lib" >>/etc/ld.so.conf
echo "/usr/local/libevent/lib" >>/etc/ld.so.conf
/sbin/ldconfig
memcached -d -m 1024 -l 127.0.0.1 -p 11211 -u root
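Once the daemon is up, a quick sanity check of the running instance (assuming nc is installed):
$ printf 'stats\r\nquit\r\n' | nc 127.0.0.1 11211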
# To build with replication support (repcached-patched source), configure like this:
./configure --enable-replication --prefix=/usr/local/memcached-1.2.8 --with-libevent=/usr/local/libevent
MyStory - I went to Tokyo to work in Japan.
I worked as a programmer in Korea, but the IT bubble had burst.
I was 29 years old and my job was gone, so I was worried about my future.
At that time, I thought, "Maybe I should move to another company?"
Or should I start a business offering some kind of online service?
I hesitated to make a decision.
At home, I studied computer programming and English. (2002)
But it was difficult to make steady progress,
because of my nephew.
While I was studying, he would interrupt me.
For example, he would cry in front of me and loudly knock on the door.
So my parents and I had some problems at home.
I wanted to move out of the house, but I couldn't afford to live on my own;
I simply didn't have enough money to rent a place.
Meanwhile, I was working a part-time job (as a part-time lecturer at a college).
While I was working, I saw a web page from a government educational institution recruiting software engineers to work abroad, and I decided to apply.
I needed money, and I would have to take classes in Java and Japanese.
But I didn't worry about any of that and applied anyway.
For 10 months, I studied advanced Java, Oracle, and Japanese.
It was particularly difficult to study Japanese.
Honestly, I wondered, "Can I do this?"
Sometimes I felt frustrated and despaired.
But I kept going, even though I was very tired of studying.
One classroom had about 25 students.
I took eight exams, but I failed half of them.
Still, I didn't get discouraged, and I kept trying.
In the end, I didn't pass the exam.
However, I passed the interview for a job at the company.
And so I was going to Japan to work.
Since it was my first time going abroad,
I was very nervous.
I was afraid that an accident might happen while I was in Japan.
So I wasn't at all calm. I boarded a plane in February 2003.
At last, I arrived at my destination, Narita Airport.
Monday, June 24, 2013
Mysql - Install mysql5.5
cmake install - manual
--------------
./bootstrap
make
make install
--------------
mysql5.5 install
-------------
$ yum groupinstall "Development Tools"
$ yum install ncurses-devel
$ yum install cmake
$ cmake . \
-DCMAKE_INSTALL_PREFIX=/usr/local/mysql \
-DMYSQL_DATADIR=/usr/local/mysql/data \
-DSYSCONFDIR=/etc \
-DWITH_ARCHIVE_STORAGE_ENGINE=1 \
-DWITH_BLACKHOLE_STORAGE_ENGINE=1 \
-DWITH_FEDERATED_STORAGE_ENGINE=1 \
-DWITH_PARTITION_STORAGE_ENGINE=1 \
-DDEFAULT_CHARSET=utf8 \
-DDEFAULT_COLLATION=utf8_general_ci \
-DENABLED_LOCAL_INFILE=1 \
-DENABLED_PROFILING=1 \
-DMYSQL_TCP_PORT=3306 \
-DMYSQL_UNIX_ADDR=/tmp/mysql.sock \
-DWITH_DEBUG=1 \
-DWITH_EMBEDDED_SERVER=1
$ make
$ make install
$ useradd mysql
$ cd /usr/local/src/mysql-5.5.38
$ chmod 755 scripts/mysql_install_db
$ scripts/mysql_install_db --user=mysql --basedir=/usr/local/mysql --datadir=/usr/local/mysql/data
@ Start and Stop
$ cp /usr/local/mysql/support-files/mysql.server /etc/init.d/
@ Configuration
$ cp /usr/local/mysql/support-files/my-medium.cnf /etc/my.cnf
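After that, the server can be started and tested roughly like this (a sketch; it assumes the init script copied above is made executable):
$ chmod 755 /etc/init.d/mysql.server
$ /etc/init.d/mysql.server start
$ /usr/local/mysql/bin/mysql -u root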
For Mac, see http://hoyanet.pe.kr/1942
Hadoop - Searching HDFS files for keywords
import java.io.BufferedReader;
import java.io.InputStreamReader;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// logger is assumed to be a logging field (e.g. SLF4J) on the enclosing class.

/**
 * Search the Hadoop file system for lines that contain every search word.
 * @param path a directory in HDFS, for example [/home/hadoop/data/flume]
 * @param searchWord space-separated key words, for example [test1 test2]
 * @return the matching lines, concatenated
 */
public String readLogsPlural(String path, String searchWord) throws Exception {
    String findStr;
    StringBuffer sb = new StringBuffer();
    String[] multiWord = searchWord.split(" ");

    Configuration conf = new Configuration();
    conf.set("fs.default.name", "hdfs://172.xx.xxx.xxx:9000");
    FileSystem dfs = FileSystem.get(conf);

    // List everything under the given HDFS path (assumed to contain only files).
    FileStatus[] status = dfs.listStatus(new Path(path));
    for (int i = 0; i < status.length; i++) {
        FSDataInputStream fsIn = dfs.open(status[i].getPath());
        BufferedReader br = new BufferedReader(new InputStreamReader(fsIn));
        try {
            String line;
            while ((line = br.readLine()) != null) {
                // Keep a line only if it matches every search word;
                // leave the inner loop on the first word that misses.
                for (int n = 0; n < multiWord.length; n++) {
                    logger.info("multiWord[" + n + "] >>> " + multiWord[n]);
                    findStr = ".*" + multiWord[n] + ".*";
                    if (line.matches(findStr)) {
                        if ((multiWord.length - 1) == n) {
                            logger.info("last n >>> " + n);
                            sb.append(line);
                        }
                    } else {
                        break;
                    }
                }
            }
        } finally {
            br.close();
        }
    }
    return sb.toString();
}
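For a quick check from the command line, roughly the same search can be done with the HDFS shell. The path and words below are just the examples from the Javadoc; a line must contain every word to pass both greps:
$ hadoop fs -cat /home/hadoop/data/flume/*/* | grep "test1" | grep "test2"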
Monday, June 17, 2013
Link - Collection
@iPhone Emoji
http://www.easyapns.com/category/just-for-fun
@Best Site(How to install)
http://xmodulo.com/
@UML
http://www.objectaid.com/home
@Data Compression
https://code.google.com/p/snappy/
@Monitor
https://github.com/Netflix/servo/
@Format JSON
http://jsonformatter.curiousconcept.com/
@Using sequences in MySQL
http://bryan7.tistory.com/101
@C++ Tutorial
http://www.soen.kr/
@MySQL with cache (HandlerSocket)
https://github.com/ahiguti/HandlerSocket-Plugin-for-MySQL/blob/master/docs-en/installation.en.txt
@Oracle Function List
http://jhbench.tistory.com/27
@Java Sample
http://kodejava.org/
Tuesday, June 4, 2013
Hadoop - Remove node
@1. Get the IP/host list by running the "report" command
$ $HADOOP_HOME/bin/hadoop dfsadmin -report | grep Name
@2. Add the node's IP:Port to the following file.
@$HADOOP_HOME/conf/excludes
00.xx.xxx.001:50010
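Note: -refreshNodes only reads the excludes file if it is registered in the cluster configuration beforehand; a sketch, assuming the file lives at $HADOOP_HOME/conf/excludes (put its absolute path in the value):
<!-- conf/hdfs-site.xml -->
<property>
<name>dfs.hosts.exclude</name>
<value>/home/hadoop/hadoop/conf/excludes</value>
</property>
<!-- conf/mapred-site.xml, for the MapReduce step below -->
<property>
<name>mapred.hosts.exclude</name>
<value>/home/hadoop/hadoop/conf/excludes</value>
</property>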
@3. Run the refresh command:
$ $HADOOP_HOME/bin/hadoop dfsadmin -refreshNodes
@4. Verification
$ $HADOOP_HOME/bin/hadoop dfsadmin -report | grep -Eiw 'Name|Decommission'
@5. Do the same for MapReduce
@If the exclude file is registered for the JobTracker as well, run this command:
$ $HADOOP_HOME/bin/hadoop mradmin -refreshNodes
http://pearlin.info/2012/04/best-way-to-blacklist-node-from-live-hadoop-cluster/
Linux - Commands
@Register a user in a group
#/usr/sbin/usermod -g groupname username
$ /usr/sbin/usermod -g hadoop hadoop
@Find files that contain a string
find . -exec grep -l "Contents of directory" {} \; 2>/dev/null
@Find files
find . -name "*Status*" -print
@Make ssh keys
ssh-keygen -t dsa -> generates a DSA key pair
ssh-keygen -t rsa -> generates an RSA key pair
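For passwordless login (for example between Hadoop nodes), append the public key to authorized_keys; a sketch, assuming an RSA key was generated as above:
$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
$ chmod 600 ~/.ssh/authorized_keys
$ ssh localhost -> should log in without asking for a password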
Monday, June 3, 2013
Logback - Setting for Spring3.1.4
<properties>
<org.slf4j.version>1.7.5</org.slf4j.version>
<org.logback.version>1.0.13</org.logback.version>
</properties>
<!-- Logging -->
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>${org.slf4j.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>jcl-over-slf4j</artifactId>
<version>${org.slf4j.version}</version>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
<version>${org.logback.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>${org.logback.version}</version>
</dependency>
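Spring 3.1 also pulls in commons-logging transitively, so jcl-over-slf4j can only take over cleanly if commons-logging is excluded; a sketch, assuming spring-context is among your Spring dependencies:
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-context</artifactId>
<version>3.1.4.RELEASE</version>
<exclusions>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
</exclusions>
</dependency>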
#logback.xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<appender name="HADOOP_FLUME" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${user.dir}/logs/flumeAdmin.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- daily rollover -->
<fileNamePattern>${user.dir}/logs/flumeAdmin.log.%d{yyyy-MM-dd}.log.zip</fileNamePattern>
<!-- keep 90 days' worth of history -->
<maxHistory>90</maxHistory>
</rollingPolicy>
<encoder class="ch.qos.logback.core.encoder.LayoutWrappingEncoder">
<charset>UTF-8</charset>
<layout class="ch.qos.logback.classic.PatternLayout">
<pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{35} - %msg%n</pattern>
</layout>
</encoder>
</appender>
<root>
<level value="info" />
<appender-ref ref="HADOOP_FLUME" />
</root>
</configuration>