author     Patrick Simianer <p@simianer.de>    2014-06-14 16:46:27 +0200
committer  Patrick Simianer <p@simianer.de>    2014-06-14 16:46:27 +0200
commit     26c490f404731d053a6205719b6246502c07b449 (patch)
tree       3aa721098f1251dfbf2249ecd2736434c13b1d48 /hadoop/streaming
init
Diffstat (limited to 'hadoop/streaming')
-rw-r--r--  hadoop/streaming/mapper_test.input        10
-rw-r--r--  hadoop/streaming/mapper_test.py            9
-rwxr-xr-x  hadoop/streaming/mapper_test.sh           23
-rw-r--r--  hadoop/streaming/mapper_test1.py           9
-rwxr-xr-x  hadoop/streaming/mapper_test1.sh          24
-rw-r--r--  hadoop/streaming/no_reducer.input          8
-rwxr-xr-x  hadoop/streaming/no_reducer.rb             9
-rwxr-xr-x  hadoop/streaming/no_reducer.sh            23
-rw-r--r--  hadoop/streaming/partitioner_test.input    8
-rwxr-xr-x  hadoop/streaming/partitioner_test.sh      22
-rwxr-xr-x  hadoop/streaming/partitioner_test1.sh     27
-rw-r--r--  hadoop/streaming/secondary_sort.input      8
-rwxr-xr-x  hadoop/streaming/secondary_sort.sh        30
-rw-r--r--  hadoop/streaming/test.input               10
-rwxr-xr-x  hadoop/streaming/test.sh                  23
15 files changed, 243 insertions, 0 deletions
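All of the scripts listed above submit jobs through the Hadoop Streaming jar, and streaming mappers only read stdin and write stdout, so the map step can be smoke-tested without a cluster. A minimal sketch, assuming python2 and ruby are available locally (neither is provided by this commit) and the commands are run from hadoop/streaming:

    # approximate "map | shuffle sort" for the jobs that use the IdentityReducer
    cat mapper_test.input | python2 mapper_test.py | sort
    cat mapper_test.input | python2 mapper_test1.py | sort

    # the -reducer NONE job (no_reducer.sh) writes map output as-is, so no sort step
    cat no_reducer.input | ruby no_reducer.rb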
diff --git a/hadoop/streaming/mapper_test.input b/hadoop/streaming/mapper_test.input
new file mode 100644
index 0000000..338fd87
--- /dev/null
+++ b/hadoop/streaming/mapper_test.input
@@ -0,0 +1,10 @@
+0 1 a c
+0 2 b c
+0 3 c c
+1 4 a c
+1 5 b c
+1 6 c c
+1 7 d c
+2 8 a c
+2 9 b c
+2 10 c c
diff --git a/hadoop/streaming/mapper_test.py b/hadoop/streaming/mapper_test.py
new file mode 100644
index 0000000..d358bda
--- /dev/null
+++ b/hadoop/streaming/mapper_test.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python2
+
+import sys
+
+
+if __name__ == "__main__":
+    for line in sys.stdin:
+        print line.upper()
+
diff --git a/hadoop/streaming/mapper_test.sh b/hadoop/streaming/mapper_test.sh
new file mode 100755
index 0000000..4f6e013
--- /dev/null
+++ b/hadoop/streaming/mapper_test.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+hadoop dfs -put mapper_test.input mapper_test.input
+
+HADOOP_HOME=/usr/lib/hadoop
+HADOOP_VERSION=0.20.2-cdh3u1
+JAR=contrib/streaming/hadoop-streaming-$HADOOP_VERSION.jar
+HSTREAMING="$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/$JAR"
+
+OUT=mapper_test_out
+
+$HSTREAMING \
+    -input mapper_test.input \
+    -output $OUT \
+    -mapper "python mapper_test.py" \
+    -file mapper_test.py \
+    -reducer org.apache.hadoop.mapred.lib.IdentityReducer \
+    -jobconf mapred.reduce.tasks=3
+
+hadoop dfs -get $OUT .
+hadoop dfs -rmr $OUT
+hadoop dfs -rm mapper_test.input
+
diff --git a/hadoop/streaming/mapper_test1.py b/hadoop/streaming/mapper_test1.py
new file mode 100644
index 0000000..79c8aa6
--- /dev/null
+++ b/hadoop/streaming/mapper_test1.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python2
+
+import sys
+
+
+if __name__ == "__main__":
+    for line in sys.stdin:
+        print "MYKEY\t%s" % line.upper()
+
diff --git a/hadoop/streaming/mapper_test1.sh b/hadoop/streaming/mapper_test1.sh
new file mode 100755
index 0000000..80611dc
--- /dev/null
+++ b/hadoop/streaming/mapper_test1.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+
+hadoop dfs -put mapper_test.input mapper_test.input
+
+HADOOP_HOME=/usr/lib/hadoop
+HADOOP_VERSION=0.20.2-cdh3u1
+JAR=contrib/streaming/hadoop-streaming-$HADOOP_VERSION.jar
+HSTREAMING="$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/$JAR"
+
+OUT=mapper_test_out
+
+$HSTREAMING \
+    -input mapper_test.input \
+    -output $OUT \
+    -mapper "python mapper_test1.py" \
+    -file mapper_test1.py \
+    -reducer org.apache.hadoop.mapred.lib.IdentityReducer \
+    -jobconf mapred.reduce.tasks=3
+
+hadoop dfs -get $OUT .
+hadoop dfs -rmr $OUT
+hadoop dfs -rm mapper_test.input
+
diff --git a/hadoop/streaming/no_reducer.input b/hadoop/streaming/no_reducer.input
new file mode 100644
index 0000000..71ac1b5
--- /dev/null
+++ b/hadoop/streaming/no_reducer.input
@@ -0,0 +1,8 @@
+a
+b
+c
+d
+e
+f
+g
+h
diff --git a/hadoop/streaming/no_reducer.rb b/hadoop/streaming/no_reducer.rb
new file mode 100755
index 0000000..4410b93
--- /dev/null
+++ b/hadoop/streaming/no_reducer.rb
@@ -0,0 +1,9 @@
+#!/usr/bin/env ruby
+
+
+i = 0
+while l = STDIN.gets
+  puts "line #{i} (#{l.strip})"
+  i += 1
+end
+
diff --git a/hadoop/streaming/no_reducer.sh b/hadoop/streaming/no_reducer.sh
new file mode 100755
index 0000000..7267166
--- /dev/null
+++ b/hadoop/streaming/no_reducer.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+
+hadoop dfs -put no_reducer.input no_reducer.input
+
+HADOOP_HOME=/usr/lib/hadoop
+HADOOP_VERSION=0.20.2-cdh3u1
+JAR=contrib/streaming/hadoop-streaming-$HADOOP_VERSION.jar
+HSTREAMING="$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/$JAR"
+
+OUT=no_reducer_out
+
+$HSTREAMING \
+    -input no_reducer.input \
+    -output $OUT \
+    -mapper "no_reducer.rb" \
+    -file "no_reducer.rb" \
+    -reducer NONE
+
+hadoop dfs -get $OUT .
+hadoop dfs -rmr $OUT
+hadoop dfs -rm no_reducer.input
+
diff --git a/hadoop/streaming/partitioner_test.input b/hadoop/streaming/partitioner_test.input
new file mode 100644
index 0000000..06c6a28
--- /dev/null
+++ b/hadoop/streaming/partitioner_test.input
@@ -0,0 +1,8 @@
+1.1 a
+2.2 b
+3.1 c
+4.2 d
+4.1 e
+1.2 x
+2.1 y
+4.3 q
diff --git a/hadoop/streaming/partitioner_test.sh b/hadoop/streaming/partitioner_test.sh
new file mode 100755
index 0000000..dcc7353
--- /dev/null
+++ b/hadoop/streaming/partitioner_test.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+
+hadoop dfs -put partitioner_test.input partitioner_test.input
+
+HADOOP_HOME=/usr/lib/hadoop
+HADOOP_VERSION=0.20.2-cdh3u1
+JAR=contrib/streaming/hadoop-streaming-$HADOOP_VERSION.jar
+HSTREAMING="$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/$JAR"
+
+OUT=partitioner_out
+
+$HSTREAMING \
+    -input partitioner_test.input \
+    -output $OUT \
+    -mapper /bin/cat \
+    -jobconf mapred.reduce.tasks=2
+
+hadoop dfs -get $OUT .
+hadoop dfs -rmr $OUT
+hadoop dfs -rm partitioner_test.input
+
diff --git a/hadoop/streaming/partitioner_test1.sh b/hadoop/streaming/partitioner_test1.sh
new file mode 100755
index 0000000..6e6344c
--- /dev/null
+++ b/hadoop/streaming/partitioner_test1.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+
+hadoop dfs -put partitioner_test.input partitioner_test.input
+
+HADOOP_HOME=/usr/lib/hadoop
+HADOOP_VERSION=0.20.2-cdh3u1
+JAR=contrib/streaming/hadoop-streaming-$HADOOP_VERSION.jar
+HSTREAMING="$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/$JAR"
+
+OUT=partitioner1_out
+
+$HSTREAMING \
+    -input partitioner_test.input \
+    -output $OUT \
+    -mapper /bin/cat \
+    -jobconf mapred.reduce.tasks=2 \
+    -partitioner org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner \
+    -jobconf stream.map.output.field.separator="\t" \
+    -jobconf map.output.key.field.separator=. \
+    -jobconf stream.num.map.output.key.fields=2 \
+    -jobconf mapred.text.key.partitioner.options=-k1
+
+hadoop dfs -get $OUT .
+hadoop dfs -rmr $OUT
+hadoop dfs -rm partitioner_test.input
+
diff --git a/hadoop/streaming/secondary_sort.input b/hadoop/streaming/secondary_sort.input
new file mode 100644
index 0000000..5aa7ec9
--- /dev/null
+++ b/hadoop/streaming/secondary_sort.input
@@ -0,0 +1,8 @@
+0-*-1 a:1 a:2 a:3
+0-*-2 a:1 a:2 a:3
+0-*-10 a:1 a:2 a:3
+1-*-2 a:1 a:2 a:3
+2-*-0 a:1 a:2 a:3
+2-*-2 a:1 a:2 a:3
+3-*-3 a:1 a:2 a:3
+10-*-0 a:1 a:2 a:3
diff --git a/hadoop/streaming/secondary_sort.sh b/hadoop/streaming/secondary_sort.sh
new file mode 100755
index 0000000..7fa0c6d
--- /dev/null
+++ b/hadoop/streaming/secondary_sort.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+
+hadoop dfs -put secondary_sort.input secondary_sort.input
+
+HADOOP_HOME=/usr/lib/hadoop
+HADOOP_VERSION=0.20.2-cdh3u1
+JAR=contrib/streaming/hadoop-streaming-$HADOOP_VERSION.jar
+HSTREAMING="$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/$JAR"
+
+OUT=comp_out
+
+$HSTREAMING \
+    -input secondary_sort.input \
+    -output $OUT \
+    -partitioner org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner \
+    -jobconf map.output.key.field.separator="-*-" \
+    -jobconf mapred.text.key.partitioner.options="-k1,1n" \
+    -mapper /bin/cat \
+    -reducer org.apache.hadoop.mapred.lib.IdentityReducer \
+    -jobconf mapred.output.key.comparator.class=org.apache.hadoop.mapred.lib.KeyFieldBasedComparator \
+    -jobconf stream.num.map.output.key.fields=2 \
+    -jobconf stream.map.output.field.separator="\t" \
+    -jobconf mapred.text.key.comparator.options="-k1,1n -k2,2nr" \
+    -jobconf mapred.reduce.tasks=3
+
+hadoop dfs -get $OUT .
+hadoop dfs -rmr $OUT
+hadoop dfs -rm secondary_sort.input
+
diff --git a/hadoop/streaming/test.input b/hadoop/streaming/test.input
new file mode 100644
index 0000000..338fd87
--- /dev/null
+++ b/hadoop/streaming/test.input
@@ -0,0 +1,10 @@
+0 1 a c
+0 2 b c
+0 3 c c
+1 4 a c
+1 5 b c
+1 6 c c
+1 7 d c
+2 8 a c
+2 9 b c
+2 10 c c
diff --git a/hadoop/streaming/test.sh b/hadoop/streaming/test.sh
new file mode 100755
index 0000000..3cb47d5
--- /dev/null
+++ b/hadoop/streaming/test.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+
+hadoop dfs -put test.input test.input
+
+HADOOP_HOME=/usr/lib/hadoop
+HADOOP_VERSION=0.20.2-cdh3u1
+JAR=contrib/streaming/hadoop-streaming-$HADOOP_VERSION.jar
+HSTREAMING="$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/$JAR"
+
+OUT=test_out
+
+$HSTREAMING \
+    -input test.input \
+    -output $OUT \
+    -mapper /bin/cat \
+    -jobconf "mapred.reduce.tasks=3" \
+    -reducer org.apache.hadoop.mapred.lib.IdentityReducer
+
+hadoop dfs -get $OUT .
+hadoop dfs -rmr $OUT
+hadoop dfs -rm test.input
+
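The sort order requested by secondary_sort.sh and the key-based grouping in the partitioner tests can be previewed locally with GNU sort. This is only a rough stand-in: sort(1) cannot use the multi-character key separator "-*-", so splitting on '-' (which makes the second numeric component field 3) is an assumption of this sketch, not something the scripts above do.

    # rough preview of KeyFieldBasedComparator with "-k1,1n -k2,2nr":
    # first key component ascending numerically, second descending numerically
    sort -t '-' -k1,1n -k3,3nr secondary_sort.input

    # illustrative grouping for partitioner_test.input: lines that share the first
    # '.'-separated key component sort together, which is the grouping a partitioner
    # restricted to the first key field would send to the same reducer
    sort -t '.' -k1,1n partitioner_test.input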