Write a simple log-cleaning script. The raw access log looks like this:
[html]
192.168.18.1 - - [16/Feb/2017:13:53:49 +0800] "GET /favicon.ico HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/运动鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/运动鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/运动鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/运动鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/运动鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/运动鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/运动鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/运动鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/运动鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/运动鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/运动鞋/a001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/运动鞋/a007 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/运动鞋/a003 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/运动鞋/a003 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/皮鞋/b001 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/皮鞋/b002 HTTP/1.1" 404 288
192.168.18.2 - - [16/Feb/2017:13:53:49 +0800] "GET /鞋子/男鞋/皮鞋/b003 HTTP/1.1" 404 288
1. After preparing the sample data in the format above, upload the raw data into the /user/hadoop/name directory (a sketch of the HDFS commands is shown below);
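A minimal sketch of this upload step, assuming the raw log has been saved to a local file named access.log (a hypothetical file name) and that the hadoop client is on the PATH:
[html]
hadoop fs -mkdir /user/hadoop/name
hadoop fs -put access.log /user/hadoop/name/
hadoop fs -ls /user/hadoop/name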
2. Create the Java data-cleaning program:
vim Namecount.java
[html]
import java.lang.String;
import java.io.IOException;
import java.util.*;
import java.text.SimpleDateFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.io.NullWritable;

public class Namecount {
    public static final SimpleDateFormat FORMAT = new SimpleDateFormat("d/MMM/yyyy:HH:mm:ss", Locale.ENGLISH); // original time format
    public static final SimpleDateFormat dateformat1 = new SimpleDateFormat("yyyy-MM-dd"); // target time format

    private Date parseDateFormat(String string) { // convert the time format
        Date parse = null;
        try {
            parse = FORMAT.parse(string);
        } catch (Exception e) {
            e.printStackTrace();
        }
        return parse;
    }

    public String[] parse(String line) {
        String ip = parseIP(line);            // ip
        String time = parseTime(line);        // time
        String url = parseURL(line);          // url
        String status = parseStatus(line);    // status code
        String traffic = parseTraffic(line);  // traffic (bytes)
        return new String[] { ip, time, url, status, traffic };
    }

    private String parseTraffic(String line) { // traffic (bytes)
        final String trim = line.substring(line.lastIndexOf("\"") + 1).trim();
        String traffic = trim.split(" ")[1];
        return traffic;
    }

    private String parseStatus(String line) { // status code
        final String trim = line.substring(line.lastIndexOf("\"") + 1).trim();
        String status = trim.split(" ")[0];
        return status;
    }

    private String parseURL(String line) { // url
        final int first = line.indexOf("\"");
        final int last = line.lastIndexOf("\"");
        String url = line.substring(first + 1, last);
        return url;
    }

    private String parseTime(String line) { // time
        final int first = line.indexOf("[");
        final int last = line.indexOf("+0800]");
        String time = line.substring(first + 1, last).trim();
        Date date = parseDateFormat(time);
        return dateformat1.format(date);
    }

    private String parseIP(String line) { // ip
        String ip = line.split("- -")[0].trim();
        return ip;
    }

    public static class Map extends
            Mapper<LongWritable, Text, Text, IntWritable> {
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Convert the input plain-text line into a String
            Text outputValue = new Text();
            String line = value.toString();
            Namecount aa = new Namecount();
            StringTokenizer tokenizerArticle = new StringTokenizer(line, "\n");
            // Process each line in turn
            while (tokenizerArticle.hasMoreElements()) {
                // Parse the line into ip / date / url / status / traffic
                String stra = tokenizerArticle.nextToken().toString();
                String[] Newstr = aa.parse(stra);
                if (Newstr[2].startsWith("GET /")) { // strip the leading request method
                    Newstr[2] = Newstr[2].substring("GET /".length());
                } else if (Newstr[2].startsWith("POST /")) {
                    Newstr[2] = Newstr[2].substring("POST /".length());
                }
                if (Newstr[2].endsWith(" HTTP/1.1")) { // strip the trailing protocol version
                    Newstr[2] = Newstr[2].substring(0, Newstr[2].length()
                            - " HTTP/1.1".length());
                }
                String[] words = Newstr[2].split("/");
                if (words.length == 4) {
                    outputValue.set(Newstr[0] + "\t" + Newstr[1] + "\t" + words[0] + "\t"
                            + words[1] + "\t" + words[2] + "\t" + words[3] + "\t" + "0");
                    context.write(outputValue, new IntWritable(1));
                }
            }
        }
    }

    public static class Reduce extends
            Reducer<Text, IntWritable, Text, IntWritable> {
        // The reduce function: sum the counts for each key
        public void reduce(Text key, Iterable<IntWritable> values,
                Context context) throws IOException, InterruptedException {
            int sum = 0;
            Iterator<IntWritable> iterator = values.iterator();
            while (iterator.hasNext()) {
                sum += iterator.next().get();
            }
            context.write(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("mapred.jar", "Namecount.jar");
        String[] ioArgs = new String[] { "name", "name_out" };
        String[] otherArgs = new GenericOptionsParser(conf, ioArgs).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: Namecount <in> <out>");
            System.exit(2);
        }
        Job job = new Job(conf, "name_goods_count");
        job.setJarByClass(Namecount.class);
        // Set the Mapper, Combiner and Reducer classes
        job.setMapperClass(Map.class);
        job.setCombinerClass(Reduce.class);
        job.setReducerClass(Reduce.class);
        // Set the output key/value types
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        // Split the input data into splits and provide a RecordReader implementation
        job.setInputFormatClass(TextInputFormat.class);
        // Provide a RecordWriter implementation for writing the output
        job.setOutputFormatClass(TextOutputFormat.class);
        // Set the input and output directories
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
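For orientation, here is how the job transforms the sample log above (a walk-through of the code, not output from an actual run): parse() extracts the ip, date, url, status and traffic fields; the mapper strips the "GET /" prefix and " HTTP/1.1" suffix from the URL, keeps only paths that split into exactly four segments (so the favicon.ico request is dropped), and emits a tab-separated key ending in the constant weight 0 with a value of 1; the Reduce class, also used as the combiner, sums those values per key. The eleven identical a001 requests in the sample would therefore collapse into a single record like (fields tab-separated):
[html]
192.168.18.2  2017-02-16  鞋子  男鞋  运动鞋  a001  0  11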
3. Compile and run
[html]
[hadoop@h85 mr]$ /usr/jdk1.7.0_25/bin/javac Namecount.java
[hadoop@h85 mr]$ /usr/jdk1.7.0_25/bin/jar cvf Namecount.jar Namecount*class
[hadoop@h85 mr]$ hadoop jar Namecount.jar Namecount
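If javac cannot resolve the Hadoop classes with the bare invocation above, the Hadoop jars presumably need to be added to the compile classpath; one common way, assuming the hadoop command is on the PATH:
[html]
[hadoop@h85 mr]$ /usr/jdk1.7.0_25/bin/javac -cp .:$(hadoop classpath) Namecount.java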
The output is saved in /user/hadoop/name_out/part-r-00000.
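To sanity-check the cleaned records before the Hive step, the output directory can be inspected with the usual HDFS shell commands, for example:
[html]
[hadoop@h85 mr]$ hadoop fs -ls /user/hadoop/name_out
[hadoop@h85 mr]$ hadoop fs -cat /user/hadoop/name_out/part-r-00000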
4. Create a table in Hive with the corresponding fields:
[html]
e.g.: ip string, acc_date string, wp string (top-level category), sex string (men's/women's shoes), type string (shoe type), nid string (product id), quanzhong int (weight), count int
[html]
e.g.: 192.168.18.2  2017-02-16  鞋子  男鞋  运动鞋  a001  0  13 (fields tab-separated, with the yyyy-MM-dd date produced by the job)
Create the table:
[html]
create table acc_log(ip string,acc_date string,wp string,sex string,type string,nid string,quanzhong int,count int) row format delimited fields terminated by '\t';
Load the data:
[html]
load data inpath '/user/hadoop/name_out/part-r-00000' into table acc_log;
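Once the load finishes, a quick query can confirm that the rows landed and that simple aggregations work; a sketch using only the table and columns defined above (the count column is back-quoted because it clashes with the built-in function name):
[html]
select * from acc_log limit 5;
select nid, sum(`count`) as total from acc_log group by nid order by total desc;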