Join Patterns

Reduce-Side Join

Use Cases

Multiple datasets are joined on a foreign key. The variants covered here are the inner join, left outer join, right outer join, full outer join, and anti join (full outer join minus inner join).
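
As a quick illustration, suppose the two datasets contain the following hypothetical records (only userId and reputation actually appear in the code below; the other fields are invented for the example):

user:    {"userId":"1","reputation":2500}   {"userId":"2","reputation":3000}
comment: {"userId":"1","text":"nice post"}  {"userId":"3","text":"thanks"}

Joined on userId: the inner join pairs user 1 with comment 1 only; the left outer join also emits user 2 with an empty value; the right outer join also emits comment 3 with an empty value; the full outer join emits all three combinations; and the anti join emits only the unmatched records, i.e. user 2 and comment 3.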

Code Implementation

import com.alibaba.fastjson.JSONObject;
import filtering.BloomFilterUtil;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.MultipleInputs;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.bloom.BloomFilter;
import org.apache.hadoop.util.bloom.Key;
import org.apache.hadoop.util.hash.Hash;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

/**
 * @Author bluesnail95
 * @Date 2019/7/27 9:20
 * @Description Reduce-side join pattern (inner join, outer join (left, right, full), anti join)
 */
public class ReduceMain {

    //Note: this Bloom filter is a static field shared by the two mapper classes. That only works when
    //all map tasks run in the same JVM (e.g. the local runner) and the user splits happen to be processed
    //before the comment splits; on a real cluster every map task has its own copy, so the filter would
    //normally be built in a separate job and shipped to the tasks via the distributed cache.
    private static int vectorSize = BloomFilterUtil.getOptimalBloomFilterSize(10,0.1f);
    private static int nbHash = BloomFilterUtil.getOptimalK(10,vectorSize);
    private static BloomFilter bloomFilter = new BloomFilter(vectorSize, nbHash, Hash.MURMUR_HASH);

    public static class UserJoinMapper extends Mapper<Object,Text,Text,Text> {

        public void map(Object key, Text value, Context context) {
            JSONObject valueJson = JSONObject.parseObject(value.toString());
            String userId = valueJson.getString("userId");
            valueJson.put("type","U");
            try {
                Integer reputation = valueJson.getInteger("reputation");
                //Only emit users whose reputation is above 2000 and remember their userId in the Bloom filter.
                if(reputation != null && reputation > 2000) {
                    bloomFilter.add(new Key(userId.getBytes()));
                    context.write(new Text(userId),new Text(valueJson.toString()));
                }
            } catch (IOException e) {
                e.printStackTrace();
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }

    }

    public static class CommentJoinMapper extends Mapper<Object,Text,Text,Text> {

        public void map(Object key, Text value, Context context) {
            JSONObject valueJson = JSONObject.parseObject(value.toString());
            String userId = valueJson.getString("userId");
            valueJson.put("type","C");
            try {
                //The Bloom filter may report false positives, but it cheaply skips comments whose
                //userId cannot belong to a user with reputation > 2000.
                if(bloomFilter.membershipTest(new Key(userId.getBytes()))) {
                    context.write(new Text(userId),new Text(valueJson.toString()));
                }
            } catch (IOException e) {
                e.printStackTrace();
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    }

    public static class UserCommentReducer extends Reducer<Text,Text,Text,Text> {

        private Text empty = new Text(new JSONObject().toString());

        public void reduce(Text key,Iterable<Text> values,Context context) {
            List<Text> users = new ArrayList<Text>();
            List<Text> comments = new ArrayList<Text>();
            for (Text value:values) {
                JSONObject valueJson = JSONObject.parseObject(value.toString());
                String type = valueJson.getString("type");
                //Copy the Text object: Hadoop reuses the same instance for every value in the iterator.
                if(StringUtils.isNotBlank(type) && "U".equalsIgnoreCase(type)) {
                    users.add(new Text(value));
                }else if(StringUtils.isNotBlank(type) && "C".equalsIgnoreCase(type)) {
                    comments.add(new Text(value));
                }
            }

            //Dispatch to the configured join type
            String joinType = context.getConfiguration().get("join.type");
            if("innerjoin".equalsIgnoreCase(joinType)) {
                innerJoin(users,comments,context);
            }else if("leftjoin".equalsIgnoreCase(joinType)) {
                leftJoin(users,comments,context);
            }else if("rightjoin".equalsIgnoreCase(joinType)) {
                rightJoin(users,comments,context);
            }else if("outjoin".equalsIgnoreCase(joinType)) {
                outJoin(users,comments,context);
            }else if("antijoin".equalsIgnoreCase(joinType)) {
                antiJoin(users,comments,context);
            }
        }

        //Inner join: only emit keys that have at least one record on both sides
        public void innerJoin(List<Text> users,List<Text> comments,Context context) {
            if(null == users || users.size() == 0 || null == comments || comments.size() == 0) {
                return;
            }
            for (Text user:users) {
                for(Text comment:comments) {
                    try {
                        context.write(user,comment);
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        }

        public void leftJoin(List<Text> users,List<Text> comments,Context context) {
            if(null == users || users.size() == 0) {
                return;
            }
            for(Text user:users) {
                try {
                    if(null == comments || comments.size() == 0) {
                        context.write(user,empty);
                    }else{
                        for(Text comment:comments) {
                            context.write(user,comment);
                        }
                    }
                } catch (IOException e) {
                    e.printStackTrace();
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        }

        public void rightJoin(List<Text> users,List<Text> comments,Context context) {
            if(null == comments || comments.size() == 0) {
                return;
            }
            for(Text comment:comments) {
                try {
                    if(null == users || users.size() == 0) {
                        context.write(empty,comment);
                    }else{
                        for(Text user:users) {
                            context.write(user,comment);
                        }
                    }
                } catch (IOException e) {
                    e.printStackTrace();
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        }

        public void outJoin(List<Text> users,List<Text> comments,Context context) {
            try {
                if(null != users && users.size() > 0) {
                    for (Text user:users) {
                        if(null != comments && comments.size() > 0) {
                            for(Text comment:comments) {
                                context.write(user,comment);
                            }
                        }else{
                            context.write(user,empty);
                        }
                    }
                }else{
                    if(null != comments && comments.size() > 0) {
                        for(Text comment:comments) {
                            context.write(empty,comment);
                        }
                    }
                }
            } catch (IOException e) {
                e.printStackTrace();
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }

        public void antiJoin(List<Text> users,List<Text> comments,Context context) {
            try {
                //^ is XOR: true when exactly one of users/comments is empty, false when both are empty or both are non-empty.
                if(users.isEmpty() ^ comments.isEmpty()) {
                    if(users.isEmpty()) {
                        for (Text comment:comments) {
                            context.write(empty,comment);
                        }
                    }
                    if(comments.isEmpty()) {
                        for(Text user:users) {
                            context.write(user,empty);
                        }
                    }
                }
            } catch (IOException e) {
                e.printStackTrace();
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("join.type",args[0]);
        try {
            Job job = Job.getInstance(conf,"reduce");
            MultipleInputs.addInputPath(job,new Path(args[1]), TextInputFormat.class,UserJoinMapper.class);
            MultipleInputs.addInputPath(job,new Path(args[2]), TextInputFormat.class,CommentJoinMapper.class);
            job.setReducerClass(UserCommentReducer.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(Text.class);
            FileUtil.fullyDelete(new File(args[3]));
            FileOutputFormat.setOutputPath(job, new Path(args[3]));
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

Code Explanation

This code contains two mappers: UserJoinMapper processes the user data and CommentJoinMapper processes the comment data; both emit (userId, user/comment record) pairs, with a "type" field ("U" or "C") added so the reducer can tell the two sides apart. UserJoinMapper only emits users whose reputation is above 2000 and records their userIds in a Bloom filter, which CommentJoinMapper uses to skip comments that cannot belong to a qualifying user. Because of this filter, the right/full outer and anti joins are computed over the filtered data only. UserCommentReducer groups the records by userId, splits them into user and comment lists by type, and then performs the inner / left outer / right outer / full outer / anti join selected by the join.type configuration. The driver takes four arguments: the join type (innerjoin, leftjoin, rightjoin, outjoin or antijoin), the user input path, the comment input path, and the output path.
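
The helper class filtering.BloomFilterUtil is referenced but not shown in the post. A minimal sketch, assuming it simply wraps the standard Bloom filter sizing formulas (bit-vector size m = -n * ln(p) / (ln 2)^2 for n elements and false-positive rate p, and k = (m / n) * ln 2 hash functions), might look like this:

package filtering;

public class BloomFilterUtil {

    //Optimal bit-vector size for numMembers expected elements and the desired false-positive rate:
    //m = -n * ln(p) / (ln 2)^2
    public static int getOptimalBloomFilterSize(int numMembers, float falsePositiveRate) {
        return (int) Math.ceil(-numMembers * Math.log(falsePositiveRate) / (Math.log(2) * Math.log(2)));
    }

    //Optimal number of hash functions for a vector of vectorSize bits and numMembers elements:
    //k = (m / n) * ln 2
    public static int getOptimalK(float numMembers, float vectorSize) {
        return (int) Math.round(vectorSize / numMembers * Math.log(2));
    }
}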

Replicated Join

Use Cases

Suited to inner joins and left outer joins, where every dataset except the largest one is small enough to be loaded into memory and replicated to each map task.

Code Implementation

import com.alibaba.fastjson.JSONObject;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.*;
import java.net.URI;
import java.net.URL;
import java.net.URLConnection;

/**
 * @Author bluesnail95
 * @Date 2019/7/27 17:11
 * @Description Replicated (map-side) join pattern (inner join, left outer join)
 */
public class ReplicateMain {

    public static class ReplicateMapper extends Mapper<Object, Text,Text,Text> {

        //In-memory copy of the replicated (small) dataset (the cached comment file), keyed by userId.
        private JSONObject userInfo = new JSONObject();
        private String joinType = "";
        private Text empty = new Text("");

        public void map(Object key,Text value,Context context) {
            JSONObject valueJson = JSONObject.parseObject(value.toString());
            String userId = valueJson.getString("userId");
            JSONObject joinInfo = userInfo.getJSONObject(userId);
            try {
                if(null != joinInfo) {
                    context.write(value,new Text(joinInfo.toString()));
                }else if("leftjoin".equalsIgnoreCase(joinType)) {
                    context.write(value,empty);
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        }

        public void setup(Context context) {
            try {
                URI uris[] = context.getCacheFiles();
                URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
                for(URI uri:uris) {
                    URL url = uri.toURL();
                    URLConnection connection = url.openConnection();
                    InputStream inputStream = connection.getInputStream();
                    BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream));
                    String line =  null;
                    while((line = reader.readLine()) != null) {
                        JSONObject lineJson = JSONObject.parseObject(line);
                        String userId = lineJson.getString("userId");
                        //Store the parsed record so getJSONObject() does not have to re-parse it on every map() call.
                        userInfo.put(userId,lineJson);
                    }
                }
                joinType = context.getConfiguration().get("join.type");
            } catch (Exception e) {
                e.printStackTrace();
            }
        }

    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("join.type",args[0]);
        try {
            Job job = Job.getInstance(conf,"replicate join");
            //Add the HDFS file to the job's distributed cache
            URI file = new URI("hdfs://127.0.0.1:9000/hadoop/1.txt");
            URI[] files = new URI[1];
            files[0] = file;
            job.setCacheFiles(files);
            job.setMapperClass(ReplicateMapper.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(Text.class);
            FileInputFormat.addInputPath(job,new Path(args[1]));
            FileUtil.fullyDelete(new File(args[2]));
            FileOutputFormat.setOutputPath(job, new Path(args[2]));
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

A few points to note:

(1) URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
Without this call, java.net.URL cannot handle the hdfs:// scheme and the job fails complaining that the protocol is unknown. (An alternative that reads the cached file through the Hadoop FileSystem API instead of java.net.URL is sketched after this list.)

(2) job.setCacheFiles(files);
This puts the HDFS file into the distributed cache so that every map task can read it in setup().

(3) In setup(), the comment data is read into a JSONObject keyed by userId; in map(), each streamed user record is joined with the cached comment record that has the same userId. For a left outer join, a user with no matching comment is emitted with an empty value. (Because the cache is a plain JSONObject keyed by userId, only one comment per user is retained; supporting several comments per user would require a list per key.)
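
For reference, here is what setup() could look like if the cached file were read through the Hadoop FileSystem API instead of java.net.URL, which removes the need for FsUrlStreamHandlerFactory. This is a sketch using the same field names as the mapper above (FileSystem and Path are already covered by the org.apache.hadoop.fs.* import), not the code the post actually ran:

        public void setup(Context context) {
            try {
                for (URI uri : context.getCacheFiles()) {
                    //Open the cached file directly with the Hadoop FileSystem API.
                    FileSystem fs = FileSystem.get(uri, context.getConfiguration());
                    BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(new Path(uri))));
                    String line;
                    while ((line = reader.readLine()) != null) {
                        JSONObject lineJson = JSONObject.parseObject(line);
                        userInfo.put(lineJson.getString("userId"), lineJson);
                    }
                    reader.close();
                }
                joinType = context.getConfiguration().get("join.type");
            } catch (Exception e) {
                e.printStackTrace();
            }
        }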

Preparation Before Running

Upload the comment file to HDFS:
hadoop fs -put <local file path> <HDFS directory>
hadoop fs -put E:/project/hadoop/reduce/input/comment/1.txt hdfs://127.0.0.1:9000/hadoop

Run Results

(1) innerjoin (inner join)

(2) leftjoin (left outer join)

Composite Join

Use Cases

Suited to inner joins and full outer joins. CompositeInputFormat additionally requires that every input be sorted by the foreign key and partitioned identically (the same number of partitions, produced with the same partitioner), so that matching keys end up in corresponding partitions.
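
For instance, with one sorted file per dataset and a space separating the key from the value (as configured in the driver below), the inputs could look like this (hypothetical records):

user file:
1 {"userId":"1","reputation":2500}
2 {"userId":"2","reputation":3000}

comment file:
1 {"userId":"1","text":"nice post"}
3 {"userId":"3","text":"thanks"}

With "inner" only userId 1 appears in the output; with "outer" userIds 2 and 3 are emitted as well, with an empty slot for the missing side.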

Code Implementation

import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.mapred.join.CompositeInputFormat;
import org.apache.hadoop.mapred.join.TupleWritable;

import java.io.File;
import java.io.IOException;

/**
 * @Author bluesnail95
 * @Date 2019/7/27 19:12
 * @Description Composite (map-side merge) join pattern
 */
public class CompositeMain {

    public static class CompositeMapper extends MapReduceBase implements Mapper<Text, TupleWritable,Text, TupleWritable> {


        public void map(Text text, TupleWritable writables, OutputCollector<Text, TupleWritable> outputCollector, Reporter reporter) throws IOException {
            outputCollector.collect(text,writables);
        }
    }

    public static void main(String[] args) {
        Path userPath = new Path(args[0]);
        Path commentPath = new Path(args[1]);
        Path output = new Path(args[2]);
        try {
            JobConf jobconf = new JobConf();
            jobconf.setJarByClass(CompositeMain.class);
            jobconf.setMapperClass(CompositeMapper.class);
            jobconf.setNumReduceTasks(0);
            jobconf.setOutputKeyClass(Text.class);
            jobconf.setOutputValueClass(TupleWritable.class);
            jobconf.setInputFormat(CompositeInputFormat.class);
            jobconf.set("mapreduce.input.keyvaluelinerecordreader.key.value.separator", " ");
            jobconf.set("mapred.join.expr", CompositeInputFormat.compose("inner", KeyValueTextInputFormat.class,userPath,commentPath));
            FileUtil.fullyDelete(new File(args[2]));
            TextOutputFormat.setOutputPath(jobconf,output);
            //JobClient.runJob() submits the job and blocks until it finishes.
            RunningJob job = JobClient.runJob(jobconf);
            System.exit(job.isSuccessful() ? 0 : 1);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

}

Code Explanation

CompositeInputFormat.compose("inner", KeyValueTextInputFormat.class,userPath,commentPath)

The first argument is the join type: inner (inner join) or outer (full outer join).

The earlier drivers used the new-API Job class; this one uses the old mapred API with JobConf, because this CompositeInputFormat comes from org.apache.hadoop.mapred.join. In the input files a space separates the key from the value, which is why the key/value separator is set to " ".
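
For reference, compose() only builds the join expression string that ends up in the mapred.join.expr property; with the arguments above it produces an expression of roughly this form (paths abbreviated):

inner(tbl(org.apache.hadoop.mapred.KeyValueTextInputFormat,"<userPath>"),tbl(org.apache.hadoop.mapred.KeyValueTextInputFormat,"<commentPath>"))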

Run Results

Input arguments: <user file path> <comment file path> <output path for the join results>

Inner join (inner)

Full outer join (outer)

Cartesian Product

Use Cases

The Cartesian product pattern is a way of pairing every record from multiple input sources with every record from the other sources. It can be used when there is no suitable foreign key to join on, or when every pairwise combination of records needs to be analyzed, and only when there is no hard constraint on execution time, since the output grows multiplicatively with the input sizes.
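
MapReduce Design Patterns implements this with a custom input format that pairs up the input splits of the two sources. As a much simpler illustration of the idea only (one that funnels everything through a single reducer and therefore does not scale), each source's mapper could tag its records and emit them under one constant key, with the cross-pairing done in the reducer. The class name and the "A"/"B" tags below are hypothetical:

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class NaiveCartesianReducer extends Reducer<Text, Text, Text, Text> {

    //Every mapper emits the same constant key and prefixes each record with "A\t" or "B\t"
    //depending on which input source it came from.
    public void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        List<String> left = new ArrayList<String>();
        List<String> right = new ArrayList<String>();
        for (Text value : values) {
            String[] parts = value.toString().split("\t", 2);
            if (parts.length < 2) {
                continue;
            }
            if ("A".equals(parts[0])) {
                left.add(parts[1]);
            } else {
                right.add(parts[1]);
            }
        }
        //Emit every pairing of an A record with a B record.
        for (String a : left) {
            for (String b : right) {
                context.write(new Text(a), new Text(b));
            }
        }
    }
}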

Meta Patterns

What is a meta pattern?

A meta pattern is a pattern that deals with other patterns.

What kinds of meta patterns are there?

They include job chaining (combining several patterns to solve complex, multi-stage problems) and job merging (optimizing by folding the work of several MapReduce jobs together), among others. A minimal job-chaining driver is sketched below.
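
As a minimal sketch of job chaining (the class name, job names and argument layout are hypothetical, and the mapper/reducer classes for each step are omitted), the driver simply feeds the output directory of one job in as the input directory of the next:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class ChainDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path input = new Path(args[0]);
        Path intermediate = new Path(args[1]);
        Path output = new Path(args[2]);

        //First job of the chain (set its mapper/reducer/output classes here).
        Job first = Job.getInstance(conf, "chain step 1");
        FileInputFormat.addInputPath(first, input);
        FileOutputFormat.setOutputPath(first, intermediate);
        if (!first.waitForCompletion(true)) {
            System.exit(1);
        }

        //Second job reads the first job's output as its input.
        Job second = Job.getInstance(conf, "chain step 2");
        FileInputFormat.addInputPath(second, intermediate);
        FileOutputFormat.setOutputPath(second, output);
        System.exit(second.waitForCompletion(true) ? 0 : 1);
    }
}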

References

Implementing joins in Hadoop with CompositeInputFormat

Reading HDFS files with the Java API

MapReduce Design Patterns

