Calling the custom UDF my_len from the Hive CLI returns the length of its string argument:

hive(default)> select my_len("abcd");
4
First, add the hive-exec dependency to the project's pom.xml:

<dependencies>
    <dependency>
        <groupId>org.apache.hive</groupId>
        <artifactId>hive-exec</artifactId>
        <version>3.1.2</version>
    </dependency>
</dependencies>
package com.atguigu.hive;

import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

/**
 * A custom UDF must extend the GenericUDF class.
 * Requirement: compute the length of a given string.
 */
public class MyStringLength extends GenericUDF {

    /**
     * @param arguments object inspectors describing the input argument types
     * @return an object inspector describing the return type
     * @throws UDFArgumentException if the arguments are invalid
     */
    @Override
    public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
        // Check the number of input arguments
        if (arguments.length != 1) {
            throw new UDFArgumentLengthException("Input Args Length Error!!!");
        }
        // Check the type of the input argument
        if (!arguments[0].getCategory().equals(ObjectInspector.Category.PRIMITIVE)) {
            throw new UDFArgumentTypeException(0, "Input Args Type Error!!!");
        }
        // The function returns an int, so return an int object inspector
        return PrimitiveObjectInspectorFactory.javaIntObjectInspector;
    }

    /**
     * The function's processing logic.
     * @param arguments the input arguments
     * @return the length of the input string, or 0 for NULL input
     * @throws HiveException on evaluation errors
     */
    @Override
    public Object evaluate(DeferredObject[] arguments) throws HiveException {
        if (arguments[0].get() == null) {
            return 0;
        }
        return arguments[0].get().toString().length();
    }

    @Override
    public String getDisplayString(String[] children) {
        return "";
    }
}
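To make the class callable as my_len (as in the query at the top of this section), package the project into a jar, add the jar to the Hive session, and register a temporary function. A minimal sketch; the jar path below is illustrative, not from the original post:

-- the jar path is an example; adjust it to wherever the packaged jar was copied
hive(default)> add jar /opt/module/hive/datas/myudf.jar;
hive(default)> create temporary function my_len as "com.atguigu.hive.MyStringLength";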
A custom UDTF splits one input row into multiple output rows, for example:

hive(default)> select myudtf("hello,world,hadoop,hive", ",");
hello
world
hadoop
hive
package com.atguigu.udtf;

import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

import java.util.ArrayList;
import java.util.List;

public class MyUDTF extends GenericUDTF {

    private ArrayList<String> outList = new ArrayList<>();

    @Override
    public StructObjectInspector initialize(StructObjectInspector argOIs) throws UDFArgumentException {
        // 1. Define the column names and types of the output data
        List<String> fieldNames = new ArrayList<>();
        List<ObjectInspector> fieldOIs = new ArrayList<>();

        // 2. Add the output column name and type
        fieldNames.add("lineToWord");
        fieldOIs.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);

        return ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames, fieldOIs);
    }

    @Override
    public void process(Object[] args) throws HiveException {
        // 1. Get the raw input data
        String arg = args[0].toString();
        // 2. Get the second argument passed in, used here as the separator
        String splitKey = args[1].toString();
        // 3. Split the raw data on the given separator
        String[] fields = arg.split(splitKey);
        // 4. Iterate over the split results and emit each one
        for (String field : fields) {
            // The collection is reused, so clear it first
            outList.clear();
            // Add the current word to the collection
            outList.add(field);
            // Emit the collection contents as one output row
            forward(outList);
        }
    }

    @Override
    public void close() throws HiveException {
    }
}
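Registering the UDTF mirrors the UDF steps; a sketch under the same assumption of an illustrative jar path. The final query additionally assumes a hypothetical table words with a string column line, just to show the UDTF used in a lateral view:

-- jar path is an example; "words" and "line" are a hypothetical table and column
hive(default)> add jar /opt/module/hive/datas/myudtf.jar;
hive(default)> create temporary function myudtf as "com.atguigu.udtf.MyUDTF";
hive(default)> select word from words lateral view myudtf(line, ",") tbl as word;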
Hive Basics (45): Hive Functions (2): Custom Functions / Custom UDF Functions / Custom UDTF Functions
Source: https://www.cnblogs.com/qiu-hua/p/15141219.html