import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Writable; public class SerializeOrDeserialize { private static IntWritable intWritable = new IntWritable(163); private static IntWritable intWritable2 = new IntWritable(); public static void main(String[] args) throws IOException { byte[] bytes1 = serialize(intWritable); //输出结果为[B@277c0f21 System.out.println(bytes1); deserialize(intWritable2, bytes1); //输出结果为163 System.out.println(intWritable2.get()); } /** * 序列化 * @param writable * @return * @throws IOException */ public static byte[] serialize(Writable writable) throws IOException{ ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream); //将对象转换为字节流并写入到输出流dataOutputStream中 writable.write(dataOutputStream); dataOutputStream.close(); return byteArrayOutputStream.toByteArray(); } /** * 反序列化 * @param writable * @param bytes * @return * @throws IOException */ public static byte[] deserialize(Writable writable, byte[] bytes) throws IOException{ ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(bytes); DataInputStream dataInputStream = new DataInputStream(byteArrayInputStream); //从输入流dataInputStream中读取字节流并反序列化为对象 writable.readFields(dataInputStream); dataInputStream.close(); return bytes; } }
基于Hadoop的数据序列化与反序列化实例
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
0
0
0
上一篇:Mongodb高级篇-性能优化
精彩评论