@Test
public void testBarWritable() throws Exception {
  System.out.println("Testing Writable, Configurable wrapped in GenericWritable");
  FooGenericWritable generic = new FooGenericWritable();
  generic.setConf(conf);
  Bar bar = new Bar();
  bar.setConf(conf);
  generic.set(bar);

  // test writing the generic writable
  FooGenericWritable after = (FooGenericWritable) TestWritable.testWritable(generic, conf);

  // test that the configuration is passed to the wrapped class
  System.out.println("Testing if Configuration is passed to wrapped classes");
  assertTrue(after.get() instanceof Configurable);
  assertNotNull(((Configurable) after.get()).getConf());
}
/**
 * Get the 'value' corresponding to the last read 'key'.
 *
 * @param val the 'value' to be read
 */
public synchronized void getCurrentValue(Writable val) throws IOException {
  if (val instanceof Configurable) {
    ((Configurable) val).setConf(this.conf);
  }

  // Position stream to 'current' value
  seekToCurrentValue();

  val.readFields(valIn);
  if (valIn.read() > 0) {
    log.info("available bytes: " + valIn.available());
    throw new IOException(val + " read " + (valBuffer.getPosition() - keyLength)
        + " bytes, should read " + (valBuffer.getLength() - keyLength));
  }
}
/**
 * Get the 'value' corresponding to the last read 'key'.
 *
 * @param val the 'value' to be read
 */
public synchronized WALEntry getCurrentValue(WALEntry val) throws IOException {
  if (val instanceof Configurable) {
    ((Configurable) val).setConf(this.conf);
  }

  // Position stream to 'current' value
  seekToCurrentValue();

  val = deserializeValue(val);
  if (valIn.read() > 0) {
    log.info("available bytes: " + valIn.available());
    throw new IOException(val + " read " + (valBuffer.getPosition() - keyLength)
        + " bytes, should read " + (valBuffer.getLength() - keyLength));
  }
  return val;
}
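A sketch of how a caller typically drives this next()/getCurrentValue() pairing, modeled on Hadoop's SequenceFile.Reader, which exposes the same two-step API; the fs, path, and conf variables are assumed to be in scope:

// Hypothetical usage: a value class that implements Configurable has the
// reader's Configuration injected before its fields are deserialized.
SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
try {
  Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
  Writable val = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
  while (reader.next(key)) {
    reader.getCurrentValue(val); // setConf(conf) happens here if val is Configurable
    // ... process key and val ...
  }
} finally {
  reader.close();
}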
/**
 * Returns the CompressionCodec for the message.
 *
 * @param header the message header that carries the codec class name
 * @return the (possibly cached) CompressionCodec instance
 * @throws InstantiationException
 * @throws IllegalAccessException
 * @throws ClassNotFoundException
 */
private final CompressionCodec getCodec(Header header)
    throws InstantiationException, IllegalAccessException, ClassNotFoundException {
  String codecClassName = header.getCodecClassName();
  // Although we might create more than one CompressionCodec per class name,
  // we don't care: synchronizing is more expensive than occasionally
  // creating an extra instance of the CompressionCodec.
  CompressionCodec codec = codecMap.get(codecClassName);
  if (codec == null) {
    codec = (CompressionCodec) Thread.currentThread()
        .getContextClassLoader().loadClass(codecClassName)
        .newInstance();
    if (codec instanceof Configurable) {
      ((Configurable) codec).setConf(hadoopConf);
    }
    codecMap.put(codecClassName, codec);
  }
  return codec;
}
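Once cached, the codec is typically used to wrap the message payload stream. A minimal sketch of the read side, assuming a rawIn InputStream positioned at the compressed body (the IOUtils.toByteArray helper is Commons IO, used here purely for illustration):

// Hypothetical decompression of a message body with the cached codec.
CompressionCodec codec = getCodec(header);
try (InputStream in = codec.createInputStream(rawIn)) {
  byte[] body = IOUtils.toByteArray(in);
  // ... decode the body ...
}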
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
  // read the parent fields and the final fields
  in.defaultReadObject();

  // the job conf knows how to deserialize itself
  jobConf = new JobConf();
  jobConf.readFields(in);

  try {
    hadoopInputSplit = (org.apache.hadoop.mapred.InputSplit) WritableFactories.newInstance(splitType);
  } catch (Exception e) {
    throw new RuntimeException("Unable to instantiate Hadoop InputSplit", e);
  }

  if (hadoopInputSplit instanceof Configurable) {
    ((Configurable) hadoopInputSplit).setConf(this.jobConf);
  } else if (hadoopInputSplit instanceof JobConfigurable) {
    ((JobConfigurable) hadoopInputSplit).configure(this.jobConf);
  }
  hadoopInputSplit.readFields(in);
}
public FallbackNameNodeAddress(InstanceId id, String nameService, Configurable parent) {
  super(id, nameService, parent);
  clientProtocol = new ConfigAddressHolder(DFS_NAMENODE_RPC_ADDRESS_KEY, parent);
  avatarProtocol = new AddressHolder<InetSocketAddress>(parent) {
    @Override
    public String getKey() {
      return AVATARNODE_PORT;
    }

    @Override
    public InetSocketAddress getAddress() throws UnknownHostException {
      InetSocketAddress clientProtocolAddress = clientProtocol.getAddress();
      return new InetSocketAddress(clientProtocolAddress.getAddress(),
          getConf().getInt(AVATARNODE_PORT, clientProtocolAddress.getPort() + 1));
    }
  };
  httpProtocol = new ConfigAddressHolder(DFS_NAMENODE_HTTP_ADDRESS_KEY, parent);
  datanodeProtocol = new ConfigAddressHolder(DATANODE_PROTOCOL_ADDRESS, parent);
}
public AvatarNameSpaceAddressManager(Configurable confg, String serviceName) {
  super(confg);
  znodePath = confg.getConf().get(DFS_CLUSTER_NAME) + "/" + serviceName;
  nodeZero = new AvatarNodeAddress(InstanceId.NODEZERO, serviceName, confg);
  nodeOne = new AvatarNodeAddress(InstanceId.NODEONE, serviceName, confg);
  try {
    // We would like to update the primary and standby references at
    // construction, but not throw an exception here.
    refreshZookeeperInfo();
  } catch (IOException ioe) {
    LOG.error(ioe);
  } catch (KeeperException kpe) {
    LOG.error(kpe);
  } catch (InterruptedException ie) {
    LOG.error(ie);
  }
}
private static EncryptionMaterialsProvider createEncryptionMaterialsProvider(Configuration hadoopConfig) {
  String empClassName = hadoopConfig.get(S3_ENCRYPTION_MATERIALS_PROVIDER);
  if (empClassName == null) {
    return null;
  }
  try {
    Object instance = Class.forName(empClassName).getConstructor().newInstance();
    if (!(instance instanceof EncryptionMaterialsProvider)) {
      throw new RuntimeException("Invalid encryption materials provider class: "
          + instance.getClass().getName());
    }
    EncryptionMaterialsProvider emp = (EncryptionMaterialsProvider) instance;
    if (emp instanceof Configurable) {
      ((Configurable) emp).setConf(hadoopConfig);
    }
    return emp;
  } catch (ReflectiveOperationException e) {
    throw new RuntimeException("Unable to load or create S3 encryption materials provider: "
        + empClassName, e);
  }
}
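For the Configurable branch above to matter, the provider class named in the configuration must itself implement Configurable. A minimal sketch of such a provider against the AWS SDK v1 EncryptionMaterialsProvider interface; the class name and the example.s3.encryption.key property are hypothetical:

import java.util.Base64;
import java.util.Map;
import javax.crypto.spec.SecretKeySpec;
import com.amazonaws.services.s3.model.EncryptionMaterials;
import com.amazonaws.services.s3.model.EncryptionMaterialsProvider;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;

// Hypothetical provider: receives the Hadoop Configuration via setConf()
// after createEncryptionMaterialsProvider() instantiates it reflectively.
public class StaticKeyMaterialsProvider implements EncryptionMaterialsProvider, Configurable {
  private Configuration conf;
  private EncryptionMaterials materials;

  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
    // Derive the key material from a (hypothetical) config property.
    byte[] key = Base64.getDecoder().decode(conf.get("example.s3.encryption.key"));
    materials = new EncryptionMaterials(new SecretKeySpec(key, "AES"));
  }

  @Override
  public Configuration getConf() {
    return conf;
  }

  @Override
  public EncryptionMaterials getEncryptionMaterials() {
    return materials;
  }

  @Override
  public EncryptionMaterials getEncryptionMaterials(Map<String, String> materialsDescription) {
    return materials;
  }

  @Override
  public void refresh() {
    // static key; nothing to refresh
  }
}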
private static Configuration getHadoopConfiguration() {
  ResourceConfiguration asakusa = ResourceBroker.find(ResourceConfiguration.class);
  if (asakusa instanceof Configurable) {
    Configuration conf = ((Configurable) asakusa).getConf();
    if (conf != null) {
      return conf;
    }
  }
  Configuration hadoop = ResourceBroker.find(Configuration.class);
  if (hadoop != null) {
    return hadoop;
  }
  throw new IllegalStateException(MessageFormat.format(
      "required resource has not been prepared yet: {0}",
      Configuration.class));
}
/**
 * Create a new record reader from the original InputFormat and initialize it.
 *
 * @throws IOException
 * @throws InterruptedException
 */
private void createNewRecordReader() throws IOException, InterruptedException {
  FileSplit split = new FileSplit(combineFileSplit.getPath(currentFileIndex),
      combineFileSplit.getOffset(currentFileIndex),
      combineFileSplit.getLength(currentFileIndex), null);
  if (split instanceof Configurable) {
    ((Configurable) split).setConf(context.getConfiguration());
  }
  current = inputFormat.createRecordReader(split, context);
  current.initialize(split, context);
}
private void encodeCellsTo(OutputStream os, CellScanner cellScanner, Codec codec,
    CompressionCodec compressor) throws IOException {
  Compressor poolCompressor = null;
  try {
    if (compressor != null) {
      if (compressor instanceof Configurable) {
        ((Configurable) compressor).setConf(this.conf);
      }
      poolCompressor = CodecPool.getCompressor(compressor);
      os = compressor.createOutputStream(os, poolCompressor);
    }
    Codec.Encoder encoder = codec.getEncoder(os);
    while (cellScanner.advance()) {
      encoder.write(cellScanner.current());
    }
    encoder.flush();
  } catch (BufferOverflowException | IndexOutOfBoundsException e) {
    throw new DoNotRetryIOException(e);
  } finally {
    os.close();
    if (poolCompressor != null) {
      CodecPool.returnCompressor(poolCompressor);
    }
  }
}
public OutputStream createCompressionStream(OutputStream downStream, Compressor compressor,
    int downStreamBufferSize) throws IOException {
  CompressionCodec codec = getCodec(conf);
  OutputStream bos1 = null;
  if (downStreamBufferSize > 0) {
    bos1 = new BufferedOutputStream(downStream, downStreamBufferSize);
  } else {
    bos1 = downStream;
  }
  ((Configurable) codec).getConf().setInt("io.file.buffer.size", 32 * 1024);
  CompressionOutputStream cos = codec.createOutputStream(bos1, compressor);
  BufferedOutputStream bos2 = new BufferedOutputStream(
      new FinishOnFlushCompressionStream(cos), DATA_OBUF_SIZE);
  return bos2;
}
@Override
public void read(DataInputView in) throws IOException {
  this.splitNumber = in.readInt();
  this.hadoopInputSplitTypeName = in.readUTF();
  if (hadoopInputSplit == null) {
    try {
      Class<? extends org.apache.hadoop.io.Writable> inputSplit =
          Class.forName(hadoopInputSplitTypeName).asSubclass(org.apache.hadoop.io.Writable.class);
      this.hadoopInputSplit = (org.apache.hadoop.mapred.InputSplit) WritableFactories.newInstance(inputSplit);
    } catch (Exception e) {
      throw new RuntimeException("Unable to create InputSplit", e);
    }
  }
  jobConf = new JobConf();
  jobConf.readFields(in);
  if (this.hadoopInputSplit instanceof Configurable) {
    ((Configurable) this.hadoopInputSplit).setConf(this.jobConf);
  }
  this.hadoopInputSplit.readFields(in);
}
@Override
public Configuration getConf() {
  if (expression instanceof Configurable) {
    return ((Configurable) expression).getConf();
  }
  return null;
}
/**
 * Check and set 'configuration' if necessary.
 *
 * @param theObject object for which to set configuration
 * @param conf Configuration
 */
public static void setConf(Object theObject, Configuration conf) {
  if (conf != null) {
    if (theObject instanceof Configurable) {
      ((Configurable) theObject).setConf(conf);
    }
    setJobConf(theObject, conf);
  }
}
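A minimal sketch of the receiving side of this helper: any class that implements Configurable (here by extending Hadoop's Configured base class) gets the Configuration injected by the instanceof branch. The class and property names are illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;

// Hypothetical target class: the helper's instanceof check fires and
// Configured.setConf() stores the Configuration for later use.
public class TimestampedRecord extends Configured {
  public long allowedSkewMillis() {
    // Reads a hypothetical property from the injected configuration.
    return getConf().getLong("example.record.skew.ms", 0L);
  }
}

// Usage: after setConf(new TimestampedRecord(), conf), the record's
// allowedSkewMillis() sees the values carried by conf.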
/** Create a new instance of a class with a defined factory. */
public static Writable newInstance(Class<? extends Writable> c, Configuration conf) {
  WritableFactory factory = WritableFactories.getFactory(c);
  if (factory != null) {
    Writable result = factory.newInstance();
    if (result instanceof Configurable) {
      ((Configurable) result).setConf(conf);
    }
    return result;
  } else {
    return ReflectionUtils.newInstance(c, conf);
  }
}
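For the factory branch to be taken, a factory must first be registered for the class, conventionally from a static initializer. A sketch with a hypothetical BlockRecord writable; since WritableFactory declares a single newInstance() method, a constructor reference works on Java 8+:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;

// Hypothetical writable with a registered factory: newInstance(BlockRecord.class, conf)
// uses the factory and then calls setConf(), because Configured is Configurable.
public class BlockRecord extends Configured implements Writable {
  static {
    WritableFactories.setFactory(BlockRecord.class, BlockRecord::new);
  }

  private long offset;

  @Override
  public void write(DataOutput out) throws IOException {
    out.writeLong(offset);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    offset = in.readLong();
  }
}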
private static void initPolicy(VolumeChoosingPolicy<FsVolumeSpi> policy,
    float preferencePercent) {
  Configuration conf = new Configuration();
  // Set the threshold to consider volumes imbalanced to 1MB
  conf.setLong(
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_KEY,
      1024 * 1024); // 1MB
  conf.setFloat(
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY,
      preferencePercent);
  ((Configurable) policy).setConf(conf);
}
public InputStream createDecompressionStream(InputStream downStream, Decompressor decompressor,
    int downStreamBufferSize) throws IOException {
  CompressionCodec codec = getCodec(conf);
  // Set the internal buffer size to read from down stream.
  if (downStreamBufferSize > 0) {
    ((Configurable) codec).getConf().setInt("io.file.buffer.size", downStreamBufferSize);
  }
  CompressionInputStream cis = codec.createInputStream(downStream, decompressor);
  BufferedInputStream bis2 = new BufferedInputStream(cis, DATA_IBUF_SIZE);
  return bis2;
}