Java class org.apache.hadoop.hdfs.TestFileAppend4.DelayAnswer — example usages from open-source projects

Project: hadoop-EAR    File: TestStuckDataNode.java
/** This creates a slow writer and checks whether pipeline
  * heartbeats keep the write pipeline alive while the writer is stalled.
  */
public void testStuckDataNode() throws Exception {
  final int DATANODE_NUM = 3;
  Configuration conf = new Configuration();
  final int timeout = 8000;
  conf.setInt("dfs.socket.timeout",timeout);

  final Path p = new Path("/pipelineHeartbeat/foo");
  System.out.println("p=" + p);

  MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
  DistributedFileSystem fs = (DistributedFileSystem)cluster.getFileSystem();

  // Stall the first datanode's bytesWritten metric update: the DelayAnswer
  // blocks inside inc(), simulating a stuck datanode in the pipeline.
  DataNodeMetrics metrics = cluster.getDataNodes().get(0).myMetrics;
  MetricsTimeVaryingLong spyBytesWritten = spy(metrics.bytesWritten);
  DelayAnswer delayAnswer = new DelayAnswer();
  doAnswer(delayAnswer).when(spyBytesWritten).inc(anyInt());
  metrics.bytesWritten = spyBytesWritten;

  try {
    // create a new file.
    FSDataOutputStream stm = fs.create(p);
    try {
      stm.write(1);
      stm.sync();
      stm.write(2);
    } finally {
      // Close even if a write/sync throws, so the stream does not leak.
      stm.close();
    }

    // verify that entire file is good
    FSDataInputStream in = fs.open(p);
    try {
      assertEquals(1, in.read());
      assertEquals(2, in.read());
    } finally {
      // Close even when an assertion fails, so the stream does not leak.
      in.close();
    }
  } finally {
    try {
      fs.close();
    } finally {
      // Always shut the mini-cluster down, even if fs.close() throws.
      cluster.shutdown();
    }
  }
}
Project: hadoop-on-lustre    File: TestStuckDataNode.java
/** This creates a slow writer and checks whether pipeline
 * heartbeats keep the write pipeline alive while the writer is stalled.
 */
public void testStuckDataNode() throws Exception {
  final int DATANODE_NUM = 3;
  Configuration conf = new Configuration();
  final int timeout = 8000;
  conf.setInt("dfs.socket.timeout",timeout);

  final Path p = new Path("/pipelineHeartbeat/foo");
  System.out.println("p=" + p);

  MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
  DistributedFileSystem fs = (DistributedFileSystem)cluster.getFileSystem();

  // Delay incrBytesWritten() on a spy of the first datanode's metrics to
  // simulate a stuck datanode.
  // NOTE(review): the spy is never installed back into the DataNode, so the
  // datanode may keep using the original metrics object — verify the delay
  // actually takes effect.
  DataNodeInstrumentation metrics = spy(cluster.getDataNodes().get(0).myMetrics);
  DelayAnswer delayAnswer = new DelayAnswer();
  doAnswer(delayAnswer).when(metrics).incrBytesWritten(anyInt());

  try {
    // create a new file.
    FSDataOutputStream stm = fs.create(p);
    try {
      stm.write(1);
      stm.sync();
      stm.write(2);
    } finally {
      // Close even if a write/sync throws, so the stream does not leak.
      stm.close();
    }

    // verify that entire file is good
    FSDataInputStream in = fs.open(p);
    try {
      assertEquals(1, in.read());
      assertEquals(2, in.read());
    } finally {
      // Close even when an assertion fails, so the stream does not leak.
      in.close();
    }
  } finally {
    try {
      fs.close();
    } finally {
      // Always shut the mini-cluster down, even if fs.close() throws.
      cluster.shutdown();
    }
  }
}
Project: RDFS    File: TestStuckDataNode.java
/** This creates a slow writer and checks whether pipeline
  * heartbeats keep the write pipeline alive while the writer is stalled.
  */
public void testStuckDataNode() throws Exception {
  final int DATANODE_NUM = 3;
  Configuration conf = new Configuration();
  final int timeout = 8000;
  conf.setInt("dfs.socket.timeout",timeout);

  final Path p = new Path("/pipelineHeartbeat/foo");
  System.out.println("p=" + p);

  MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
  DistributedFileSystem fs = (DistributedFileSystem)cluster.getFileSystem();

  // Stall the first datanode's bytesWritten metric update: the DelayAnswer
  // blocks inside inc(), simulating a stuck datanode in the pipeline.
  DataNodeMetrics metrics = cluster.getDataNodes().get(0).myMetrics;
  MetricsTimeVaryingLong spyBytesWritten = spy(metrics.bytesWritten);
  DelayAnswer delayAnswer = new DelayAnswer();
  doAnswer(delayAnswer).when(spyBytesWritten).inc(anyInt());
  metrics.bytesWritten = spyBytesWritten;

  try {
    // create a new file.
    FSDataOutputStream stm = fs.create(p);
    try {
      stm.write(1);
      stm.sync();
      stm.write(2);
    } finally {
      // Close even if a write/sync throws, so the stream does not leak.
      stm.close();
    }

    // verify that entire file is good
    FSDataInputStream in = fs.open(p);
    try {
      assertEquals(1, in.read());
      assertEquals(2, in.read());
    } finally {
      // Close even when an assertion fails, so the stream does not leak.
      in.close();
    }
  } finally {
    try {
      fs.close();
    } finally {
      // Always shut the mini-cluster down, even if fs.close() throws.
      cluster.shutdown();
    }
  }
}
Project: hortonworks-extension    File: TestStuckDataNode.java
/** This creates a slow writer and checks whether pipeline
 * heartbeats keep the write pipeline alive while the writer is stalled.
 */
public void testStuckDataNode() throws Exception {
  final int DATANODE_NUM = 3;
  Configuration conf = new Configuration();
  final int timeout = 8000;
  conf.setInt("dfs.socket.timeout",timeout);

  final Path p = new Path("/pipelineHeartbeat/foo");
  System.out.println("p=" + p);

  MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
  DistributedFileSystem fs = (DistributedFileSystem)cluster.getFileSystem();

  // Delay incrBytesWritten() on a spy of the first datanode's metrics to
  // simulate a stuck datanode.
  // NOTE(review): the spy is never installed back into the DataNode, so the
  // datanode may keep using the original metrics object — verify the delay
  // actually takes effect.
  DataNodeInstrumentation metrics = spy(cluster.getDataNodes().get(0).myMetrics);
  DelayAnswer delayAnswer = new DelayAnswer();
  doAnswer(delayAnswer).when(metrics).incrBytesWritten(anyInt());

  try {
    // create a new file.
    FSDataOutputStream stm = fs.create(p);
    try {
      stm.write(1);
      stm.sync();
      stm.write(2);
    } finally {
      // Close even if a write/sync throws, so the stream does not leak.
      stm.close();
    }

    // verify that entire file is good
    FSDataInputStream in = fs.open(p);
    try {
      assertEquals(1, in.read());
      assertEquals(2, in.read());
    } finally {
      // Close even when an assertion fails, so the stream does not leak.
      in.close();
    }
  } finally {
    try {
      fs.close();
    } finally {
      // Always shut the mini-cluster down, even if fs.close() throws.
      cluster.shutdown();
    }
  }
}
Project: hortonworks-extension    File: TestStuckDataNode.java
/** This creates a slow writer and checks whether pipeline
 * heartbeats keep the write pipeline alive while the writer is stalled.
 */
public void testStuckDataNode() throws Exception {
  final int DATANODE_NUM = 3;
  Configuration conf = new Configuration();
  final int timeout = 8000;
  conf.setInt("dfs.socket.timeout",timeout);

  final Path p = new Path("/pipelineHeartbeat/foo");
  System.out.println("p=" + p);

  MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
  DistributedFileSystem fs = (DistributedFileSystem)cluster.getFileSystem();

  // Delay incrBytesWritten() on a spy of the first datanode's metrics to
  // simulate a stuck datanode.
  // NOTE(review): the spy is never installed back into the DataNode, so the
  // datanode may keep using the original metrics object — verify the delay
  // actually takes effect.
  DataNodeInstrumentation metrics = spy(cluster.getDataNodes().get(0).myMetrics);
  DelayAnswer delayAnswer = new DelayAnswer();
  doAnswer(delayAnswer).when(metrics).incrBytesWritten(anyInt());

  try {
    // create a new file.
    FSDataOutputStream stm = fs.create(p);
    try {
      stm.write(1);
      stm.sync();
      stm.write(2);
    } finally {
      // Close even if a write/sync throws, so the stream does not leak.
      stm.close();
    }

    // verify that entire file is good
    FSDataInputStream in = fs.open(p);
    try {
      assertEquals(1, in.read());
      assertEquals(2, in.read());
    } finally {
      // Close even when an assertion fails, so the stream does not leak.
      in.close();
    }
  } finally {
    try {
      fs.close();
    } finally {
      // Always shut the mini-cluster down, even if fs.close() throws.
      cluster.shutdown();
    }
  }
}