Example source code for the Java class org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient
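The snippets below are collected from several open-source projects; each entry is labeled with the project and the source file it comes from.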

Project: hadoop-oss    File: NuCypherExtUtilClient.java
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 */
public static IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
                                       Configuration conf,
                                       SaslDataTransferClient saslClient,
                                       SocketFactory socketFactory,
                                       boolean connectToDnViaHostname,
                                       DataEncryptionKeyFactory dekFactory,
                                       Token<BlockTokenIdentifier> blockToken)
    throws IOException {

  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(connectToDnViaHostname);
    LOG.debug("Connecting to datanode {}", dnAddr);
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    IOStreamPair pair = saslClient.newSocketSend(sock, unbufOut,
        unbufIn, dekFactory, blockToken, dn);

    IOStreamPair result = new IOStreamPair(
        new DataInputStream(pair.in),
        new DataOutputStream(new BufferedOutputStream(pair.out,
            NuCypherExtUtilClient.getSmallBufferSize(conf)))
    );

    success = true;
    return result;
  } finally {
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
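For orientation, here is a minimal caller sketch. It is not taken from any of the listed projects: the surrounding objects (conf, saslClient, dn, dekFactory, blockToken) are assumed to exist in a DFSClient-like context, and the timeout value is illustrative.

// Hedged caller sketch: all inputs are assumed to come from an existing
// DFSClient-style context; only connectToDN itself is from the code above.
IOStreamPair streams = NuCypherExtUtilClient.connectToDN(dn,
    60000 /* timeout ms */, conf, saslClient,
    NetUtils.getDefaultSocketFactory(conf),
    false /* dial by IP rather than hostname */, dekFactory, blockToken);
try {
  DataOutputStream out = (DataOutputStream) streams.out;
  DataInputStream in = (DataInputStream) streams.in;
  // ... write a DataTransferProtocol request to out, read the reply from in ...
} finally {
  IOUtils.closeStream(streams.in);
  IOUtils.closeStream(streams.out);
}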
Project: hadoop-oss    File: NuCypherExtUtilClient.java
public static Peer peerFromSocketAndKey(
    SaslDataTransferClient saslClient, Socket s,
    DataEncryptionKeyFactory keyFactory,
    Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
    throws IOException {
  Peer peer = null;
  boolean success = false;
  try {
    peer = peerFromSocket(s);
    peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtilsClient.cleanup(null, peer);
    }
  }
}
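A possible call site for this helper, sketched under the assumption that conf, saslClient, keyFactory, blockToken, and the target dn already exist; the connect and timeout plumbing mirrors connectToDN above.

// Hedged call-site sketch; setup objects are assumed, timeout is illustrative.
Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
NetUtils.connect(sock, NetUtils.createSocketAddr(dn.getXferAddr()), 60000);
sock.setSoTimeout(60000);
// Runs the SASL handshake on the raw socket and returns a Peer whose streams
// are encrypted if the negotiated protection level requires it.
Peer peer = NuCypherExtUtilClient.peerFromSocketAndKey(
    saslClient, sock, keyFactory, blockToken, dn);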
Project: hadoop    File: Dispatcher.java
public Dispatcher(NameNodeConnector nnc, Set<String> includedNodes,
    Set<String> excludedNodes, long movedWinWidth, int moverThreads,
    int dispatcherThreads, int maxConcurrentMovesPerNode, Configuration conf) {
  this.nnc = nnc;
  this.excludedNodes = excludedNodes;
  this.includedNodes = includedNodes;
  this.movedBlocks = new MovedBlocks<StorageGroup>(movedWinWidth);

  this.cluster = NetworkTopology.getInstance(conf);

  this.moveExecutor = Executors.newFixedThreadPool(moverThreads);
  this.dispatchExecutor = dispatcherThreads == 0? null
      : Executors.newFixedThreadPool(dispatcherThreads);
  this.maxConcurrentMovesPerNode = maxConcurrentMovesPerNode;

  this.saslClient = new SaslDataTransferClient(conf,
      DataTransferSaslUtil.getSaslPropertiesResolver(conf),
      TrustedChannelResolver.getInstance(conf), nnc.fallbackToSimpleAuth);
}
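The constructor call at the end is the standalone way to obtain a SaslDataTransferClient. A minimal sketch, assuming the caller owns the fallback flag instead of borrowing nnc.fallbackToSimpleAuth:

// Hedged construction sketch using the same four-argument constructor.
AtomicBoolean fallbackToSimpleAuth = new AtomicBoolean(false);
SaslDataTransferClient saslClient = new SaslDataTransferClient(
    conf,
    DataTransferSaslUtil.getSaslPropertiesResolver(conf), // SASL QoP properties
    TrustedChannelResolver.getInstance(conf), // may skip SASL on trusted networks
    fallbackToSimpleAuth); // set to true after an RPC-level fallback to simple auth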
Project: hadoop    File: TcpPeerServer.java
public static Peer peerFromSocketAndKey(
      SaslDataTransferClient saslClient, Socket s,
      DataEncryptionKeyFactory keyFactory,
      Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
      throws IOException {
  Peer peer = null;
  boolean success = false;
  try {
    peer = peerFromSocket(s);
    peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtils.cleanup(null, peer);
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: DFSUtilClient.java
public static Peer peerFromSocketAndKey(
      SaslDataTransferClient saslClient, Socket s,
      DataEncryptionKeyFactory keyFactory,
      Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
      throws IOException {
  Peer peer = null;
  boolean success = false;
  try {
    peer = peerFromSocket(s);
    peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtilsClient.cleanup(null, peer);
    }
  }
}
Project: big-c    File: Dispatcher.java
public Dispatcher(NameNodeConnector nnc, Set<String> includedNodes,
    Set<String> excludedNodes, long movedWinWidth, int moverThreads,
    int dispatcherThreads, int maxConcurrentMovesPerNode, Configuration conf) {
  this.nnc = nnc;
  this.excludedNodes = excludedNodes;
  this.includedNodes = includedNodes;
  this.movedBlocks = new MovedBlocks<StorageGroup>(movedWinWidth);

  this.cluster = NetworkTopology.getInstance(conf);

  this.moveExecutor = Executors.newFixedThreadPool(moverThreads);
  this.dispatchExecutor = dispatcherThreads == 0? null
      : Executors.newFixedThreadPool(dispatcherThreads);
  this.maxConcurrentMovesPerNode = maxConcurrentMovesPerNode;

  this.saslClient = new SaslDataTransferClient(conf,
      DataTransferSaslUtil.getSaslPropertiesResolver(conf),
      TrustedChannelResolver.getInstance(conf), nnc.fallbackToSimpleAuth);
}
Project: big-c    File: TcpPeerServer.java
public static Peer peerFromSocketAndKey(
      SaslDataTransferClient saslClient, Socket s,
      DataEncryptionKeyFactory keyFactory,
      Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
      throws IOException {
  Peer peer = null;
  boolean success = false;
  try {
    peer = peerFromSocket(s);
    peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtils.cleanup(null, peer);
    }
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: Dispatcher.java
public Dispatcher(NameNodeConnector nnc, Set<String> includedNodes,
    Set<String> excludedNodes, long movedWinWidth, int moverThreads,
    int dispatcherThreads, int maxConcurrentMovesPerNode, Configuration conf) {
  this.nnc = nnc;
  this.excludedNodes = excludedNodes;
  this.includedNodes = includedNodes;
  this.movedBlocks = new MovedBlocks<StorageGroup>(movedWinWidth);

  this.cluster = NetworkTopology.getInstance(conf);

  this.moveExecutor = Executors.newFixedThreadPool(moverThreads);
  this.dispatchExecutor = dispatcherThreads == 0? null
      : Executors.newFixedThreadPool(dispatcherThreads);
  this.maxConcurrentMovesPerNode = maxConcurrentMovesPerNode;

  this.saslClient = new SaslDataTransferClient(conf,
      DataTransferSaslUtil.getSaslPropertiesResolver(conf),
      TrustedChannelResolver.getInstance(conf), nnc.fallbackToSimpleAuth);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TcpPeerServer.java
public static Peer peerFromSocketAndKey(
      SaslDataTransferClient saslClient, Socket s,
      DataEncryptionKeyFactory keyFactory,
      Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
      throws IOException {
  Peer peer = null;
  boolean success = false;
  try {
    peer = peerFromSocket(s);
    peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtils.cleanup(null, peer);
    }
  }
}
Project: FlexMap    File: Dispatcher.java
public Dispatcher(NameNodeConnector nnc, Set<String> includedNodes,
    Set<String> excludedNodes, long movedWinWidth, int moverThreads,
    int dispatcherThreads, int maxConcurrentMovesPerNode, Configuration conf) {
  this.nnc = nnc;
  this.excludedNodes = excludedNodes;
  this.includedNodes = includedNodes;
  this.movedBlocks = new MovedBlocks<StorageGroup>(movedWinWidth);

  this.cluster = NetworkTopology.getInstance(conf);

  this.moveExecutor = Executors.newFixedThreadPool(moverThreads);
  this.dispatchExecutor = dispatcherThreads == 0? null
      : Executors.newFixedThreadPool(dispatcherThreads);
  this.maxConcurrentMovesPerNode = maxConcurrentMovesPerNode;

  this.saslClient = new SaslDataTransferClient(conf,
      DataTransferSaslUtil.getSaslPropertiesResolver(conf),
      TrustedChannelResolver.getInstance(conf), nnc.fallbackToSimpleAuth);
}
Project: FlexMap    File: TcpPeerServer.java
public static Peer peerFromSocketAndKey(
      SaslDataTransferClient saslClient, Socket s,
      DataEncryptionKeyFactory keyFactory,
      Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
      throws IOException {
  Peer peer = null;
  boolean success = false;
  try {
    peer = peerFromSocket(s);
    peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtils.cleanup(null, peer);
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: Dispatcher.java
Dispatcher(NameNodeConnector nnc, Set<String> includedNodes,
    Set<String> excludedNodes, long movedWinWidth, int moverThreads,
    int dispatcherThreads, int maxConcurrentMovesPerNode,
    long getBlocksSize, long getBlocksMinBlockSize, Configuration conf) {
  this.nnc = nnc;
  this.excludedNodes = excludedNodes;
  this.includedNodes = includedNodes;
  this.movedBlocks = new MovedBlocks<StorageGroup>(movedWinWidth);

  this.cluster = NetworkTopology.getInstance(conf);

  this.dispatchExecutor = dispatcherThreads == 0? null
      : Executors.newFixedThreadPool(dispatcherThreads);
  this.moverThreadAllocator = new Allocator(moverThreads);
  this.maxConcurrentMovesPerNode = maxConcurrentMovesPerNode;

  this.getBlocksSize = getBlocksSize;
  this.getBlocksMinBlockSize = getBlocksMinBlockSize;

  this.saslClient = new SaslDataTransferClient(conf,
      DataTransferSaslUtil.getSaslPropertiesResolver(conf),
      TrustedChannelResolver.getInstance(conf), nnc.fallbackToSimpleAuth);
  this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(conf);
  this.connectToDnViaHostname = conf.getBoolean(
      HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME,
      HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
  placementPolicies = new BlockPlacementPolicies(conf, null, cluster, null);
}
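Unlike the older Dispatcher constructors above, this variant also reads client-side settings. A hedged configuration sketch of the hostname toggle it consults:

// Hedged sketch: have the dispatcher dial datanodes by hostname instead of IP.
Configuration conf = new Configuration();
conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, true);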
Project: hadoop-2.6.0-cdh5.4.3    File: DatanodeJspHelper.java
/**
 * Gets the {@link SaslDataTransferClient} from the {@link DataNode} attached
 * to the servlet context.
 *
 * @return SaslDataTransferClient from DataNode
 */
private static SaslDataTransferClient getSaslDataTransferClient(
    HttpServletRequest req) {
  DataNode dataNode = (DataNode)req.getSession().getServletContext()
    .getAttribute("datanode");
  return dataNode.saslClient;
}
Project: FlexMap    File: DatanodeJspHelper.java
/**
 * Gets the {@link SaslDataTransferClient} from the {@link DataNode} attached
 * to the servlet context.
 *
 * @return SaslDataTransferClient from DataNode
 */
private static SaslDataTransferClient getSaslDataTransferClient(
    HttpServletRequest req) {
  DataNode dataNode = (DataNode)req.getSession().getServletContext()
    .getAttribute("datanode");
  return dataNode.saslClient;
}
Project: aliyun-oss-hadoop-fs    File: DataNode.java
public SaslDataTransferClient getSaslClient() {
  return saslClient;
}
Project: hbase    File: FanOutOneBlockAsyncDFSOutputSaslHelper.java
static void trySaslNegotiate(Configuration conf, Channel channel, DatanodeInfo dnInfo,
    int timeoutMs, DFSClient client, Token<BlockTokenIdentifier> accessToken,
    Promise<Void> saslPromise) throws IOException {
  SaslDataTransferClient saslClient = client.getSaslDataTransferClient();
  SaslPropertiesResolver saslPropsResolver = SASL_ADAPTOR.getSaslPropsResolver(saslClient);
  TrustedChannelResolver trustedChannelResolver =
      SASL_ADAPTOR.getTrustedChannelResolver(saslClient);
  AtomicBoolean fallbackToSimpleAuth = SASL_ADAPTOR.getFallbackToSimpleAuth(saslClient);
  InetAddress addr = ((InetSocketAddress) channel.remoteAddress()).getAddress();
  if (trustedChannelResolver.isTrusted() || trustedChannelResolver.isTrusted(addr)) {
    saslPromise.trySuccess(null);
    return;
  }
  DataEncryptionKey encryptionKey = client.newDataEncryptionKey();
  if (encryptionKey != null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug(
        "SASL client doing encrypted handshake for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    doSaslNegotiation(conf, channel, timeoutMs, getUserNameFromEncryptionKey(encryptionKey),
      encryptionKeyToPassword(encryptionKey.encryptionKey),
      createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise);
  } else if (!UserGroupInformation.isSecurityEnabled()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client skipping handshake in unsecured configuration for addr = " + addr
          + ", datanodeId = " + dnInfo);
    }
    saslPromise.trySuccess(null);
  } else if (dnInfo.getXferPort() < 1024) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client skipping handshake in secured configuration with "
          + "privileged port for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    saslPromise.trySuccess(null);
  } else if (fallbackToSimpleAuth != null && fallbackToSimpleAuth.get()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client skipping handshake in secured configuration with "
          + "unsecured cluster for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    saslPromise.trySuccess(null);
  } else if (saslPropsResolver != null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug(
        "SASL client doing general handshake for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    doSaslNegotiation(conf, channel, timeoutMs, buildUsername(accessToken),
      buildClientPassword(accessToken), saslPropsResolver.getClientProperties(addr), saslPromise);
  } else {
    // It's a secured cluster using non-privileged ports, but no SASL. The only way this can
    // happen is if the DataNode has ignore.secure.ports.for.testing configured, so this is a rare
    // edge case.
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client skipping handshake in secured configuration with no SASL "
          + "protection configured for addr = " + addr + ", datanodeId = " + dnInfo);
    }
    saslPromise.trySuccess(null);
  }
}
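Note the branch order: a trusted channel skips the handshake outright; an available DataEncryptionKey forces the encrypted handshake; an unsecured cluster, a privileged datanode port, or an active fallbackToSimpleAuth flag also skip it; and only the saslPropsResolver branch performs the general token-based negotiation.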
Project: hadoop    File: DFSClient.java
/**
 * Returns the SaslDataTransferClient configured for this DFSClient.
 *
 * @return SaslDataTransferClient configured for this DFSClient
 */
public SaslDataTransferClient getSaslDataTransferClient() {
  return saslClient;
}
Project: aliyun-oss-hadoop-fs    File: DFSClient.java
/**
 * Returns the SaslDataTransferClient configured for this DFSClient.
 *
 * @return SaslDataTransferClient configured for this DFSClient
 */
public SaslDataTransferClient getSaslDataTransferClient() {
  return saslClient;
}
Project: big-c    File: DFSClient.java
/**
 * Returns the SaslDataTransferClient configured for this DFSClient.
 *
 * @return SaslDataTransferClient configured for this DFSClient
 */
public SaslDataTransferClient getSaslDataTransferClient() {
  return saslClient;
}
Project: hadoop-2.6.0-cdh5.4.3    File: DFSClient.java
/**
 * Returns the SaslDataTransferClient configured for this DFSClient.
 *
 * @return SaslDataTransferClient configured for this DFSClient
 */
public SaslDataTransferClient getSaslDataTransferClient() {
  return saslClient;
}
Project: FlexMap    File: DFSClient.java
/**
 * Returns the SaslDataTransferClient configured for this DFSClient.
 *
 * @return SaslDataTransferClient configured for this DFSClient
 */
public SaslDataTransferClient getSaslDataTransferClient() {
  return saslClient;
}
Project: hbase    File: FanOutOneBlockAsyncDFSOutputSaslHelper.java
TrustedChannelResolver getTrustedChannelResolver(SaslDataTransferClient saslClient);
Project: hbase    File: FanOutOneBlockAsyncDFSOutputSaslHelper.java
SaslPropertiesResolver getSaslPropsResolver(SaslDataTransferClient saslClient);
Project: hbase    File: FanOutOneBlockAsyncDFSOutputSaslHelper.java
AtomicBoolean getFallbackToSimpleAuth(SaslDataTransferClient saslClient);