Example source code for the Java class org.apache.hadoop.fs.StorageType
Project: hadoop
File: TestStorageMover.java
private static StorageType[][] genStorageTypes(int numDataNodes,
    int numAllDisk, int numAllArchive, int numRamDisk) {
  Preconditions.checkArgument(
      (numAllDisk + numAllArchive + numRamDisk) <= numDataNodes);
  StorageType[][] types = new StorageType[numDataNodes][];
  int i = 0;
  for (; i < numRamDisk; i++) {
    types[i] = new StorageType[]{StorageType.RAM_DISK, StorageType.DISK};
  }
  for (; i < numRamDisk + numAllDisk; i++) {
    types[i] = new StorageType[]{StorageType.DISK, StorageType.DISK};
  }
  for (; i < numRamDisk + numAllDisk + numAllArchive; i++) {
    types[i] = new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE};
  }
  for (; i < types.length; i++) {
    types[i] = new StorageType[]{StorageType.DISK, StorageType.ARCHIVE};
  }
  return types;
}
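The layouts produced by this helper are meant to be handed to a mini cluster. As a hedged illustration only (the Builder calls mirror the storageTypes(...) usage in the TestMover snippet further down; the call genStorageTypes(4, 1, 1, 1) and the surrounding scaffolding are assumptions, not code from this file):

// Sketch: start a 4-datanode MiniDFSCluster where node 0 has RAM_DISK+DISK,
// node 1 has DISK+DISK, node 2 has ARCHIVE+ARCHIVE and node 3 has DISK+ARCHIVE.
Configuration conf = new HdfsConfiguration();
StorageType[][] types = genStorageTypes(4, 1, 1, 1);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(types.length)
    .storageTypes(types)
    .build();
try {
  cluster.waitActive();
  // ... exercise the storage mover against the mixed-media layout ...
} finally {
  cluster.shutdown();
}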
Project: hadoop
File: BlockPlacementPolicyDefault.java
private DatanodeStorageInfo chooseFromNextRack(Node next,
    Set<Node> excludedNodes,
    long blocksize,
    int maxNodesPerRack,
    List<DatanodeStorageInfo> results,
    boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException {
  final String nextRack = next.getNetworkLocation();
  try {
    return chooseRandom(nextRack, excludedNodes, blocksize, maxNodesPerRack,
        results, avoidStaleNodes, storageTypes);
  } catch (NotEnoughReplicasException e) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Failed to choose from the next rack (location = " + nextRack
          + "), retry choosing randomly", e);
    }
    // otherwise randomly choose one from the whole network
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }
}
Project: hadoop
File: TestPBHelper.java
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
          AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
          AdminStates.NORMAL),
  };
  String[] storageIDs = {"s1", "s2", "s3", "s4"};
  StorageType[] media = {
      StorageType.DISK,
      StorageType.SSD,
      StorageType.DISK,
      StorageType.RAM_DISK
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53),
      dnInfos, storageIDs, media, 5, false, new DatanodeInfo[]{});
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(), new Text("kind"),
      new Text("service")));
  return lb;
}
Project: hadoop
File: TestStoragePolicySummary.java
@Test
public void testMultipleHots() {
  BlockStoragePolicySuite bsps = BlockStoragePolicySuite.createDefaultSuite();
  StoragePolicySummary sts = new StoragePolicySummary(bsps.getAllPolicies());
  BlockStoragePolicy hot = bsps.getPolicy("HOT");
  sts.add(new StorageType[]{StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK}, hot);
  sts.add(new StorageType[]{StorageType.DISK,
      StorageType.DISK, StorageType.DISK, StorageType.DISK}, hot);
  Map<String, Long> actualOutput = convertToStringMap(sts);
  Assert.assertEquals(4, actualOutput.size());
  Map<String, Long> expectedOutput = new HashMap<>();
  expectedOutput.put("HOT|DISK:1(HOT)", 1L);
  expectedOutput.put("HOT|DISK:2(HOT)", 1L);
  expectedOutput.put("HOT|DISK:3(HOT)", 1L);
  expectedOutput.put("HOT|DISK:4(HOT)", 1L);
  Assert.assertEquals(expectedOutput, actualOutput);
}
Project: hadoop
File: TestQuotaByStorageType.java
@Test(timeout = 60000)
public void testQuotaByStorageTypeParentOnChildOn() throws Exception {
  final Path parent = new Path(dir, "parent");
  final Path child = new Path(parent, "child");
  dfs.mkdirs(parent);
  dfs.mkdirs(child);
  dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  dfs.setQuotaByStorageType(parent, StorageType.SSD, 2 * BLOCKSIZE);
  dfs.setQuotaByStorageType(child, StorageType.SSD, 3 * BLOCKSIZE);
  // Create file of size 2.5 * BLOCKSIZE under child directory
  // Verify parent Quota applies
  Path createdFile1 = new Path(child, "created_file1.data");
  long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  int bufLen = BLOCKSIZE / 16;
  try {
    DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
        REPLICATION, seed);
    fail("Should have failed with QuotaByStorageTypeExceededException ");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
  }
}
Project: hadoop
File: DataXceiver.java
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  checkAccess(socketOut, true, blk, blockToken,
      Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets,
        targetStorageTypes, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } catch (IOException ioe) {
    LOG.info("transferBlock " + blk + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
  }
}
Project: hadoop
File: TestFsVolumeList.java
@Test
public void testReleaseVolumeRefIfNoBlockScanner() throws IOException {
  FsVolumeList volumeList = new FsVolumeList(
      Collections.<VolumeFailureInfo>emptyList(), null, blockChooser);
  File volDir = new File(baseDir, "volume-0");
  volDir.mkdirs();
  FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", volDir,
      conf, StorageType.DEFAULT);
  FsVolumeReference ref = volume.obtainReference();
  volumeList.addVolume(ref);
  try {
    ref.close();
    fail("Should throw exception because the reference is closed in "
        + "VolumeList#addVolume().");
  } catch (IllegalStateException e) {
  }
}
Project: hadoop
File: FSImageFormatPBINode.java
private void loadRootINode(INodeSection.INode p) {
  INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext());
  final QuotaCounts q = root.getQuotaCounts();
  final long nsQuota = q.getNameSpace();
  final long dsQuota = q.getStorageSpace();
  if (nsQuota != -1 || dsQuota != -1) {
    dir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota);
  }
  final EnumCounters<StorageType> typeQuotas = q.getTypeSpaces();
  if (typeQuotas.anyGreaterOrEqual(0)) {
    dir.rootDir.getDirectoryWithQuotaFeature().setQuota(typeQuotas);
  }
  dir.rootDir.cloneModificationTime(root);
  dir.rootDir.clonePermissionStatus(root);
  // root dir supports having extended attributes according to POSIX
  final XAttrFeature f = root.getXAttrFeature();
  if (f != null) {
    dir.rootDir.addXAttrFeature(f);
  }
}
Project: hadoop
File: DataNode.java
/**
 * Connect to the first item in the target list. Pass along the
 * entire target list, the block, and the data.
 */
DataTransfer(DatanodeInfo targets[], StorageType[] targetStorageTypes,
    ExtendedBlock b, BlockConstructionStage stage,
    final String clientname) {
  if (DataTransferProtocol.LOG.isDebugEnabled()) {
    DataTransferProtocol.LOG.debug(getClass().getSimpleName() + ": "
        + b + " (numBytes=" + b.getNumBytes() + ")"
        + ", stage=" + stage
        + ", clientname=" + clientname
        + ", targets=" + Arrays.asList(targets)
        + ", target storage types=" + (targetStorageTypes == null ? "[]" :
            Arrays.asList(targetStorageTypes)));
  }
  this.targets = targets;
  this.targetStorageTypes = targetStorageTypes;
  this.b = b;
  this.stage = stage;
  BPOfferService bpos = blockPoolManager.get(b.getBlockPoolId());
  bpReg = bpos.bpRegistration;
  this.clientname = clientname;
  this.cachingStrategy =
      new CachingStrategy(true, getDnConf().readaheadLength);
}
Project: hadoop
File: Mover.java
boolean scheduleMoves4Block(StorageTypeDiff diff, LocatedBlock lb) {
  final List<MLocation> locations = MLocation.toLocations(lb);
  Collections.shuffle(locations);
  final DBlock db = newDBlock(lb.getBlock().getLocalBlock(), locations);
  for (final StorageType t : diff.existing) {
    for (final MLocation ml : locations) {
      final Source source = storages.getSource(ml);
      if (ml.storageType == t && source != null) {
        // try to schedule one replica move.
        if (scheduleMoveReplica(db, source, diff.expected)) {
          return true;
        }
      }
    }
  }
  return false;
}
Project: hadoop
File: Mover.java
/**
 * Choose the target storage within same Datanode if possible.
 */
boolean chooseTargetInSameNode(DBlock db, Source source,
    List<StorageType> targetTypes) {
  for (StorageType t : targetTypes) {
    StorageGroup target = storages.getTarget(source.getDatanodeInfo()
        .getDatanodeUuid(), t);
    if (target == null) {
      continue;
    }
    final PendingMove pm = source.addPendingMove(db, target);
    if (pm != null) {
      dispatcher.executePendingMove(pm);
      return true;
    }
  }
  return false;
}
Project: hadoop
File: TestStorageMover.java
private Replication getOrVerifyReplication(Path file, Replication expected)
    throws IOException {
  final List<LocatedBlock> lbs = dfs.getClient().getLocatedBlocks(
      file.toString(), 0).getLocatedBlocks();
  Assert.assertEquals(1, lbs.size());
  LocatedBlock lb = lbs.get(0);
  StringBuilder types = new StringBuilder();
  final Replication r = new Replication();
  for (StorageType t : lb.getStorageTypes()) {
    types.append(t).append(", ");
    if (t == StorageType.DISK) {
      r.disk++;
    } else if (t == StorageType.ARCHIVE) {
      r.archive++;
    } else {
      Assert.fail("Unexpected storage type " + t);
    }
  }
  if (expected != null) {
    final String s = "file = " + file + "\n types = [" + types + "]";
    Assert.assertEquals(s, expected, r);
  }
  return r;
}
Project: hadoop
File: TestFsVolumeList.java
@Test
public void testGetNextVolumeWithClosedVolume() throws IOException {
  FsVolumeList volumeList = new FsVolumeList(
      Collections.<VolumeFailureInfo>emptyList(), blockScanner, blockChooser);
  List<FsVolumeImpl> volumes = new ArrayList<>();
  for (int i = 0; i < 3; i++) {
    File curDir = new File(baseDir, "nextvolume-" + i);
    curDir.mkdirs();
    FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", curDir,
        conf, StorageType.DEFAULT);
    volume.setCapacityForTesting(1024 * 1024 * 1024);
    volumes.add(volume);
    volumeList.addVolume(volume.obtainReference());
  }
  // Close the second volume.
  volumes.get(1).closeAndWait();
  for (int i = 0; i < 10; i++) {
    try (FsVolumeReference ref =
        volumeList.getNextVolume(StorageType.DEFAULT, 128)) {
      // volume No.2 will not be chosen.
      assertNotEquals(ref.getVolume(), volumes.get(1));
    }
  }
}
Project: hadoop
File: BlockCommand.java
/**
 * Create BlockCommand for transferring blocks to another datanode
 * @param blocktargetlist blocks to be transferred
 */
public BlockCommand(int action, String poolId,
    List<BlockTargetPair> blocktargetlist) {
  super(action);
  this.poolId = poolId;
  blocks = new Block[blocktargetlist.size()];
  targets = new DatanodeInfo[blocks.length][];
  targetStorageTypes = new StorageType[blocks.length][];
  targetStorageIDs = new String[blocks.length][];
  for (int i = 0; i < blocks.length; i++) {
    BlockTargetPair p = blocktargetlist.get(i);
    blocks[i] = p.block;
    targets[i] = DatanodeStorageInfo.toDatanodeInfos(p.targets);
    targetStorageTypes[i] = DatanodeStorageInfo.toStorageTypes(p.targets);
    targetStorageIDs[i] = DatanodeStorageInfo.toStorageIDs(p.targets);
  }
}
Project: hadoop
File: TestTruncateQuotaUpdate.java
private void testTruncate(long newLength, long expectedDiff,
    long expectedUsage) throws Exception {
  // before doing the real truncation, make sure the computation is correct
  final INodesInPath iip = fsdir.getINodesInPath4Write(file.toString());
  final INodeFile fileNode = iip.getLastINode().asFile();
  fileNode.recordModification(iip.getLatestSnapshotId(), true);
  final long diff = fileNode.computeQuotaDeltaForTruncate(newLength);
  Assert.assertEquals(expectedDiff, diff);
  // do the real truncation
  dfs.truncate(file, newLength);
  // wait for truncate to finish
  TestFileTruncate.checkBlockRecovery(file, dfs);
  final INodeDirectory dirNode = fsdir.getINode4Write(dir.toString())
      .asDirectory();
  final long spaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  final long diskUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.DISK);
  Assert.assertEquals(expectedUsage, spaceUsed);
  Assert.assertEquals(expectedUsage, diskUsed);
}
Project: hadoop
File: FSNamesystem.java
/**
 * Set the namespace quota and storage space quota for a directory.
 * See {@link ClientProtocol#setQuota(String, long, long, StorageType)} for the
 * contract.
 *
 * Note: This does not support ".inodes" relative path.
 */
void setQuota(String src, long nsQuota, long ssQuota, StorageType type)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  writeLock();
  boolean success = false;
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot set quota on " + src);
    FSDirAttrOp.setQuota(dir, src, nsQuota, ssQuota, type);
    success = true;
  } finally {
    writeUnlock();
    if (success) {
      getEditLog().logSync();
    }
    logAuditEvent(success, "setQuota", src);
  }
}
Project: hadoop
File: DFSTestUtil.java
public static DatanodeStorageInfo[] createDatanodeStorageInfos(
    int n, String[] racks, String[] hostnames, StorageType[] types) {
  DatanodeStorageInfo[] storages = new DatanodeStorageInfo[n];
  for (int i = storages.length; i > 0; ) {
    final String storageID = "s" + i;
    final String ip = i + "." + i + "." + i + "." + i;
    i--;
    final String rack = (racks != null && i < racks.length) ? racks[i] : "defaultRack";
    final String hostname = (hostnames != null && i < hostnames.length) ? hostnames[i] : "host";
    final StorageType type = (types != null && i < types.length) ? types[i]
        : StorageType.DEFAULT;
    storages[i] = createDatanodeStorageInfo(storageID, ip, rack, hostname,
        type);
  }
  return storages;
}
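For orientation, a hedged sketch of a call to this helper; the rack names, hostnames, and storage types below are illustrative values, not taken from the test suite:

// Three storages on three racks, each backed by a different medium.
DatanodeStorageInfo[] storages = DFSTestUtil.createDatanodeStorageInfos(
    3,
    new String[] {"/rack1", "/rack2", "/rack3"},
    new String[] {"host1", "host2", "host3"},
    new StorageType[] {StorageType.DISK, StorageType.SSD, StorageType.ARCHIVE});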
Project: hadoop
File: TestQuotaByStorageType.java
@Test(timeout = 60000)
public void testContentSummaryWithoutStoragePolicy() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile1 = new Path(foo, "created_file1.data");
  dfs.mkdirs(foo);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(!fnode.isQuotaSet());
  // Create file of size 2 * BLOCKSIZE under directory "foo"
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
  // Verify getContentSummary without any quota set
  // Expect no type quota and usage information available
  ContentSummary cs = dfs.getContentSummary(foo);
  assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
  for (StorageType t : StorageType.values()) {
    assertEquals(cs.getTypeConsumed(t), 0);
    assertEquals(cs.getTypeQuota(t), -1);
  }
}
Project: hadoop
File: TestMover.java
@Test
public void testMoverFailedRetry() throws Exception {
  // HDFS-8147
  final Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, "2");
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3)
      .storageTypes(
          new StorageType[][] {{StorageType.DISK, StorageType.ARCHIVE},
              {StorageType.DISK, StorageType.ARCHIVE},
              {StorageType.DISK, StorageType.ARCHIVE}}).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testMoverFailedRetry";
    // write to DISK
    final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
    out.writeChars("testMoverFailedRetry");
    out.close();
    // Delete the block file so the block move fails with FileNotFoundException
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());
    // move to ARCHIVE
    dfs.setStoragePolicy(new Path(file), "COLD");
    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] {"-p", file.toString()});
    Assert.assertEquals("Movement should fail after some retry",
        ExitStatus.IO_EXCEPTION.getExitCode(), rc);
  } finally {
    cluster.shutdown();
  }
}
Project: hadoop
File: TestBlockStoragePolicy.java
/**
 * Consider a File with Cold temperature. Increase replication factor of
 * that file from 3 to 5. Make sure all replicas are created in ARCHIVE.
 */
@Test
public void testChangeColdRep() throws Exception {
  testChangeFileRep(HdfsConstants.COLD_STORAGE_POLICY_NAME, COLD,
      new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE,
          StorageType.ARCHIVE},
      new StorageType[]{StorageType.ARCHIVE, StorageType.ARCHIVE,
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE});
}
Project: hadoop
File: TestSimulatedFSDataset.java
int addSomeBlocks(SimulatedFSDataset fsdataset, int startingBlockId)
    throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId + NUMBLOCKS; ++i) {
    ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0);
    // We pass the expected length as zero; fsdataset should use the size of
    // the data actually written.
    ReplicaInPipelineInterface bInfo = fsdataset.createRbw(
        StorageType.DEFAULT, b, false).getReplica();
    ReplicaOutputStreams out = bInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    try {
      OutputStream dataOut = out.getDataOut();
      assertEquals(0, fsdataset.getLength(b));
      for (int j = 1; j <= blockIdToLen(i); ++j) {
        dataOut.write(j);
        assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
        bytesAdded++;
      }
    } finally {
      out.close();
    }
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;
}
Project: hadoop
File: TestBlockStoragePolicy.java
/**
 * Consider a File with Warm temperature. Increase replication factor of
 * that file from 3 to 5. Make sure all replicas are created on DISK
 * and ARCHIVE.
 */
@Test
public void testChangeWarmRep() throws Exception {
  testChangeFileRep(HdfsConstants.WARM_STORAGE_POLICY_NAME, WARM,
      new StorageType[]{StorageType.DISK, StorageType.ARCHIVE,
          StorageType.ARCHIVE},
      new StorageType[]{StorageType.DISK, StorageType.ARCHIVE,
          StorageType.ARCHIVE, StorageType.ARCHIVE, StorageType.ARCHIVE});
}
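The expected arrays in these two tests follow from BlockStoragePolicy#chooseStorageTypes. A minimal sketch, assuming the default policy suite is available; the variable names are illustrative:

// WARM keeps the first replica on DISK and the rest on ARCHIVE, so raising
// the replication from 3 to 5 only adds ARCHIVE entries.
BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
BlockStoragePolicy warm = suite.getPolicy("WARM");
List<StorageType> threeReplicas = warm.chooseStorageTypes((short) 3);
// => [DISK, ARCHIVE, ARCHIVE]
List<StorageType> fiveReplicas = warm.chooseStorageTypes((short) 5);
// => [DISK, ARCHIVE, ARCHIVE, ARCHIVE, ARCHIVE]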
Project: hadoop
File: PBHelper.java
public static StorageType[] convertStorageTypes(
    List<StorageTypeProto> storageTypesList, int expectedSize) {
  final StorageType[] storageTypes = new StorageType[expectedSize];
  if (storageTypesList.size() != expectedSize) { // missing storage types
    Preconditions.checkState(storageTypesList.isEmpty());
    Arrays.fill(storageTypes, StorageType.DEFAULT);
  } else {
    for (int i = 0; i < storageTypes.length; ++i) {
      storageTypes[i] = convertStorageType(storageTypesList.get(i));
    }
  }
  return storageTypes;
}
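A hedged illustration of the defaulting branch above: an empty protobuf list with a non-zero expected size is treated as "storage types missing" and padded with StorageType.DEFAULT, while a non-empty list must match expectedSize exactly or the checkState fires. The values below are for illustration only:

// Empty list + expectedSize 3  ->  [DEFAULT, DEFAULT, DEFAULT]
List<StorageTypeProto> noTypes = Collections.emptyList();
StorageType[] padded = PBHelper.convertStorageTypes(noTypes, 3);

// A partially filled list (say size 2 with expectedSize 3) would instead trip
// the Preconditions.checkState(storageTypesList.isEmpty()) check above.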
Project: hadoop
File: Dispatcher.java
/** Decide if the given block is a good candidate to move or not */
private boolean isGoodBlockCandidate(DBlock block) {
  // source and target must have the same storage type
  final StorageType sourceStorageType = getStorageType();
  for (Task t : tasks) {
    if (Dispatcher.this.isGoodBlockCandidate(this, t.target,
        sourceStorageType, block)) {
      return true;
    }
  }
  return false;
}
Project: hadoop
File: DataNode.java
void transferBlocks(String poolId, Block blocks[],
    DatanodeInfo xferTargets[][], StorageType[][] xferTargetStorageTypes) {
  for (int i = 0; i < blocks.length; i++) {
    try {
      transferBlock(new ExtendedBlock(poolId, blocks[i]), xferTargets[i],
          xferTargetStorageTypes[i]);
    } catch (IOException ie) {
      LOG.warn("Failed to transfer block " + blocks[i], ie);
    }
  }
}
Project: hadoop
File: FSImageFormatPBINode.java
private static QuotaByStorageTypeFeatureProto.Builder
    buildQuotaByStorageTypeEntries(QuotaCounts q) {
  QuotaByStorageTypeFeatureProto.Builder b =
      QuotaByStorageTypeFeatureProto.newBuilder();
  for (StorageType t : StorageType.getTypesSupportingQuota()) {
    if (q.getTypeSpace(t) >= 0) {
      QuotaByStorageTypeEntryProto.Builder eb =
          QuotaByStorageTypeEntryProto.newBuilder().
              setStorageType(PBHelper.convertStorageType(t)).
              setQuota(q.getTypeSpace(t));
      b.addQuotas(eb);
    }
  }
  return b;
}
Project: hadoop
File: TestBlockStoragePolicy.java
@Override
public void checkChooseStorageTypes(BlockStoragePolicy p,
    short replication, List<StorageType> chosen, StorageType... expected) {
  final List<StorageType> types = p.chooseStorageTypes(replication,
      chosen, none, false);
  assertStorageTypes(types, expected);
}
Project: hadoop
File: FsDatasetImpl.java
private StorageType getStorageTypeFromLocations(
    Collection<StorageLocation> dataLocations, File dir) {
  for (StorageLocation dataLocation : dataLocations) {
    if (dataLocation.getFile().equals(dir)) {
      return dataLocation.getStorageType();
    }
  }
  return StorageType.DEFAULT;
}
Project: hadoop
File: StorageLocation.java
private StorageLocation(StorageType storageType, URI uri) {
  this.storageType = storageType;
  if (uri.getScheme() == null ||
      "file".equalsIgnoreCase(uri.getScheme())) {
    // drop any (illegal) authority in the URI for backwards compatibility
    this.file = new File(uri.getPath());
  } else {
    throw new IllegalArgumentException("Unsupported URI schema in " + uri);
  }
}
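The bracketed storage-type prefix used in dfs.datanode.data.dir entries reaches this constructor via the parse() factory of the same class (not shown above). A hedged sketch with illustrative paths; the helper below is hypothetical:

// Hypothetical helper: "[SSD]/mnt/ssd0/dfs/dn" is expected to parse to
// storageType == SSD, while an unprefixed path falls back to StorageType.DEFAULT.
static StorageType storageTypeOf(String rawDataDir) throws IOException {
  StorageLocation location = StorageLocation.parse(rawDataDir);
  return location.getStorageType();
}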
Project: hadoop
File: TestDiskspaceQuotaUpdate.java
/**
 * Test that appending past a storage-type quota does not mark the file as
 * under construction or leave a dangling lease.
 */
@Test (timeout=60000)
public void testAppendOverTypeQuota() throws Exception {
  final Path dir = new Path("/TestAppendOverTypeQuota");
  final Path file = new Path(dir, "file");
  // create partial block file
  dfs.mkdirs(dir);
  // set the storage policy on dir
  dfs.setStoragePolicy(dir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  DFSTestUtil.createFile(dfs, file, BLOCKSIZE / 2, REPLICATION, seed);
  // set the SSD quota to 1L
  dfs.setQuotaByStorageType(dir, StorageType.SSD, 1L);
  final INodeDirectory dirNode = fsdir.getINode4Write(dir.toString())
      .asDirectory();
  final long spaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  try {
    DFSTestUtil.appendFile(dfs, file, BLOCKSIZE);
    Assert.fail("append didn't fail");
  } catch (RemoteException e) {
    assertTrue(e.getClassName().contains("QuotaByStorageTypeExceededException"));
  }
  // check that the file exists, isn't UC, and has no dangling lease
  INodeFile inode = fsdir.getINode(file.toString()).asFile();
  Assert.assertNotNull(inode);
  Assert.assertFalse("should not be UC", inode.isUnderConstruction());
  Assert.assertNull("should not have a lease", cluster.getNamesystem()
      .getLeaseManager().getLeaseByPath(file.toString()));
  // make sure the quota usage is unchanged
  final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  assertEquals(spaceUsed, newSpaceUsed);
  // make sure edits aren't corrupted
  dfs.recoverLease(file);
  cluster.restartNameNodes();
}
Project: hadoop
File: DatanodeInfoWithStorage.java
public DatanodeInfoWithStorage(DatanodeInfo from, String storageID,
    StorageType storageType) {
  super(from);
  this.storageID = storageID;
  this.storageType = storageType;
  setSoftwareVersion(from.getSoftwareVersion());
  setDependentHostNames(from.getDependentHostNames());
  setLevel(from.getLevel());
  setParent(from.getParent());
}
Project: hadoop
File: BlockStoragePolicy.java
@VisibleForTesting
public BlockStoragePolicy(byte id, String name, StorageType[] storageTypes,
    StorageType[] creationFallbacks, StorageType[] replicationFallbacks,
    boolean copyOnCreateFile) {
  this.id = id;
  this.name = name;
  this.storageTypes = storageTypes;
  this.creationFallbacks = creationFallbacks;
  this.replicationFallbacks = replicationFallbacks;
  this.copyOnCreateFile = copyOnCreateFile;
}
Project: hadoop
File: TestBlockStoragePolicy.java
static void assertReplicationFallback(BlockStoragePolicy policy,
    StorageType noneExpected, StorageType archiveExpected,
    StorageType diskExpected) {
  Assert.assertEquals(noneExpected, policy.getReplicationFallback(none));
  Assert.assertEquals(archiveExpected, policy.getReplicationFallback(archive));
  Assert.assertEquals(diskExpected, policy.getReplicationFallback(disk));
  Assert.assertEquals(null, policy.getReplicationFallback(both));
}
Project: hadoop
File: BlockStoragePolicy.java
private static StorageType getFallback(EnumSet<StorageType> unavailables,
    StorageType[] fallbacks) {
  for (StorageType fb : fallbacks) {
    if (!unavailables.contains(fb)) {
      return fb;
    }
  }
  return null;
}
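A standalone sketch of the resolution order above, using a WARM-style fallback list {DISK, ARCHIVE} with DISK marked unavailable; the variables are illustrative and not part of BlockStoragePolicy:

StorageType[] fallbacks = {StorageType.DISK, StorageType.ARCHIVE};
EnumSet<StorageType> unavailables = EnumSet.of(StorageType.DISK);

StorageType chosen = null;
for (StorageType fb : fallbacks) {
  if (!unavailables.contains(fb)) {
    chosen = fb; // first usable entry wins
    break;
  }
}
// chosen == StorageType.ARCHIVE; if every fallback were unavailable,
// chosen would stay null and the caller has to skip this replica.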
Project: hadoop
File: DFSClient.java
/**
 * Sets or resets quotas by storage type for a directory.
 * @see ClientProtocol#setQuota(String, long, long, StorageType)
 */
void setQuotaByStorageType(String src, StorageType type, long quota)
    throws IOException {
  if (quota <= 0 && quota != HdfsConstants.QUOTA_DONT_SET &&
      quota != HdfsConstants.QUOTA_RESET) {
    throw new IllegalArgumentException("Invalid values for quota :" +
        quota);
  }
  if (type == null) {
    throw new IllegalArgumentException("Invalid storage type(null)");
  }
  if (!type.supportTypeQuota()) {
    throw new IllegalArgumentException("Don't support Quota for storage type : "
        + type.toString());
  }
  TraceScope scope = getPathTraceScope("setQuotaByStorageType", src);
  try {
    namenode.setQuota(src, HdfsConstants.QUOTA_DONT_SET, quota, type);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        FileNotFoundException.class,
        QuotaByStorageTypeExceededException.class,
        UnresolvedPathException.class,
        SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
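On the user-facing side, the same validation is reached through DistributedFileSystem#setQuotaByStorageType, as the quota tests earlier in this listing do. A hedged sketch; dfs, blockSize, and the /cold path are assumptions:

// Cap ARCHIVE usage under /cold at ten blocks, then clear the limit again.
Path cold = new Path("/cold");
dfs.setQuotaByStorageType(cold, StorageType.ARCHIVE, 10L * blockSize);
// ... write data; exceeding the cap raises QuotaByStorageTypeExceededException ...
dfs.setQuotaByStorageType(cold, StorageType.ARCHIVE, HdfsConstants.QUOTA_RESET);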
Project: hadoop
File: PBHelper.java
public static LocatedBlockProto convert(LocatedBlock b) {
  if (b == null) return null;
  Builder builder = LocatedBlockProto.newBuilder();
  DatanodeInfo[] locs = b.getLocations();
  List<DatanodeInfo> cachedLocs =
      Lists.newLinkedList(Arrays.asList(b.getCachedLocations()));
  for (int i = 0; i < locs.length; i++) {
    DatanodeInfo loc = locs[i];
    builder.addLocs(i, PBHelper.convert(loc));
    boolean locIsCached = cachedLocs.contains(loc);
    builder.addIsCached(locIsCached);
    if (locIsCached) {
      cachedLocs.remove(loc);
    }
  }
  Preconditions.checkArgument(cachedLocs.size() == 0,
      "Found additional cached replica locations that are not in the set of"
      + " storage-backed locations!");
  StorageType[] storageTypes = b.getStorageTypes();
  if (storageTypes != null) {
    for (int i = 0; i < storageTypes.length; ++i) {
      builder.addStorageTypes(PBHelper.convertStorageType(storageTypes[i]));
    }
  }
  final String[] storageIDs = b.getStorageIDs();
  if (storageIDs != null) {
    builder.addAllStorageIDs(Arrays.asList(storageIDs));
  }
  return builder.setB(PBHelper.convert(b.getBlock()))
      .setBlockToken(PBHelper.convert(b.getBlockToken()))
      .setCorrupt(b.isCorrupt()).setOffset(b.getStartOffset()).build();
}
Project: hadoop
File: PBHelper.java
public static BlockStoragePolicy convert(BlockStoragePolicyProto proto) {
  List<StorageTypeProto> cList = proto.getCreationPolicy()
      .getStorageTypesList();
  StorageType[] creationTypes = convertStorageTypes(cList, cList.size());
  List<StorageTypeProto> cfList = proto.hasCreationFallbackPolicy() ? proto
      .getCreationFallbackPolicy().getStorageTypesList() : null;
  StorageType[] creationFallbackTypes = cfList == null ? StorageType
      .EMPTY_ARRAY : convertStorageTypes(cfList, cfList.size());
  List<StorageTypeProto> rfList = proto.hasReplicationFallbackPolicy() ?
      proto.getReplicationFallbackPolicy().getStorageTypesList() : null;
  StorageType[] replicationFallbackTypes = rfList == null ? StorageType
      .EMPTY_ARRAY : convertStorageTypes(rfList, rfList.size());
  return new BlockStoragePolicy((byte) proto.getPolicyId(), proto.getName(),
      creationTypes, creationFallbackTypes, replicationFallbackTypes);
}
Project: hadoop
File: Mover.java
private static long getMaxRemaining(DatanodeStorageReport report, StorageType t) {
  long max = 0L;
  for (StorageReport r : report.getStorageReports()) {
    if (r.getStorage().getStorageType() == t) {
      if (r.getRemaining() > max) {
        max = r.getRemaining();
      }
    }
  }
  return max;
}
Project: hadoop
File: Mover.java
/** @return true if it is necessary to run another round of migration */
private boolean processFile(String fullPath, HdfsLocatedFileStatus status) {
  final byte policyId = status.getStoragePolicy();
  // currently we ignore files with unspecified storage policy
  if (policyId == BlockStoragePolicySuite.ID_UNSPECIFIED) {
    return false;
  }
  final BlockStoragePolicy policy = blockStoragePolicies[policyId];
  if (policy == null) {
    LOG.warn("Failed to get the storage policy of file " + fullPath);
    return false;
  }
  final List<StorageType> types = policy.chooseStorageTypes(
      status.getReplication());
  final LocatedBlocks locatedBlocks = status.getBlockLocations();
  boolean hasRemaining = false;
  final boolean lastBlkComplete = locatedBlocks.isLastBlockComplete();
  List<LocatedBlock> lbs = locatedBlocks.getLocatedBlocks();
  for (int i = 0; i < lbs.size(); i++) {
    if (i == lbs.size() - 1 && !lastBlkComplete) {
      // last block is incomplete, skip it
      continue;
    }
    LocatedBlock lb = lbs.get(i);
    final StorageTypeDiff diff = new StorageTypeDiff(types,
        lb.getStorageTypes());
    if (!diff.removeOverlap(true)) {
      if (scheduleMoves4Block(diff, lb)) {
        hasRemaining |= (diff.existing.size() > 1 &&
            diff.expected.size() > 1);
      }
    }
  }
  return hasRemaining;
}
Project: hadoop
File: TestQuotaByStorageType.java
private void testQuotaByStorageTypeOrTraditionalQuotaExceededCase(
    long storageSpaceQuotaInBlocks, long ssdQuotaInBlocks,
    long testFileLenInBlocks, short replication) throws Exception {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final Path testDir = new Path(dir, METHOD_NAME);
  dfs.mkdirs(testDir);
  dfs.setStoragePolicy(testDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  final long ssdQuota = BLOCKSIZE * ssdQuotaInBlocks;
  final long storageSpaceQuota = BLOCKSIZE * storageSpaceQuotaInBlocks;
  dfs.setQuota(testDir, Long.MAX_VALUE - 1, storageSpaceQuota);
  dfs.setQuotaByStorageType(testDir, StorageType.SSD, ssdQuota);
  INode testDirNode = fsdir.getINode4Write(testDir.toString());
  assertTrue(testDirNode.isDirectory());
  assertTrue(testDirNode.isQuotaSet());
  Path createdFile = new Path(testDir, "created_file.data");
  long fileLen = testFileLenInBlocks * BLOCKSIZE;
  try {
    DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16,
        fileLen, BLOCKSIZE, replication, seed);
    fail("Should have failed with DSQuotaExceededException or " +
        "QuotaByStorageTypeExceededException ");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
    long currentSSDConsumed = testDirNode.asDirectory().getDirectoryWithQuotaFeature()
        .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
    assertEquals(Math.min(ssdQuota, storageSpaceQuota / replication),
        currentSSDConsumed);
  }
}