Java 类org.apache.hadoop.fs.CacheFlag 实例源码
项目:hadoop
文件:FSNDNCacheOp.java
/**
 * Validate and apply an addCacheDirective request, then record it in the
 * edit log. Caller must hold the namesystem write lock (asserted inside
 * CacheManager#addDirective).
 *
 * @param fsn           namesystem, used for permission checks and edit log
 * @param cacheManager  owner of the directive state
 * @param directive     directive to add; must NOT carry an ID — IDs are
 *                      assigned by the CacheManager
 * @param flags         e.g. {@link CacheFlag#FORCE} to skip quota checks
 * @param logRetryCache whether the edit-log record should carry retry-cache
 *                      information for this RPC
 * @return the effective directive as stored (defaults and new ID filled in)
 * @throws IOException if an ID was supplied or the add is rejected
 */
static CacheDirectiveInfo addCacheDirective(
    FSNamesystem fsn, CacheManager cacheManager,
    CacheDirectiveInfo directive, EnumSet<CacheFlag> flags,
    boolean logRetryCache)
    throws IOException {
  final FSPermissionChecker pc = getFsPermissionChecker(fsn);
  // IDs are server-assigned; a client-supplied ID indicates a modify,
  // not an add.
  if (directive.getId() != null) {
    throw new IOException("addDirective: you cannot specify an ID " +
        "for this operation.");
  }
  CacheDirectiveInfo effectiveDirective =
      cacheManager.addDirective(directive, pc, flags);
  // Log only after the in-memory state change succeeds, so the edit log
  // never records a failed add.
  fsn.getEditLog().logAddCacheDirectiveInfo(effectiveDirective,
      logRetryCache);
  return effectiveDirective;
}
项目:hadoop
文件:NameNodeRpcServer.java
/**
 * Handle the addCacheDirective RPC: dedupe retried calls through the retry
 * cache, delegate to the namesystem, and cache the resulting directive ID.
 */
@Override // ClientProtocol
public long addCacheDirective(
    CacheDirectiveInfo path, EnumSet<CacheFlag> flags) throws IOException {
  checkNNStartup();
  // If this is a retried RPC that already completed, replay the cached
  // result (the assigned directive ID) instead of re-executing.
  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion
      (retryCache, null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (Long) cacheEntry.getPayload();
  }
  boolean success = false;
  long ret = 0;
  try {
    // cacheEntry != null means a retry cache is in use, so the edit-log
    // record must carry retry information.
    ret = namesystem.addCacheDirective(path, flags, cacheEntry != null);
    success = true;
  } finally {
    // Always record the outcome (with the ID as payload) so retries of
    // this call can be answered from the cache.
    RetryCache.setState(cacheEntry, success, ret);
  }
  return ret;
}
项目:hadoop
文件:NameNodeRpcServer.java
/**
 * Handle the modifyCacheDirective RPC, using the retry cache so a retried
 * call that already succeeded becomes a no-op.
 */
@Override // ClientProtocol
public void modifyCacheDirective(
    CacheDirectiveInfo directive, EnumSet<CacheFlag> flags) throws IOException {
  checkNNStartup();
  CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
  // A successful cached entry means this exact call already completed.
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return;
  }
  boolean success = false;
  try {
    namesystem.modifyCacheDirective(directive, flags, cacheEntry != null);
    success = true;
  } finally {
    // Record the outcome for future retries of this call.
    RetryCache.setState(cacheEntry, success);
  }
}
项目:hadoop
文件:TestRetryCacheWithHA.java
/**
 * Creates several cache pools, each with one directive, verifies the
 * listing on the active NN, fails over, and verifies the listing again on
 * the new active NN.
 */
@Test (timeout=60000)
public void testListCacheDirectives() throws Exception {
  final int numPools = 7;
  final Path cachedPath = new Path("/p");
  HashSet<String> createdPools = new HashSet<String>(numPools);
  for (int i = 0; i < numPools; i++) {
    final String pool = "testListCacheDirectives-" + i;
    dfs.addCachePool(new CachePoolInfo(pool));
    dfs.addCacheDirective(
        new CacheDirectiveInfo.Builder().setPool(pool).setPath(cachedPath)
            .build(),
        EnumSet.of(CacheFlag.FORCE));
    createdPools.add(pool);
  }
  listCacheDirectives(createdPools, 0);
  // Fail over to the second NameNode and re-check the listing there.
  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  cluster.waitActive(1);
  listCacheDirectives(createdPools, 1);
}
项目:aliyun-oss-hadoop-fs
文件:FSNamesystem.java
/**
 * Modify an existing cache directive: optionally wait for a cache rescan,
 * apply the change under the write lock, sync the edit log, and audit.
 *
 * @param directive     the directive changes; identifies the target by ID
 * @param flags         FORCE skips the pre-modify rescan wait
 * @param logRetryCache whether to tag the edit-log entry for retry-cache
 *                      replay
 * @throws IOException on safe mode, permission, or validation failures
 */
void modifyCacheDirective(CacheDirectiveInfo directive,
    EnumSet<CacheFlag> flags, boolean logRetryCache) throws IOException {
  boolean success = false;
  if (!flags.contains(CacheFlag.FORCE)) {
    // Keep quota accounting accurate unless the caller forces through.
    cacheManager.waitForRescanIfNeeded();
  }
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    // Fixed copy-paste defect: the safe-mode message previously said
    // "Cannot add cache directive" for this modify operation.
    checkNameNodeSafeMode("Cannot modify cache directive");
    FSNDNCacheOp.modifyCacheDirective(this, cacheManager, directive, flags,
        logRetryCache);
    success = true;
  } finally {
    writeUnlock();
    if (success) {
      // Sync outside the write lock so other writers are not blocked.
      getEditLog().logSync();
    }
    // Implicit string conversion tolerates a null ID in the audit entry.
    final String idStr = "{id: " + directive.getId() + "}";
    logAuditEvent(success, "modifyCacheDirective", idStr,
        directive.toString(), null);
  }
}
项目:aliyun-oss-hadoop-fs
文件:FSNDNCacheOp.java
/**
 * Validate and apply an addCacheDirective request, then record it in the
 * edit log. Caller must hold the namesystem write lock (asserted inside
 * CacheManager#addDirective).
 *
 * @param directive     directive to add; must NOT carry an ID — IDs are
 *                      assigned by the CacheManager
 * @param flags         e.g. {@link CacheFlag#FORCE} to skip quota checks
 * @param logRetryCache whether the edit-log record should carry retry-cache
 *                      information for this RPC
 * @return the effective directive as stored (defaults and new ID filled in)
 * @throws IOException if an ID was supplied or the add is rejected
 */
static CacheDirectiveInfo addCacheDirective(
    FSNamesystem fsn, CacheManager cacheManager,
    CacheDirectiveInfo directive, EnumSet<CacheFlag> flags,
    boolean logRetryCache)
    throws IOException {
  final FSPermissionChecker pc = getFsPermissionChecker(fsn);
  // IDs are server-assigned; a client-supplied ID indicates a modify,
  // not an add.
  if (directive.getId() != null) {
    throw new IOException("addDirective: you cannot specify an ID " +
        "for this operation.");
  }
  CacheDirectiveInfo effectiveDirective =
      cacheManager.addDirective(directive, pc, flags);
  // Log only after the in-memory state change succeeds.
  fsn.getEditLog().logAddCacheDirectiveInfo(effectiveDirective,
      logRetryCache);
  return effectiveDirective;
}
项目:aliyun-oss-hadoop-fs
文件:NameNodeRpcServer.java
/**
 * Handle the addCacheDirective RPC: dedupe retried calls through the retry
 * cache, delegate to the namesystem, and cache the resulting directive ID.
 */
@Override // ClientProtocol
public long addCacheDirective(
    CacheDirectiveInfo path, EnumSet<CacheFlag> flags) throws IOException {
  checkNNStartup();
  // Reject early (e.g. on a standby) before consulting the retry cache.
  namesystem.checkOperation(OperationCategory.WRITE);
  // If this is a retried RPC that already completed, replay the cached
  // result (the assigned directive ID) instead of re-executing.
  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion
      (retryCache, null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (Long) cacheEntry.getPayload();
  }
  boolean success = false;
  long ret = 0;
  try {
    // cacheEntry != null means a retry cache is in use, so the edit-log
    // record must carry retry information.
    ret = namesystem.addCacheDirective(path, flags, cacheEntry != null);
    success = true;
  } finally {
    // Always record the outcome (with the ID as payload) for retries.
    RetryCache.setState(cacheEntry, success, ret);
  }
  return ret;
}
项目:aliyun-oss-hadoop-fs
文件:NameNodeRpcServer.java
/**
 * Handle the modifyCacheDirective RPC, using the retry cache so a retried
 * call that already succeeded becomes a no-op.
 */
@Override // ClientProtocol
public void modifyCacheDirective(
    CacheDirectiveInfo directive, EnumSet<CacheFlag> flags) throws IOException {
  checkNNStartup();
  // Reject early (e.g. on a standby) before consulting the retry cache.
  namesystem.checkOperation(OperationCategory.WRITE);
  CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
  // A successful cached entry means this exact call already completed.
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return;
  }
  boolean success = false;
  try {
    namesystem.modifyCacheDirective(directive, flags, cacheEntry != null);
    success = true;
  } finally {
    // Record the outcome for future retries of this call.
    RetryCache.setState(cacheEntry, success);
  }
}
项目:aliyun-oss-hadoop-fs
文件:TestRetryCacheWithHA.java
/**
 * Creates several cache pools, each with one directive, verifies the
 * listing on the active NN, fails over, and verifies the listing again on
 * the new active NN.
 */
@Test (timeout=60000)
public void testListCacheDirectives() throws Exception {
  final int numPools = 7;
  final Path cachedPath = new Path("/p");
  HashSet<String> createdPools = new HashSet<String>(numPools);
  for (int i = 0; i < numPools; i++) {
    final String pool = "testListCacheDirectives-" + i;
    dfs.addCachePool(new CachePoolInfo(pool));
    dfs.addCacheDirective(
        new CacheDirectiveInfo.Builder().setPool(pool).setPath(cachedPath)
            .build(),
        EnumSet.of(CacheFlag.FORCE));
    createdPools.add(pool);
  }
  listCacheDirectives(createdPools, 0);
  // Fail over to the second NameNode and re-check the listing there.
  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  cluster.waitActive(1);
  listCacheDirectives(createdPools, 1);
}
项目:big-c
文件:FSNDNCacheOp.java
/**
 * Validate and apply an addCacheDirective request, then record it in the
 * edit log. Caller must hold the namesystem write lock (asserted inside
 * CacheManager#addDirective).
 *
 * @param directive     directive to add; must NOT carry an ID — IDs are
 *                      assigned by the CacheManager
 * @param flags         e.g. {@link CacheFlag#FORCE} to skip quota checks
 * @param logRetryCache whether the edit-log record should carry retry-cache
 *                      information for this RPC
 * @return the effective directive as stored (defaults and new ID filled in)
 * @throws IOException if an ID was supplied or the add is rejected
 */
static CacheDirectiveInfo addCacheDirective(
    FSNamesystem fsn, CacheManager cacheManager,
    CacheDirectiveInfo directive, EnumSet<CacheFlag> flags,
    boolean logRetryCache)
    throws IOException {
  final FSPermissionChecker pc = getFsPermissionChecker(fsn);
  // IDs are server-assigned; a client-supplied ID indicates a modify,
  // not an add.
  if (directive.getId() != null) {
    throw new IOException("addDirective: you cannot specify an ID " +
        "for this operation.");
  }
  CacheDirectiveInfo effectiveDirective =
      cacheManager.addDirective(directive, pc, flags);
  // Log only after the in-memory state change succeeds.
  fsn.getEditLog().logAddCacheDirectiveInfo(effectiveDirective,
      logRetryCache);
  return effectiveDirective;
}
项目:big-c
文件:NameNodeRpcServer.java
/**
 * Handle the addCacheDirective RPC: dedupe retried calls through the retry
 * cache, delegate to the namesystem, and cache the resulting directive ID.
 */
@Override // ClientProtocol
public long addCacheDirective(
    CacheDirectiveInfo path, EnumSet<CacheFlag> flags) throws IOException {
  checkNNStartup();
  // If this is a retried RPC that already completed, replay the cached
  // result (the assigned directive ID) instead of re-executing.
  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion
      (retryCache, null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (Long) cacheEntry.getPayload();
  }
  boolean success = false;
  long ret = 0;
  try {
    // cacheEntry != null means a retry cache is in use, so the edit-log
    // record must carry retry information.
    ret = namesystem.addCacheDirective(path, flags, cacheEntry != null);
    success = true;
  } finally {
    // Always record the outcome (with the ID as payload) for retries.
    RetryCache.setState(cacheEntry, success, ret);
  }
  return ret;
}
项目:big-c
文件:NameNodeRpcServer.java
/**
 * Handle the modifyCacheDirective RPC, using the retry cache so a retried
 * call that already succeeded becomes a no-op.
 */
@Override // ClientProtocol
public void modifyCacheDirective(
    CacheDirectiveInfo directive, EnumSet<CacheFlag> flags) throws IOException {
  checkNNStartup();
  CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
  // A successful cached entry means this exact call already completed.
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return;
  }
  boolean success = false;
  try {
    namesystem.modifyCacheDirective(directive, flags, cacheEntry != null);
    success = true;
  } finally {
    // Record the outcome for future retries of this call.
    RetryCache.setState(cacheEntry, success);
  }
}
项目:big-c
文件:TestRetryCacheWithHA.java
/**
 * Creates several cache pools, each with one directive, verifies the
 * listing on the active NN, fails over, and verifies the listing again on
 * the new active NN.
 */
@Test (timeout=60000)
public void testListCacheDirectives() throws Exception {
  final int numPools = 7;
  final Path cachedPath = new Path("/p");
  HashSet<String> createdPools = new HashSet<String>(numPools);
  for (int i = 0; i < numPools; i++) {
    final String pool = "testListCacheDirectives-" + i;
    dfs.addCachePool(new CachePoolInfo(pool));
    dfs.addCacheDirective(
        new CacheDirectiveInfo.Builder().setPool(pool).setPath(cachedPath)
            .build(),
        EnumSet.of(CacheFlag.FORCE));
    createdPools.add(pool);
  }
  listCacheDirectives(createdPools, 0);
  // Fail over to the second NameNode and re-check the listing there.
  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  cluster.waitActive(1);
  listCacheDirectives(createdPools, 1);
}
项目:hadoop-2.6.0-cdh5.4.3
文件:TestRetryCacheWithHA.java
/**
 * Creates several cache pools, each with one directive, verifies the
 * listing on the active NN, fails over, and verifies the listing again on
 * the new active NN.
 */
@Test (timeout=60000)
public void testListCacheDirectives() throws Exception {
  final int numPools = 7;
  final Path cachedPath = new Path("/p");
  HashSet<String> createdPools = new HashSet<String>(numPools);
  for (int i = 0; i < numPools; i++) {
    final String pool = "testListCacheDirectives-" + i;
    dfs.addCachePool(new CachePoolInfo(pool));
    dfs.addCacheDirective(
        new CacheDirectiveInfo.Builder().setPool(pool).setPath(cachedPath)
            .build(),
        EnumSet.of(CacheFlag.FORCE));
    createdPools.add(pool);
  }
  listCacheDirectives(createdPools, 0);
  // Fail over to the second NameNode and re-check the listing there.
  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  cluster.waitActive(1);
  listCacheDirectives(createdPools, 1);
}
项目:FlexMap
文件:TestRetryCacheWithHA.java
/**
 * Creates several cache pools, each with one directive, verifies the
 * listing on the active NN, fails over, and verifies the listing again on
 * the new active NN.
 */
@Test (timeout=60000)
public void testListCacheDirectives() throws Exception {
  final int numPools = 7;
  final Path cachedPath = new Path("/p");
  HashSet<String> createdPools = new HashSet<String>(numPools);
  for (int i = 0; i < numPools; i++) {
    final String pool = "testListCacheDirectives-" + i;
    dfs.addCachePool(new CachePoolInfo(pool));
    dfs.addCacheDirective(
        new CacheDirectiveInfo.Builder().setPool(pool).setPath(cachedPath)
            .build(),
        EnumSet.of(CacheFlag.FORCE));
    createdPools.add(pool);
  }
  listCacheDirectives(createdPools, 0);
  // Fail over to the second NameNode and re-check the listing there.
  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  cluster.waitActive(1);
  listCacheDirectives(createdPools, 1);
}
项目:hadoop-on-lustre2
文件:TestRetryCacheWithHA.java
/**
 * Creates several cache pools, each with one directive, verifies the
 * listing on the active NN, fails over, and verifies the listing again on
 * the new active NN.
 */
@Test (timeout=60000)
public void testListCacheDirectives() throws Exception {
  final int numPools = 7;
  final Path cachedPath = new Path("/p");
  HashSet<String> createdPools = new HashSet<String>(numPools);
  for (int i = 0; i < numPools; i++) {
    final String pool = "testListCacheDirectives-" + i;
    dfs.addCachePool(new CachePoolInfo(pool));
    dfs.addCacheDirective(
        new CacheDirectiveInfo.Builder().setPool(pool).setPath(cachedPath)
            .build(),
        EnumSet.of(CacheFlag.FORCE));
    createdPools.add(pool);
  }
  listCacheDirectives(createdPools, 0);
  // Fail over to the second NameNode and re-check the listing there.
  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  cluster.waitActive(1);
  listCacheDirectives(createdPools, 1);
}
项目:hadoop
文件:DFSClient.java
/**
 * Ask the NameNode to add a new cache directive, tracing the call and
 * unwrapping any server-side exception into its original type.
 *
 * @param info  the directive to add
 * @param flags operation flags, e.g. {@link CacheFlag#FORCE}
 * @return the ID assigned to the new directive
 * @throws IOException if the NameNode rejects the request
 */
public long addCacheDirective(
    CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
  checkOpen();
  final TraceScope tracing =
      Trace.startSpan("addCacheDirective", traceSampler);
  try {
    return namenode.addCacheDirective(info, flags);
  } catch (RemoteException re) {
    // Surface the original exception type thrown on the NameNode.
    throw re.unwrapRemoteException();
  } finally {
    tracing.close();
  }
}
项目:hadoop
文件:DFSClient.java
/**
 * Ask the NameNode to modify an existing cache directive, tracing the
 * call and unwrapping any server-side exception into its original type.
 *
 * @param info  the directive changes; identifies the target by ID
 * @param flags operation flags, e.g. {@link CacheFlag#FORCE}
 * @throws IOException if the NameNode rejects the request
 */
public void modifyCacheDirective(
    CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
  checkOpen();
  final TraceScope tracing =
      Trace.startSpan("modifyCacheDirective", traceSampler);
  try {
    namenode.modifyCacheDirective(info, flags);
  } catch (RemoteException re) {
    // Surface the original exception type thrown on the NameNode.
    throw re.unwrapRemoteException();
  } finally {
    tracing.close();
  }
}
项目:hadoop
文件:DistributedFileSystem.java
/**
 * Add a new CacheDirective.
 *
 * @param info Information about a directive to add; its path is required.
 * @param flags {@link CacheFlag}s to use for this operation.
 * @return the ID of the directive that was created.
 * @throws IOException if the directive could not be added
 */
public long addCacheDirective(
    CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
  Preconditions.checkNotNull(info.getPath());
  // Qualify the directive's path against this filesystem's URI and
  // working directory before handing it to the client.
  final Path relative = fixRelativePart(info.getPath());
  final Path absPath = new Path(getPathName(relative))
      .makeQualified(getUri(), getWorkingDirectory());
  final CacheDirectiveInfo qualified =
      new CacheDirectiveInfo.Builder(info).setPath(absPath).build();
  return dfs.addCacheDirective(qualified, flags);
}
项目:hadoop
文件:DistributedFileSystem.java
/**
 * Modify a CacheDirective.
 *
 * @param info Information about the directive to modify. You must set the ID
 *             to indicate which CacheDirective you want to modify.
 * @param flags {@link CacheFlag}s to use for this operation.
 * @throws IOException if the directive could not be modified
 */
public void modifyCacheDirective(
    CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
  // The path is optional on a modify; qualify it only when present.
  final Path origPath = info.getPath();
  if (origPath != null) {
    final Path qualified = new Path(getPathName(fixRelativePart(origPath)))
        .makeQualified(getUri(), getWorkingDirectory());
    info = new CacheDirectiveInfo.Builder(info).setPath(qualified).build();
  }
  dfs.modifyCacheDirective(info, flags);
}
项目:hadoop
文件:FSNamesystem.java
/**
 * Add a new cache directive: optionally wait for a cache rescan (so quota
 * checks see fresh usage), apply the change under the write lock, sync the
 * edit log, and emit an audit event.
 *
 * @param directive     directive to add (must not carry an ID)
 * @param flags         FORCE skips the pre-add rescan wait
 * @param logRetryCache whether to tag the edit-log entry for retry-cache
 *                      replay
 * @return the ID assigned to the new directive
 * @throws IOException on safe mode, permission, or validation failures
 */
long addCacheDirective(CacheDirectiveInfo directive,
    EnumSet<CacheFlag> flags, boolean logRetryCache)
    throws IOException {
  checkOperation(OperationCategory.WRITE);
  CacheDirectiveInfo effectiveDirective = null;
  if (!flags.contains(CacheFlag.FORCE)) {
    // Keep quota accounting accurate unless the caller forces through.
    cacheManager.waitForRescanIfNeeded();
  }
  writeLock();
  try {
    // Re-check after acquiring the lock: HA state may have changed.
    checkOperation(OperationCategory.WRITE);
    if (isInSafeMode()) {
      throw new SafeModeException(
          "Cannot add cache directive", safeMode);
    }
    effectiveDirective = FSNDNCacheOp.addCacheDirective(this, cacheManager,
        directive, flags, logRetryCache);
  } finally {
    writeUnlock();
    // Success is implied by a non-null effective directive.
    boolean success = effectiveDirective != null;
    if (success) {
      // Sync outside the write lock so other writers are not blocked.
      getEditLog().logSync();
    }
    String effectiveDirectiveStr = effectiveDirective != null ?
        effectiveDirective.toString() : null;
    // Audit both success and failure outcomes.
    logAuditEvent(success, "addCacheDirective", effectiveDirectiveStr,
        null, null);
  }
  return effectiveDirective != null ? effectiveDirective.getId() : 0;
}
项目:hadoop
文件:FSNamesystem.java
/**
 * Modify an existing cache directive: optionally wait for a cache rescan,
 * apply the change under the write lock, sync the edit log, and audit.
 *
 * @param directive     the directive changes; identifies the target by ID
 * @param flags         FORCE skips the pre-modify rescan wait
 * @param logRetryCache whether to tag the edit-log entry for retry-cache
 *                      replay
 * @throws IOException on safe mode, permission, or validation failures
 */
void modifyCacheDirective(CacheDirectiveInfo directive,
    EnumSet<CacheFlag> flags, boolean logRetryCache) throws IOException {
  checkOperation(OperationCategory.WRITE);
  boolean success = false;
  if (!flags.contains(CacheFlag.FORCE)) {
    // Keep quota accounting accurate unless the caller forces through.
    cacheManager.waitForRescanIfNeeded();
  }
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    if (isInSafeMode()) {
      // Fixed copy-paste defect: the message previously said
      // "Cannot add cache directive" for this modify operation.
      throw new SafeModeException(
          "Cannot modify cache directive", safeMode);
    }
    FSNDNCacheOp.modifyCacheDirective(this, cacheManager, directive, flags,
        logRetryCache);
    success = true;
  } finally {
    writeUnlock();
    if (success) {
      // Sync outside the write lock so other writers are not blocked.
      getEditLog().logSync();
    }
    // Use implicit string conversion instead of getId().toString():
    // getId() may be null on a bad request, and an NPE here in the
    // finally block would mask the original failure.
    String idStr = "{id: " + directive.getId() + "}";
    logAuditEvent(success, "modifyCacheDirective", idStr,
        directive.toString(), null);
  }
}
项目:hadoop
文件:FSNDNCacheOp.java
/**
 * Apply a modifyCacheDirective request and record it in the edit log.
 * Caller is expected to hold the namesystem write lock.
 *
 * @param logRetryCache whether the edit-log entry should carry retry-cache
 *                      information
 * @throws IOException if permission checks or validation fail
 */
static void modifyCacheDirective(
    FSNamesystem fsn, CacheManager cacheManager, CacheDirectiveInfo directive,
    EnumSet<CacheFlag> flags, boolean logRetryCache) throws IOException {
  final FSPermissionChecker pc = getFsPermissionChecker(fsn);
  cacheManager.modifyDirective(directive, pc, flags);
  // Log only after the in-memory modification succeeds.
  fsn.getEditLog().logModifyCacheDirectiveInfo(directive, logRetryCache);
}
项目:hadoop
文件:CacheManager.java
/**
 * Validate and add a new cache directive.
 * Must be called with the namesystem write lock held.
 *
 * @param info  directive supplied by the client (pool and path required)
 * @param pc    permission checker for the calling user
 * @param flags FORCE skips pool-limit (quota) validation
 * @return the stored directive converted back to a CacheDirectiveInfo
 * @throws IOException if validation or permission checks fail
 */
public CacheDirectiveInfo addDirective(
    CacheDirectiveInfo info, FSPermissionChecker pc, EnumSet<CacheFlag> flags)
    throws IOException {
  assert namesystem.hasWriteLock();
  CacheDirective directive;
  try {
    CachePool pool = getCachePool(validatePoolName(info));
    checkWritePermission(pc, pool);
    String path = validatePath(info);
    // Replication defaults to 1 when the client did not specify one.
    short replication = validateReplication(info, (short)1);
    long expiryTime = validateExpiryTime(info, pool.getMaxRelativeExpiryMs());
    // Do quota validation if required
    if (!flags.contains(CacheFlag.FORCE)) {
      checkLimit(pool, path, replication);
    }
    // All validation passed
    // Add a new entry with the next available ID.
    long id = getNextDirectiveId();
    directive = new CacheDirective(id, path, replication, expiryTime);
    addInternal(directive, pool);
  } catch (IOException e) {
    // Log validation/permission failures before rethrowing to the caller.
    LOG.warn("addDirective of " + info + " failed: ", e);
    throw e;
  }
  LOG.info("addDirective of {} successful.", info);
  return directive.toInfo();
}
项目:hadoop
文件:PBHelper.java
/**
 * Serializes a set of {@link CacheFlag}s into the bitmask used by the
 * protobuf wire format. Only FORCE currently has a wire representation.
 */
public static int convertCacheFlags(EnumSet<CacheFlag> flags) {
  return flags.contains(CacheFlag.FORCE)
      ? CacheFlagProto.FORCE.getNumber() : 0;
}
项目:hadoop
文件:PBHelper.java
/**
 * Deserializes the protobuf cache-flag bitmask back into an EnumSet.
 * Only the FORCE bit is recognized; unknown bits are ignored.
 */
public static EnumSet<CacheFlag> convertCacheFlags(int flags) {
  final EnumSet<CacheFlag> decoded = EnumSet.noneOf(CacheFlag.class);
  if ((flags & CacheFlagProto.FORCE_VALUE) == CacheFlagProto.FORCE_VALUE) {
    decoded.add(CacheFlag.FORCE);
  }
  return decoded;
}
项目:hadoop
文件:ClientNamenodeProtocolTranslatorPB.java
/**
 * Translate an addCacheDirective call into its protobuf request, invoke
 * the RPC, and return the server-assigned directive ID.
 */
@Override
public long addCacheDirective(CacheDirectiveInfo directive,
    EnumSet<CacheFlag> flags) throws IOException {
  final AddCacheDirectiveRequestProto.Builder request =
      AddCacheDirectiveRequestProto.newBuilder()
          .setInfo(PBHelper.convert(directive));
  // Omit the flags field entirely when no flags are set.
  if (!flags.isEmpty()) {
    request.setCacheFlags(PBHelper.convertCacheFlags(flags));
  }
  try {
    return rpcProxy.addCacheDirective(null, request.build()).getId();
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
项目:hadoop
文件:ClientNamenodeProtocolTranslatorPB.java
/**
 * Translate a modifyCacheDirective call into its protobuf request and
 * invoke the RPC.
 */
@Override
public void modifyCacheDirective(CacheDirectiveInfo directive,
    EnumSet<CacheFlag> flags) throws IOException {
  final ModifyCacheDirectiveRequestProto.Builder request =
      ModifyCacheDirectiveRequestProto.newBuilder()
          .setInfo(PBHelper.convert(directive));
  // Omit the flags field entirely when no flags are set.
  if (!flags.isEmpty()) {
    request.setCacheFlags(PBHelper.convertCacheFlags(flags));
  }
  try {
    rpcProxy.modifyCacheDirective(null, request.build());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
项目:hadoop
文件:TestRetryCacheWithHA.java
/**
 * Issues the operation under test: changes the replication of the cache
 * directive identified by {@code id}, using {@link CacheFlag#FORCE} to
 * bypass pool quota checks.
 */
@Override
void invoke() throws Exception {
  client.modifyCacheDirective(
      new CacheDirectiveInfo.Builder().
          setId(id).
          setReplication(newReplication).
          build(), EnumSet.of(CacheFlag.FORCE));
}
项目:aliyun-oss-hadoop-fs
文件:DFSClient.java
/**
 * Ask the NameNode to add a new cache directive, tracing the call and
 * unwrapping any server-side exception into its original type.
 *
 * @param info  the directive to add
 * @param flags operation flags, e.g. {@link CacheFlag#FORCE}
 * @return the ID assigned to the new directive
 * @throws IOException if the NameNode rejects the request
 */
public long addCacheDirective(
    CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
  checkOpen();
  try (TraceScope ignored = tracer.newScope("addCacheDirective")) {
    return namenode.addCacheDirective(info, flags);
  } catch (RemoteException re) {
    // Surface the original exception type thrown on the NameNode.
    throw re.unwrapRemoteException();
  }
}
项目:aliyun-oss-hadoop-fs
文件:DFSClient.java
/**
 * Ask the NameNode to modify an existing cache directive, tracing the
 * call and unwrapping any server-side exception into its original type.
 *
 * @param info  the directive changes; identifies the target by ID
 * @param flags operation flags, e.g. {@link CacheFlag#FORCE}
 * @throws IOException if the NameNode rejects the request
 */
public void modifyCacheDirective(
    CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
  checkOpen();
  try (TraceScope ignored = tracer.newScope("modifyCacheDirective")) {
    namenode.modifyCacheDirective(info, flags);
  } catch (RemoteException re) {
    // Surface the original exception type thrown on the NameNode.
    throw re.unwrapRemoteException();
  }
}
项目:aliyun-oss-hadoop-fs
文件:DistributedFileSystem.java
/**
 * Add a new CacheDirective.
 *
 * @param info Information about a directive to add; its path is required.
 * @param flags {@link CacheFlag}s to use for this operation.
 * @return the ID of the directive that was created.
 * @throws IOException if the directive could not be added
 */
public long addCacheDirective(
    CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
  Preconditions.checkNotNull(info.getPath());
  // Qualify the directive's path against this filesystem's URI and
  // working directory before handing it to the client.
  final Path relative = fixRelativePart(info.getPath());
  final Path absPath = new Path(getPathName(relative))
      .makeQualified(getUri(), getWorkingDirectory());
  final CacheDirectiveInfo qualified =
      new CacheDirectiveInfo.Builder(info).setPath(absPath).build();
  return dfs.addCacheDirective(qualified, flags);
}
项目:aliyun-oss-hadoop-fs
文件:DistributedFileSystem.java
/**
 * Modify a CacheDirective.
 *
 * @param info Information about the directive to modify. You must set the ID
 *             to indicate which CacheDirective you want to modify.
 * @param flags {@link CacheFlag}s to use for this operation.
 * @throws IOException if the directive could not be modified
 */
public void modifyCacheDirective(
    CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
  // The path is optional on a modify; qualify it only when present.
  final Path origPath = info.getPath();
  if (origPath != null) {
    final Path qualified = new Path(getPathName(fixRelativePart(origPath)))
        .makeQualified(getUri(), getWorkingDirectory());
    info = new CacheDirectiveInfo.Builder(info).setPath(qualified).build();
  }
  dfs.modifyCacheDirective(info, flags);
}
项目:aliyun-oss-hadoop-fs
文件:PBHelperClient.java
/**
 * Serializes a set of {@link CacheFlag}s into the bitmask used by the
 * protobuf wire format. Only FORCE currently has a wire representation.
 */
public static int convertCacheFlags(EnumSet<CacheFlag> flags) {
  return flags.contains(CacheFlag.FORCE)
      ? CacheFlagProto.FORCE.getNumber() : 0;
}
项目:aliyun-oss-hadoop-fs
文件:PBHelperClient.java
/**
 * Deserializes the protobuf cache-flag bitmask back into an EnumSet.
 * Only the FORCE bit is recognized; unknown bits are ignored.
 */
public static EnumSet<CacheFlag> convertCacheFlags(int flags) {
  final EnumSet<CacheFlag> decoded = EnumSet.noneOf(CacheFlag.class);
  if ((flags & CacheFlagProto.FORCE_VALUE) == CacheFlagProto.FORCE_VALUE) {
    decoded.add(CacheFlag.FORCE);
  }
  return decoded;
}
项目:aliyun-oss-hadoop-fs
文件:ClientNamenodeProtocolTranslatorPB.java
/**
 * Translate an addCacheDirective call into its protobuf request, invoke
 * the RPC, and return the server-assigned directive ID.
 */
@Override
public long addCacheDirective(CacheDirectiveInfo directive,
    EnumSet<CacheFlag> flags) throws IOException {
  final AddCacheDirectiveRequestProto.Builder request =
      AddCacheDirectiveRequestProto.newBuilder()
          .setInfo(PBHelperClient.convert(directive));
  // Omit the flags field entirely when no flags are set.
  if (!flags.isEmpty()) {
    request.setCacheFlags(PBHelperClient.convertCacheFlags(flags));
  }
  try {
    return rpcProxy.addCacheDirective(null, request.build()).getId();
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
项目:aliyun-oss-hadoop-fs
文件:ClientNamenodeProtocolTranslatorPB.java
/**
 * Translate a modifyCacheDirective call into its protobuf request and
 * invoke the RPC.
 */
@Override
public void modifyCacheDirective(CacheDirectiveInfo directive,
    EnumSet<CacheFlag> flags) throws IOException {
  final ModifyCacheDirectiveRequestProto.Builder request =
      ModifyCacheDirectiveRequestProto.newBuilder()
          .setInfo(PBHelperClient.convert(directive));
  // Omit the flags field entirely when no flags are set.
  if (!flags.isEmpty()) {
    request.setCacheFlags(PBHelperClient.convertCacheFlags(flags));
  }
  try {
    rpcProxy.modifyCacheDirective(null, request.build());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
项目:aliyun-oss-hadoop-fs
文件:FSNamesystem.java
/**
 * Add a new cache directive: optionally wait for a cache rescan (so quota
 * checks see fresh usage), apply the change under the write lock, sync the
 * edit log, and emit an audit event.
 *
 * @param directive     directive to add (must not carry an ID)
 * @param flags         FORCE skips the pre-add rescan wait
 * @param logRetryCache whether to tag the edit-log entry for retry-cache
 *                      replay
 * @return the ID assigned to the new directive
 * @throws IOException on safe mode, permission, or validation failures
 */
long addCacheDirective(CacheDirectiveInfo directive,
    EnumSet<CacheFlag> flags, boolean logRetryCache)
    throws IOException {
  CacheDirectiveInfo effectiveDirective = null;
  if (!flags.contains(CacheFlag.FORCE)) {
    // Keep quota accounting accurate unless the caller forces through.
    cacheManager.waitForRescanIfNeeded();
  }
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    checkNameNodeSafeMode("Cannot add cache directive");
    effectiveDirective = FSNDNCacheOp.addCacheDirective(this, cacheManager,
        directive, flags, logRetryCache);
  } finally {
    writeUnlock();
    // Success is implied by a non-null effective directive.
    boolean success = effectiveDirective != null;
    if (success) {
      // Sync outside the write lock so other writers are not blocked.
      getEditLog().logSync();
    }
    String effectiveDirectiveStr = effectiveDirective != null ?
        effectiveDirective.toString() : null;
    // Audit both success and failure outcomes.
    logAuditEvent(success, "addCacheDirective", effectiveDirectiveStr,
        null, null);
  }
  return effectiveDirective != null ? effectiveDirective.getId() : 0;
}
项目:aliyun-oss-hadoop-fs
文件:FSNDNCacheOp.java
/**
 * Apply a modifyCacheDirective request and record it in the edit log.
 * Caller is expected to hold the namesystem write lock.
 *
 * @param logRetryCache whether the edit-log entry should carry retry-cache
 *                      information
 * @throws IOException if permission checks or validation fail
 */
static void modifyCacheDirective(
    FSNamesystem fsn, CacheManager cacheManager, CacheDirectiveInfo directive,
    EnumSet<CacheFlag> flags, boolean logRetryCache) throws IOException {
  final FSPermissionChecker pc = getFsPermissionChecker(fsn);
  cacheManager.modifyDirective(directive, pc, flags);
  // Log only after the in-memory modification succeeds.
  fsn.getEditLog().logModifyCacheDirectiveInfo(directive, logRetryCache);
}
项目:aliyun-oss-hadoop-fs
文件:CacheManager.java
/**
 * Validate and add a new cache directive.
 * Must be called with the namesystem write lock held.
 *
 * @param info  directive supplied by the client (pool and path required)
 * @param pc    permission checker for the calling user
 * @param flags FORCE skips pool-limit (quota) validation
 * @return the stored directive converted back to a CacheDirectiveInfo
 * @throws IOException if validation or permission checks fail
 */
public CacheDirectiveInfo addDirective(
    CacheDirectiveInfo info, FSPermissionChecker pc, EnumSet<CacheFlag> flags)
    throws IOException {
  assert namesystem.hasWriteLock();
  CacheDirective directive;
  try {
    CachePool pool = getCachePool(validatePoolName(info));
    checkWritePermission(pc, pool);
    String path = validatePath(info);
    // Replication defaults to 1 when the client did not specify one.
    short replication = validateReplication(info, (short)1);
    long expiryTime = validateExpiryTime(info, pool.getMaxRelativeExpiryMs());
    // Do quota validation if required
    if (!flags.contains(CacheFlag.FORCE)) {
      checkLimit(pool, path, replication);
    }
    // All validation passed
    // Add a new entry with the next available ID.
    long id = getNextDirectiveId();
    directive = new CacheDirective(id, path, replication, expiryTime);
    addInternal(directive, pool);
  } catch (IOException e) {
    // Log validation/permission failures before rethrowing to the caller.
    LOG.warn("addDirective of " + info + " failed: ", e);
    throw e;
  }
  LOG.info("addDirective of {} successful.", info);
  return directive.toInfo();
}