Java class org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem — example source code
项目:hadoop
文件:TestSwiftFileSystemBasicOps.java
private void delete(SwiftNativeFileSystem fs, Path path) {
  // Best-effort, non-recursive delete: any problem is logged, never rethrown,
  // so test teardown keeps going.
  try {
    boolean deleted = fs.delete(path, false);
    if (!deleted) {
      LOG.warn("Failed to delete " + path);
    }
  } catch (IOException e) {
    LOG.warn("deleting " + path, e);
  }
}
项目:hadoop
文件:TestSwiftFileSystemBasicOps.java
private void deleteR(SwiftNativeFileSystem fs, Path path) {
  // Best-effort, recursive delete: any problem is logged, never rethrown,
  // so test teardown keeps going.
  try {
    boolean deleted = fs.delete(path, true);
    if (!deleted) {
      LOG.warn("Failed to delete " + path);
    }
  } catch (IOException e) {
    LOG.warn("deleting " + path, e);
  }
}
项目:hadoop
文件:TestSwiftFileSystemPartitionedUploads.java
/**
 * Writes a very large partitioned file and verifies that it comes
 * back unchanged — i.e. that the partitions are reassembled in order.
 * @throws Throwable on any test failure
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testManyPartitionedFile() throws Throwable {
final Path path = new Path("/test/testManyPartitionedFile");
// 15 partitions' worth of data forces a segmented (multi-part) upload.
int len = PART_SIZE_BYTES * 15;
final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
FSDataOutputStream out = fs.create(path,
false,
getBufferSize(),
(short) 1,
BLOCK_SIZE);
// NOTE(review): the stream is not closed in a finally block, so a failed
// write leaks it; tolerable in a test, but worth confirming.
out.write(src, 0, src.length);
int expected =
getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
// Partition/byte counters are asserted only after close() — presumably
// close uploads the final buffered partition; confirm in the FS impl.
out.close();
assertPartitionsWritten("write completed", out, expected);
assertEquals("too few bytes written", len,
SwiftNativeFileSystem.getBytesWritten(out));
assertEquals("too few bytes uploaded", len,
SwiftNativeFileSystem.getBytesUploaded(out));
//now we verify that the data comes back. If it
//doesn't, it means that the ordering of the partitions
//isn't right
byte[] dest = readDataset(fs, path, len);
//compare data
SwiftTestUtils.compareByteArrays(src, dest, len);
//finally, check the data
FileStatus[] stats = fs.listStatus(path);
// Listing the partitioned file is expected to yield one entry per partition.
assertEquals("wrong entry count in "
+ SwiftTestUtils.dumpStats(path.toString(), stats),
expected, stats.length);
}
项目:aliyun-oss-hadoop-fs
文件:TestSwiftFileSystemBasicOps.java
private void delete(SwiftNativeFileSystem fs, Path path) {
  // Best-effort, non-recursive delete: any problem is logged, never rethrown,
  // so test teardown keeps going.
  try {
    boolean deleted = fs.delete(path, false);
    if (!deleted) {
      LOG.warn("Failed to delete " + path);
    }
  } catch (IOException e) {
    LOG.warn("deleting " + path, e);
  }
}
项目:aliyun-oss-hadoop-fs
文件:TestSwiftFileSystemBasicOps.java
private void deleteR(SwiftNativeFileSystem fs, Path path) {
  // Best-effort, recursive delete: any problem is logged, never rethrown,
  // so test teardown keeps going.
  try {
    boolean deleted = fs.delete(path, true);
    if (!deleted) {
      LOG.warn("Failed to delete " + path);
    }
  } catch (IOException e) {
    LOG.warn("deleting " + path, e);
  }
}
项目:aliyun-oss-hadoop-fs
文件:TestSwiftFileSystemPartitionedUploads.java
/**
 * Writes a very large partitioned file and verifies that it comes
 * back unchanged — i.e. that the partitions are reassembled in order.
 * @throws Throwable on any test failure
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testManyPartitionedFile() throws Throwable {
final Path path = new Path("/test/testManyPartitionedFile");
// 15 partitions' worth of data forces a segmented (multi-part) upload.
int len = PART_SIZE_BYTES * 15;
final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
FSDataOutputStream out = fs.create(path,
false,
getBufferSize(),
(short) 1,
BLOCK_SIZE);
// NOTE(review): the stream is not closed in a finally block, so a failed
// write leaks it; tolerable in a test, but worth confirming.
out.write(src, 0, src.length);
int expected =
getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
// Partition/byte counters are asserted only after close() — presumably
// close uploads the final buffered partition; confirm in the FS impl.
out.close();
assertPartitionsWritten("write completed", out, expected);
assertEquals("too few bytes written", len,
SwiftNativeFileSystem.getBytesWritten(out));
assertEquals("too few bytes uploaded", len,
SwiftNativeFileSystem.getBytesUploaded(out));
//now we verify that the data comes back. If it
//doesn't, it means that the ordering of the partitions
//isn't right
byte[] dest = readDataset(fs, path, len);
//compare data
SwiftTestUtils.compareByteArrays(src, dest, len);
//finally, check the data
FileStatus[] stats = fs.listStatus(path);
// Listing the partitioned file is expected to yield one entry per partition.
assertEquals("wrong entry count in "
+ SwiftTestUtils.dumpStats(path.toString(), stats),
expected, stats.length);
}
项目:big-c
文件:TestSwiftFileSystemBasicOps.java
private void delete(SwiftNativeFileSystem fs, Path path) {
  // Best-effort, non-recursive delete: any problem is logged, never rethrown,
  // so test teardown keeps going.
  try {
    boolean deleted = fs.delete(path, false);
    if (!deleted) {
      LOG.warn("Failed to delete " + path);
    }
  } catch (IOException e) {
    LOG.warn("deleting " + path, e);
  }
}
项目:big-c
文件:TestSwiftFileSystemBasicOps.java
private void deleteR(SwiftNativeFileSystem fs, Path path) {
  // Best-effort, recursive delete: any problem is logged, never rethrown,
  // so test teardown keeps going.
  try {
    boolean deleted = fs.delete(path, true);
    if (!deleted) {
      LOG.warn("Failed to delete " + path);
    }
  } catch (IOException e) {
    LOG.warn("deleting " + path, e);
  }
}
项目:big-c
文件:TestSwiftFileSystemPartitionedUploads.java
/**
 * Writes a very large partitioned file and verifies that it comes
 * back unchanged — i.e. that the partitions are reassembled in order.
 * @throws Throwable on any test failure
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testManyPartitionedFile() throws Throwable {
final Path path = new Path("/test/testManyPartitionedFile");
// 15 partitions' worth of data forces a segmented (multi-part) upload.
int len = PART_SIZE_BYTES * 15;
final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
FSDataOutputStream out = fs.create(path,
false,
getBufferSize(),
(short) 1,
BLOCK_SIZE);
// NOTE(review): the stream is not closed in a finally block, so a failed
// write leaks it; tolerable in a test, but worth confirming.
out.write(src, 0, src.length);
int expected =
getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
// Partition/byte counters are asserted only after close() — presumably
// close uploads the final buffered partition; confirm in the FS impl.
out.close();
assertPartitionsWritten("write completed", out, expected);
assertEquals("too few bytes written", len,
SwiftNativeFileSystem.getBytesWritten(out));
assertEquals("too few bytes uploaded", len,
SwiftNativeFileSystem.getBytesUploaded(out));
//now we verify that the data comes back. If it
//doesn't, it means that the ordering of the partitions
//isn't right
byte[] dest = readDataset(fs, path, len);
//compare data
SwiftTestUtils.compareByteArrays(src, dest, len);
//finally, check the data
FileStatus[] stats = fs.listStatus(path);
// Listing the partitioned file is expected to yield one entry per partition.
assertEquals("wrong entry count in "
+ SwiftTestUtils.dumpStats(path.toString(), stats),
expected, stats.length);
}
项目:hadoop-2.6.0-cdh5.4.3
文件:TestSwiftFileSystemBasicOps.java
private void delete(SwiftNativeFileSystem fs, Path path) {
  // Best-effort, non-recursive delete: any problem is logged, never rethrown,
  // so test teardown keeps going.
  try {
    boolean deleted = fs.delete(path, false);
    if (!deleted) {
      LOG.warn("Failed to delete " + path);
    }
  } catch (IOException e) {
    LOG.warn("deleting " + path, e);
  }
}
项目:hadoop-2.6.0-cdh5.4.3
文件:TestSwiftFileSystemBasicOps.java
private void deleteR(SwiftNativeFileSystem fs, Path path) {
  // Best-effort, recursive delete: any problem is logged, never rethrown,
  // so test teardown keeps going.
  try {
    boolean deleted = fs.delete(path, true);
    if (!deleted) {
      LOG.warn("Failed to delete " + path);
    }
  } catch (IOException e) {
    LOG.warn("deleting " + path, e);
  }
}
项目:hadoop-2.6.0-cdh5.4.3
文件:TestSwiftFileSystemPartitionedUploads.java
/**
 * Writes a very large partitioned file and verifies that it comes
 * back unchanged — i.e. that the partitions are reassembled in order.
 * @throws Throwable on any test failure
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testManyPartitionedFile() throws Throwable {
final Path path = new Path("/test/testManyPartitionedFile");
// 15 partitions' worth of data forces a segmented (multi-part) upload.
int len = PART_SIZE_BYTES * 15;
final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
FSDataOutputStream out = fs.create(path,
false,
getBufferSize(),
(short) 1,
BLOCK_SIZE);
// NOTE(review): the stream is not closed in a finally block, so a failed
// write leaks it; tolerable in a test, but worth confirming.
out.write(src, 0, src.length);
int expected =
getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
// Partition/byte counters are asserted only after close() — presumably
// close uploads the final buffered partition; confirm in the FS impl.
out.close();
assertPartitionsWritten("write completed", out, expected);
assertEquals("too few bytes written", len,
SwiftNativeFileSystem.getBytesWritten(out));
assertEquals("too few bytes uploaded", len,
SwiftNativeFileSystem.getBytesUploaded(out));
//now we verify that the data comes back. If it
//doesn't, it means that the ordering of the partitions
//isn't right
byte[] dest = readDataset(fs, path, len);
//compare data
SwiftTestUtils.compareByteArrays(src, dest, len);
//finally, check the data
FileStatus[] stats = fs.listStatus(path);
// Listing the partitioned file is expected to yield one entry per partition.
assertEquals("wrong entry count in "
+ SwiftTestUtils.dumpStats(path.toString(), stats),
expected, stats.length);
}
项目:hops
文件:TestSwiftFileSystemBasicOps.java
private void delete(SwiftNativeFileSystem fs, Path path) {
  // Best-effort, non-recursive delete: any problem is logged, never rethrown,
  // so test teardown keeps going.
  try {
    boolean deleted = fs.delete(path, false);
    if (!deleted) {
      LOG.warn("Failed to delete " + path);
    }
  } catch (IOException e) {
    LOG.warn("deleting " + path, e);
  }
}
项目:hops
文件:TestSwiftFileSystemBasicOps.java
private void deleteR(SwiftNativeFileSystem fs, Path path) {
  // Best-effort, recursive delete: any problem is logged, never rethrown,
  // so test teardown keeps going.
  try {
    boolean deleted = fs.delete(path, true);
    if (!deleted) {
      LOG.warn("Failed to delete " + path);
    }
  } catch (IOException e) {
    LOG.warn("deleting " + path, e);
  }
}
项目:hops
文件:TestSwiftFileSystemPartitionedUploads.java
/**
 * Writes a very large partitioned file and verifies that it comes
 * back unchanged — i.e. that the partitions are reassembled in order.
 * @throws Throwable on any test failure
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testManyPartitionedFile() throws Throwable {
final Path path = new Path("/test/testManyPartitionedFile");
// 15 partitions' worth of data forces a segmented (multi-part) upload.
int len = PART_SIZE_BYTES * 15;
final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
FSDataOutputStream out = fs.create(path,
false,
getBufferSize(),
(short) 1,
BLOCK_SIZE);
// NOTE(review): the stream is not closed in a finally block, so a failed
// write leaks it; tolerable in a test, but worth confirming.
out.write(src, 0, src.length);
int expected =
getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
// Partition/byte counters are asserted only after close() — presumably
// close uploads the final buffered partition; confirm in the FS impl.
out.close();
assertPartitionsWritten("write completed", out, expected);
assertEquals("too few bytes written", len,
SwiftNativeFileSystem.getBytesWritten(out));
assertEquals("too few bytes uploaded", len,
SwiftNativeFileSystem.getBytesUploaded(out));
//now we verify that the data comes back. If it
//doesn't, it means that the ordering of the partitions
//isn't right
byte[] dest = readDataset(fs, path, len);
//compare data
SwiftTestUtils.compareByteArrays(src, dest, len);
//finally, check the data
FileStatus[] stats = fs.listStatus(path);
// Listing the partitioned file is expected to yield one entry per partition.
assertEquals("wrong entry count in "
+ SwiftTestUtils.dumpStats(path.toString(), stats),
expected, stats.length);
}
项目:hadoop-on-lustre2
文件:TestSwiftFileSystemBasicOps.java
private void delete(SwiftNativeFileSystem fs, Path path) {
  // Best-effort, non-recursive delete: any problem is logged, never rethrown,
  // so test teardown keeps going.
  try {
    boolean deleted = fs.delete(path, false);
    if (!deleted) {
      LOG.warn("Failed to delete " + path);
    }
  } catch (IOException e) {
    LOG.warn("deleting " + path, e);
  }
}
项目:hadoop-on-lustre2
文件:TestSwiftFileSystemBasicOps.java
private void deleteR(SwiftNativeFileSystem fs, Path path) {
  // Best-effort, recursive delete: any problem is logged, never rethrown,
  // so test teardown keeps going.
  try {
    boolean deleted = fs.delete(path, true);
    if (!deleted) {
      LOG.warn("Failed to delete " + path);
    }
  } catch (IOException e) {
    LOG.warn("deleting " + path, e);
  }
}
项目:hadoop-on-lustre2
文件:TestSwiftFileSystemPartitionedUploads.java
/**
 * Writes a very large partitioned file and verifies that it comes
 * back unchanged — i.e. that the partitions are reassembled in order.
 * @throws Throwable on any test failure
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testManyPartitionedFile() throws Throwable {
final Path path = new Path("/test/testManyPartitionedFile");
// 15 partitions' worth of data forces a segmented (multi-part) upload.
int len = PART_SIZE_BYTES * 15;
final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
FSDataOutputStream out = fs.create(path,
false,
getBufferSize(),
(short) 1,
BLOCK_SIZE);
// NOTE(review): the stream is not closed in a finally block, so a failed
// write leaks it; tolerable in a test, but worth confirming.
out.write(src, 0, src.length);
int expected =
getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
// Partition/byte counters are asserted only after close() — presumably
// close uploads the final buffered partition; confirm in the FS impl.
out.close();
assertPartitionsWritten("write completed", out, expected);
assertEquals("too few bytes written", len,
SwiftNativeFileSystem.getBytesWritten(out));
assertEquals("too few bytes uploaded", len,
SwiftNativeFileSystem.getBytesUploaded(out));
//now we verify that the data comes back. If it
//doesn't, it means that the ordering of the partitions
//isn't right
byte[] dest = readDataset(fs, path, len);
//compare data
SwiftTestUtils.compareByteArrays(src, dest, len);
//finally, check the data
FileStatus[] stats = fs.listStatus(path);
// Listing the partitioned file is expected to yield one entry per partition.
assertEquals("wrong entry count in "
+ SwiftTestUtils.dumpStats(path.toString(), stats),
expected, stats.length);
}
项目:sahara-extra
文件:TestSwiftFileSystemBasicOps.java
private void delete(SwiftNativeFileSystem fs, Path path) {
  // Best-effort, non-recursive delete: any problem is logged, never rethrown,
  // so test teardown keeps going.
  try {
    boolean deleted = fs.delete(path, false);
    if (!deleted) {
      LOG.warn("Failed to delete " + path);
    }
  } catch (IOException e) {
    LOG.warn("deleting " + path, e);
  }
}
项目:sahara-extra
文件:TestSwiftFileSystemBasicOps.java
private void deleteR(SwiftNativeFileSystem fs, Path path) {
  // Best-effort, recursive delete: any problem is logged, never rethrown,
  // so test teardown keeps going.
  try {
    boolean deleted = fs.delete(path, true);
    if (!deleted) {
      LOG.warn("Failed to delete " + path);
    }
  } catch (IOException e) {
    LOG.warn("deleting " + path, e);
  }
}
项目:sahara-extra
文件:TestSwiftFileSystemPartitionedUploads.java
/**
 * Writes a very large partitioned file and verifies that it comes
 * back unchanged — i.e. that the partitions are reassembled in order.
 * @throws Throwable on any test failure
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testManyPartitionedFile() throws Throwable {
final Path path = new Path("/test/testManyPartitionedFile");
// 15 partitions' worth of data forces a segmented (multi-part) upload.
int len = PART_SIZE_BYTES * 15;
final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
FSDataOutputStream out = fs.create(path,
false,
getBufferSize(),
(short) 1,
BLOCK_SIZE);
// NOTE(review): the stream is not closed in a finally block, so a failed
// write leaks it; tolerable in a test, but worth confirming.
out.write(src, 0, src.length);
int expected =
getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
// Partition/byte counters are asserted only after close() — presumably
// close uploads the final buffered partition; confirm in the FS impl.
out.close();
assertPartitionsWritten("write completed", out, expected);
assertEquals("too few bytes written", len,
SwiftNativeFileSystem.getBytesWritten(out));
assertEquals("too few bytes uploaded", len,
SwiftNativeFileSystem.getBytesUploaded(out));
//now we verify that the data comes back. If it
//doesn't, it means that the ordering of the partitions
//isn't right
byte[] dest = readDataset(fs, path, len);
//compare data
SwiftTestUtils.compareByteArrays(src, dest, len);
//finally, check the data
// This variant queries the store for the segment list directly rather
// than going through FileSystem.listStatus.
FileStatus[] stats = getStore().listSegments(fs.getFileStatus(path), true);
assertEquals("wrong entry count in "
+ SwiftTestUtils.dumpStats(path.toString(), stats),
expected, stats.length);
}
项目:sahara-extra
文件:TestSwiftFileSystemPartitionedUploads.java
/**
 * Writes a partitioned file, verifying that a qualified path works
 * end-to-end: the data written must come back unchanged.
 * @throws Throwable on any test failure
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testQualifiedPath() throws Throwable {
// path() qualifies the raw string against the test filesystem.
final Path path = path("/test/qualifiedPath");
// 4 partitions' worth of data forces a segmented (multi-part) upload.
int len = PART_SIZE_BYTES * 4;
final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
FSDataOutputStream out = fs.create(path,
false,
getBufferSize(),
(short) 1,
BLOCK_SIZE);
// NOTE(review): the stream is not closed in a finally block, so a failed
// write leaks it; tolerable in a test, but worth confirming.
out.write(src, 0, src.length);
int expected =
getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
// Partition/byte counters are asserted only after close() — presumably
// close uploads the final buffered partition; confirm in the FS impl.
out.close();
assertPartitionsWritten("write completed", out, expected);
assertEquals("too few bytes written", len,
SwiftNativeFileSystem.getBytesWritten(out));
assertEquals("too few bytes uploaded", len,
SwiftNativeFileSystem.getBytesUploaded(out));
//now we verify that the data comes back. If it
//doesn't, it means that the ordering of the partitions
//isn't right
byte[] dest = readDataset(fs, path, len);
//compare data
SwiftTestUtils.compareByteArrays(src, dest, len);
//finally, check the data
// Query the store for the segment list directly, one entry per partition.
FileStatus[] stats = getStore().listSegments(fs.getFileStatus(path), true);
assertEquals("wrong entry count in "
+ SwiftTestUtils.dumpStats(path.toString(), stats),
expected, stats.length);
}
项目:hadoop
文件:TestSwiftFileSystemExtendedContract.java
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testGetSchemeImplemented() throws Throwable {
  // getScheme() must be overridden to report the swift scheme constant.
  final String actual = fs.getScheme();
  assertEquals(SwiftNativeFileSystem.SWIFT, actual);
}
项目:hadoop
文件:TestSwiftFileSystemContract.java
protected SwiftNativeFileSystem createSwiftFS() throws IOException {
  // Factory hook: return a fresh SwiftNativeFileSystem instance.
  return new SwiftNativeFileSystem();
}
项目:hadoop
文件:SwiftFileSystemBaseTest.java
protected SwiftNativeFileSystem createSwiftFS() throws IOException {
  // Factory hook: return a fresh SwiftNativeFileSystem instance.
  return new SwiftNativeFileSystem();
}
项目:hadoop
文件:SwiftContract.java
@Override
public String getScheme() {
// Delegate to the filesystem class's own scheme constant.
return SwiftNativeFileSystem.SWIFT;
}
项目:aliyun-oss-hadoop-fs
文件:TestSwiftFileSystemExtendedContract.java
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testGetSchemeImplemented() throws Throwable {
  // getScheme() must be overridden to report the swift scheme constant.
  final String actual = fs.getScheme();
  assertEquals(SwiftNativeFileSystem.SWIFT, actual);
}
项目:aliyun-oss-hadoop-fs
文件:TestSwiftFileSystemContract.java
protected SwiftNativeFileSystem createSwiftFS() throws IOException {
  // Factory hook: return a fresh SwiftNativeFileSystem instance.
  return new SwiftNativeFileSystem();
}
项目:aliyun-oss-hadoop-fs
文件:SwiftFileSystemBaseTest.java
protected SwiftNativeFileSystem createSwiftFS() throws IOException {
  // Factory hook: return a fresh SwiftNativeFileSystem instance.
  return new SwiftNativeFileSystem();
}
项目:aliyun-oss-hadoop-fs
文件:SwiftContract.java
@Override
public String getScheme() {
// Delegate to the filesystem class's own scheme constant.
return SwiftNativeFileSystem.SWIFT;
}
项目:big-c
文件:TestSwiftFileSystemExtendedContract.java
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testGetSchemeImplemented() throws Throwable {
  // getScheme() must be overridden to report the swift scheme constant.
  final String actual = fs.getScheme();
  assertEquals(SwiftNativeFileSystem.SWIFT, actual);
}
项目:big-c
文件:TestSwiftFileSystemContract.java
protected SwiftNativeFileSystem createSwiftFS() throws IOException {
  // Factory hook: return a fresh SwiftNativeFileSystem instance.
  return new SwiftNativeFileSystem();
}
项目:big-c
文件:SwiftFileSystemBaseTest.java
protected SwiftNativeFileSystem createSwiftFS() throws IOException {
  // Factory hook: return a fresh SwiftNativeFileSystem instance.
  return new SwiftNativeFileSystem();
}
项目:big-c
文件:SwiftContract.java
@Override
public String getScheme() {
// Delegate to the filesystem class's own scheme constant.
return SwiftNativeFileSystem.SWIFT;
}
项目:hadoop-2.6.0-cdh5.4.3
文件:TestSwiftFileSystemExtendedContract.java
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testGetSchemeImplemented() throws Throwable {
  // getScheme() must be overridden to report the swift scheme constant.
  final String actual = fs.getScheme();
  assertEquals(SwiftNativeFileSystem.SWIFT, actual);
}
项目:hadoop-2.6.0-cdh5.4.3
文件:TestSwiftFileSystemContract.java
protected SwiftNativeFileSystem createSwiftFS() throws IOException {
  // Factory hook: return a fresh SwiftNativeFileSystem instance.
  return new SwiftNativeFileSystem();
}
项目:hadoop-2.6.0-cdh5.4.3
文件:SwiftFileSystemBaseTest.java
protected SwiftNativeFileSystem createSwiftFS() throws IOException {
  // Factory hook: return a fresh SwiftNativeFileSystem instance.
  return new SwiftNativeFileSystem();
}
项目:hadoop-2.6.0-cdh5.4.3
文件:SwiftContract.java
@Override
public String getScheme() {
// Delegate to the filesystem class's own scheme constant.
return SwiftNativeFileSystem.SWIFT;
}
项目:hops
文件:TestSwiftFileSystemExtendedContract.java
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testGetSchemeImplemented() throws Throwable {
  // getScheme() must be overridden to report the swift scheme constant.
  final String actual = fs.getScheme();
  assertEquals(SwiftNativeFileSystem.SWIFT, actual);
}
项目:hops
文件:TestSwiftFileSystemContract.java
protected SwiftNativeFileSystem createSwiftFS() throws IOException {
  // Factory hook: return a fresh SwiftNativeFileSystem instance.
  return new SwiftNativeFileSystem();
}