Skip to content
This repository has been archived by the owner on Feb 4, 2019. It is now read-only.

Updates to the parallel examples, using the new code. #89

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 16 additions & 1 deletion rackspace/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -26,13 +26,28 @@
<name>rackspace-examples</name>

<properties>
<jclouds.version>1.9.1</jclouds.version>
<jclouds.version>2.0.0-SNAPSHOT</jclouds.version>
</properties>


<!-- Included in case the examples have to run against a snapshot jclouds version -->
<repositories>
<repository>
<id>apache-snapshots</id>
<url>https://repository.apache.org/content/repositories/snapshots</url>
<releases>
<enabled>false</enabled>
</releases>
<snapshots>
<enabled>true</enabled>
</snapshots>
</repository>
<repository>
<id>sonatype-nexus-snapshots</id>
<url>https://oss.sonatype.org/content/repositories/snapshots</url>
<releases>
<enabled>false</enabled>
</releases>
<snapshots>
<enabled>true</enabled>
</snapshots>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,15 @@
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Properties;
import java.util.Random;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.jclouds.ContextBuilder;
import org.jclouds.blobstore.BlobStore;
Expand All @@ -36,6 +43,7 @@
import org.jclouds.io.Payloads;
import org.jclouds.openstack.swift.v1.blobstore.RegionScopedBlobStoreContext;

import com.google.common.hash.Hashing;
import com.google.common.io.ByteSource;
import com.google.common.io.Closeables;
import com.google.common.io.Files;
Expand All @@ -51,34 +59,45 @@ public class UploadLargeObject implements Closeable {
*
* The first argument (args[0]) must be your username
* The second argument (args[1]) must be your API key
* A 200 MB test file is generated automatically by this example, so no file path argument is required
*/
public static void main(String[] args) throws IOException {
UploadLargeObject createContainer = new UploadLargeObject(args[0], args[1]);
File largeFile = new File("largefile.dat");
File downloadedFile = new File(largeFile.getName()+".downloaded");

try {
createContainer.uploadLargeObjectFromFile(new File(args[2]));
// Create a 200MB file for this example
createContainer.createRandomFile(200000000, largeFile);
createContainer.uploadLargeObjectFromFile(largeFile);
createContainer.downloadLargeObjectToFile(largeFile.getName());
System.out.println("Random file hash: " + Files.hash(largeFile, Hashing.md5()));
System.out.println("Downloaded file hash: " + Files.hash(downloadedFile, Hashing.md5()));
}
catch (Exception e) {
e.printStackTrace();
}
finally {
createContainer.cleanup();
createContainer.close();
if(largeFile.exists()) largeFile.delete();
if(downloadedFile.exists()) downloadedFile.delete();
}
}

public UploadLargeObject(String username, String apiKey) {
Properties overrides = new Properties();
// This property controls the number of parts being uploaded in parallel, the default is 4
overrides.setProperty("jclouds.mpu.parallel.degree", "5");
overrides.setProperty("jclouds.user-threads", "14");
// This property controls the size (in bytes) of parts being uploaded in parallel, the default is 33554432 bytes = 32 MB
overrides.setProperty("jclouds.mpu.parts.size", "67108864"); // 64 MB
overrides.setProperty("jclouds.mpu.parts.size", "1100000"); // 1 MB
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't think this does anything at all, though....


RegionScopedBlobStoreContext context = ContextBuilder.newBuilder(PROVIDER)
.credentials(username, apiKey)
.overrides(overrides)
.buildView(RegionScopedBlobStoreContext.class);
blobStore = context.getBlobStore(REGION);

blobStore.createContainerInLocation(null, CONTAINER);
}

/**
Expand All @@ -102,7 +121,24 @@ private void uploadLargeObjectFromFile(File largeFile) throws InterruptedExcepti
// configure the blobstore to use multipart uploading of the file
String eTag = blobStore.putBlob(CONTAINER, blob, multipart());

System.out.format(" Uploaded %s eTag=%s", largeFile.getName(), eTag);
System.out.format(" Uploaded %s eTag=%s to %s in %s%n", largeFile.getName(), eTag, REGION, CONTAINER);
}

/**
 * Download a large object from the container to a local file using the BlobStore API.
 *
 * @param blobName name of the blob to fetch; it is saved locally as {@code blobName + ".downloaded"}
 * @throws ExecutionException
 * @throws InterruptedException
 */
private void downloadLargeObjectToFile(String blobName) throws InterruptedException, ExecutionException {
    System.out.format("Download large object to file%n");

    File target = new File(blobName + ".downloaded");
    blobStore.downloadBlob(CONTAINER, blobName, target);
}

/** Removes every blob from the example container so the account is left tidy. */
private void cleanup() {
    System.out.format("Cleaning up...%n");
    blobStore.clearContainer(CONTAINER);
}

/**
Expand All @@ -111,4 +147,68 @@ private void uploadLargeObjectFromFile(File largeFile) throws InterruptedExcepti
@Override
public void close() throws IOException {
    // The 'true' flag tells Guava to log (rather than propagate) an IOException
    // thrown while closing the context.
    Closeable context = blobStore.getContext();
    Closeables.close(context, true);
}

/**
 * Helper method; generates a file of pseudo-random bytes so that a large
 * test fixture does not have to live in the repo. The file is written in
 * fixed-size chunks by a pool of worker threads.
 *
 * @param size desired file size in bytes; must be positive
 * @param file the file to generate (overwritten if it exists)
 * @throws IOException if the file cannot be created or written
 * @throws InterruptedException if interrupted while waiting for the writers
 * @throws IllegalArgumentException if {@code size <= 0}
 */
private void createRandomFile(long size, File file) throws IOException, InterruptedException {
    if (size <= 0) {
        // Guard: the pre-extension below would seek(-1) and fail obscurely for size == 0
        throw new IllegalArgumentException("size must be positive: " + size);
    }

    // try/finally guarantees the file handle is released even if a write or submit fails
    // (the original leaked the RandomAccessFile on any exception)
    RandomAccessFile raf = new RandomAccessFile(file.getAbsoluteFile(), "rw");
    try {
        // Pre-extend the file to its final length for performance reasons
        raf.seek(size - 1);
        raf.write(0);

        long partSize = 1000000;
        ExecutorService threadPool = Executors.newFixedThreadPool(16);
        try {
            // Hand each [from, to] byte range (inclusive) to a worker that fills it
            for (long from = 0; from < size; from += partSize) {
                long to = (from + partSize >= size) ? size - 1 : from + partSize - 1;
                threadPool.submit(new RandomFileWriter(raf, from, to));
            }
        } finally {
            // Always drain the pool, even if a submit throws, so no threads are leaked
            threadPool.shutdown();
            threadPool.awaitTermination(1, TimeUnit.DAYS);
        }
    } finally {
        raf.close();
    }
}

/**
* Helper class that runs the random file generation
*/
private final class RandomFileWriter implements Runnable {
private final RandomAccessFile raf;
private final long begin;
private final long end;

RandomFileWriter(RandomAccessFile raf, long begin, long end) {
this.raf = raf;
this.begin = begin;
this.end = end;
}

@Override
public void run() {
try {
byte[] targetArray = new byte[(int) (end - begin + 1)];
Random random = new Random();
random.nextBytes(targetArray);
// Map file region
MappedByteBuffer out = raf.getChannel().map(FileChannel.MapMode.READ_WRITE, begin, end - begin + 1);
out.put(targetArray);
out.force();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
}