import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;

import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider;
import software.amazon.awssdk.core.async.AsyncRequestBody;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3AsyncClient;
import software.amazon.awssdk.services.s3.model.*;
/**
 * Uploads a single local file to S3 using the low-level multipart-upload API:
 * initiate, upload parts sequentially, then complete (or abort on failure).
 */
public class S3MultipartUploadManual {

    /** Size of each uploaded part. S3 requires >= 5 MB for every part except the last. */
    private static final long CHUNK_SIZE = 10 * 1024 * 1024; // 10 MB

    public static void main(String[] args) {
        String bucketName = "your-bucket-name";
        String key = "your-file-key"; // e.g., "documents/myfile.txt"
        Path filePath = Paths.get("/path/to/your/local/file.txt");

        // try-with-resources guarantees the client (and its HTTP connection pool)
        // is closed even if the upload throws.
        try (S3AsyncClient s3AsyncClient = S3AsyncClient.builder()
                .region(Region.US_EAST_1)
                .credentialsProvider(ProfileCredentialsProvider.create())
                .build()) {
            multipartUpload(s3AsyncClient, bucketName, key, filePath);
        }
    }

    /**
     * Performs the three-step multipart upload of {@code filePath} to
     * {@code bucketName}/{@code key}, reading the file in {@link #CHUNK_SIZE} parts.
     *
     * <p>If anything fails after the upload has been initiated, the upload is
     * aborted so S3 does not retain (and bill for) the orphaned parts.
     *
     * @param s3AsyncClient client used for all S3 calls
     * @param bucketName    destination bucket
     * @param key           destination object key
     * @param filePath      local file to upload
     */
    private static void multipartUpload(S3AsyncClient s3AsyncClient, String bucketName, String key, Path filePath) {
        String uploadId = null;
        try {
            // Step 1: Initiate the multipart upload and remember its id.
            CreateMultipartUploadResponse createResponse = s3AsyncClient
                    .createMultipartUpload(CreateMultipartUploadRequest.builder()
                            .bucket(bucketName)
                            .key(key)
                            .build())
                    .join();
            uploadId = createResponse.uploadId();

            // Step 2: Upload the file part by part (sequentially; join() blocks
            // until each part finishes before the next chunk is read).
            List<CompletedPart> completedParts = new ArrayList<>();
            long fileSize = Files.size(filePath);
            long position = 0;
            int partNumber = 1; // S3 part numbers are 1-based
            while (position < fileSize) {
                long chunkSize = Math.min(CHUNK_SIZE, fileSize - position);
                ByteBuffer chunkBuffer = readChunk(filePath, position, chunkSize);
                AsyncRequestBody requestBody = AsyncRequestBody.fromByteBuffer(chunkBuffer);

                UploadPartRequest uploadPartRequest = UploadPartRequest.builder()
                        .bucket(bucketName)
                        .key(key)
                        .uploadId(uploadId)
                        .partNumber(partNumber)
                        .contentLength(chunkSize)
                        .build();
                UploadPartResponse uploadResponse =
                        s3AsyncClient.uploadPart(uploadPartRequest, requestBody).join();

                // The ETag of every part is required to complete the upload.
                completedParts.add(CompletedPart.builder()
                        .partNumber(partNumber)
                        .eTag(uploadResponse.eTag())
                        .build());
                position += chunkSize;
                partNumber++;
            }

            // Step 3: Complete the multipart upload with the collected parts.
            s3AsyncClient.completeMultipartUpload(CompleteMultipartUploadRequest.builder()
                            .bucket(bucketName)
                            .key(key)
                            .uploadId(uploadId)
                            .multipartUpload(CompletedMultipartUpload.builder()
                                    .parts(completedParts)
                                    .build())
                            .build())
                    .join();
            System.out.println("Multipart upload completed successfully!");
        } catch (Exception e) {
            e.printStackTrace();
            // If initiation succeeded, abort so S3 frees the already-uploaded parts;
            // otherwise they linger (and are billed) until a lifecycle rule removes them.
            if (uploadId != null) {
                abortQuietly(s3AsyncClient, bucketName, key, uploadId);
            }
        }
    }

    /** Best-effort abort of a failed multipart upload; never throws. */
    private static void abortQuietly(S3AsyncClient s3AsyncClient, String bucketName, String key, String uploadId) {
        try {
            s3AsyncClient.abortMultipartUpload(AbortMultipartUploadRequest.builder()
                            .bucket(bucketName)
                            .key(key)
                            .uploadId(uploadId)
                            .build())
                    .join();
        } catch (Exception abortFailure) {
            abortFailure.printStackTrace();
        }
    }

    /**
     * Reads {@code chunkSize} bytes of the file starting at {@code position}
     * using a FileChannel.
     *
     * @param filePath  the path to the file
     * @param position  the starting byte offset in the file
     * @param chunkSize the number of bytes to read (caller guarantees it fits in an int)
     * @return a ByteBuffer, flipped and ready for reading, containing the chunk data
     * @throws IOException if an I/O error occurs
     */
    private static ByteBuffer readChunk(Path filePath, long position, long chunkSize) throws IOException {
        try (FileChannel channel = FileChannel.open(filePath, StandardOpenOption.READ)) {
            ByteBuffer buffer = ByteBuffer.allocate((int) chunkSize);
            channel.position(position);
            // A single read() may transfer fewer bytes than requested; loop until
            // the buffer is full or end-of-file, or a short part would be uploaded.
            while (buffer.hasRemaining() && channel.read(buffer) != -1) {
                // keep filling the buffer
            }
            buffer.flip(); // switch the buffer from writing to reading
            return buffer;
        }
    }
}
///////////////////////////////////////////////////
import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider;
import software.amazon.awssdk.core.async.AsyncRequestBody;
import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3AsyncClient;
import software.amazon.awssdk.services.s3.model.*;
import javax.net.ssl.SSLException;
import javax.net.ssl.TrustManager;
import javax.net.ssl.X509TrustManager;
import java.io.IOException;
import java.net.URI;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.*;
import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
/**
 * Uploads every regular file in a local folder to S3 (or an S3-compatible
 * service such as MinIO behind a custom endpoint) using the low-level
 * multipart-upload API, with TLS certificate verification disabled.
 */
public class S3MultipartUploadManual {

    /** Size of each uploaded part. S3 requires >= 5 MB for every part except the last. */
    private static final long CHUNK_SIZE = 10 * 1024 * 1024; // 10 MB

    public static void main(String[] args) {
        String bucketName = "your-bucket-name";
        Path folderPath = Paths.get("/path/to/your/local/folder");
        // Custom endpoint URL (e.g., for MinIO or another S3-compatible service)
        String customEndpointUrl = "https://your-custom-endpoint-url";

        // Passing a client *builder* (not a pre-built client) lets the SDK own the
        // HTTP client's lifecycle: it is closed together with the S3 client.
        // try-with-resources guarantees the S3 client is closed even on failure.
        try (S3AsyncClient s3AsyncClient = S3AsyncClient.builder()
                .region(Region.US_EAST_1)
                .credentialsProvider(ProfileCredentialsProvider.create())
                .endpointOverride(URI.create(customEndpointUrl)) // custom endpoint URL
                .httpClientBuilder(NettyNioAsyncHttpClient.builder()
                        .maxConcurrency(50) // max concurrent HTTP connections
                        // Supplies trust-all managers: disables server-certificate checks.
                        .tlsTrustManagersProvider(S3MultipartUploadManual::trustAllTrustManagers))
                .build()) {

            // DirectoryStream must be closed to release the directory handle.
            try (DirectoryStream<Path> entries = Files.newDirectoryStream(folderPath)) {
                for (Path filePath : entries) {
                    if (!Files.isRegularFile(filePath)) {
                        continue; // skip sub-directories and other non-file entries
                    }
                    // Use the path relative to the folder as the object key.
                    String key = folderPath.relativize(filePath).toString();
                    multipartUpload(s3AsyncClient, bucketName, key, filePath);
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }

    /**
     * Performs the three-step multipart upload of {@code filePath} to
     * {@code bucketName}/{@code key}, reading the file in {@link #CHUNK_SIZE} parts.
     *
     * <p>If anything fails after the upload has been initiated, the upload is
     * aborted so S3 does not retain (and bill for) the orphaned parts.
     *
     * @param s3AsyncClient client used for all S3 calls
     * @param bucketName    destination bucket
     * @param key           destination object key
     * @param filePath      local file to upload
     */
    private static void multipartUpload(S3AsyncClient s3AsyncClient, String bucketName, String key, Path filePath) {
        String uploadId = null;
        try {
            // Step 1: Initiate the multipart upload and remember its id.
            CreateMultipartUploadResponse createResponse = s3AsyncClient
                    .createMultipartUpload(CreateMultipartUploadRequest.builder()
                            .bucket(bucketName)
                            .key(key)
                            .build())
                    .join();
            uploadId = createResponse.uploadId();

            // Step 2: Upload the file part by part (sequentially; join() blocks
            // until each part finishes before the next chunk is read).
            List<CompletedPart> completedParts = new ArrayList<>();
            long fileSize = Files.size(filePath);
            long position = 0;
            int partNumber = 1; // S3 part numbers are 1-based
            while (position < fileSize) {
                long chunkSize = Math.min(CHUNK_SIZE, fileSize - position);
                ByteBuffer chunkBuffer = readChunk(filePath, position, chunkSize);
                AsyncRequestBody requestBody = AsyncRequestBody.fromByteBuffer(chunkBuffer);

                UploadPartRequest uploadPartRequest = UploadPartRequest.builder()
                        .bucket(bucketName)
                        .key(key)
                        .uploadId(uploadId)
                        .partNumber(partNumber)
                        .contentLength(chunkSize)
                        .build();
                UploadPartResponse uploadResponse =
                        s3AsyncClient.uploadPart(uploadPartRequest, requestBody).join();

                // The ETag of every part is required to complete the upload.
                completedParts.add(CompletedPart.builder()
                        .partNumber(partNumber)
                        .eTag(uploadResponse.eTag())
                        .build());
                position += chunkSize;
                partNumber++;
            }

            // Step 3: Complete the multipart upload with the collected parts.
            s3AsyncClient.completeMultipartUpload(CompleteMultipartUploadRequest.builder()
                            .bucket(bucketName)
                            .key(key)
                            .uploadId(uploadId)
                            .multipartUpload(CompletedMultipartUpload.builder()
                                    .parts(completedParts)
                                    .build())
                            .build())
                    .join();
            System.out.println("Multipart upload completed successfully for file: " + filePath);
        } catch (Exception e) {
            e.printStackTrace();
            // If initiation succeeded, abort so S3 frees the already-uploaded parts;
            // otherwise they linger (and are billed) until a lifecycle rule removes them.
            if (uploadId != null) {
                abortQuietly(s3AsyncClient, bucketName, key, uploadId);
            }
        }
    }

    /** Best-effort abort of a failed multipart upload; never throws. */
    private static void abortQuietly(S3AsyncClient s3AsyncClient, String bucketName, String key, String uploadId) {
        try {
            s3AsyncClient.abortMultipartUpload(AbortMultipartUploadRequest.builder()
                            .bucket(bucketName)
                            .key(key)
                            .uploadId(uploadId)
                            .build())
                    .join();
        } catch (Exception abortFailure) {
            abortFailure.printStackTrace();
        }
    }

    /**
     * Reads {@code chunkSize} bytes of the file starting at {@code position}
     * using a FileChannel.
     *
     * @param filePath  the path to the file
     * @param position  the starting byte offset in the file
     * @param chunkSize the number of bytes to read (caller guarantees it fits in an int)
     * @return a ByteBuffer, flipped and ready for reading, containing the chunk data
     * @throws IOException if an I/O error occurs
     */
    private static ByteBuffer readChunk(Path filePath, long position, long chunkSize) throws IOException {
        try (FileChannel channel = FileChannel.open(filePath, StandardOpenOption.READ)) {
            ByteBuffer buffer = ByteBuffer.allocate((int) chunkSize);
            channel.position(position);
            // A single read() may transfer fewer bytes than requested; loop until
            // the buffer is full or end-of-file, or a short part would be uploaded.
            while (buffer.hasRemaining() && channel.read(buffer) != -1) {
                // keep filling the buffer
            }
            buffer.flip(); // switch the buffer from writing to reading
            return buffer;
        }
    }

    /**
     * Returns a TrustManager array that accepts every certificate chain,
     * effectively disabling TLS server-certificate verification.
     *
     * <p><b>SECURITY WARNING:</b> this exposes all traffic to man-in-the-middle
     * attacks. Use only against trusted test endpoints (e.g., a local MinIO with
     * a self-signed certificate) — never in production.
     */
    private static TrustManager[] trustAllTrustManagers() {
        return new TrustManager[]{
                new X509TrustManager() {
                    @Override
                    public X509Certificate[] getAcceptedIssuers() {
                        return new X509Certificate[0];
                    }

                    @Override
                    public void checkClientTrusted(X509Certificate[] certs, String authType) {
                        // intentionally trust everything
                    }

                    @Override
                    public void checkServerTrusted(X509Certificate[] certs, String authType) {
                        // intentionally trust everything
                    }
                }
        };
    }
}