From 8280b29d77bb2fe0de2bc860f66ea699d10a938a Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Thu, 18 Dec 2025 20:37:16 +0100 Subject: [PATCH 1/2] HDDS-14209. Reduce parameter count in ObjectEndpoint --- .../ozone/s3/endpoint/ObjectEndpoint.java | 45 ++-- .../s3/endpoint/TestAbortMultipartUpload.java | 8 +- .../ozone/s3/endpoint/TestListParts.java | 31 +-- .../endpoint/TestMultipartUploadComplete.java | 7 +- .../endpoint/TestMultipartUploadWithCopy.java | 13 +- .../ozone/s3/endpoint/TestObjectDelete.java | 2 +- .../ozone/s3/endpoint/TestObjectGet.java | 22 +- .../ozone/s3/endpoint/TestObjectPut.java | 68 +++--- .../s3/endpoint/TestObjectTaggingDelete.java | 13 +- .../s3/endpoint/TestObjectTaggingGet.java | 11 +- .../s3/endpoint/TestObjectTaggingPut.java | 22 +- .../ozone/s3/endpoint/TestPartUpload.java | 21 +- .../s3/endpoint/TestPartUploadWithStream.java | 13 +- .../s3/endpoint/TestPermissionCheck.java | 15 +- .../s3/endpoint/TestUploadWithStream.java | 4 +- .../s3/metrics/TestS3GatewayMetrics.java | 200 ++++++++---------- 16 files changed, 254 insertions(+), 241 deletions(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index c6a2b6539098..7316aa2a1b99 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -168,7 +168,7 @@ public class ObjectEndpoint extends EndpointBase { /*FOR the feature Overriding Response Header https://docs.aws.amazon.com/de_de/AmazonS3/latest/API/API_GetObject.html */ - private Map overrideQueryParameter; + private final Map overrideQueryParameter; private int bufferSize; private int chunkSize; private boolean datastreamEnabled; @@ -209,17 +209,18 @@ public void init() { * See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html for * more details. */ - @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"}) + @SuppressWarnings("checkstyle:MethodLength") @PUT public Response put( @PathParam(BUCKET) String bucketName, @PathParam(PATH) String keyPath, @HeaderParam(HttpHeaders.CONTENT_LENGTH) long length, - @QueryParam(QueryParams.PART_NUMBER) int partNumber, - @QueryParam(QueryParams.UPLOAD_ID) @DefaultValue("") String uploadID, - @QueryParam(QueryParams.TAGGING) String taggingMarker, - @QueryParam(QueryParams.ACL) String aclMarker, - final InputStream body) throws IOException, OS3Exception { + @QueryParam(QueryParams.PART_NUMBER) int partNumber, + final InputStream body + ) throws IOException, OS3Exception { + final String aclMarker = getQueryParam(QueryParams.ACL); + final String taggingMarker = getQueryParam(QueryParams.TAGGING); + final String uploadID = getQueryParam(QueryParams.UPLOAD_ID); long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.CREATE_KEY; boolean auditSuccess = true; @@ -403,17 +404,17 @@ public Response put( * https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListParts.html * for more details. 
*/ - @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"}) + @SuppressWarnings("checkstyle:MethodLength") @GET public Response get( @PathParam(BUCKET) String bucketName, @PathParam(PATH) String keyPath, @QueryParam(QueryParams.PART_NUMBER) int partNumber, - @QueryParam(QueryParams.UPLOAD_ID) String uploadId, - @QueryParam(QueryParams.MAX_PARTS) @DefaultValue("1000") int maxParts, - @QueryParam(QueryParams.PART_NUMBER_MARKER) String partNumberMarker, - @QueryParam(QueryParams.TAGGING) String taggingMarker) - throws IOException, OS3Exception { + @QueryParam(QueryParams.MAX_PARTS) @DefaultValue("1000") int maxParts + ) throws IOException, OS3Exception { + final String uploadId = getQueryParam(QueryParams.UPLOAD_ID); + final String partNumberMarker = getQueryParam(QueryParams.PART_NUMBER_MARKER); + final String taggingMarker = getQueryParam(QueryParams.TAGGING); long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.GET_KEY; PerformanceStringBuilder perf = new PerformanceStringBuilder(); @@ -720,10 +721,11 @@ private Response abortMultipartUpload(OzoneVolume volume, String bucket, @SuppressWarnings("emptyblock") public Response delete( @PathParam(BUCKET) String bucketName, - @PathParam(PATH) String keyPath, - @QueryParam(QueryParams.UPLOAD_ID) @DefaultValue("") String uploadId, - @QueryParam(QueryParams.TAGGING) String taggingMarker) throws - IOException, OS3Exception { + @PathParam(PATH) String keyPath + ) throws IOException, OS3Exception { + final String taggingMarker = getQueryParam(QueryParams.TAGGING); + final String uploadId = getQueryParam(QueryParams.UPLOAD_ID); + long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.DELETE_KEY; @@ -798,8 +800,7 @@ public Response delete( public Response initializeMultipartUpload( @PathParam(BUCKET) String bucket, @PathParam(PATH) String key - ) - throws IOException, OS3Exception { + ) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.INIT_MULTIPART_UPLOAD; @@ -863,9 +864,9 @@ private ReplicationConfig getReplicationConfig(OzoneBucket ozoneBucket, public Response completeMultipartUpload( @PathParam(BUCKET) String bucket, @PathParam(PATH) String key, - @QueryParam(QueryParams.UPLOAD_ID) @DefaultValue("") String uploadID, - CompleteMultipartUploadRequest multipartUploadRequest) - throws IOException, OS3Exception { + CompleteMultipartUploadRequest multipartUploadRequest + ) throws IOException, OS3Exception { + final String uploadID = getQueryParam(QueryParams.UPLOAD_ID); long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.COMPLETE_MULTIPART_UPLOAD; OzoneVolume volume = getVolume(); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java index 9c46a718508f..f775ac69fa49 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java @@ -30,6 +30,7 @@ import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.Test; /** @@ -63,15 +64,16 @@ public void testAbortMultipartUpload() throws Exception { 
assertNotNull(multipartUploadInitiateResponse.getUploadID()); String uploadID = multipartUploadInitiateResponse.getUploadID(); - // Abort multipart upload - response = rest.delete(bucket, key, uploadID, null); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); + response = rest.delete(bucket, key); assertEquals(204, response.getStatus()); // test with unknown upload Id. try { - rest.delete(bucket, key, "random", null); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "random"); + rest.delete(bucket, key); } catch (OS3Exception ex) { assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), ex.getCode()); assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(), diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java index 30be715b5305..013b33e5a855 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java @@ -35,6 +35,7 @@ import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -44,7 +45,6 @@ public class TestListParts { private ObjectEndpoint rest; - private String uploadID; @BeforeEach public void setUp() throws Exception { @@ -67,8 +67,9 @@ public void setUp() throws Exception { OzoneConsts.KEY); MultipartUploadInitiateResponse multipartUploadInitiateResponse = (MultipartUploadInitiateResponse) response.getEntity(); - assertNotNull(multipartUploadInitiateResponse.getUploadID()); - uploadID = multipartUploadInitiateResponse.getUploadID(); + String uploadID = multipartUploadInitiateResponse.getUploadID(); + assertNotNull(uploadID); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); assertEquals(200, response.getStatus()); @@ -76,25 +77,25 @@ public void setUp() throws Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, null, null, body); + content.length(), 1, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 2, uploadID, null, null, body); + content.length(), 2, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 3, uploadID, null, null, body); + content.length(), 3, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @Test public void testListParts() throws Exception { - Response response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, - uploadID, 3, "0", null); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.PART_NUMBER_MARKER, "0"); + Response response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, 3); ListPartsResponse listPartsResponse = (ListPartsResponse) response.getEntity(); @@ -106,8 +107,8 @@ public void testListParts() throws Exception { @Test public void testListPartsContinuation() throws Exception { - Response response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, - uploadID, 2, "0", null); + 
rest.getQueryParameters().putSingle(S3Consts.QueryParams.PART_NUMBER_MARKER, "0"); + Response response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, 2); ListPartsResponse listPartsResponse = (ListPartsResponse) response.getEntity(); @@ -115,8 +116,9 @@ public void testListPartsContinuation() throws Exception { assertEquals(2, listPartsResponse.getPartList().size()); // Continue - response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, uploadID, 2, - Integer.toString(listPartsResponse.getNextPartNumberMarker()), null); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.PART_NUMBER_MARKER, + Integer.toString(listPartsResponse.getNextPartNumberMarker())); + response = rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, 2); listPartsResponse = (ListPartsResponse) response.getEntity(); assertFalse(listPartsResponse.getTruncated()); @@ -126,9 +128,10 @@ public void testListPartsContinuation() throws Exception { @Test public void testListPartsWithUnknownUploadID() throws Exception { + rest.getQueryParameters().putSingle(S3Consts.QueryParams.PART_NUMBER_MARKER, "0"); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "no-such-upload"); try { - rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, - uploadID, 2, "0", null); + rest.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, 2); } catch (OS3Exception ex) { assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(), ex.getErrorMessage()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java index fde336f48079..b2f4dea063ce 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java @@ -45,6 +45,7 @@ import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -106,8 +107,9 @@ private Part uploadPart(String key, String uploadID, int partNumber, String content) throws IOException, OS3Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); Response response = rest.put(OzoneConsts.S3_BUCKET, key, content.length(), - partNumber, uploadID, null, null, body); + partNumber, body); assertEquals(200, response.getStatus()); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); @@ -120,8 +122,9 @@ private Part uploadPart(String key, String uploadID, int partNumber, String private void completeMultipartUpload(String key, CompleteMultipartUploadRequest completeMultipartUploadRequest, String uploadID) throws IOException, OS3Exception { + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); Response response = rest.completeMultipartUpload(OzoneConsts.S3_BUCKET, key, - uploadID, completeMultipartUploadRequest); + completeMultipartUploadRequest); assertEquals(200, response.getStatus()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java index 702c32d1abab..0da2e241a5ec 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java @@ -53,6 +53,7 @@ import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -330,8 +331,9 @@ private Part uploadPart(String key, String uploadID, int partNumber, String setHeaders(); ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + endpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); Response response = endpoint.put(OzoneConsts.S3_BUCKET, key, content.length(), - partNumber, uploadID, null, null, body); + partNumber, body); assertEquals(200, response.getStatus()); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); @@ -375,8 +377,9 @@ private Part uploadPartWithCopy(String key, String uploadID, int partNumber, setHeaders(additionalHeaders); ByteArrayInputStream body = new ByteArrayInputStream("".getBytes(UTF_8)); + endpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); Response response = endpoint.put(OzoneConsts.S3_BUCKET, key, 0, partNumber, - uploadID, null, null, body); + body); assertEquals(200, response.getStatus()); CopyPartResult result = (CopyPartResult) response.getEntity(); @@ -403,7 +406,8 @@ public void testUploadWithRangeCopyContentLength() OzoneConsts.S3_BUCKET + "/" + EXISTING_KEY); additionalHeaders.put(COPY_SOURCE_HEADER_RANGE, "bytes=0-3"); setHeaders(additionalHeaders); - endpoint.put(OzoneConsts.S3_BUCKET, KEY, 0, 1, uploadID, null, null, body); + endpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); + endpoint.put(OzoneConsts.S3_BUCKET, KEY, 0, 1, body); OzoneMultipartUploadPartListParts parts = client.getObjectStore().getS3Bucket(OzoneConsts.S3_BUCKET) .listParts(KEY, uploadID, 0, 100); @@ -415,8 +419,9 @@ private void completeMultipartUpload(String key, CompleteMultipartUploadRequest completeMultipartUploadRequest, String uploadID) throws IOException, OS3Exception { setHeaders(); + endpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); Response response = endpoint.completeMultipartUpload(OzoneConsts.S3_BUCKET, key, - uploadID, completeMultipartUploadRequest); + completeMultipartUploadRequest); assertEquals(200, response.getStatus()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java index 3974cfcf9666..3b382c9bc4f1 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java @@ -47,7 +47,7 @@ public void delete() throws IOException, OS3Exception { .build(); //WHEN - rest.delete("b1", "key1", null, null); + rest.delete("b1", "key1"); //THEN assertFalse(bucket.listKeys("").hasNext(), diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java index a9fd7da4200e..f56e3b6abc2c 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java @@ -90,17 +90,17 @@ public void init() throws OS3Exception, IOException { ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); rest.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, null, null, body); + 1, body); // Create a key with object tags when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), - 1, null, null, null, body); + 1, body); } @Test public void get() throws IOException, OS3Exception { //WHEN - Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); //THEN OzoneInputStream ozoneInputStream = @@ -122,7 +122,7 @@ public void get() throws IOException, OS3Exception { @Test public void getKeyWithTag() throws IOException, OS3Exception { //WHEN - Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, null, 0, null, null); + Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, 0); //THEN OzoneInputStream ozoneInputStream = @@ -144,7 +144,7 @@ public void getKeyWithTag() throws IOException, OS3Exception { public void inheritRequestHeader() throws IOException, OS3Exception { setDefaultHeader(); - Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); assertEquals(CONTENT_TYPE1, response.getHeaderString("Content-Type")); @@ -174,7 +174,7 @@ public void overrideResponseHeader() throws IOException, OS3Exception { CONTENT_DISPOSITION2); queryParameter.putSingle("response-content-encoding", CONTENT_ENCODING2); - Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); + Response response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); assertEquals(CONTENT_TYPE2, response.getHeaderString("Content-Type")); @@ -195,13 +195,13 @@ public void getRangeHeader() throws IOException, OS3Exception { Response response; when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-0"); - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); assertEquals("1", response.getHeaderString("Content-Length")); assertEquals(String.format("bytes 0-0/%s", CONTENT.length()), response.getHeaderString("Content-Range")); when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-"); - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); assertEquals(String.valueOf(CONTENT.length()), response.getHeaderString("Content-Length")); assertEquals( @@ -214,7 +214,7 @@ public void getRangeHeader() throws IOException, OS3Exception { @Test public void getStatusCode() throws IOException, OS3Exception { Response response; - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); assertEquals(response.getStatus(), Response.Status.OK.getStatusCode()); @@ -222,7 +222,7 @@ public void getStatusCode() throws IOException, OS3Exception { // The 206 (Partial Content) status code indicates that the server is // successfully 
fulfilling a range request for the target resource when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-1"); - response = rest.get(BUCKET_NAME, KEY_NAME, 0, null, 0, null, null); + response = rest.get(BUCKET_NAME, KEY_NAME, 0, 0); assertEquals(response.getStatus(), Response.Status.PARTIAL_CONTENT.getStatusCode()); assertNull(response.getHeaderString(TAG_COUNT_HEADER)); @@ -256,7 +256,7 @@ public void testGetWhenKeyIsDirectoryAndDoesNotEndWithASlash() // WHEN final OS3Exception ex = assertThrows(OS3Exception.class, - () -> rest.get(BUCKET_NAME, keyPath, 0, null, 0, null, null)); + () -> rest.get(BUCKET_NAME, keyPath, 0, 0)); // THEN assertEquals(NO_SUCH_KEY.getCode(), ex.getCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java index e5c34fb4e465..476b91020f2c 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java @@ -158,7 +158,7 @@ void testPutObject(int length, ReplicationConfig replication) throws IOException bucket.setReplicationConfig(replication); //WHEN - Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, length, 1, null, null, null, body); + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, length, 1, body); //THEN assertEquals(200, response.getStatus()); @@ -185,7 +185,7 @@ void testPutObjectContentLength() throws IOException, OS3Exception { new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); long dataSize = CONTENT.length(); - objectEndpoint.put(BUCKET_NAME, KEY_NAME, dataSize, 0, null, null, null, body); + objectEndpoint.put(BUCKET_NAME, KEY_NAME, dataSize, 0, body); assertEquals(dataSize, getKeyDataSize()); } @@ -202,8 +202,8 @@ void testPutObjectContentLengthForStreaming() when(headers.getHeaderString(DECODED_CONTENT_LENGTH_HEADER)) .thenReturn("15"); - objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 0, null, null, - null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); + objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 0, + new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); assertEquals(15, getKeyDataSize()); } @@ -218,7 +218,7 @@ public void testPutObjectWithTags() throws IOException, OS3Exception { objectEndpoint.setHeaders(headersWithTags); Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, null, null, body); + 1, body); assertEquals(200, response.getStatus()); @@ -242,7 +242,7 @@ public void testPutObjectWithOnlyTagKey() throws Exception { try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, null, null, body); + 1, body); fail("request with invalid query param should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -261,7 +261,7 @@ public void testPutObjectWithDuplicateTagKey() throws Exception { objectEndpoint.setHeaders(headersWithDuplicateTagKey); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, null, null, body); + 1, body); fail("request with duplicate tag key should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -281,7 +281,7 @@ public void testPutObjectWithLongTagKey() throws Exception { objectEndpoint.setHeaders(headersWithLongTagKey); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, 
null, null, null, body); + 1, body); fail("request with tag key exceeding the length limit should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -301,7 +301,7 @@ public void testPutObjectWithLongTagValue() throws Exception { when(headersWithLongTagValue.getHeaderString(TAG_HEADER)).thenReturn("tag1=" + longTagValue); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, null, null, body); + 1, body); fail("request with tag value exceeding the length limit should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -327,7 +327,7 @@ public void testPutObjectWithTooManyTags() throws Exception { objectEndpoint.setHeaders(headersWithTooManyTags); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), - 1, null, null, null, body); + 1, body); fail("request with number of tags exceeding limit should fail"); } catch (OS3Exception ex) { assertEquals(INVALID_TAG.getCode(), ex.getCode()); @@ -356,7 +356,7 @@ void testPutObjectWithSignedChunks() throws IOException, OS3Exception { //WHEN Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, - chunkedContent.length(), 1, null, null, null, + chunkedContent.length(), 1, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); //THEN @@ -386,7 +386,7 @@ public void testPutObjectMessageDigestResetDuringException() throws OS3Exception new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); try { objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT - .length(), 1, null, null, null, body); + .length(), 1, body); fail("Should throw IOException"); } catch (IOException ignored) { // Verify that the message digest is reset so that the instance can be reused for the @@ -411,7 +411,7 @@ void testCopyObject() throws IOException, OS3Exception { when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("COPY"); Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, - CONTENT.length(), 1, null, null, null, body); + CONTENT.length(), 1, body); OzoneInputStream ozoneInputStream = clientStub.getObjectStore() .getS3Bucket(BUCKET_NAME) @@ -437,7 +437,7 @@ void testCopyObject() throws IOException, OS3Exception { BUCKET_NAME + "/" + urlEncode(KEY_NAME)); response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, - null, null, null, body); + body); // Check destination key and response ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME) @@ -467,7 +467,7 @@ void testCopyObject() throws IOException, OS3Exception { metadataHeaders.remove(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-2"); response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, - null, null, null, body); + body); ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME) .readKey(DEST_KEY); @@ -494,7 +494,7 @@ void testCopyObject() throws IOException, OS3Exception { // wrong copy metadata directive when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("INVALID"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, null, null, null, body), + DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, body), "test copy object failed"); assertThat(e.getHttpCode()).isEqualTo(400); assertThat(e.getCode()).isEqualTo("InvalidArgument"); @@ -504,7 +504,7 @@ void testCopyObject() throws IOException, OS3Exception { // source and dest same e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - 
BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, null, null, body), + BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, body), "test copy object failed"); assertThat(e.getErrorMessage()).contains("This copy request is illegal"); @@ -512,28 +512,28 @@ void testCopyObject() throws IOException, OS3Exception { when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(DEST_BUCKET_NAME, - DEST_KEY, CONTENT.length(), 1, null, null, null, body), "test copy object failed"); + DEST_KEY, CONTENT.length(), 1, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); // dest bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( BUCKET_NAME + "/" + urlEncode(KEY_NAME)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET, - DEST_KEY, CONTENT.length(), 1, null, null, null, body), "test copy object failed"); + DEST_KEY, CONTENT.length(), 1, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); //Both source and dest bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET, - DEST_KEY, CONTENT.length(), 1, null, null, null, body), "test copy object failed"); + DEST_KEY, CONTENT.length(), 1, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); // source key not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( BUCKET_NAME + "/" + urlEncode(NO_SUCH_BUCKET)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - "nonexistent", KEY_NAME, CONTENT.length(), 1, null, null, null, body), + "nonexistent", KEY_NAME, CONTENT.length(), 1, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); } @@ -545,7 +545,7 @@ public void testCopyObjectMessageDigestResetDuringException() throws IOException new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, - CONTENT.length(), 1, null, null, null, body); + CONTENT.length(), 1, body); OzoneInputStream ozoneInputStream = clientStub.getObjectStore() .getS3Bucket(BUCKET_NAME) @@ -573,7 +573,7 @@ public void testCopyObjectMessageDigestResetDuringException() throws IOException try { objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, - null, null, null, body); + body); fail("Should throw IOException"); } catch (IOException ignored) { // Verify that the message digest is reset so that the instance can be reused for the @@ -596,7 +596,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { String sourceKeyName = "sourceKey"; Response putResponse = objectEndpoint.put(BUCKET_NAME, sourceKeyName, - CONTENT.length(), 1, null, null, null, body); + CONTENT.length(), 1, body); OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(sourceKeyName); @@ -614,7 +614,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { BUCKET_NAME + "/" + urlEncode(sourceKeyName)); objectEndpoint.setHeaders(headersForCopy); - Response copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, null, null, body); + Response copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, body); OzoneKeyDetails destKeyDetails = clientStub.getObjectStore() 
.getS3Bucket(DEST_BUCKET_NAME).getKey(destKey); @@ -633,7 +633,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { // With x-amz-tagging-directive = COPY with a different x-amz-tagging when(headersForCopy.getHeaderString(TAG_HEADER)).thenReturn("tag3=value3"); - copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, null, null, body); + copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, body); assertEquals(200, copyResponse.getStatus()); destKeyDetails = clientStub.getObjectStore() @@ -648,7 +648,7 @@ public void testCopyObjectWithTags() throws IOException, OS3Exception { // Copy object with x-amz-tagging-directive = REPLACE when(headersForCopy.getHeaderString(TAG_DIRECTIVE_HEADER)).thenReturn("REPLACE"); - copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, null, null, null, body); + copyResponse = objectEndpoint.put(DEST_BUCKET_NAME, destKey, CONTENT.length(), 1, body); assertEquals(200, copyResponse.getStatus()); destKeyDetails = clientStub.getObjectStore() @@ -670,7 +670,7 @@ public void testCopyObjectWithInvalidTagCopyDirective() throws Exception { HttpHeaders headersForCopy = Mockito.mock(HttpHeaders.class); when(headersForCopy.getHeaderString(TAG_DIRECTIVE_HEADER)).thenReturn("INVALID"); try { - objectEndpoint.put(DEST_BUCKET_NAME, "somekey", CONTENT.length(), 1, null, null, null, body); + objectEndpoint.put(DEST_BUCKET_NAME, "somekey", CONTENT.length(), 1, body); } catch (OS3Exception ex) { assertEquals(INVALID_ARGUMENT.getCode(), ex.getCode()); assertThat(ex.getErrorMessage()).contains("The tagging copy directive specified is invalid"); @@ -685,7 +685,7 @@ void testInvalidStorageType() { when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("random"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, null, null, body)); + BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, body)); assertEquals(S3ErrorTable.INVALID_STORAGE_CLASS.getErrorMessage(), e.getErrorMessage()); assertEquals("random", e.getResource()); @@ -698,7 +698,7 @@ void testEmptyStorageType() throws IOException, OS3Exception { when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(""); objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT - .length(), 1, null, null, null, body); + .length(), 1, body); OzoneKeyDetails key = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME) .getKey(KEY_NAME); @@ -717,7 +717,7 @@ void testDirectoryCreation() throws IOException, // WHEN try (Response response = objectEndpoint.put(fsoBucket.getName(), path, - 0L, 0, "", null, null, null)) { + 0L, 0, null)) { assertEquals(HttpStatus.SC_OK, response.getStatus()); } @@ -732,12 +732,12 @@ void testDirectoryCreationOverFile() throws IOException, OS3Exception { final String path = "key"; final ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.put(FSO_BUCKET_NAME, path, CONTENT.length(), 0, "", null, null, body); + objectEndpoint.put(FSO_BUCKET_NAME, path, CONTENT.length(), 0, body); // WHEN final OS3Exception exception = assertThrows(OS3Exception.class, () -> objectEndpoint - .put(FSO_BUCKET_NAME, path + "/", 0, 0, "", null, null, null) + .put(FSO_BUCKET_NAME, path + "/", 0, 0, null) .close()); // THEN @@ -753,7 +753,7 @@ public void testPutEmptyObject() throws IOException, OS3Exception { ByteArrayInputStream body = new ByteArrayInputStream(emptyString.getBytes(UTF_8)); 
objectEndpoint.setHeaders(headersWithTags); - Response putResponse = objectEndpoint.put(BUCKET_NAME, KEY_NAME, emptyString.length(), 1, null, null, null, body); + Response putResponse = objectEndpoint.put(BUCKET_NAME, KEY_NAME, emptyString.length(), 1, body); assertEquals(200, putResponse.getStatus()); OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); assertEquals(0, keyDetails.getDataSize()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java index 488474e30390..8b292ed1db7b 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java @@ -47,6 +47,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.Mockito; @@ -82,12 +83,13 @@ public void init() throws OS3Exception, IOException { Mockito.when(headers.getHeaderString(X_AMZ_CONTENT_SHA256)) .thenReturn("mockSignature"); rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), - 1, null, null, null, body); + 1, body); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, ""); } @Test public void testDeleteTagging() throws IOException, OS3Exception { - Response response = rest.delete(BUCKET_NAME, KEY_WITH_TAG, null, ""); + Response response = rest.delete(BUCKET_NAME, KEY_WITH_TAG); assertEquals(HTTP_NO_CONTENT, response.getStatus()); assertTrue(client.getObjectStore().getS3Bucket(BUCKET_NAME) @@ -97,7 +99,7 @@ public void testDeleteTagging() throws IOException, OS3Exception { @Test public void testDeleteTaggingNoKeyFound() throws Exception { try { - rest.delete(BUCKET_NAME, "nonexistent", null, ""); + rest.delete(BUCKET_NAME, "nonexistent"); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -108,7 +110,7 @@ public void testDeleteTaggingNoKeyFound() throws Exception { @Test public void testDeleteTaggingNoBucketFound() throws Exception { try { - rest.delete("nonexistent", "nonexistent", null, ""); + rest.delete("nonexistent", "nonexistent"); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -135,7 +137,8 @@ public void testDeleteObjectTaggingNotImplemented() throws Exception { ResultCodes.NOT_SUPPORTED_OPERATION)).when(mockBucket).deleteObjectTagging("dir/"); try { - endpoint.delete("fsoBucket", "dir/", null, ""); + endpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, ""); + endpoint.delete("fsoBucket", "dir/"); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_IMPLEMENTED, ex.getHttpCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java index 1885e7d0cf6f..f7b039e66de9 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java +++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java @@ -37,6 +37,7 @@ import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.endpoint.S3Tagging.Tag; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.Mockito; @@ -72,13 +73,15 @@ public void init() throws OS3Exception, IOException { // Create a key with object tags Mockito.when(headers.getHeaderString(TAG_HEADER)).thenReturn("tag1=value1&tag2=value2"); rest.put(BUCKET_NAME, KEY_WITH_TAG, CONTENT.length(), - 1, null, null, null, body); + 1, body); + + rest.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, ""); } @Test public void testGetTagging() throws IOException, OS3Exception { //WHEN - Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, null, 0, null, ""); + Response response = rest.get(BUCKET_NAME, KEY_WITH_TAG, 0, 0); assertEquals(HTTP_OK, response.getStatus()); S3Tagging s3Tagging = (S3Tagging) response.getEntity(); @@ -99,7 +102,7 @@ public void testGetTagging() throws IOException, OS3Exception { @Test public void testGetTaggingNoKeyFound() throws Exception { try { - rest.get(BUCKET_NAME, "nonexistent", 0, null, 0, null, ""); + rest.get(BUCKET_NAME, "nonexistent", 0, 0); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -110,7 +113,7 @@ public void testGetTaggingNoKeyFound() throws Exception { @Test public void testGetTaggingNoBucketFound() throws Exception { try { - rest.get("nonexistent", "nonexistent", 0, null, 0, null, ""); + rest.get("nonexistent", "nonexistent", 0, 0); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java index d1651d6b59c0..9c42dd510d6d 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java @@ -90,13 +90,14 @@ void setup() throws IOException, OS3Exception { ByteArrayInputStream body = new ByteArrayInputStream("".getBytes(UTF_8)); - objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, null, null, body); + objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, body); } @Test public void testPutObjectTaggingWithEmptyBody() throws Exception { try { - objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, "", null, + objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, ""); + objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null); fail(); } catch (OS3Exception ex) { @@ -107,8 +108,8 @@ public void testPutObjectTaggingWithEmptyBody() throws Exception { @Test public void testPutValidObjectTagging() throws Exception { - assertEquals(HTTP_OK, objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, - "", null, twoTags()).getStatus()); + objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, ""); + assertEquals(HTTP_OK, objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, twoTags()).getStatus()); OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); assertEquals(2, 
keyDetails.getTags().size()); @@ -129,7 +130,8 @@ public void testPutInvalidObjectTagging() throws Exception { private void testInvalidObjectTagging(Supplier inputStream, int expectedHttpCode, String expectedErrorCode) throws Exception { try { - objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, null, "", null, + objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, ""); + objectEndpoint.put(BUCKET_NAME, KEY_NAME, 0, 1, inputStream.get()); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { @@ -141,8 +143,9 @@ private void testInvalidObjectTagging(Supplier inputStream, @Test public void testPutObjectTaggingNoKeyFound() throws Exception { try { + objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, ""); objectEndpoint.put(BUCKET_NAME, "nonexistent", 0, 1, - null, "", null, twoTags()); + twoTags()); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -153,8 +156,9 @@ public void testPutObjectTaggingNoKeyFound() throws Exception { @Test public void testPutObjectTaggingNoBucketFound() throws Exception { try { + objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, ""); objectEndpoint.put("nonexistent", "nonexistent", 0, 1, - null, "", null, twoTags()); + twoTags()); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -185,9 +189,9 @@ public void testPutObjectTaggingNotImplemented() throws Exception { doThrow(new OMException("PutObjectTagging is not currently supported for FSO directory", ResultCodes.NOT_SUPPORTED_OPERATION)).when(mockBucket).putObjectTagging("dir/", twoTagsMap); + endpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, ""); try { - endpoint.put("fsoBucket", "dir/", 0, 1, null, "", - null, twoTags()); + endpoint.put("fsoBucket", "dir/", 0, 1, twoTags()); fail("Expected an OS3Exception to be thrown"); } catch (OS3Exception ex) { assertEquals(HTTP_NOT_IMPLEMENTED, ex.getHttpCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java index 4981069528a8..57fa0264b509 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java @@ -50,6 +50,7 @@ import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.MockedStatic; @@ -96,8 +97,9 @@ public void testPartUpload() throws Exception { String content = "Multipart Upload"; ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, null, null, body); + content.length(), 1, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -118,8 +120,9 @@ public void testPartUploadWithOverride() throws Exception { String content = "Multipart Upload"; ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + 
rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, null, null, body); + content.length(), 1, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -128,7 +131,7 @@ public void testPartUploadWithOverride() throws Exception { // Upload part again with same part Number, the ETag should be changed. content = "Multipart Upload Changed"; response = rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, null, null, body); + content.length(), 1, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); @@ -140,8 +143,9 @@ public void testPartUploadWithIncorrectUploadID() throws Exception { String content = "Multipart Upload With Incorrect uploadID"; ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "random"); rest.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, - "random", null, null, body); + body); }); assertEquals("NoSuchUpload", ex.getCode()); assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); @@ -176,8 +180,9 @@ public void testPartUploadStreamContentLength() String uploadID = multipartUploadInitiateResponse.getUploadID(); long contentLength = chunkedContent.length(); + objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); objectEndpoint.put(OzoneConsts.S3_BUCKET, keyName, contentLength, 1, - uploadID, null, null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); + new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); assertContentLength(uploadID, keyName, 15); } @@ -199,8 +204,9 @@ public void testPartUploadContentLength() throws IOException, OS3Exception { ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); rest.put(OzoneConsts.S3_BUCKET, keyName, - contentLength, 1, uploadID, null, null, body); + contentLength, 1, body); assertContentLength(uploadID, keyName, content.length()); } @@ -244,8 +250,9 @@ public void testPartUploadMessageDigestResetDuringException() throws IOException ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); try { + objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); objectEndpoint.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, - content.length(), 1, uploadID, null, null, body); + content.length(), 1, body); fail("Should throw IOException"); } catch (IOException ignored) { // Verify that the message digest is reset so that the instance can be reused for the diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java index 736660073d57..1656fa35d438 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java @@ -38,6 +38,7 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.util.S3Consts; import org.junit.jupiter.api.BeforeEach; import 
org.junit.jupiter.api.Test; @@ -93,8 +94,9 @@ public void testPartUpload() throws Exception { String content = "Multipart Upload"; ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); response = rest.put(S3BUCKET, S3KEY, - content.length(), 1, uploadID, null, null, body); + content.length(), 1, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -114,8 +116,9 @@ public void testPartUploadWithOverride() throws Exception { String content = "Multipart Upload"; ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); response = rest.put(S3BUCKET, S3KEY, - content.length(), 1, uploadID, null, null, body); + content.length(), 1, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); @@ -123,8 +126,9 @@ public void testPartUploadWithOverride() throws Exception { // Upload part again with same part Number, the ETag should be changed. content = "Multipart Upload Changed"; + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID); response = rest.put(S3BUCKET, S3KEY, - content.length(), 1, uploadID, null, null, body); + content.length(), 1, body); assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); @@ -136,8 +140,9 @@ public void testPartUploadWithIncorrectUploadID() throws Exception { String content = "Multipart Upload With Incorrect uploadID"; ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + rest.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "random"); rest.put(S3BUCKET, S3KEY, content.length(), 1, - "random", null, null, body); + body); }); assertEquals("NoSuchUpload", ex.getCode()); assertEquals(HTTP_NOT_FOUND, ex.getHttpCode()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java index 9872a711c639..8bf809c0a57e 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java @@ -261,8 +261,9 @@ public void testGetKey() throws IOException { .setConfig(conf) .build(); + objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.PART_NUMBER_MARKER, "marker"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.get( - "bucketName", "keyPath", 0, null, 1000, "marker", null)); + "bucketName", "keyPath", 0, 1000)); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -279,7 +280,7 @@ public void testPutKey() throws IOException { .build(); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - "bucketName", "keyPath", 1024, 0, null, null, null, + "bucketName", "keyPath", 1024, 0, new ByteArrayInputStream(new byte[]{}))); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -297,7 +298,7 @@ public void testDeleteKey() throws IOException { .build(); OS3Exception e = assertThrows(OS3Exception.class, () -> - objectEndpoint.delete("bucketName", "keyPath", null, null)); + objectEndpoint.delete("bucketName", "keyPath")); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -341,18 +342,18 @@ public void testObjectTagging() throws Exception { InputStream tagInput = new 
         ByteArrayInputStream(xml.getBytes(UTF_8));
+    objectEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.TAGGING, "");
     OS3Exception e = assertThrows(OS3Exception.class, () ->
         objectEndpoint.put("bucketName", "keyPath", 0, 1,
-            null, "", null, tagInput));
+            tagInput));
     assertEquals(HTTP_FORBIDDEN, e.getHttpCode());

     e = assertThrows(OS3Exception.class, () ->
-        objectEndpoint.delete("bucketName", "keyPath", "", ""));
+        objectEndpoint.delete("bucketName", "keyPath"));
     assertEquals(HTTP_FORBIDDEN, e.getHttpCode());

     e = assertThrows(OS3Exception.class, () ->
-        objectEndpoint.get("bucketName", "keyPath", 0, null,
-            0, null, ""));
+        objectEndpoint.get("bucketName", "keyPath", 0, 0));
     assertEquals(HTTP_FORBIDDEN, e.getHttpCode());
   }
 }
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java
index dbe21601dbd3..4586d477f734 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java
@@ -92,7 +92,7 @@ public void testUpload() throws Exception {
     byte[] keyContent = S3_COPY_EXISTING_KEY_CONTENT.getBytes(UTF_8);
     ByteArrayInputStream body = new ByteArrayInputStream(keyContent);

-    Response response = rest.put(S3BUCKET, S3KEY, 0, 0, null, null, null, body);
+    Response response = rest.put(S3BUCKET, S3KEY, 0, 0, body);

     assertEquals(200, response.getStatus());
   }
@@ -126,7 +126,7 @@ public void testUploadWithCopy() throws Exception {
         .forEach((k, v) -> when(headers.getHeaderString(k)).thenReturn(v));
     rest.setHeaders(headers);

-    Response response = rest.put(S3BUCKET, S3KEY, 0, 0, null, null, null, null);
+    Response response = rest.put(S3BUCKET, S3KEY, 0, 0, null);

     assertEquals(200, response.getStatus());

diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java
index 018ad0f1f5e2..39baae58584b 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java
@@ -20,6 +20,8 @@
 import static java.net.HttpURLConnection.HTTP_CONFLICT;
 import static java.net.HttpURLConnection.HTTP_OK;
 import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.ozone.OzoneConsts.BUCKET;
+import static org.apache.hadoop.ozone.OzoneConsts.KEY;
 import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.BUCKET_ALREADY_EXISTS;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
@@ -36,10 +38,10 @@
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
 import javax.ws.rs.core.HttpHeaders;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.StreamingOutput;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientStub;
@@ -62,8 +64,6 @@
  */
 public class TestS3GatewayMetrics {

-  private String bucketName = OzoneConsts.BUCKET;
-  private String keyName = OzoneConsts.KEY;
   private OzoneClient clientStub;
   private BucketEndpoint bucketEndpoint;
   private RootEndpoint rootEndpoint;
@@ -72,13 +72,15 @@ public class TestS3GatewayMetrics {
   private HttpHeaders headers;
   private static final String ACL_MARKER = "acl";
   private static final String CONTENT = "0123456789";
+  private static final int LENGTH = CONTENT.length();
+  private static final byte[] BYTES = CONTENT.getBytes(StandardCharsets.UTF_8);
   private S3GatewayMetrics metrics;

   @BeforeEach
   public void setup() throws Exception {
     clientStub = new OzoneClientStub();
-    clientStub.getObjectStore().createS3Bucket(bucketName);
-    bucket = clientStub.getObjectStore().getS3Bucket(bucketName);
+    clientStub.getObjectStore().createS3Bucket(BUCKET);
+    bucket = clientStub.getObjectStore().getS3Bucket(BUCKET);
     bucket.createKey("file1", 0).close();

     headers = mock(HttpHeaders.class);
@@ -113,7 +115,7 @@ public void testHeadBucketSuccess() throws Exception {

     long oriMetric = metrics.getHeadBucketSuccess();

-    bucketEndpoint.head(bucketName);
+    bucketEndpoint.head(BUCKET);

     long curMetric = metrics.getHeadBucketSuccess();
     assertEquals(1L, curMetric - oriMetric);
@@ -134,7 +136,7 @@ public void testListBucketSuccess() throws Exception {
   public void testGetBucketSuccess() throws Exception {
     long oriMetric = metrics.getGetBucketSuccess();

-    bucketEndpoint.get(bucketName, 1000, 0).getEntity();
+    bucketEndpoint.get(BUCKET, 1000, 0).getEntity();

     long curMetric = metrics.getGetBucketSuccess();
     assertEquals(1L, curMetric - oriMetric);
@@ -169,7 +171,7 @@ public void testCreateBucketFailure() throws Exception {

     // Creating an error by trying to create a bucket that already exists
     OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.put(
-        bucketName, null));
+        BUCKET, null));
     assertEquals(HTTP_CONFLICT, e.getHttpCode());
     assertEquals(BUCKET_ALREADY_EXISTS.getCode(), e.getCode());

@@ -181,7 +183,7 @@ public void testDeleteBucketSuccess() throws Exception {

     long oriMetric = metrics.getDeleteBucketSuccess();

-    bucketEndpoint.delete(bucketName);
+    bucketEndpoint.delete(BUCKET);

     long curMetric = metrics.getDeleteBucketSuccess();
     assertEquals(1L, curMetric - oriMetric);
@@ -190,11 +192,11 @@ public void testDeleteBucketSuccess() throws Exception {
   @Test
   public void testDeleteBucketFailure() throws Exception {
     long oriMetric = metrics.getDeleteBucketFailure();
-    bucketEndpoint.delete(bucketName);
+    bucketEndpoint.delete(BUCKET);

     // Deleting a bucket that does not exist will result in delete failure
     OS3Exception e = assertThrows(OS3Exception.class, () ->
-        bucketEndpoint.delete(bucketName));
+        bucketEndpoint.delete(BUCKET));
     assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode());
     assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getErrorMessage(),
         e.getErrorMessage());
@@ -209,7 +211,7 @@ public void testGetAclSuccess() throws Exception {
     bucketEndpoint.getQueryParameters().add(QueryParams.ACL, ACL_MARKER);

     Response response =
-        bucketEndpoint.get(bucketName, 0, 0);
+        bucketEndpoint.get(BUCKET, 0, 0);
     long curMetric = metrics.getGetAclSuccess();
     assertEquals(HTTP_OK, response.getStatus());
     assertEquals(1L, curMetric - oriMetric);
@@ -268,11 +270,11 @@ public void testPutAclFailure() throws Exception {

   @Test
   public void testHeadKeySuccess() throws Exception {
-    bucket.createKey(keyName, 0).close();
+    bucket.createKey(KEY, 0).close();

     long oriMetric = metrics.getHeadKeySuccess();

-    keyEndpoint.head(bucketName, keyName);
+    keyEndpoint.head(BUCKET, KEY);

     long curMetric = metrics.getHeadKeySuccess();
     assertEquals(1L, curMetric - oriMetric);
@@ -282,7 +284,7 @@ public void testHeadKeySuccess() throws Exception {
   public void testHeadKeyFailure() throws Exception {
     long oriMetric = metrics.getHeadKeyFailure();

-    keyEndpoint.head(bucketName, "unknownKey");
+    keyEndpoint.head(BUCKET, "unknownKey");

     long curMetric = metrics.getHeadKeyFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -292,13 +294,7 @@ public void testCreateKeySuccess() throws Exception {

     long oriMetric = metrics.getCreateKeySuccess();

-    // Create an input stream
-    ByteArrayInputStream body =
-        new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
-    // Create the file
-    keyEndpoint.put(bucketName, keyName, CONTENT
-        .length(), 1, null, null, null, body);
-    body.close();
+    putObject(BUCKET, KEY);
     long curMetric = metrics.getCreateKeySuccess();
     assertEquals(1L, curMetric - oriMetric);
   }
@@ -308,9 +304,8 @@ public void testCreateKeyFailure() throws Exception {
     long oriMetric = metrics.getCreateKeyFailure();

     // Create the file in a bucket that does not exist
-    OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put(
-        "unknownBucket", keyName, CONTENT.length(), 1, null, null,
-        null, null));
+    OS3Exception e = assertThrows(OS3Exception.class,
+        () -> putObject("unknownBucket", KEY));
     assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode());
     long curMetric = metrics.getCreateKeyFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -320,8 +315,8 @@ public void testDeleteKeySuccess() throws Exception {
     long oriMetric = metrics.getDeleteKeySuccess();

-    bucket.createKey(keyName, 0).close();
-    keyEndpoint.delete(bucketName, keyName, null, null);
+    bucket.createKey(KEY, 0).close();
+    keyEndpoint.delete(BUCKET, KEY);
     long curMetric = metrics.getDeleteKeySuccess();
     assertEquals(1L, curMetric - oriMetric);
   }
@@ -329,8 +324,8 @@ public void testDeleteKeySuccess() throws Exception {
   @Test
   public void testDeleteKeyFailure() throws Exception {
     long oriMetric = metrics.getDeleteKeyFailure();
-    OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.delete(
-        "unknownBucket", keyName, null, null));
+    OS3Exception e = assertThrows(OS3Exception.class,
+        () -> keyEndpoint.delete("unknownBucket", KEY));
     assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode());
     long curMetric = metrics.getDeleteKeyFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -339,15 +334,10 @@ public void testDeleteKeyFailure() throws Exception {
   @Test
   public void testGetKeySuccess() throws Exception {
     long oriMetric = metrics.getGetKeySuccess();
+    putObject(BUCKET, KEY);

-    // Create an input stream
-    ByteArrayInputStream body =
-        new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
-    // Create the file
-    keyEndpoint.put(bucketName, keyName, CONTENT
-        .length(), 1, null, null, null, body);
     // GET the key from the bucket
-    Response response = keyEndpoint.get(bucketName, keyName, 0, null, 0, null, null);
+    Response response = keyEndpoint.get(BUCKET, KEY, 0, 0);
     StreamingOutput stream = (StreamingOutput) response.getEntity();
     stream.write(new ByteArrayOutputStream());
     long curMetric = metrics.getGetKeySuccess();
@@ -359,8 +349,8 @@ public void testGetKeyFailure() throws Exception {
     long oriMetric = metrics.getGetKeyFailure();

     // Fetching a non-existent key
-    OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.get(
-        bucketName, "unknownKey", 0, null, 0, null, null));
+    OS3Exception e = assertThrows(OS3Exception.class,
+        () -> keyEndpoint.get(BUCKET, "unknownKey", 0, 0));
     assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), e.getCode());
     long curMetric = metrics.getGetKeyFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -368,9 +358,8 @@ public void testGetKeyFailure() throws Exception {

   @Test
   public void testInitMultiPartUploadSuccess() throws Exception {
-
     long oriMetric = metrics.getInitMultiPartUploadSuccess();
-    keyEndpoint.initializeMultipartUpload(bucketName, keyName);
+    keyEndpoint.initializeMultipartUpload(BUCKET, KEY);
     long curMetric = metrics.getInitMultiPartUploadSuccess();
     assertEquals(1L, curMetric - oriMetric);
   }
@@ -378,8 +367,8 @@ public void testInitMultiPartUploadSuccess() throws Exception {
   @Test
   public void testInitMultiPartUploadFailure() throws Exception {
     long oriMetric = metrics.getInitMultiPartUploadFailure();
-    OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint
-        .initializeMultipartUpload("unknownBucket", keyName));
+    OS3Exception e = assertThrows(OS3Exception.class,
+        () -> keyEndpoint.initializeMultipartUpload("unknownBucket", KEY));
     assertEquals(S3ErrorTable.NO_SUCH_BUCKET.getCode(), e.getCode());
     long curMetric = metrics.getInitMultiPartUploadFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -389,12 +378,13 @@ public void testInitMultiPartUploadFailure() throws Exception {
   public void testAbortMultiPartUploadSuccess() throws Exception {

     // Initiate the Upload and fetch the upload ID
-    String uploadID = initiateMultipartUpload(bucketName, keyName);
+    String uploadID = initiateMultipartUpload(BUCKET, KEY);

     long oriMetric = metrics.getAbortMultiPartUploadSuccess();

     // Abort the Upload Successfully by deleting the key using the Upload-Id
-    keyEndpoint.delete(bucketName, keyName, uploadID, null);
+    keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID);
+    keyEndpoint.delete(BUCKET, KEY);

     long curMetric = metrics.getAbortMultiPartUploadSuccess();
     assertEquals(1L, curMetric - oriMetric);
@@ -405,8 +395,9 @@ public void testAbortMultiPartUploadFailure() throws Exception {
     long oriMetric = metrics.getAbortMultiPartUploadFailure();

     // Fail the Abort Method by providing wrong uploadID
-    OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.delete(
-        bucketName, keyName, "wrongId", null));
+    keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "wrongId");
+    OS3Exception e = assertThrows(OS3Exception.class,
+        () -> keyEndpoint.delete(BUCKET, KEY));
     assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode());
     long curMetric = metrics.getAbortMultiPartUploadFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -416,14 +407,13 @@ public void testCompleteMultiPartUploadSuccess() throws Exception {

     // Initiate the Upload and fetch the upload ID
-    String uploadID = initiateMultipartUpload(bucketName, keyName);
+    String uploadID = initiateMultipartUpload(BUCKET, KEY);

     long oriMetric = metrics.getCompleteMultiPartUploadSuccess();

     // complete multipart upload
-    CompleteMultipartUploadRequest completeMultipartUploadRequest = new
-        CompleteMultipartUploadRequest();
-    Response response = keyEndpoint.completeMultipartUpload(bucketName, keyName,
-        uploadID, completeMultipartUploadRequest);
+    keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID);
+    Response response = keyEndpoint.completeMultipartUpload(BUCKET, KEY,
+        new CompleteMultipartUploadRequest());
     long curMetric = metrics.getCompleteMultiPartUploadSuccess();
     assertEquals(200, response.getStatus());
     assertEquals(1L, curMetric - oriMetric);
@@ -432,11 +422,9 @@ public void testCompleteMultiPartUploadSuccess() throws Exception {
   @Test
   public void testCompleteMultiPartUploadFailure() throws Exception {
     long oriMetric = metrics.getCompleteMultiPartUploadFailure();
-    CompleteMultipartUploadRequest completeMultipartUploadRequestNew = new
-        CompleteMultipartUploadRequest();
-    OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint
-        .completeMultipartUpload(bucketName, "key2", "random",
-            completeMultipartUploadRequestNew));
+    keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "random");
+    OS3Exception e = assertThrows(OS3Exception.class,
+        () -> keyEndpoint.completeMultipartUpload(BUCKET, "key2", new CompleteMultipartUploadRequest()));
     assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode());
     long curMetric = metrics.getCompleteMultiPartUploadFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -446,13 +434,11 @@ public void testCompleteMultiPartUploadFailure() throws Exception {
   public void testCreateMultipartKeySuccess() throws Exception {

     // Initiate the Upload and fetch the upload ID
-    String uploadID = initiateMultipartUpload(bucketName, keyName);
+    String uploadID = initiateMultipartUpload(BUCKET, KEY);

     long oriMetric = metrics.getCreateMultipartKeySuccess();
-    ByteArrayInputStream body =
-        new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
-    keyEndpoint.put(bucketName, keyName, CONTENT.length(),
-        1, uploadID, null, null, body);
+    keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID);
+    putObject(BUCKET, KEY);
     long curMetric = metrics.getCreateMultipartKeySuccess();
     assertEquals(1L, curMetric - oriMetric);
   }
@@ -460,8 +446,9 @@ public void testCreateMultipartKeySuccess() throws Exception {
   @Test
   public void testCreateMultipartKeyFailure() throws Exception {
     long oriMetric = metrics.getCreateMultipartKeyFailure();
-    OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put(
-        bucketName, keyName, CONTENT.length(), 1, "randomId", null, null, null));
+    keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "randomId");
+    OS3Exception e = assertThrows(OS3Exception.class,
+        () -> keyEndpoint.put(BUCKET, KEY, LENGTH, 1, null));
     assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode());
     long curMetric = metrics.getCreateMultipartKeyFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -472,11 +459,11 @@ public void testListPartsSuccess() throws Exception {
     long oriMetric = metrics.getListPartsSuccess();

     // Initiate the Upload and fetch the upload ID
-    String uploadID = initiateMultipartUpload(bucketName, keyName);
+    String uploadID = initiateMultipartUpload(BUCKET, KEY);

     // Listing out the parts by providing the uploadID
-    keyEndpoint.get(bucketName, keyName, 0,
-        uploadID, 3, null, null);
+    keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, uploadID);
+    keyEndpoint.get(BUCKET, KEY, 0, 3);
     long curMetric = metrics.getListPartsSuccess();
     assertEquals(1L, curMetric - oriMetric);
   }
@@ -486,8 +473,9 @@ public void testListPartsFailure() throws Exception {
     long oriMetric = metrics.getListPartsFailure();

     // Listing out the parts by providing the uploadID after aborting
-    OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.get(
-        bucketName, keyName, 0, "wrong_id", 3, null, null));
+    keyEndpoint.getQueryParameters().putSingle(S3Consts.QueryParams.UPLOAD_ID, "wrong_id");
+    OS3Exception e = assertThrows(OS3Exception.class,
+        () -> keyEndpoint.get(BUCKET, KEY, 0, 3));
     assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), e.getCode());
     long curMetric = metrics.getListPartsFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -504,18 +492,13 @@ public void testCopyObject() throws Exception {

     // Test for Success of CopyObjectSuccess Metric
     long oriMetric = metrics.getCopyObjectSuccess();

-    ByteArrayInputStream body =
-        new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
-
-    keyEndpoint.put(bucketName, keyName,
-        CONTENT.length(), 1, null, null, null, body);
+    putObject(BUCKET, KEY);

     // Add copy header, and then call put
     when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn(
-        bucketName + "/" + urlEncode(keyName));
+        BUCKET + "/" + urlEncode(KEY));
+    putObject(destBucket, destKey);

-    keyEndpoint.put(destBucket, destKey, CONTENT.length(), 1,
-        null, null, null, body);
     long curMetric = metrics.getCopyObjectSuccess();
     assertEquals(1L, curMetric - oriMetric);
@@ -523,9 +506,7 @@ public void testCopyObject() throws Exception {
     oriMetric = metrics.getCopyObjectFailure();
     // source and dest same
     when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("");
-    OS3Exception e = assertThrows(OS3Exception.class, () -> keyEndpoint.put(
-        bucketName, keyName, CONTENT.length(), 1, null, null, null, body),
-        "Test for CopyObjectMetric failed");
+    OS3Exception e = assertThrows(OS3Exception.class, () -> putObject(BUCKET, KEY));
     assertThat(e.getErrorMessage()).contains("This copy request is illegal");
     curMetric = metrics.getCopyObjectFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -535,15 +516,11 @@ public void testCopyObject() throws Exception {
   public void testPutObjectTaggingSuccess() throws Exception {
     long oriMetric = metrics.getPutObjectTaggingSuccess();

-    ByteArrayInputStream body =
-        new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
-    // Create the file
-    keyEndpoint.put(bucketName, keyName, CONTENT
-        .length(), 1, null, null, null, body);
-    body.close();
+    putObject(BUCKET, KEY);

     // Put object tagging
-    keyEndpoint.put(bucketName, keyName, 0, 1, null, "", null, getPutTaggingBody());
+    keyEndpoint.getQueryParameters().putSingle(QueryParams.TAGGING, "");
+    keyEndpoint.put(BUCKET, KEY, 0, 1, getPutTaggingBody());

     long curMetric = metrics.getPutObjectTaggingSuccess();
     assertEquals(1L, curMetric - oriMetric);
@@ -554,10 +531,9 @@ public void testPutObjectTaggingFailure() throws Exception {
     long oriMetric = metrics.getPutObjectTaggingFailure();

     // Put object tagging for nonexistent key
-    OS3Exception ex = assertThrows(OS3Exception.class, () ->
-        keyEndpoint.put(bucketName, "nonexistent", 0, 1, null, "",
-            null, getPutTaggingBody())
-    );
+    keyEndpoint.getQueryParameters().putSingle(QueryParams.TAGGING, "");
+    OS3Exception ex = assertThrows(OS3Exception.class,
+        () -> keyEndpoint.put(BUCKET, "nonexistent", 0, 1, getPutTaggingBody()));
     assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode());

     long curMetric = metrics.getPutObjectTaggingFailure();
@@ -569,18 +545,14 @@ public void testGetObjectTaggingSuccess() throws Exception {
     long oriMetric = metrics.getGetObjectTaggingSuccess();

     // Create the file
-    ByteArrayInputStream body =
-        new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
-    keyEndpoint.put(bucketName, keyName, CONTENT
-        .length(), 1, null, null, null, body);
-    body.close();
+    putObject(BUCKET, KEY);

     // Put object tagging
-    keyEndpoint.put(bucketName, keyName, 0, 1, null, "", null, getPutTaggingBody());
+    keyEndpoint.getQueryParameters().putSingle(QueryParams.TAGGING, "");
+    keyEndpoint.put(BUCKET, KEY, 0, 1, getPutTaggingBody());

     // Get object tagging
-    keyEndpoint.get(bucketName, keyName, 0,
-        null, 0, null, "");
+    keyEndpoint.get(BUCKET, KEY, 0, 0);

     long curMetric = metrics.getGetObjectTaggingSuccess();
     assertEquals(1L, curMetric - oriMetric);
@@ -591,9 +563,9 @@ public void testGetObjectTaggingFailure() throws Exception {
     long oriMetric = metrics.getGetObjectTaggingFailure();

     // Get object tagging for nonexistent key
-    OS3Exception ex = assertThrows(OS3Exception.class, () ->
-        keyEndpoint.get(bucketName, "nonexistent", 0, null,
-            0, null, ""));
+    keyEndpoint.getQueryParameters().putSingle(QueryParams.TAGGING, "");
+    OS3Exception ex = assertThrows(OS3Exception.class,
+        () -> keyEndpoint.get(BUCKET, "nonexistent", 0, 0));
     assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode());
     long curMetric = metrics.getGetObjectTaggingFailure();
     assertEquals(1L, curMetric - oriMetric);
@@ -604,17 +576,14 @@ public void testDeleteObjectTaggingSuccess() throws Exception {
     long oriMetric = metrics.getDeleteObjectTaggingSuccess();

     // Create the file
-    ByteArrayInputStream body =
-        new ByteArrayInputStream(CONTENT.getBytes(UTF_8));
-    keyEndpoint.put(bucketName, keyName, CONTENT
-        .length(), 1, null, null, null, body);
-    body.close();
+    putObject(BUCKET, KEY);

     // Put object tagging
-    keyEndpoint.put(bucketName, keyName, 0, 1, null, "", null, getPutTaggingBody());
+    keyEndpoint.getQueryParameters().putSingle(QueryParams.TAGGING, "");
+    keyEndpoint.put(BUCKET, KEY, 0, 1, getPutTaggingBody());

     // Delete object tagging
-    keyEndpoint.delete(bucketName, keyName, null, "");
+    keyEndpoint.delete(BUCKET, KEY);

     long curMetric = metrics.getDeleteObjectTaggingSuccess();
     assertEquals(1L, curMetric - oriMetric);
@@ -625,19 +594,26 @@ public void testDeleteObjectTaggingFailure() throws Exception {
     long oriMetric = metrics.getDeleteObjectTaggingFailure();

     // Delete object tagging for nonexistent key
-    OS3Exception ex = assertThrows(OS3Exception.class, () ->
-        keyEndpoint.delete(bucketName, "nonexistent", null, ""));
+    keyEndpoint.getQueryParameters().putSingle(QueryParams.TAGGING, "");
+    OS3Exception ex = assertThrows(OS3Exception.class,
+        () -> keyEndpoint.delete(BUCKET, "nonexistent"));
     assertEquals(S3ErrorTable.NO_SUCH_KEY.getCode(), ex.getCode());

     long curMetric = metrics.getDeleteObjectTaggingFailure();
     assertEquals(1L, curMetric - oriMetric);
   }

-  private String initiateMultipartUpload(String bktName, String key)
+  private void putObject(String bucketName, String key) throws IOException, OS3Exception {
+    try (InputStream body = new ByteArrayInputStream(BYTES)) {
+      keyEndpoint.put(bucketName, key, LENGTH, 1, body);
+    }
+  }
+
+  private String initiateMultipartUpload(String bucketName, String key)
       throws IOException, OS3Exception {
     // Initiate the Upload
     Response response =
-        keyEndpoint.initializeMultipartUpload(bktName, key);
+        keyEndpoint.initializeMultipartUpload(bucketName, key);
     MultipartUploadInitiateResponse multipartUploadInitiateResponse =
         (MultipartUploadInitiateResponse) response.getEntity();
     if (response.getStatus() == 200) {

From 6269ead19009be4672adee2792faae3708b0c4fd Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila"
Date: Sat, 20 Dec 2025 11:38:46 +0100
Subject: [PATCH 2/2] set default value for upload ID in completeMultipartUpload

---
 .../org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java   | 5 +++++
 .../org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java | 2 +-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
index 99d7adc3042f..86437f329b98 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java
@@ -118,6 +118,11 @@ protected String getQueryParam(String key) {
     return getQueryParameters().getFirst(key);
   }

+  protected String getQueryParam(String key, String defaultValue) {
+    final String value = getQueryParam(key);
+    return value != null ? value : defaultValue;
+  }
+
   public MultivaluedMap getQueryParameters() {
     return context.getUriInfo().getQueryParameters();
   }
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index 7316aa2a1b99..6308c98853c7 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -866,7 +866,7 @@ public Response completeMultipartUpload(
       @PathParam(PATH) String key,
       CompleteMultipartUploadRequest multipartUploadRequest
   ) throws IOException, OS3Exception {
-    final String uploadID = getQueryParam(QueryParams.UPLOAD_ID);
+    final String uploadID = getQueryParam(QueryParams.UPLOAD_ID, "");
     long startNanos = Time.monotonicNowNanos();
     S3GAction s3GAction = S3GAction.COMPLETE_MULTIPART_UPLOAD;
     OzoneVolume volume = getVolume();