From bb3704c682fceadeb343c8a4e33e6b69d993487f Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Thu, 30 Mar 2023 13:05:03 +0530 Subject: [PATCH 01/21] HDDS-8214. Recon - OM DB Insights - Container Level Info. --- .../ozone/recon/api/OMDBInsightEndpoint.java | 71 ++++++++++ .../ozone/recon/api/types/KeyEntityInfo.java | 109 +++++++++++++++ .../api/types/OpenKeyInsightInfoResp.java | 101 ++++++++++++++ .../spi/OzoneManagerServiceProvider.java | 7 + .../impl/OzoneManagerServiceProviderImpl.java | 125 ++++++++++++++++++ 5 files changed, 413 insertions(+) create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfo.java create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/OpenKeyInsightInfoResp.java diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java new file mode 100644 index 00000000000..61f1036c4ff --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.api; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.ozone.recon.api.types.OpenKeyInsightInfoResp; +import org.apache.hadoop.ozone.recon.spi.OzoneManagerServiceProvider; + +import javax.inject.Inject; +import javax.ws.rs.DefaultValue; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; + +import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY; + +/** + * Endpoint to get following key level info under OM DB Insight page of Recon. + * 1. Number of open keys for Legacy/OBS buckets. + * 2. Number of open files for FSO buckets. + * 3. Amount of data mapped to open keys and open files. + * 4. Number of pending delete keys in legacy/OBS buckets and pending + * delete files in FSO buckets. + * 5. Amount of data mapped to pending delete keys in legacy/OBS buckets and + * pending delete files in FSO buckets. 
+ */ +@Path("/omdbinsight") +@Produces(MediaType.APPLICATION_JSON) +public class OMDBInsightEndpoint { + + private OzoneManagerServiceProvider ozoneManagerServiceProvider; + + @Inject + public OMDBInsightEndpoint( + OzoneManagerServiceProvider ozoneManagerServiceProvider) { + this.ozoneManagerServiceProvider = ozoneManagerServiceProvider; + } + + @GET + @Path("openkeyinfo") + public Response getOpenKeyInfo( + @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) + int limit, + @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) + String prevKeyPrefix) { + OpenKeyInsightInfoResp openKeyInsightInfoResp = + ozoneManagerServiceProvider.retrieveOpenKeyInfo(limit, prevKeyPrefix); + return Response.ok(openKeyInsightInfoResp).build(); + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfo.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfo.java new file mode 100644 index 00000000000..cf02c503f41 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfo.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.api.types; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.hadoop.hdds.client.ReplicationConfig; + +import java.time.Instant; + +/** + * POJO object wrapper for metadata of a given key/file. + */ +public class KeyEntityInfo { + + /** This is key table key of rocksDB and will help UI to implement pagination + * where UI will use the last record key to send in API as preKeyPrefix. */ + @JsonProperty("key") + private String key; + + /** Path of a key/file. */ + @JsonProperty("path") + private String path; + + @JsonProperty("inStateSince") + private long inStateSince; + + @JsonProperty("size") + private long size; + + @JsonProperty("replicatedSize") + private long replicatedSize; + + @JsonProperty("replicationInfo") + private ReplicationConfig replicationConfig; + + public KeyEntityInfo() { + key = ""; + path = ""; + inStateSince = Instant.now().toEpochMilli(); + size = 0L; + replicatedSize = 0L; + replicationConfig = null; + } + + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + + public String getPath() { + return path; + } + + public void setPath(String path) { + this.path = path; + } + + public long getInStateSince() { + return inStateSince; + } + + public void setInStateSince(long inStateSince) { + this.inStateSince = inStateSince; + } + + public long getSize() { + return size; + } + + public void setSize(long size) { + this.size = size; + } + + public long getReplicatedSize() { + return replicatedSize; + } + + public void setReplicatedSize(long replicatedSize) { + this.replicatedSize = replicatedSize; + } + + public 
ReplicationConfig getReplicationConfig() { + return replicationConfig; + } + + public void setReplicationConfig( + ReplicationConfig replicationConfig) { + this.replicationConfig = replicationConfig; + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/OpenKeyInsightInfoResp.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/OpenKeyInsightInfoResp.java new file mode 100644 index 00000000000..be61c893ec7 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/OpenKeyInsightInfoResp.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.api.types; + +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.ArrayList; +import java.util.List; + +/** + * HTTP Response wrapped for Open keys insights. + */ +public class OpenKeyInsightInfoResp { + + /** Amount of data mapped to all open keys and files in + * a cluster across all DNs. */ + @JsonProperty("replicatedTotal") + private long replicatedTotal; + + /** Amount of data mapped to all open keys and files on a single DN. */ + @JsonProperty("unreplicatedTotal") + private long unreplicatedTotal; + + /** List of all open non-fso keys. */ + @JsonProperty("non-fso") + private List nonFSOKeyInfoList; + + /** List of all open fso keys. */ + @JsonProperty("non-fso") + private List fsoKeyInfoList; + + /** Path status. 
*/ + @JsonProperty("status") + private ResponseStatus responseCode; + + public OpenKeyInsightInfoResp() { + responseCode = ResponseStatus.OK; + replicatedTotal = 0L; + unreplicatedTotal = 0L; + nonFSOKeyInfoList = new ArrayList<>(); + fsoKeyInfoList = new ArrayList<>(); + } + + public long getReplicatedTotal() { + return replicatedTotal; + } + + public void setReplicatedTotal(long replicatedTotal) { + this.replicatedTotal = replicatedTotal; + } + + public long getUnreplicatedTotal() { + return unreplicatedTotal; + } + + public void setUnreplicatedTotal(long unreplicatedTotal) { + this.unreplicatedTotal = unreplicatedTotal; + } + + public List getNonFSOKeyInfoList() { + return nonFSOKeyInfoList; + } + + public void setNonFSOKeyInfoList( + List nonFSOKeyInfoList) { + this.nonFSOKeyInfoList = nonFSOKeyInfoList; + } + + public List getFsoKeyInfoList() { + return fsoKeyInfoList; + } + + public void setFsoKeyInfoList( + List fsoKeyInfoList) { + this.fsoKeyInfoList = fsoKeyInfoList; + } + + public ResponseStatus getResponseCode() { + return responseCode; + } + + public void setResponseCode(ResponseStatus responseCode) { + this.responseCode = responseCode; + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java index 3f4fff5f412..505542da066 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.recon.spi; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.recon.api.types.OpenKeyInsightInfoResp; /** * Interface to access OM endpoints. 
@@ -45,4 +46,10 @@ public interface OzoneManagerServiceProvider { * @return whether the trigger happened or not */ boolean triggerSyncDataFromOMImmediately(); + + /** + * This method retrieves set of keys/files which are open. + * @return + */ + OpenKeyInsightInfoResp retrieveOpenKeyInfo(int limit, String prevKeyPrefix); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java index a3897d9a554..31c02360470 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java @@ -25,13 +25,17 @@ import java.io.InputStream; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.Arrays; import java.util.List; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.utils.db.RocksDatabase; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteBatch; import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteOptions; import org.apache.hadoop.hdfs.web.URLConnectionFactory; @@ -44,12 +48,17 @@ import org.apache.hadoop.hdds.utils.db.RocksDBCheckpoint; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.DBUpdates; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort.Type; import org.apache.hadoop.ozone.recon.ReconServerConfigKeys; import org.apache.hadoop.ozone.recon.ReconUtils; +import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; +import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo; +import org.apache.hadoop.ozone.recon.api.types.OpenKeyInsightInfoResp; import org.apache.hadoop.ozone.recon.metrics.OzoneManagerSyncMetrics; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.OzoneManagerServiceProvider; @@ -289,6 +298,122 @@ public boolean triggerSyncDataFromOMImmediately() { return false; } + /** + * This method retrieves set of keys/files which are open. + * + * @return the http json response wrapped in below format: + * { + * replicatedTotal: 13824, + * unreplicatedTotal: 4608, + * entities: [ + * { + * path: “/vol1/bucket1/key1”, + * keyState: “Open”, + * inStateSince: 1667564193026, + * size: 1024, + * replicatedSize: 3072, + * unreplicatedSize: 1024, + * replicationType: RATIS, + * replicationFactor: THREE + * }. + * { + * path: “/vol1/bucket1/key2”, + * keyState: “Open”, + * inStateSince: 1667564193026, + * size: 512, + * replicatedSize: 1536, + * unreplicatedSize: 512, + * replicationType: RATIS, + * replicationFactor: THREE + * }. + * { + * path: “/vol1/fso-bucket/dir1/file1”, + * keyState: “Open”, + * inStateSince: 1667564193026, + * size: 1024, + * replicatedSize: 3072, + * unreplicatedSize: 1024, + * replicationType: RATIS, + * replicationFactor: THREE + * }. 
+ * { + * path: “/vol1/fso-bucket/dir1/dir2/file2”, + * keyState: “Open”, + * inStateSince: 1667564193026, + * size: 2048, + * replicatedSize: 6144, + * unreplicatedSize: 2048, + * replicationType: RATIS, + * replicationFactor: THREE + * } + * ] + * } + */ + @Override + public OpenKeyInsightInfoResp retrieveOpenKeyInfo(int limit, + String prevKeyPrefix) { + OpenKeyInsightInfoResp openKeyInsightInfo = new OpenKeyInsightInfoResp(); + List nonFSOKeyInfoList = + openKeyInsightInfo.getNonFSOKeyInfoList(); + boolean isLegacyBucketLayout = true; + boolean recordsFetchedLimitReached = false; + List fsoKeyInfoList = openKeyInsightInfo.getFsoKeyInfoList(); + for (BucketLayout layout : Arrays.asList(BucketLayout.LEGACY, + BucketLayout.FILE_SYSTEM_OPTIMIZED)) { + isLegacyBucketLayout = (layout == BucketLayout.LEGACY); + Table openKeyTable = + omMetadataManager.getOpenKeyTable(layout); + try ( + TableIterator> + keyIter = openKeyTable.iterator()) { + boolean skipPrevKey = false; + String seekKey = prevKeyPrefix; + if (StringUtils.isNotBlank(prevKeyPrefix)) { + skipPrevKey = true; + Table.KeyValue seekKeyValue = + keyIter.seek(seekKey); + // check if RocksDB was able to seek correctly to the given key prefix + // if not, then return empty result + // In case of an empty prevKeyPrefix, all the keys are returned + if (seekKeyValue == null || + (StringUtils.isNotBlank(prevKeyPrefix) && + !seekKeyValue.getKey().equals(prevKeyPrefix))) { + return openKeyInsightInfo; + } + } + while (keyIter.hasNext()) { + Table.KeyValue kv = keyIter.next(); + String key = kv.getKey(); + OmKeyInfo omKeyInfo = kv.getValue(); + // skip the prev key if prev key is present + if (skipPrevKey && key.equals(prevKeyPrefix)) { + continue; + } + KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); + keyEntityInfo.setKey(key); + keyEntityInfo.setPath(omKeyInfo.getKeyName()); + keyEntityInfo.setInStateSince(omKeyInfo.getCreationTime()); + keyEntityInfo.setSize(omKeyInfo.getDataSize()); + 
keyEntityInfo.setReplicatedSize(omKeyInfo.getReplicatedSize()); + keyEntityInfo.setReplicationConfig(omKeyInfo.getReplicationConfig()); + boolean added = + isLegacyBucketLayout ? nonFSOKeyInfoList.add(keyEntityInfo) : + fsoKeyInfoList.add(keyEntityInfo); + if ((nonFSOKeyInfoList.size() + fsoKeyInfoList.size()) == limit) { + recordsFetchedLimitReached = true; + break; + } + } + } catch (IOException e) { + throw new RuntimeException(e); + } + if (recordsFetchedLimitReached) { + break; + } + } + return openKeyInsightInfo; + } + @Override public void stop() throws Exception { LOG.info("Stopping Ozone Manager Service Provider."); From 823ead5fab1e904de9d1816382b833fd90afdf89 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Tue, 4 Apr 2023 14:47:20 +0530 Subject: [PATCH 02/21] HDDS-8214. Recon - OM DB Insights - Key Level Info. --- .../ozone/recon/api/OMDBInsightEndpoint.java | 275 +++++++++++++++++- ...tInfoResp.java => KeyInsightInfoResp.java} | 50 +++- .../spi/OzoneManagerServiceProvider.java | 6 - .../impl/OzoneManagerServiceProviderImpl.java | 125 -------- .../recon/api/TestOmDBInsightEndPoint.java | 132 +++++++++ 5 files changed, 441 insertions(+), 147 deletions(-) rename hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/{OpenKeyInsightInfoResp.java => KeyInsightInfoResp.java} (63%) create mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 61f1036c4ff..377d84a3bc3 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -19,8 +19,14 @@ package org.apache.hadoop.ozone.recon.api; import org.apache.commons.lang3.StringUtils; -import 
org.apache.hadoop.ozone.recon.api.types.OpenKeyInsightInfoResp; -import org.apache.hadoop.ozone.recon.spi.OzoneManagerServiceProvider; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo; +import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResp; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import javax.inject.Inject; import javax.ws.rs.DefaultValue; @@ -30,6 +36,9 @@ import javax.ws.rs.QueryParam; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT; @@ -49,14 +58,65 @@ @Produces(MediaType.APPLICATION_JSON) public class OMDBInsightEndpoint { - private OzoneManagerServiceProvider ozoneManagerServiceProvider; + private final ReconOMMetadataManager omMetadataManager; @Inject public OMDBInsightEndpoint( - OzoneManagerServiceProvider ozoneManagerServiceProvider) { - this.ozoneManagerServiceProvider = ozoneManagerServiceProvider; + ReconOMMetadataManager omMetadataManager) { + this.omMetadataManager = omMetadataManager; } + /** + * This method retrieves set of keys/files which are open. + * + * @return the http json response wrapped in below format: + * { + * replicatedTotal: 13824, + * unreplicatedTotal: 4608, + * entities: [ + * { + * path: “/vol1/bucket1/key1”, + * keyState: “Open”, + * inStateSince: 1667564193026, + * size: 1024, + * replicatedSize: 3072, + * unreplicatedSize: 1024, + * replicationType: RATIS, + * replicationFactor: THREE + * }. 
+ * { + * path: “/vol1/bucket1/key2”, + * keyState: “Open”, + * inStateSince: 1667564193026, + * size: 512, + * replicatedSize: 1536, + * unreplicatedSize: 512, + * replicationType: RATIS, + * replicationFactor: THREE + * }. + * { + * path: “/vol1/fso-bucket/dir1/file1”, + * keyState: “Open”, + * inStateSince: 1667564193026, + * size: 1024, + * replicatedSize: 3072, + * unreplicatedSize: 1024, + * replicationType: RATIS, + * replicationFactor: THREE + * }. + * { + * path: “/vol1/fso-bucket/dir1/dir2/file2”, + * keyState: “Open”, + * inStateSince: 1667564193026, + * size: 2048, + * replicatedSize: 6144, + * unreplicatedSize: 2048, + * replicationType: RATIS, + * replicationFactor: THREE + * } + * ] + * } + */ @GET @Path("openkeyinfo") public Response getOpenKeyInfo( @@ -64,8 +124,207 @@ public Response getOpenKeyInfo( int limit, @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) String prevKeyPrefix) { - OpenKeyInsightInfoResp openKeyInsightInfoResp = - ozoneManagerServiceProvider.retrieveOpenKeyInfo(limit, prevKeyPrefix); - return Response.ok(openKeyInsightInfoResp).build(); + KeyInsightInfoResp openKeyInsightInfo = new KeyInsightInfoResp(); + List nonFSOKeyInfoList = + openKeyInsightInfo.getNonFSOKeyInfoList(); + boolean isLegacyBucketLayout = true; + boolean recordsFetchedLimitReached = false; + List fsoKeyInfoList = openKeyInsightInfo.getFsoKeyInfoList(); + for (BucketLayout layout : Arrays.asList(BucketLayout.LEGACY, + BucketLayout.FILE_SYSTEM_OPTIMIZED)) { + isLegacyBucketLayout = (layout == BucketLayout.LEGACY); + Table openKeyTable = + omMetadataManager.getOpenKeyTable(layout); + try ( + TableIterator> + keyIter = openKeyTable.iterator()) { + boolean skipPrevKey = false; + String seekKey = prevKeyPrefix; + if (StringUtils.isNotBlank(prevKeyPrefix)) { + skipPrevKey = true; + Table.KeyValue seekKeyValue = + keyIter.seek(seekKey); + // check if RocksDB was able to seek correctly to the given key prefix + // if not, then return empty result + // 
In case of an empty prevKeyPrefix, all the keys are returned + if (seekKeyValue == null || + (StringUtils.isNotBlank(prevKeyPrefix) && + !seekKeyValue.getKey().equals(prevKeyPrefix))) { + return Response.ok(openKeyInsightInfo).build(); + } + } + while (keyIter.hasNext()) { + Table.KeyValue kv = keyIter.next(); + String key = kv.getKey(); + OmKeyInfo omKeyInfo = kv.getValue(); + // skip the prev key if prev key is present + if (skipPrevKey && key.equals(prevKeyPrefix)) { + continue; + } + KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); + keyEntityInfo.setKey(key); + keyEntityInfo.setPath(omKeyInfo.getKeyName()); + keyEntityInfo.setInStateSince(omKeyInfo.getCreationTime()); + keyEntityInfo.setSize(omKeyInfo.getDataSize()); + keyEntityInfo.setReplicatedSize(omKeyInfo.getReplicatedSize()); + keyEntityInfo.setReplicationConfig(omKeyInfo.getReplicationConfig()); + openKeyInsightInfo.setUnreplicatedTotal( + openKeyInsightInfo.getUnreplicatedTotal() + + keyEntityInfo.getSize()); + openKeyInsightInfo.setReplicatedTotal( + openKeyInsightInfo.getReplicatedTotal() + + keyEntityInfo.getReplicatedSize()); + boolean added = + isLegacyBucketLayout ? nonFSOKeyInfoList.add(keyEntityInfo) : + fsoKeyInfoList.add(keyEntityInfo); + if ((nonFSOKeyInfoList.size() + fsoKeyInfoList.size()) == limit) { + recordsFetchedLimitReached = true; + break; + } + } + } catch (IOException e) { + throw new RuntimeException(e); + } + if (recordsFetchedLimitReached) { + break; + } + } + return Response.ok(openKeyInsightInfo).build(); + } + + /** This method retrieves set of keys/files/dirs pending for deletion. 
*/ + @GET + @Path("deletekeyinfo") + public Response getDeletedKeyInfo( + @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) + int limit, + @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) + String prevKeyPrefix) { + KeyInsightInfoResp deletedKeyAndDirInsightInfo = new KeyInsightInfoResp(); + KeyInsightInfoResp pendingForDeletionKeyInfo = + getPendingForDeletionKeyInfo(limit, prevKeyPrefix, + deletedKeyAndDirInsightInfo); + return Response.ok(getPendingForDeletionDirInfo(limit, prevKeyPrefix, + pendingForDeletionKeyInfo)).build(); + } + + private KeyInsightInfoResp getPendingForDeletionDirInfo( + int limit, String prevKeyPrefix, + KeyInsightInfoResp pendingForDeletionKeyInfo) { + + List deletedDirInfoList = + pendingForDeletionKeyInfo.getDeletedDirInfoList(); + + Table deletedDirTable = + omMetadataManager.getDeletedDirTable(); + try ( + TableIterator> + keyIter = deletedDirTable.iterator()) { + boolean skipPrevKey = false; + String seekKey = prevKeyPrefix; + if (StringUtils.isNotBlank(prevKeyPrefix)) { + skipPrevKey = true; + Table.KeyValue seekKeyValue = + keyIter.seek(seekKey); + // check if RocksDB was able to seek correctly to the given key prefix + // if not, then return empty result + // In case of an empty prevKeyPrefix, all the keys are returned + if (seekKeyValue == null || + (StringUtils.isNotBlank(prevKeyPrefix) && + !seekKeyValue.getKey().equals(prevKeyPrefix))) { + return pendingForDeletionKeyInfo; + } + } + while (keyIter.hasNext()) { + Table.KeyValue kv = keyIter.next(); + String key = kv.getKey(); + OmKeyInfo omKeyInfo = kv.getValue(); + // skip the prev key if prev key is present + if (skipPrevKey && key.equals(prevKeyPrefix)) { + continue; + } + KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); + keyEntityInfo.setKey(key); + keyEntityInfo.setPath(omKeyInfo.getKeyName()); + keyEntityInfo.setInStateSince(omKeyInfo.getCreationTime()); + keyEntityInfo.setSize(omKeyInfo.getDataSize()); + 
keyEntityInfo.setReplicatedSize(omKeyInfo.getReplicatedSize()); + keyEntityInfo.setReplicationConfig(omKeyInfo.getReplicationConfig()); + pendingForDeletionKeyInfo.setUnreplicatedTotal( + pendingForDeletionKeyInfo.getUnreplicatedTotal() + + keyEntityInfo.getSize()); + pendingForDeletionKeyInfo.setReplicatedTotal( + pendingForDeletionKeyInfo.getReplicatedTotal() + + keyEntityInfo.getReplicatedSize()); + deletedDirInfoList.add(keyEntityInfo); + if (deletedDirInfoList.size() == limit) { + break; + } + } + } catch (IOException e) { + throw new RuntimeException(e); + } + return pendingForDeletionKeyInfo; + } + + private KeyInsightInfoResp getPendingForDeletionKeyInfo( + int limit, + String prevKeyPrefix, + KeyInsightInfoResp deletedKeyAndDirInsightInfo) { + List repeatedOmKeyInfoList = + deletedKeyAndDirInsightInfo.getRepeatedOmKeyInfoList(); + Table deletedTable = + omMetadataManager.getDeletedTable(); + try ( + TableIterator> + keyIter = deletedTable.iterator()) { + boolean skipPrevKey = false; + String seekKey = prevKeyPrefix; + if (StringUtils.isNotBlank(prevKeyPrefix)) { + skipPrevKey = true; + Table.KeyValue seekKeyValue = + keyIter.seek(seekKey); + // check if RocksDB was able to seek correctly to the given key prefix + // if not, then return empty result + // In case of an empty prevKeyPrefix, all the keys are returned + if (seekKeyValue == null || + (StringUtils.isNotBlank(prevKeyPrefix) && + !seekKeyValue.getKey().equals(prevKeyPrefix))) { + return deletedKeyAndDirInsightInfo; + } + } + while (keyIter.hasNext()) { + Table.KeyValue kv = keyIter.next(); + String key = kv.getKey(); + RepeatedOmKeyInfo repeatedOmKeyInfo = kv.getValue(); + // skip the prev key if prev key is present + if (skipPrevKey && key.equals(prevKeyPrefix)) { + continue; + } + updateReplicatedAndUnReplicatedTotal(deletedKeyAndDirInsightInfo, + repeatedOmKeyInfo); + repeatedOmKeyInfoList.add(repeatedOmKeyInfo); + if ((repeatedOmKeyInfoList.size()) == limit) { + break; + } + } + } catch 
(IOException e) { + throw new RuntimeException(e); + } + return deletedKeyAndDirInsightInfo; + } + + private void updateReplicatedAndUnReplicatedTotal( + KeyInsightInfoResp deletedKeyAndDirInsightInfo, + RepeatedOmKeyInfo repeatedOmKeyInfo) { + repeatedOmKeyInfo.getOmKeyInfoList().forEach(omKeyInfo -> { + deletedKeyAndDirInsightInfo.setUnreplicatedTotal( + deletedKeyAndDirInsightInfo.getUnreplicatedTotal() + + omKeyInfo.getDataSize()); + deletedKeyAndDirInsightInfo.setReplicatedTotal( + deletedKeyAndDirInsightInfo.getReplicatedTotal() + + omKeyInfo.getReplicatedSize()); + }); } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/OpenKeyInsightInfoResp.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResp.java similarity index 63% rename from hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/OpenKeyInsightInfoResp.java rename to hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResp.java index be61c893ec7..2d65668499c 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/OpenKeyInsightInfoResp.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResp.java @@ -18,43 +18,58 @@ package org.apache.hadoop.ozone.recon.api.types; +import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import java.util.ArrayList; import java.util.List; /** - * HTTP Response wrapped for Open keys insights. + * HTTP Response wrapped for keys insights. */ -public class OpenKeyInsightInfoResp { +public class KeyInsightInfoResp { - /** Amount of data mapped to all open keys and files in + /** Amount of data mapped to all keys and files in * a cluster across all DNs. 
*/ @JsonProperty("replicatedTotal") private long replicatedTotal; - /** Amount of data mapped to all open keys and files on a single DN. */ + /** Amount of data mapped to all keys and files on a single DN. */ @JsonProperty("unreplicatedTotal") private long unreplicatedTotal; - /** List of all open non-fso keys. */ + /** List of all non-fso keys. */ @JsonProperty("non-fso") + @JsonInclude(JsonInclude.Include.NON_EMPTY) private List nonFSOKeyInfoList; - /** List of all open fso keys. */ - @JsonProperty("non-fso") + /** List of all fso keys. */ + @JsonProperty("fso") + @JsonInclude(JsonInclude.Include.NON_EMPTY) private List fsoKeyInfoList; + /** List of all deleted and repeatedly deleted keys. */ + @JsonProperty("deletedkeyinfo") + @JsonInclude(JsonInclude.Include.NON_EMPTY) + private List repeatedOmKeyInfoList; + + @JsonProperty("deleteddirinfo") + @JsonInclude(JsonInclude.Include.NON_EMPTY) + private List deletedDirInfoList; + /** Path status. */ @JsonProperty("status") private ResponseStatus responseCode; - public OpenKeyInsightInfoResp() { + public KeyInsightInfoResp() { responseCode = ResponseStatus.OK; replicatedTotal = 0L; unreplicatedTotal = 0L; nonFSOKeyInfoList = new ArrayList<>(); fsoKeyInfoList = new ArrayList<>(); + repeatedOmKeyInfoList = new ArrayList<>(); + deletedDirInfoList = new ArrayList<>(); } public long getReplicatedTotal() { @@ -91,6 +106,24 @@ public void setFsoKeyInfoList( this.fsoKeyInfoList = fsoKeyInfoList; } + public List getRepeatedOmKeyInfoList() { + return repeatedOmKeyInfoList; + } + + public void setRepeatedOmKeyInfoList( + List repeatedOmKeyInfoList) { + this.repeatedOmKeyInfoList = repeatedOmKeyInfoList; + } + + public List getDeletedDirInfoList() { + return deletedDirInfoList; + } + + public void setDeletedDirInfoList( + List deletedDirInfoList) { + this.deletedDirInfoList = deletedDirInfoList; + } + public ResponseStatus getResponseCode() { return responseCode; } @@ -98,4 +131,5 @@ public ResponseStatus getResponseCode() { 
public void setResponseCode(ResponseStatus responseCode) { this.responseCode = responseCode; } + } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java index 505542da066..27b9d13a74b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.recon.spi; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.recon.api.types.OpenKeyInsightInfoResp; /** * Interface to access OM endpoints. @@ -47,9 +46,4 @@ public interface OzoneManagerServiceProvider { */ boolean triggerSyncDataFromOMImmediately(); - /** - * This method retrieves set of keys/files which are open. - * @return - */ - OpenKeyInsightInfoResp retrieveOpenKeyInfo(int limit, String prevKeyPrefix); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java index 31c02360470..a3897d9a554 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java @@ -25,17 +25,13 @@ import java.io.InputStream; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.Arrays; import java.util.List; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.utils.db.RocksDatabase; 
-import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteBatch; import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteOptions; import org.apache.hadoop.hdfs.web.URLConnectionFactory; @@ -48,17 +44,12 @@ import org.apache.hadoop.hdds.utils.db.RocksDBCheckpoint; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.DBUpdates; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort.Type; import org.apache.hadoop.ozone.recon.ReconServerConfigKeys; import org.apache.hadoop.ozone.recon.ReconUtils; -import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; -import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo; -import org.apache.hadoop.ozone.recon.api.types.OpenKeyInsightInfoResp; import org.apache.hadoop.ozone.recon.metrics.OzoneManagerSyncMetrics; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.spi.OzoneManagerServiceProvider; @@ -298,122 +289,6 @@ public boolean triggerSyncDataFromOMImmediately() { return false; } - /** - * This method retrieves set of keys/files which are open. - * - * @return the http json response wrapped in below format: - * { - * replicatedTotal: 13824, - * unreplicatedTotal: 4608, - * entities: [ - * { - * path: “/vol1/bucket1/key1”, - * keyState: “Open”, - * inStateSince: 1667564193026, - * size: 1024, - * replicatedSize: 3072, - * unreplicatedSize: 1024, - * replicationType: RATIS, - * replicationFactor: THREE - * }. 
- * { - * path: “/vol1/bucket1/key2”, - * keyState: “Open”, - * inStateSince: 1667564193026, - * size: 512, - * replicatedSize: 1536, - * unreplicatedSize: 512, - * replicationType: RATIS, - * replicationFactor: THREE - * }. - * { - * path: “/vol1/fso-bucket/dir1/file1”, - * keyState: “Open”, - * inStateSince: 1667564193026, - * size: 1024, - * replicatedSize: 3072, - * unreplicatedSize: 1024, - * replicationType: RATIS, - * replicationFactor: THREE - * }. - * { - * path: “/vol1/fso-bucket/dir1/dir2/file2”, - * keyState: “Open”, - * inStateSince: 1667564193026, - * size: 2048, - * replicatedSize: 6144, - * unreplicatedSize: 2048, - * replicationType: RATIS, - * replicationFactor: THREE - * } - * ] - * } - */ - @Override - public OpenKeyInsightInfoResp retrieveOpenKeyInfo(int limit, - String prevKeyPrefix) { - OpenKeyInsightInfoResp openKeyInsightInfo = new OpenKeyInsightInfoResp(); - List nonFSOKeyInfoList = - openKeyInsightInfo.getNonFSOKeyInfoList(); - boolean isLegacyBucketLayout = true; - boolean recordsFetchedLimitReached = false; - List fsoKeyInfoList = openKeyInsightInfo.getFsoKeyInfoList(); - for (BucketLayout layout : Arrays.asList(BucketLayout.LEGACY, - BucketLayout.FILE_SYSTEM_OPTIMIZED)) { - isLegacyBucketLayout = (layout == BucketLayout.LEGACY); - Table openKeyTable = - omMetadataManager.getOpenKeyTable(layout); - try ( - TableIterator> - keyIter = openKeyTable.iterator()) { - boolean skipPrevKey = false; - String seekKey = prevKeyPrefix; - if (StringUtils.isNotBlank(prevKeyPrefix)) { - skipPrevKey = true; - Table.KeyValue seekKeyValue = - keyIter.seek(seekKey); - // check if RocksDB was able to seek correctly to the given key prefix - // if not, then return empty result - // In case of an empty prevKeyPrefix, all the keys are returned - if (seekKeyValue == null || - (StringUtils.isNotBlank(prevKeyPrefix) && - !seekKeyValue.getKey().equals(prevKeyPrefix))) { - return openKeyInsightInfo; - } - } - while (keyIter.hasNext()) { - Table.KeyValue kv = 
keyIter.next(); - String key = kv.getKey(); - OmKeyInfo omKeyInfo = kv.getValue(); - // skip the prev key if prev key is present - if (skipPrevKey && key.equals(prevKeyPrefix)) { - continue; - } - KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); - keyEntityInfo.setKey(key); - keyEntityInfo.setPath(omKeyInfo.getKeyName()); - keyEntityInfo.setInStateSince(omKeyInfo.getCreationTime()); - keyEntityInfo.setSize(omKeyInfo.getDataSize()); - keyEntityInfo.setReplicatedSize(omKeyInfo.getReplicatedSize()); - keyEntityInfo.setReplicationConfig(omKeyInfo.getReplicationConfig()); - boolean added = - isLegacyBucketLayout ? nonFSOKeyInfoList.add(keyEntityInfo) : - fsoKeyInfoList.add(keyEntityInfo); - if ((nonFSOKeyInfoList.size() + fsoKeyInfoList.size()) == limit) { - recordsFetchedLimitReached = true; - break; - } - } - } catch (IOException e) { - throw new RuntimeException(e); - } - if (recordsFetchedLimitReached) { - break; - } - } - return openKeyInsightInfo; - } - @Override public void stop() throws Exception { LOG.info("Stopping Ozone Manager Service Provider."); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java new file mode 100644 index 00000000000..3cb2ac9a221 --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java @@ -0,0 +1,132 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.api; + +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResp; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.jupiter.api.Assertions; +import org.junit.rules.TemporaryFolder; + +import javax.ws.rs.core.Response; +import java.util.Random; + +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getBucketLayout; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; + +/** + * Unit test for OmDBInsightEndPoint. 
+ */ +public class TestOmDBInsightEndPoint { + + @Rule + public TemporaryFolder temporaryFolder = new TemporaryFolder(); + + private OMMetadataManager omMetadataManager; + private ReconOMMetadataManager reconOMMetadataManager; + private OzoneManagerServiceProviderImpl ozoneManagerServiceProvider; + private OMDBInsightEndpoint omdbInsightEndpoint; + private Random random = new Random(); + + @Before + public void setUp() throws Exception { + omMetadataManager = initializeNewOmMetadataManager( + temporaryFolder.newFolder()); + ozoneManagerServiceProvider = getMockOzoneManagerServiceProvider(); + reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, + temporaryFolder.newFolder()); + ReconTestInjector reconTestInjector = + new ReconTestInjector.Builder(temporaryFolder) + .withReconSqlDb() + .withReconOm(reconOMMetadataManager) + .withOmServiceProvider(ozoneManagerServiceProvider) + .addBinding(OMDBInsightEndpoint.class) + .build(); + omdbInsightEndpoint = reconTestInjector.getInstance( + OMDBInsightEndpoint.class); + } + + @Test + public void testGetOpenKeyInfo() throws Exception { + OmKeyInfo omKeyInfo = getOmKeyInfo("sampleVol", "bucketOne", "key_one"); + + reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) + .put("/sampleVol/bucketOne/key_one", omKeyInfo); + OmKeyInfo omKeyInfo1 = + reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) + .get("/sampleVol/bucketOne/key_one"); + Assertions.assertEquals("key_one", omKeyInfo1.getKeyName()); + Response openKeyInfoResp = omdbInsightEndpoint.getOpenKeyInfo(-1, ""); + KeyInsightInfoResp keyInsightInfoResp = + (KeyInsightInfoResp) openKeyInfoResp.getEntity(); + Assertions.assertNotNull(keyInsightInfoResp); + Assertions.assertEquals("key_one", + keyInsightInfoResp.getNonFSOKeyInfoList().get(0).getPath()); + } + + @Test + public void testGetDeletedKeyInfo() throws Exception { + OmKeyInfo omKeyInfo = getOmKeyInfo("sampleVol", "bucketOne", "key_one"); + + 
reconOMMetadataManager.getKeyTable(getBucketLayout()) + .put("/sampleVol/bucketOne/key_one", omKeyInfo); + OmKeyInfo omKeyInfo1 = reconOMMetadataManager.getKeyTable(getBucketLayout()) + .get("/sampleVol/bucketOne/key_one"); + Assertions.assertEquals("key_one", omKeyInfo1.getKeyName()); + RepeatedOmKeyInfo repeatedOmKeyInfo = new RepeatedOmKeyInfo(omKeyInfo); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/key_one", repeatedOmKeyInfo); + RepeatedOmKeyInfo repeatedOmKeyInfo1 = + reconOMMetadataManager.getDeletedTable() + .get("/sampleVol/bucketOne/key_one"); + Assertions.assertEquals("key_one", + repeatedOmKeyInfo1.getOmKeyInfoList().get(0).getKeyName()); + Response deletedKeyInfo = omdbInsightEndpoint.getDeletedKeyInfo(-1, ""); + KeyInsightInfoResp keyInsightInfoResp = + (KeyInsightInfoResp) deletedKeyInfo.getEntity(); + Assertions.assertNotNull(keyInsightInfoResp); + Assertions.assertEquals("key_one", + keyInsightInfoResp.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList() + .get(0).getKeyName()); + } + + private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, + String keyName) { + return new OmKeyInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setReplicationConfig(StandaloneReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.ONE)) + .setDataSize(random.nextLong()) + .build(); + } + +} From f8e0d7934782375333749aa6a9d3b6785854bd90 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Tue, 4 Apr 2023 20:22:20 +0530 Subject: [PATCH 03/21] HDDS-8214. Recon - OM DB Insights - Key Level Info. 
--- .../ozone/recon/api/OMDBInsightEndpoint.java | 97 ++++++++- .../recon/api/TestOmDBInsightEndPoint.java | 204 +++++++++++++++++- 2 files changed, 291 insertions(+), 10 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 377d84a3bc3..ae5fd350c69 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -19,14 +19,21 @@ package org.apache.hadoop.ozone.recon.api; import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata; import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo; import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResp; +import org.apache.hadoop.ozone.recon.api.types.KeysResponse; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.scm.ReconContainerManager; +import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; import javax.inject.Inject; import javax.ws.rs.DefaultValue; @@ -34,11 +41,15 @@ import javax.ws.rs.Path; import javax.ws.rs.Produces; import javax.ws.rs.QueryParam; +import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import java.io.IOException; +import java.util.ArrayList; import 
java.util.Arrays; import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT; @@ -58,11 +69,18 @@ @Produces(MediaType.APPLICATION_JSON) public class OMDBInsightEndpoint { + @Inject + private ContainerEndpoint containerEndpoint; + @Inject + private ReconContainerMetadataManager reconContainerMetadataManager; private final ReconOMMetadataManager omMetadataManager; + private final ReconContainerManager containerManager; @Inject - public OMDBInsightEndpoint( - ReconOMMetadataManager omMetadataManager) { + public OMDBInsightEndpoint(OzoneStorageContainerManager reconSCM, + ReconOMMetadataManager omMetadataManager) { + this.containerManager = + (ReconContainerManager) reconSCM.getContainerManager(); this.omMetadataManager = omMetadataManager; } @@ -182,8 +200,14 @@ public Response getOpenKeyInfo( break; } } - } catch (IOException e) { - throw new RuntimeException(e); + } catch (IOException ex) { + throw new WebApplicationException(ex, + Response.Status.INTERNAL_SERVER_ERROR); + } catch (IllegalArgumentException e) { + throw new WebApplicationException(e, Response.Status.BAD_REQUEST); + } catch (Exception ex) { + throw new WebApplicationException(ex, + Response.Status.INTERNAL_SERVER_ERROR); } if (recordsFetchedLimitReached) { break; @@ -261,8 +285,14 @@ private KeyInsightInfoResp getPendingForDeletionDirInfo( break; } } - } catch (IOException e) { - throw new RuntimeException(e); + } catch (IOException ex) { + throw new WebApplicationException(ex, + Response.Status.INTERNAL_SERVER_ERROR); + } catch (IllegalArgumentException e) { + throw new WebApplicationException(e, Response.Status.BAD_REQUEST); + } catch (Exception ex) { + throw new WebApplicationException(ex, + Response.Status.INTERNAL_SERVER_ERROR); } return pendingForDeletionKeyInfo; } @@ -309,8 +339,14 @@ private KeyInsightInfoResp 
getPendingForDeletionKeyInfo( break; } } - } catch (IOException e) { - throw new RuntimeException(e); + } catch (IOException ex) { + throw new WebApplicationException(ex, + Response.Status.INTERNAL_SERVER_ERROR); + } catch (IllegalArgumentException e) { + throw new WebApplicationException(e, Response.Status.BAD_REQUEST); + } catch (Exception ex) { + throw new WebApplicationException(ex, + Response.Status.INTERNAL_SERVER_ERROR); } return deletedKeyAndDirInsightInfo; } @@ -327,4 +363,49 @@ private void updateReplicatedAndUnReplicatedTotal( omKeyInfo.getReplicatedSize()); }); } + + /** This method retrieves set of keys/files/dirs which are mapped to + * containers in DELETED state in SCM. */ + @GET + @Path("deletedcontainerkeys") + public Response getDeletedContainerKeysInfo( + @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) + int limit, + @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) + String prevKeyPrefix) { + List keysResponseList = new ArrayList<>(); + try { + Map omContainers = + reconContainerMetadataManager.getContainers(-1, 0); + List deletedStateSCMContainers = + containerManager.getContainers(HddsProtos.LifeCycleState.DELETED); + List deletedStateSCMContainerIds = + deletedStateSCMContainers.stream() + .map(containerInfo -> containerInfo.getContainerID()).collect( + Collectors.toList()); + + List omContainerIdsMappedToDeletedSCMContainers = + omContainers.entrySet().stream() + .filter( + map -> deletedStateSCMContainerIds.contains(map.getKey())) + .map(map -> map.getKey()).collect(Collectors.toList()); + + omContainerIdsMappedToDeletedSCMContainers.forEach(containerId -> { + Response keysForContainer = + containerEndpoint.getKeysForContainer(containerId, limit, + prevKeyPrefix); + KeysResponse keysResponse = (KeysResponse) keysForContainer.getEntity(); + keysResponseList.add(keysResponse); + }); + } catch (IOException ex) { + throw new WebApplicationException(ex, + Response.Status.INTERNAL_SERVER_ERROR); + } catch 
(IllegalArgumentException e) { + throw new WebApplicationException(e, Response.Status.BAD_REQUEST); + } catch (Exception ex) { + throw new WebApplicationException(ex, + Response.Status.INTERNAL_SERVER_ERROR); + } + return Response.ok(keysResponseList).build(); + } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java index 3cb2ac9a221..c1728a6fa23 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java @@ -18,15 +18,38 @@ package org.apache.hadoop.ozone.recon.api; +import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerStateManager; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata; import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResp; 
+import org.apache.hadoop.ozone.recon.api.types.KeysResponse; +import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.scm.ReconContainerManager; +import org.apache.hadoop.ozone.recon.scm.ReconPipelineManager; +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; +import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; +import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; +import org.apache.hadoop.ozone.recon.tasks.ContainerKeyMapperTask; +import org.junit.Assert; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -34,12 +57,26 @@ import org.junit.rules.TemporaryFolder; import javax.ws.rs.core.Response; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; import java.util.Random; +import java.util.Set; +import java.util.concurrent.TimeoutException; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getBucketLayout; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getOmKeyLocationInfo; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDataToOm; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static 
org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** * Unit test for OmDBInsightEndPoint. @@ -48,12 +85,19 @@ public class TestOmDBInsightEndPoint { @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); - + private OzoneStorageContainerManager ozoneStorageContainerManager; + private ReconContainerMetadataManager reconContainerMetadataManager; private OMMetadataManager omMetadataManager; + private ReconContainerManager reconContainerManager; + private ContainerStateManager containerStateManager; + private ReconPipelineManager reconPipelineManager; private ReconOMMetadataManager reconOMMetadataManager; private OzoneManagerServiceProviderImpl ozoneManagerServiceProvider; private OMDBInsightEndpoint omdbInsightEndpoint; + private Pipeline pipeline; + private PipelineID pipelineID; private Random random = new Random(); + private long keyCount = 5L; @Before public void setUp() throws Exception { @@ -66,11 +110,106 @@ public void setUp() throws Exception { new ReconTestInjector.Builder(temporaryFolder) .withReconSqlDb() .withReconOm(reconOMMetadataManager) - .withOmServiceProvider(ozoneManagerServiceProvider) + .withOmServiceProvider(mock(OzoneManagerServiceProviderImpl.class)) + // No longer using mock reconSCM as we need nodeDB in Facade + // to establish datanode UUID to hostname mapping + .addBinding(OzoneStorageContainerManager.class, + ReconStorageContainerManagerFacade.class) + .withContainerDB() + .addBinding(StorageContainerServiceProvider.class, + mock(StorageContainerServiceProviderImpl.class)) .addBinding(OMDBInsightEndpoint.class) + .addBinding(ContainerHealthSchemaManager.class) .build(); + reconContainerMetadataManager = + reconTestInjector.getInstance(ReconContainerMetadataManager.class); omdbInsightEndpoint = reconTestInjector.getInstance( OMDBInsightEndpoint.class); + ozoneStorageContainerManager = + reconTestInjector.getInstance(OzoneStorageContainerManager.class); + reconContainerManager = (ReconContainerManager) + 
ozoneStorageContainerManager.getContainerManager(); + containerStateManager = reconContainerManager + .getContainerStateManager(); + reconPipelineManager = (ReconPipelineManager) + ozoneStorageContainerManager.getPipelineManager(); + pipeline = getRandomPipeline(); + pipelineID = pipeline.getId(); + reconPipelineManager.addPipeline(pipeline); + setUpOmData(); + } + + private void setUpOmData() throws Exception { + List omKeyLocationInfoList = new ArrayList<>(); + BlockID blockID1 = new BlockID(1, 101); + OmKeyLocationInfo omKeyLocationInfo1 = getOmKeyLocationInfo(blockID1, + pipeline); + omKeyLocationInfoList.add(omKeyLocationInfo1); + + BlockID blockID2 = new BlockID(2, 102); + OmKeyLocationInfo omKeyLocationInfo2 = getOmKeyLocationInfo(blockID2, + pipeline); + omKeyLocationInfoList.add(omKeyLocationInfo2); + + OmKeyLocationInfoGroup omKeyLocationInfoGroup = new + OmKeyLocationInfoGroup(0, omKeyLocationInfoList); + + //key = key_one, Blocks = [ {CID = 1, LID = 101}, {CID = 2, LID = 102} ] + writeDataToOm(reconOMMetadataManager, + "key_one", "bucketOne", "sampleVol", + Collections.singletonList(omKeyLocationInfoGroup)); + + List infoGroups = new ArrayList<>(); + BlockID blockID3 = new BlockID(1, 103); + OmKeyLocationInfo omKeyLocationInfo3 = getOmKeyLocationInfo(blockID3, + pipeline); + + List omKeyLocationInfoListNew = new ArrayList<>(); + omKeyLocationInfoListNew.add(omKeyLocationInfo3); + infoGroups.add(new OmKeyLocationInfoGroup(0, + omKeyLocationInfoListNew)); + + BlockID blockID4 = new BlockID(1, 104); + OmKeyLocationInfo omKeyLocationInfo4 = getOmKeyLocationInfo(blockID4, + pipeline); + + omKeyLocationInfoListNew = new ArrayList<>(); + omKeyLocationInfoListNew.add(omKeyLocationInfo4); + infoGroups.add(new OmKeyLocationInfoGroup(1, + omKeyLocationInfoListNew)); + + //key = key_two, Blocks = [ {CID = 1, LID = 103}, {CID = 1, LID = 104} ] + writeDataToOm(reconOMMetadataManager, + "key_two", "bucketOne", "sampleVol", infoGroups); + + List omKeyLocationInfoList2 
= new ArrayList<>(); + BlockID blockID5 = new BlockID(2, 2); + OmKeyLocationInfo omKeyLocationInfo5 = getOmKeyLocationInfo(blockID5, + pipeline); + omKeyLocationInfoList2.add(omKeyLocationInfo5); + + BlockID blockID6 = new BlockID(2, 3); + OmKeyLocationInfo omKeyLocationInfo6 = getOmKeyLocationInfo(blockID6, + pipeline); + omKeyLocationInfoList2.add(omKeyLocationInfo6); + + OmKeyLocationInfoGroup omKeyLocationInfoGroup2 = new + OmKeyLocationInfoGroup(0, omKeyLocationInfoList2); + + //key = key_three, Blocks = [ {CID = 2, LID = 2}, {CID = 2, LID = 3} ] + writeDataToOm(reconOMMetadataManager, + "key_three", "bucketOne", "sampleVol", + Collections.singletonList(omKeyLocationInfoGroup2)); + + //Generate Recon container DB data. + OMMetadataManager omMetadataManagerMock = mock(OMMetadataManager.class); + Table tableMock = mock(Table.class); + when(tableMock.getName()).thenReturn("KeyTable"); + when(omMetadataManagerMock.getKeyTable(getBucketLayout())) + .thenReturn(tableMock); + ContainerKeyMapperTask containerKeyMapperTask = + new ContainerKeyMapperTask(reconContainerMetadataManager); + containerKeyMapperTask.reprocess(reconOMMetadataManager); } @Test @@ -129,4 +268,65 @@ private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, .build(); } + @Test + public void testGetDeletedContainerKeysInfo() throws Exception { + Map omContainers = + reconContainerMetadataManager.getContainers(-1, 0); + putContainerInfos(2); + List scmContainers = reconContainerManager.getContainers(); + assertEquals(omContainers.size(), scmContainers.size()); + // Update container state of Container Id 1 to CLOSING to CLOSED + // and then to DELETED + reconContainerManager.updateContainerState(ContainerID.valueOf(1), + HddsProtos.LifeCycleEvent.FINALIZE); + reconContainerManager.updateContainerState(ContainerID.valueOf(1), + HddsProtos.LifeCycleEvent.CLOSE); + reconContainerManager + .updateContainerState(ContainerID.valueOf(1), + HddsProtos.LifeCycleEvent.DELETE); + Set containerIDs = 
containerStateManager + .getContainerIDs(HddsProtos.LifeCycleState.DELETING); + Assert.assertEquals(1, containerIDs.size()); + + reconContainerManager + .updateContainerState(ContainerID.valueOf(1), + HddsProtos.LifeCycleEvent.CLEANUP); + containerIDs = containerStateManager + .getContainerIDs(HddsProtos.LifeCycleState.DELETED); + Assert.assertEquals(1, containerIDs.size()); + + List deletedSCMContainers = + reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED); + assertEquals(1, deletedSCMContainers.size()); + + Response deletedContainerKeysInfo = + omdbInsightEndpoint.getDeletedContainerKeysInfo(-1, ""); + assertNotNull(deletedContainerKeysInfo); + List keysResponseList = + (List) deletedContainerKeysInfo.getEntity(); + assertEquals(2, keysResponseList.get(0).getKeys().size()); + assertEquals(3, keysResponseList.get(0).getTotalCount()); + assertEquals(1, keysResponseList.size()); + } + + ContainerInfo newContainerInfo(long containerId) { + return new ContainerInfo.Builder() + .setContainerID(containerId) + .setReplicationConfig( + RatisReplicationConfig.getInstance( + HddsProtos.ReplicationFactor.THREE)) + .setState(HddsProtos.LifeCycleState.OPEN) + .setOwner("owner1") + .setNumberOfKeys(keyCount) + .setPipelineID(pipelineID) + .build(); + } + + void putContainerInfos(int num) throws IOException, TimeoutException { + for (int i = 1; i <= num; i++) { + final ContainerInfo info = newContainerInfo(i); + reconContainerManager.addNewContainer( + new ContainerWithPipeline(info, pipeline)); + } + } } From 8dacd12e0ceeb2968d56a001602d2b4e76ed2084 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Tue, 4 Apr 2023 21:08:01 +0530 Subject: [PATCH 04/21] HDDS-8214. Recon - OM DB Insights - Key Level Info. 
--- .../apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java index c1728a6fa23..003a86e994e 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java @@ -92,7 +92,6 @@ public class TestOmDBInsightEndPoint { private ContainerStateManager containerStateManager; private ReconPipelineManager reconPipelineManager; private ReconOMMetadataManager reconOMMetadataManager; - private OzoneManagerServiceProviderImpl ozoneManagerServiceProvider; private OMDBInsightEndpoint omdbInsightEndpoint; private Pipeline pipeline; private PipelineID pipelineID; @@ -103,7 +102,6 @@ public class TestOmDBInsightEndPoint { public void setUp() throws Exception { omMetadataManager = initializeNewOmMetadataManager( temporaryFolder.newFolder()); - ozoneManagerServiceProvider = getMockOzoneManagerServiceProvider(); reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, temporaryFolder.newFolder()); ReconTestInjector reconTestInjector = From 168e651362208568f9e191503e715ce2f5155018 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Tue, 4 Apr 2023 21:14:57 +0530 Subject: [PATCH 05/21] HDDS-8214. Recon - OM DB Insights - Key Level Info. 
--- .../apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java index 003a86e994e..aa24ae6216d 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java @@ -67,7 +67,6 @@ import java.util.concurrent.TimeoutException; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getBucketLayout; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getOmKeyLocationInfo; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; From cace42c6797a8f5958e16087f61584eab1d5bac3 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Wed, 5 Apr 2023 17:45:05 +0530 Subject: [PATCH 06/21] HDDS-8214. Recon - OM DB Insights - Key Level Info. 
--- .../org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index ae5fd350c69..bf1cb70f6ab 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -67,6 +67,7 @@ */ @Path("/omdbinsight") @Produces(MediaType.APPLICATION_JSON) +@AdminOnly public class OMDBInsightEndpoint { @Inject From f62896899b861016ecb2d836833eec07413d5e77 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Wed, 5 Apr 2023 20:03:41 +0530 Subject: [PATCH 07/21] HDDS-8214. Recon - OM DB Insights - Key Level Info. --- .../org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java | 2 +- .../hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index bf1cb70f6ab..a74d71ab606 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -219,7 +219,7 @@ public Response getOpenKeyInfo( /** This method retrieves set of keys/files/dirs pending for deletion. 
*/ @GET - @Path("deletekeyinfo") + @Path("pendingfordeletionkeyinfo") public Response getDeletedKeyInfo( @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) int limit, diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java index 27b9d13a74b..1e6926c3bde 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java @@ -45,5 +45,5 @@ public interface OzoneManagerServiceProvider { * @return whether the trigger happened or not */ boolean triggerSyncDataFromOMImmediately(); - + } From 9fc1eafb512243c0f79148e1f0bc9b86b3b3269b Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Wed, 5 Apr 2023 20:04:54 +0530 Subject: [PATCH 08/21] HDDS-8214. Recon - OM DB Insights - Key Level Info. --- .../hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java index 1e6926c3bde..3f4fff5f412 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java @@ -45,5 +45,4 @@ public interface OzoneManagerServiceProvider { * @return whether the trigger happened or not */ boolean triggerSyncDataFromOMImmediately(); - } From 285ddb1fa4cbe21ab08fc551d32d6f5477bbfbd8 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Mon, 17 Apr 2023 10:41:23 +0530 Subject: [PATCH 09/21] HDDS-8214. Recon - OM DB Insights - Key Level Info. 
--- .../ozone/recon/api/OMDBInsightEndpoint.java | 26 +++++++++---------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index a74d71ab606..083cf652d43 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -137,7 +137,7 @@ public OMDBInsightEndpoint(OzoneStorageContainerManager reconSCM, * } */ @GET - @Path("openkeyinfo") + @Path("/keys/open") public Response getOpenKeyInfo( @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) int limit, @@ -219,21 +219,21 @@ public Response getOpenKeyInfo( /** This method retrieves set of keys/files/dirs pending for deletion. */ @GET - @Path("pendingfordeletionkeyinfo") + @Path("/keys/deletePending") public Response getDeletedKeyInfo( @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) int limit, @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) String prevKeyPrefix) { KeyInsightInfoResp deletedKeyAndDirInsightInfo = new KeyInsightInfoResp(); - KeyInsightInfoResp pendingForDeletionKeyInfo = - getPendingForDeletionKeyInfo(limit, prevKeyPrefix, - deletedKeyAndDirInsightInfo); - return Response.ok(getPendingForDeletionDirInfo(limit, prevKeyPrefix, - pendingForDeletionKeyInfo)).build(); + getPendingForDeletionKeyInfo(limit, prevKeyPrefix, + deletedKeyAndDirInsightInfo); + getPendingForDeletionDirInfo(limit, prevKeyPrefix, + deletedKeyAndDirInsightInfo); + return Response.ok(deletedKeyAndDirInsightInfo).build(); } - private KeyInsightInfoResp getPendingForDeletionDirInfo( + private void getPendingForDeletionDirInfo( int limit, String prevKeyPrefix, KeyInsightInfoResp pendingForDeletionKeyInfo) { @@ -257,7 +257,7 @@ private KeyInsightInfoResp 
getPendingForDeletionDirInfo( if (seekKeyValue == null || (StringUtils.isNotBlank(prevKeyPrefix) && !seekKeyValue.getKey().equals(prevKeyPrefix))) { - return pendingForDeletionKeyInfo; + return; } } while (keyIter.hasNext()) { @@ -295,10 +295,9 @@ private KeyInsightInfoResp getPendingForDeletionDirInfo( throw new WebApplicationException(ex, Response.Status.INTERNAL_SERVER_ERROR); } - return pendingForDeletionKeyInfo; } - private KeyInsightInfoResp getPendingForDeletionKeyInfo( + private void getPendingForDeletionKeyInfo( int limit, String prevKeyPrefix, KeyInsightInfoResp deletedKeyAndDirInsightInfo) { @@ -322,7 +321,7 @@ private KeyInsightInfoResp getPendingForDeletionKeyInfo( if (seekKeyValue == null || (StringUtils.isNotBlank(prevKeyPrefix) && !seekKeyValue.getKey().equals(prevKeyPrefix))) { - return deletedKeyAndDirInsightInfo; + return; } } while (keyIter.hasNext()) { @@ -349,7 +348,6 @@ private KeyInsightInfoResp getPendingForDeletionKeyInfo( throw new WebApplicationException(ex, Response.Status.INTERNAL_SERVER_ERROR); } - return deletedKeyAndDirInsightInfo; } private void updateReplicatedAndUnReplicatedTotal( @@ -368,7 +366,7 @@ private void updateReplicatedAndUnReplicatedTotal( /** This method retrieves set of keys/files/dirs which are mapped to * containers in DELETED state in SCM. */ @GET - @Path("deletedcontainerkeys") + @Path("/keys/deletedContainers") public Response getDeletedContainerKeysInfo( @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) int limit, From 05141a35bb461282a088c65959788de8e4bb02a3 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Mon, 17 Apr 2023 11:02:40 +0530 Subject: [PATCH 10/21] HDDS-8214. Recon - OM DB Insights - Key Level Info. 
--- .../ozone/recon/api/OMDBInsightEndpoint.java | 46 ++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 083cf652d43..8af623803dc 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -217,7 +217,51 @@ public Response getOpenKeyInfo( return Response.ok(openKeyInsightInfo).build(); } - /** This method retrieves set of keys/files/dirs pending for deletion. */ + /** This method retrieves set of keys/files/dirs pending for deletion. + * Sample API Response: + * { + * "replicatedTotal": -1530804718628866300, + * "unreplicatedTotal": -1530804718628866300, + * "deletedkeyinfo": [ + * { + * "omKeyInfoList": [ + * { + * "metadata": {}, + * "objectID": 0, + * "updateID": 0, + * "parentObjectID": 0, + * "volumeName": "sampleVol", + * "bucketName": "bucketOne", + * "keyName": "key_one", + * "dataSize": -1530804718628866300, + * "keyLocationVersions": [], + * "creationTime": 0, + * "modificationTime": 0, + * "replicationConfig": { + * "replicationFactor": "ONE", + * "requiredNodes": 1, + * "replicationType": "STANDALONE" + * }, + * "fileChecksum": null, + * "fileName": "key_one", + * "acls": [], + * "path": "0/key_one", + * "file": false, + * "latestVersionLocations": null, + * "replicatedSize": -1530804718628866300, + * "fileEncryptionInfo": null, + * "objectInfo": "OMKeyInfo{volume='sampleVol', bucket='bucketOne', + * key='key_one', dataSize='-1530804718628866186', creationTime='0', + * objectID='0', parentID='0', replication='STANDALONE/ONE', + * fileChecksum='null}", + * "updateIDset": false + * } + * ] + * } + * ], + * "status": "OK" + * } + */ @GET @Path("/keys/deletePending") public Response 
getDeletedKeyInfo( From 70911980fd6189b4b5edad08dafec214001523a1 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Wed, 19 Apr 2023 10:09:43 +0530 Subject: [PATCH 11/21] HDDS-8214. Recon - OM DB Insights - Key Level Info. --- .../ozone/recon/api/OMDBInsightEndpoint.java | 19 +- ...oResp.java => KeyInsightInfoResponse.java} | 10 +- .../recon/api/TestOmDBInsightEndPoint.java | 245 +++++++++++++++++- 3 files changed, 252 insertions(+), 22 deletions(-) rename hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/{KeyInsightInfoResp.java => KeyInsightInfoResponse.java} (95%) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 8af623803dc..0efca4a2823 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -29,7 +29,7 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata; import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo; -import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResp; +import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; import org.apache.hadoop.ozone.recon.api.types.KeysResponse; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.scm.ReconContainerManager; @@ -143,9 +143,10 @@ public Response getOpenKeyInfo( int limit, @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) String prevKeyPrefix) { - KeyInsightInfoResp openKeyInsightInfo = new KeyInsightInfoResp(); + KeyInsightInfoResponse openKeyInsightInfo = new KeyInsightInfoResponse(); List nonFSOKeyInfoList = openKeyInsightInfo.getNonFSOKeyInfoList(); + boolean skipPrevKeyDone = false; boolean isLegacyBucketLayout 
= true; boolean recordsFetchedLimitReached = false; List fsoKeyInfoList = openKeyInsightInfo.getFsoKeyInfoList(); @@ -159,7 +160,7 @@ public Response getOpenKeyInfo( keyIter = openKeyTable.iterator()) { boolean skipPrevKey = false; String seekKey = prevKeyPrefix; - if (StringUtils.isNotBlank(prevKeyPrefix)) { + if (!skipPrevKeyDone && StringUtils.isNotBlank(prevKeyPrefix)) { skipPrevKey = true; Table.KeyValue seekKeyValue = keyIter.seek(seekKey); @@ -178,6 +179,7 @@ public Response getOpenKeyInfo( OmKeyInfo omKeyInfo = kv.getValue(); // skip the prev key if prev key is present if (skipPrevKey && key.equals(prevKeyPrefix)) { + skipPrevKeyDone = true; continue; } KeyEntityInfo keyEntityInfo = new KeyEntityInfo(); @@ -269,7 +271,8 @@ public Response getDeletedKeyInfo( int limit, @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) String prevKeyPrefix) { - KeyInsightInfoResp deletedKeyAndDirInsightInfo = new KeyInsightInfoResp(); + KeyInsightInfoResponse + deletedKeyAndDirInsightInfo = new KeyInsightInfoResponse(); getPendingForDeletionKeyInfo(limit, prevKeyPrefix, deletedKeyAndDirInsightInfo); getPendingForDeletionDirInfo(limit, prevKeyPrefix, @@ -279,7 +282,7 @@ public Response getDeletedKeyInfo( private void getPendingForDeletionDirInfo( int limit, String prevKeyPrefix, - KeyInsightInfoResp pendingForDeletionKeyInfo) { + KeyInsightInfoResponse pendingForDeletionKeyInfo) { List deletedDirInfoList = pendingForDeletionKeyInfo.getDeletedDirInfoList(); @@ -344,7 +347,7 @@ private void getPendingForDeletionDirInfo( private void getPendingForDeletionKeyInfo( int limit, String prevKeyPrefix, - KeyInsightInfoResp deletedKeyAndDirInsightInfo) { + KeyInsightInfoResponse deletedKeyAndDirInsightInfo) { List repeatedOmKeyInfoList = deletedKeyAndDirInsightInfo.getRepeatedOmKeyInfoList(); Table deletedTable = @@ -395,7 +398,7 @@ private void getPendingForDeletionKeyInfo( } private void updateReplicatedAndUnReplicatedTotal( - KeyInsightInfoResp 
deletedKeyAndDirInsightInfo, + KeyInsightInfoResponse deletedKeyAndDirInsightInfo, RepeatedOmKeyInfo repeatedOmKeyInfo) { repeatedOmKeyInfo.getOmKeyInfoList().forEach(omKeyInfo -> { deletedKeyAndDirInsightInfo.setUnreplicatedTotal( @@ -410,7 +413,7 @@ private void updateReplicatedAndUnReplicatedTotal( /** This method retrieves set of keys/files/dirs which are mapped to * containers in DELETED state in SCM. */ @GET - @Path("/keys/deletedContainers") + @Path("/containers/mismatch/keys") public Response getDeletedContainerKeysInfo( @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) int limit, diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResp.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResponse.java similarity index 95% rename from hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResp.java rename to hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResponse.java index 2d65668499c..d4ffb6a1f5e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResp.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResponse.java @@ -28,7 +28,7 @@ /** * HTTP Response wrapped for keys insights. */ -public class KeyInsightInfoResp { +public class KeyInsightInfoResponse { /** Amount of data mapped to all keys and files in * a cluster across all DNs. */ @@ -40,7 +40,7 @@ public class KeyInsightInfoResp { private long unreplicatedTotal; /** List of all non-fso keys. */ - @JsonProperty("non-fso") + @JsonProperty("nonFSO") @JsonInclude(JsonInclude.Include.NON_EMPTY) private List nonFSOKeyInfoList; @@ -50,11 +50,11 @@ public class KeyInsightInfoResp { private List fsoKeyInfoList; /** List of all deleted and repeatedly deleted keys. 
*/ - @JsonProperty("deletedkeyinfo") + @JsonProperty("deletedKeyInfo") @JsonInclude(JsonInclude.Include.NON_EMPTY) private List repeatedOmKeyInfoList; - @JsonProperty("deleteddirinfo") + @JsonProperty("deletedDirInfo") @JsonInclude(JsonInclude.Include.NON_EMPTY) private List deletedDirInfoList; @@ -62,7 +62,7 @@ public class KeyInsightInfoResp { @JsonProperty("status") private ResponseStatus responseCode; - public KeyInsightInfoResp() { + public KeyInsightInfoResponse() { responseCode = ResponseStatus.OK; replicatedTotal = 0L; unreplicatedTotal = 0L; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java index aa24ae6216d..9e10114aa86 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java @@ -30,14 +30,16 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.recon.ReconTestInjector; import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata; -import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResp; +import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; import org.apache.hadoop.ozone.recon.api.types.KeysResponse; import 
org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; @@ -65,6 +67,7 @@ import java.util.Random; import java.util.Set; import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getBucketLayout; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getOmKeyLocationInfo; @@ -166,7 +169,7 @@ private void setUpOmData() throws Exception { infoGroups.add(new OmKeyLocationInfoGroup(0, omKeyLocationInfoListNew)); - BlockID blockID4 = new BlockID(1, 104); + BlockID blockID4 = new BlockID(2, 104); OmKeyLocationInfo omKeyLocationInfo4 = getOmKeyLocationInfo(blockID4, pipeline); @@ -180,12 +183,12 @@ private void setUpOmData() throws Exception { "key_two", "bucketOne", "sampleVol", infoGroups); List omKeyLocationInfoList2 = new ArrayList<>(); - BlockID blockID5 = new BlockID(2, 2); + BlockID blockID5 = new BlockID(3, 105); OmKeyLocationInfo omKeyLocationInfo5 = getOmKeyLocationInfo(blockID5, pipeline); omKeyLocationInfoList2.add(omKeyLocationInfo5); - BlockID blockID6 = new BlockID(2, 3); + BlockID blockID6 = new BlockID(3, 106); OmKeyLocationInfo omKeyLocationInfo6 = getOmKeyLocationInfo(blockID6, pipeline); omKeyLocationInfoList2.add(omKeyLocationInfo6); @@ -204,7 +207,7 @@ private void setUpOmData() throws Exception { when(tableMock.getName()).thenReturn("KeyTable"); when(omMetadataManagerMock.getKeyTable(getBucketLayout())) .thenReturn(tableMock); - ContainerKeyMapperTask containerKeyMapperTask = + ContainerKeyMapperTask containerKeyMapperTask = new ContainerKeyMapperTask(reconContainerMetadataManager); containerKeyMapperTask.reprocess(reconOMMetadataManager); } @@ -220,13 +223,157 @@ public void testGetOpenKeyInfo() throws Exception { .get("/sampleVol/bucketOne/key_one"); Assertions.assertEquals("key_one", omKeyInfo1.getKeyName()); Response openKeyInfoResp = 
omdbInsightEndpoint.getOpenKeyInfo(-1, ""); - KeyInsightInfoResp keyInsightInfoResp = - (KeyInsightInfoResp) openKeyInfoResp.getEntity(); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); Assertions.assertNotNull(keyInsightInfoResp); Assertions.assertEquals("key_one", keyInsightInfoResp.getNonFSOKeyInfoList().get(0).getPath()); } + @Test + public void testGetOpenKeyInfoLimitParam() throws Exception { + OmKeyInfo omKeyInfo1 = getOmKeyInfo("sampleVol", "bucketOne", "key_one"); + OmKeyInfo omKeyInfo2 = getOmKeyInfo("sampleVol", "bucketOne", "key_two"); + OmKeyInfo omKeyInfo3 = getOmKeyInfo("sampleVol", "bucketOne", "key_three"); + + reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) + .put("/sampleVol/bucketOne/key_one", omKeyInfo1); + reconOMMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED) + .put("/sampleVol/bucketOne/key_two", omKeyInfo2); + reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) + .put("/sampleVol/bucketOne/key_three", omKeyInfo3); + Response openKeyInfoResp = omdbInsightEndpoint.getOpenKeyInfo(2, ""); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); + Assertions.assertNotNull(keyInsightInfoResp); + Assertions.assertEquals(2, + keyInsightInfoResp.getNonFSOKeyInfoList().size()); + Assertions.assertEquals(0, keyInsightInfoResp.getFsoKeyInfoList().size()); + Assertions.assertEquals(2, keyInsightInfoResp.getFsoKeyInfoList().size() + + keyInsightInfoResp.getNonFSOKeyInfoList().size()); + Assertions.assertEquals("key_three", + keyInsightInfoResp.getNonFSOKeyInfoList().get(1).getPath()); + + openKeyInfoResp = omdbInsightEndpoint.getOpenKeyInfo(3, ""); + keyInsightInfoResp = + (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); + Assertions.assertNotNull(keyInsightInfoResp); + Assertions.assertEquals(2, + keyInsightInfoResp.getNonFSOKeyInfoList().size()); + Assertions.assertEquals(1, 
keyInsightInfoResp.getFsoKeyInfoList().size()); + Assertions.assertEquals(3, keyInsightInfoResp.getFsoKeyInfoList().size() + + keyInsightInfoResp.getNonFSOKeyInfoList().size()); + Assertions.assertEquals("key_three", + keyInsightInfoResp.getNonFSOKeyInfoList().get(1).getPath()); + } + + @Test + public void testGetOpenKeyInfoPrevKeyParam() throws Exception { + OmKeyInfo omKeyInfo1 = getOmKeyInfo("sampleVol", "bucketOne", "key_one"); + OmKeyInfo omKeyInfo2 = getOmKeyInfo("sampleVol", "bucketOne", "key_two"); + OmKeyInfo omKeyInfo3 = getOmKeyInfo("sampleVol", "bucketOne", "key_three"); + + reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) + .put("/sampleVol/bucketOne/key_one", omKeyInfo1); + reconOMMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED) + .put("/sampleVol/bucketOne/key_two", omKeyInfo2); + reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) + .put("/sampleVol/bucketOne/key_three", omKeyInfo3); + Response openKeyInfoResp = + omdbInsightEndpoint.getOpenKeyInfo(-1, "/sampleVol/bucketOne/key_one"); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); + Assertions.assertNotNull(keyInsightInfoResp); + Assertions.assertEquals(1, + keyInsightInfoResp.getNonFSOKeyInfoList().size()); + Assertions.assertEquals(1, keyInsightInfoResp.getFsoKeyInfoList().size()); + Assertions.assertEquals(2, keyInsightInfoResp.getFsoKeyInfoList().size() + + keyInsightInfoResp.getNonFSOKeyInfoList().size()); + Assertions.assertEquals("key_three", + keyInsightInfoResp.getNonFSOKeyInfoList().get(0).getPath()); + Assertions.assertEquals("key_two", + keyInsightInfoResp.getFsoKeyInfoList().get(0).getPath()); + } + + @Test + public void testGetDeletedKeyInfoLimitParam() throws Exception { + OmKeyInfo omKeyInfo1 = getOmKeyInfo("sampleVol", "bucketOne", "key_one"); + OmKeyInfo omKeyInfo2 = getOmKeyInfo("sampleVol", "bucketOne", "key_two"); + OmKeyInfo omKeyInfo3 = getOmKeyInfo("sampleVol", "bucketOne", "key_three"); 
+ + reconOMMetadataManager.getKeyTable(getBucketLayout()) + .put("/sampleVol/bucketOne/key_one", omKeyInfo1); + reconOMMetadataManager.getKeyTable(getBucketLayout()) + .put("/sampleVol/bucketOne/key_two", omKeyInfo2); + reconOMMetadataManager.getKeyTable(getBucketLayout()) + .put("/sampleVol/bucketOne/key_three", omKeyInfo3); + + OmKeyInfo omKeyInfoCopy = + reconOMMetadataManager.getKeyTable(getBucketLayout()) + .get("/sampleVol/bucketOne/key_one"); + Assertions.assertEquals("key_one", omKeyInfoCopy.getKeyName()); + RepeatedOmKeyInfo repeatedOmKeyInfo1 = new RepeatedOmKeyInfo(omKeyInfoCopy); + + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/key_one", repeatedOmKeyInfo1); + RepeatedOmKeyInfo repeatedOmKeyInfoCopy1 = + reconOMMetadataManager.getDeletedTable() + .get("/sampleVol/bucketOne/key_one"); + Assertions.assertEquals("key_one", + repeatedOmKeyInfo1.getOmKeyInfoList().get(0).getKeyName()); + + RepeatedOmKeyInfo repeatedOmKeyInfo2 = new RepeatedOmKeyInfo(omKeyInfo2); + RepeatedOmKeyInfo repeatedOmKeyInfo3 = new RepeatedOmKeyInfo(omKeyInfo2); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/key_two", repeatedOmKeyInfo2); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/key_three", repeatedOmKeyInfo3); + + Response deletedKeyInfo = omdbInsightEndpoint.getDeletedKeyInfo(2, ""); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) deletedKeyInfo.getEntity(); + Assertions.assertNotNull(keyInsightInfoResp); + Assertions.assertEquals(2, + keyInsightInfoResp.getRepeatedOmKeyInfoList().size()); + Assertions.assertEquals("key_two", + keyInsightInfoResp.getRepeatedOmKeyInfoList().get(1).getOmKeyInfoList() + .get(0).getKeyName()); + } + + @Test + public void testGetDeletedKeyInfoPrevKeyParam() throws Exception { + OmKeyInfo omKeyInfo1 = getOmKeyInfo("sampleVol", "bucketOne", "key_one"); + OmKeyInfo omKeyInfo2 = getOmKeyInfo("sampleVol", "bucketOne", "key_two"); + OmKeyInfo omKeyInfo3 = 
getOmKeyInfo("sampleVol", "bucketOne", "key_three"); + + RepeatedOmKeyInfo repeatedOmKeyInfo1 = new RepeatedOmKeyInfo(omKeyInfo1); + RepeatedOmKeyInfo repeatedOmKeyInfo2 = new RepeatedOmKeyInfo(omKeyInfo2); + RepeatedOmKeyInfo repeatedOmKeyInfo3 = new RepeatedOmKeyInfo(omKeyInfo3); + + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/key_one", repeatedOmKeyInfo1); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/key_two", repeatedOmKeyInfo2); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/key_three", repeatedOmKeyInfo3); + + Response deletedKeyInfo = omdbInsightEndpoint.getDeletedKeyInfo(2, + "/sampleVol/bucketOne/key_one"); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) deletedKeyInfo.getEntity(); + Assertions.assertNotNull(keyInsightInfoResp); + Assertions.assertEquals(2, + keyInsightInfoResp.getRepeatedOmKeyInfoList().size()); + + List pendingDeleteKeys = + keyInsightInfoResp.getRepeatedOmKeyInfoList().stream() + .map( + repeatedOmKeyInfo -> repeatedOmKeyInfo.getOmKeyInfoList().get(0) + .getKeyName()) + .collect(Collectors.toList()); + Assertions.assertFalse(pendingDeleteKeys.contains("key_one")); + } + @Test public void testGetDeletedKeyInfo() throws Exception { OmKeyInfo omKeyInfo = getOmKeyInfo("sampleVol", "bucketOne", "key_one"); @@ -245,8 +392,8 @@ public void testGetDeletedKeyInfo() throws Exception { Assertions.assertEquals("key_one", repeatedOmKeyInfo1.getOmKeyInfoList().get(0).getKeyName()); Response deletedKeyInfo = omdbInsightEndpoint.getDeletedKeyInfo(-1, ""); - KeyInsightInfoResp keyInsightInfoResp = - (KeyInsightInfoResp) deletedKeyInfo.getEntity(); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) deletedKeyInfo.getEntity(); Assertions.assertNotNull(keyInsightInfoResp); Assertions.assertEquals("key_one", keyInsightInfoResp.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList() @@ -306,6 +453,86 @@ public void 
testGetDeletedContainerKeysInfo() throws Exception { assertEquals(1, keysResponseList.size()); } + @Test + public void testGetDeletedContainerKeysInfoLimitParam() throws Exception { + Map omContainers = + reconContainerMetadataManager.getContainers(-1, 0); + putContainerInfos(3); + List scmContainers = reconContainerManager.getContainers(); + assertEquals(omContainers.size(), scmContainers.size()); + // Update container state of Container Id 1 to CLOSING to CLOSED + // and then to DELETED + updateContainerStateToDeleted(1); + updateContainerStateToDeleted(2); + + Set containerIDs = containerStateManager + .getContainerIDs(HddsProtos.LifeCycleState.DELETED); + Assert.assertEquals(2, containerIDs.size()); + + List deletedSCMContainers = + reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED); + assertEquals(2, deletedSCMContainers.size()); + + Response deletedContainerKeysInfo = + omdbInsightEndpoint.getDeletedContainerKeysInfo(1, ""); + assertNotNull(deletedContainerKeysInfo); + List keysResponseList = + (List) deletedContainerKeysInfo.getEntity(); + assertEquals(1, keysResponseList.get(0).getKeys().size()); + assertEquals(2, keysResponseList.get(0).getTotalCount()); + assertEquals(2, keysResponseList.size()); + } + + private void updateContainerStateToDeleted(long containerId) + throws IOException, InvalidStateTransitionException, TimeoutException { + reconContainerManager.updateContainerState(ContainerID.valueOf(containerId), + HddsProtos.LifeCycleEvent.FINALIZE); + reconContainerManager.updateContainerState(ContainerID.valueOf(containerId), + HddsProtos.LifeCycleEvent.CLOSE); + reconContainerManager + .updateContainerState(ContainerID.valueOf(containerId), + HddsProtos.LifeCycleEvent.DELETE); + reconContainerManager + .updateContainerState(ContainerID.valueOf(containerId), + HddsProtos.LifeCycleEvent.CLEANUP); + } + + @Test + public void testGetDeletedContainerKeysInfoPrevKeyParam() throws Exception { + Map omContainers = + 
reconContainerMetadataManager.getContainers(-1, 0); + putContainerInfos(3); + List scmContainers = reconContainerManager.getContainers(); + assertEquals(omContainers.size(), scmContainers.size()); + // Update container state of Container Id 1 to CLOSING to CLOSED + // and then to DELETED + updateContainerStateToDeleted(1); + updateContainerStateToDeleted(2); + + Set containerIDs = containerStateManager + .getContainerIDs(HddsProtos.LifeCycleState.DELETED); + Assert.assertEquals(2, containerIDs.size()); + + List deletedSCMContainers = + reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED); + assertEquals(2, deletedSCMContainers.size()); + + Response deletedContainerKeysInfo = + omdbInsightEndpoint.getDeletedContainerKeysInfo(2, + "/sampleVol/bucketOne/key_one"); + assertNotNull(deletedContainerKeysInfo); + List keysResponseList = + (List) deletedContainerKeysInfo.getEntity(); + assertEquals(1, keysResponseList.get(0).getKeys().size()); + assertEquals(2, keysResponseList.get(0).getTotalCount()); + assertEquals(2, keysResponseList.size()); + List keyList = keysResponseList.get(0).getKeys().stream() + .map(keyMetadata -> keyMetadata.getKey()).collect( + Collectors.toList()); + assertEquals(1, keyList.size()); + assertEquals("key_two", keyList.get(0)); + } + ContainerInfo newContainerInfo(long containerId) { return new ContainerInfo.Builder() .setContainerID(containerId) From fb210c398340cd78d4fa4cd14af8587c91d5623a Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Wed, 19 Apr 2023 11:54:55 +0530 Subject: [PATCH 12/21] HDDS-8214. Recon - OM DB Insights - Key Level Info. 
--- .../hadoop/ozone/recon/api/OMDBInsightEndpoint.java | 8 ++++---- .../hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java | 5 +++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 0efca4a2823..25ab1da3ad9 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -65,7 +65,7 @@ * 5. Amount of data mapped to pending delete keys in legacy/OBS buckets and * pending delete files in FSO buckets. */ -@Path("/omdbinsight") +@Path("/keys") @Produces(MediaType.APPLICATION_JSON) @AdminOnly public class OMDBInsightEndpoint { @@ -137,7 +137,7 @@ public OMDBInsightEndpoint(OzoneStorageContainerManager reconSCM, * } */ @GET - @Path("/keys/open") + @Path("/open") public Response getOpenKeyInfo( @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) int limit, @@ -265,7 +265,7 @@ public Response getOpenKeyInfo( * } */ @GET - @Path("/keys/deletePending") + @Path("/deletePending") public Response getDeletedKeyInfo( @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) int limit, @@ -413,7 +413,7 @@ private void updateReplicatedAndUnReplicatedTotal( /** This method retrieves set of keys/files/dirs which are mapped to * containers in DELETED state in SCM. 
*/ @GET - @Path("/containers/mismatch/keys") + @Path("/containers/mismatch") public Response getDeletedContainerKeysInfo( @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) int limit, diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java index 9e10114aa86..b7dc219e4a5 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java @@ -418,7 +418,8 @@ public void testGetDeletedContainerKeysInfo() throws Exception { reconContainerMetadataManager.getContainers(-1, 0); putContainerInfos(2); List scmContainers = reconContainerManager.getContainers(); - assertEquals(omContainers.size(), scmContainers.size()); + assertEquals(3, omContainers.size()); + assertEquals(2, scmContainers.size()); // Update container state of Container Id 1 to CLOSING to CLOSED // and then to DELETED reconContainerManager.updateContainerState(ContainerID.valueOf(1), @@ -449,7 +450,7 @@ public void testGetDeletedContainerKeysInfo() throws Exception { List keysResponseList = (List) deletedContainerKeysInfo.getEntity(); assertEquals(2, keysResponseList.get(0).getKeys().size()); - assertEquals(3, keysResponseList.get(0).getTotalCount()); + assertEquals(2, keysResponseList.get(0).getTotalCount()); assertEquals(1, keysResponseList.size()); } From eb23a87739547ad70f9bb5d1aa372fdff8917c1f Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Wed, 19 Apr 2023 13:47:14 +0530 Subject: [PATCH 13/21] HDDS-8214. Recon - OM DB Insights - Key Level Info. 
--- .../ozone/recon/api/ContainerEndpoint.java | 92 ++++++--- .../ozone/recon/api/OMDBInsightEndpoint.java | 52 ----- .../recon/api/TestContainerEndpoint.java | 187 +++++++++++++++--- .../recon/api/TestOmDBInsightEndPoint.java | 165 ---------------- 4 files changed, 223 insertions(+), 273 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java index 8059bb4c4d6..1122e1ed3f9 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java @@ -17,28 +17,8 @@ */ package org.apache.hadoop.ozone.recon.api; -import java.io.IOException; -import java.time.Instant; -import java.util.Collection; -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.stream.Collectors; - -import javax.ws.rs.DefaultValue; -import javax.ws.rs.GET; -import javax.ws.rs.Path; -import javax.ws.rs.PathParam; -import javax.ws.rs.Produces; -import javax.ws.rs.QueryParam; -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.core.MediaType; -import javax.ws.rs.core.Response; - -import javax.inject.Inject; import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; @@ -50,21 +30,41 @@ import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata; import org.apache.hadoop.ozone.recon.api.types.ContainersResponse; import org.apache.hadoop.ozone.recon.api.types.KeyMetadata; +import org.apache.hadoop.ozone.recon.api.types.KeyMetadata.ContainerBlockMetadata; import 
org.apache.hadoop.ozone.recon.api.types.KeysResponse; import org.apache.hadoop.ozone.recon.api.types.MissingContainerMetadata; import org.apache.hadoop.ozone.recon.api.types.MissingContainersResponse; import org.apache.hadoop.ozone.recon.api.types.UnhealthyContainerMetadata; -import org.apache.hadoop.ozone.recon.api.types.UnhealthyContainersSummary; import org.apache.hadoop.ozone.recon.api.types.UnhealthyContainersResponse; -import org.apache.hadoop.ozone.recon.api.types.KeyMetadata.ContainerBlockMetadata; -import org.apache.hadoop.ozone.recon.persistence.ContainerHistory; +import org.apache.hadoop.ozone.recon.api.types.UnhealthyContainersSummary; import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager; +import org.apache.hadoop.ozone.recon.persistence.ContainerHistory; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.scm.ReconContainerManager; import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; import org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates; import org.hadoop.ozone.recon.schema.tables.pojos.UnhealthyContainers; +import javax.inject.Inject; +import javax.ws.rs.DefaultValue; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.PathParam; +import javax.ws.rs.Produces; +import javax.ws.rs.QueryParam; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import java.io.IOException; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collection; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.stream.Collectors; + import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_BATCH_NUMBER; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT; import static 
org.apache.hadoop.ozone.recon.ReconConstants.PREV_CONTAINER_ID_DEFAULT_VALUE; @@ -400,4 +400,48 @@ private List getBlocks( private BucketLayout getBucketLayout() { return BucketLayout.DEFAULT; } + + /** This method retrieves set of keys/files/dirs which are mapped to + * containers in DELETED state in SCM. */ + @GET + @Path("/mismatch/keys") + public Response getDeletedContainerKeysInfo( + @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) + int limit, + @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) + String prevKeyPrefix) { + List keysResponseList = new ArrayList<>(); + try { + Map omContainers = + reconContainerMetadataManager.getContainers(-1, 0); + List deletedStateSCMContainers = + containerManager.getContainers(HddsProtos.LifeCycleState.DELETED); + List deletedStateSCMContainerIds = + deletedStateSCMContainers.stream() + .map(containerInfo -> containerInfo.getContainerID()).collect( + Collectors.toList()); + + List omContainerIdsMappedToDeletedSCMContainers = + omContainers.entrySet().stream() + .filter( + map -> deletedStateSCMContainerIds.contains(map.getKey())) + .map(map -> map.getKey()).collect(Collectors.toList()); + + omContainerIdsMappedToDeletedSCMContainers.forEach(containerId -> { + Response keysForContainer = getKeysForContainer(containerId, limit, + prevKeyPrefix); + KeysResponse keysResponse = (KeysResponse) keysForContainer.getEntity(); + keysResponseList.add(keysResponse); + }); + } catch (IOException ex) { + throw new WebApplicationException(ex, + Response.Status.INTERNAL_SERVER_ERROR); + } catch (IllegalArgumentException e) { + throw new WebApplicationException(e, Response.Status.BAD_REQUEST); + } catch (Exception ex) { + throw new WebApplicationException(ex, + Response.Status.INTERNAL_SERVER_ERROR); + } + return Response.ok(keysResponseList).build(); + } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 25ab1da3ad9..230fe1f5193 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -19,18 +19,14 @@ package org.apache.hadoop.ozone.recon.api; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata; import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo; import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; -import org.apache.hadoop.ozone.recon.api.types.KeysResponse; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.scm.ReconContainerManager; import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; @@ -45,11 +41,8 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT; @@ -409,49 +402,4 @@ private void updateReplicatedAndUnReplicatedTotal( omKeyInfo.getReplicatedSize()); }); } - - /** This method retrieves set of keys/files/dirs which are mapped to - * containers in DELETED state in SCM. 
*/ - @GET - @Path("/containers/mismatch") - public Response getDeletedContainerKeysInfo( - @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) - int limit, - @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) - String prevKeyPrefix) { - List keysResponseList = new ArrayList<>(); - try { - Map omContainers = - reconContainerMetadataManager.getContainers(-1, 0); - List deletedStateSCMContainers = - containerManager.getContainers(HddsProtos.LifeCycleState.DELETED); - List deletedStateSCMContainerIds = - deletedStateSCMContainers.stream() - .map(containerInfo -> containerInfo.getContainerID()).collect( - Collectors.toList()); - - List omContainerIdsMappedToDeletedSCMContainers = - omContainers.entrySet().stream() - .filter( - map -> deletedStateSCMContainerIds.contains(map.getKey())) - .map(map -> map.getKey()).collect(Collectors.toList()); - - omContainerIdsMappedToDeletedSCMContainers.forEach(containerId -> { - Response keysForContainer = - containerEndpoint.getKeysForContainer(containerId, limit, - prevKeyPrefix); - KeysResponse keysResponse = (KeysResponse) keysForContainer.getEntity(); - keysResponseList.add(keysResponse); - }); - } catch (IOException ex) { - throw new WebApplicationException(ex, - Response.Status.INTERNAL_SERVER_ERROR); - } catch (IllegalArgumentException e) { - throw new WebApplicationException(e, Response.Status.BAD_REQUEST); - } catch (Exception ex) { - throw new WebApplicationException(ex, - Response.Status.INTERNAL_SERVER_ERROR); - } - return Response.ok(keysResponseList).build(); - } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java index c383c92589d..2a811e77505 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java +++ 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java @@ -18,35 +18,6 @@ package org.apache.hadoop.ozone.recon.api; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getOmKeyLocationInfo; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDataToOm; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.TimeoutException; -import java.util.stream.Collectors; -import javax.ws.rs.WebApplicationException; -import javax.ws.rs.core.Response; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -55,10 +26,13 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerStateManager; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import 
org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -72,8 +46,8 @@ import org.apache.hadoop.ozone.recon.api.types.MissingContainersResponse; import org.apache.hadoop.ozone.recon.api.types.UnhealthyContainerMetadata; import org.apache.hadoop.ozone.recon.api.types.UnhealthyContainersResponse; -import org.apache.hadoop.ozone.recon.persistence.ContainerHistory; import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager; +import org.apache.hadoop.ozone.recon.persistence.ContainerHistory; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.scm.ReconContainerManager; import org.apache.hadoop.ozone.recon.scm.ReconPipelineManager; @@ -83,7 +57,6 @@ import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; import org.apache.hadoop.ozone.recon.tasks.ContainerKeyMapperTask; -import org.apache.hadoop.hdds.utils.db.Table; import org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates; import org.hadoop.ozone.recon.schema.tables.pojos.UnhealthyContainers; import org.junit.Assert; @@ -92,6 +65,35 @@ import org.junit.Test; import org.junit.rules.TemporaryFolder; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.Response; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import 
java.util.UUID; +import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; + +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getOmKeyLocationInfo; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDataToOm; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + /** * Test for container endpoint. */ @@ -106,6 +108,7 @@ public class TestContainerEndpoint { private ReconContainerMetadataManager reconContainerMetadataManager; private ContainerEndpoint containerEndpoint; private boolean isSetupDone = false; + private ContainerStateManager containerStateManager; private ContainerHealthSchemaManager containerHealthSchemaManager; private ReconOMMetadataManager reconOMMetadataManager; private ContainerID containerID = ContainerID.valueOf(1L); @@ -150,7 +153,7 @@ private void initializeInjector() throws Exception { containerEndpoint = reconTestInjector.getInstance(ContainerEndpoint.class); containerHealthSchemaManager = reconTestInjector.getInstance(ContainerHealthSchemaManager.class); - + containerStateManager = reconContainerManager.getContainerStateManager(); pipeline = getRandomPipeline(); pipelineID = pipeline.getId(); reconPipelineManager.addPipeline(pipeline); @@ -762,4 +765,124 @@ private void createUnhealthyRecord(int id, String state, int expected, private BucketLayout getBucketLayout() { return BucketLayout.DEFAULT; } + + private void 
updateContainerStateToDeleted(long containerId) + throws IOException, InvalidStateTransitionException, TimeoutException { + reconContainerManager.updateContainerState(ContainerID.valueOf(containerId), + HddsProtos.LifeCycleEvent.FINALIZE); + reconContainerManager.updateContainerState(ContainerID.valueOf(containerId), + HddsProtos.LifeCycleEvent.CLOSE); + reconContainerManager + .updateContainerState(ContainerID.valueOf(containerId), + HddsProtos.LifeCycleEvent.DELETE); + reconContainerManager + .updateContainerState(ContainerID.valueOf(containerId), + HddsProtos.LifeCycleEvent.CLEANUP); + } + + @Test + public void testGetDeletedContainerKeysInfo() throws Exception { + Map omContainers = + reconContainerMetadataManager.getContainers(-1, 0); + putContainerInfos(2); + List scmContainers = reconContainerManager.getContainers(); + assertEquals(2, omContainers.size()); + assertEquals(2, scmContainers.size()); + // Update container state of Container Id 1 to CLOSING to CLOSED + // and then to DELETED + reconContainerManager.updateContainerState(ContainerID.valueOf(1), + HddsProtos.LifeCycleEvent.FINALIZE); + reconContainerManager.updateContainerState(ContainerID.valueOf(1), + HddsProtos.LifeCycleEvent.CLOSE); + reconContainerManager + .updateContainerState(ContainerID.valueOf(1), + HddsProtos.LifeCycleEvent.DELETE); + Set containerIDs = containerStateManager + .getContainerIDs(HddsProtos.LifeCycleState.DELETING); + Assert.assertEquals(1, containerIDs.size()); + + reconContainerManager + .updateContainerState(ContainerID.valueOf(1), + HddsProtos.LifeCycleEvent.CLEANUP); + containerIDs = containerStateManager + .getContainerIDs(HddsProtos.LifeCycleState.DELETED); + Assert.assertEquals(1, containerIDs.size()); + + List deletedSCMContainers = + reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED); + assertEquals(1, deletedSCMContainers.size()); + + Response deletedContainerKeysInfo = + containerEndpoint.getDeletedContainerKeysInfo(-1, ""); + 
assertNotNull(deletedContainerKeysInfo); + List keysResponseList = + (List) deletedContainerKeysInfo.getEntity(); + assertEquals(2, keysResponseList.get(0).getKeys().size()); + assertEquals(3, keysResponseList.get(0).getTotalCount()); + assertEquals(1, keysResponseList.size()); + } + + @Test + public void testGetDeletedContainerKeysInfoLimitParam() throws Exception { + Map omContainers = + reconContainerMetadataManager.getContainers(-1, 0); + putContainerInfos(2); + List scmContainers = reconContainerManager.getContainers(); + assertEquals(omContainers.size(), scmContainers.size()); + // Update container state of Container Id 1 to CLOSING to CLOSED + // and then to DELETED + updateContainerStateToDeleted(1); + + Set containerIDs = containerStateManager + .getContainerIDs(HddsProtos.LifeCycleState.DELETED); + Assert.assertEquals(1, containerIDs.size()); + + List deletedSCMContainers = + reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED); + assertEquals(1, deletedSCMContainers.size()); + + Response deletedContainerKeysInfo = + containerEndpoint.getDeletedContainerKeysInfo(1, ""); + assertNotNull(deletedContainerKeysInfo); + List keysResponseList = + (List) deletedContainerKeysInfo.getEntity(); + assertEquals(1, keysResponseList.get(0).getKeys().size()); + assertEquals(3, keysResponseList.get(0).getTotalCount()); + assertEquals(1, keysResponseList.size()); + } + + @Test + public void testGetDeletedContainerKeysInfoPrevKeyParam() throws Exception { + Map omContainers = + reconContainerMetadataManager.getContainers(-1, 0); + putContainerInfos(2); + List scmContainers = reconContainerManager.getContainers(); + assertEquals(omContainers.size(), scmContainers.size()); + // Update container state of Container Id 1 to CLOSING to CLOSED + // and then to DELETED + updateContainerStateToDeleted(1); + + Set containerIDs = containerStateManager + .getContainerIDs(HddsProtos.LifeCycleState.DELETED); + Assert.assertEquals(1, containerIDs.size()); + + List 
deletedSCMContainers = + reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED); + assertEquals(1, deletedSCMContainers.size()); + + Response deletedContainerKeysInfo = + containerEndpoint.getDeletedContainerKeysInfo(2, + "/sampleVol/bucketOne/key_one"); + assertNotNull(deletedContainerKeysInfo); + List keysResponseList = + (List) deletedContainerKeysInfo.getEntity(); + assertEquals(1, keysResponseList.get(0).getKeys().size()); + assertEquals(3, keysResponseList.get(0).getTotalCount()); + assertEquals(1, keysResponseList.size()); + List keyList = keysResponseList.get(0).getKeys().stream() + .map(keyMetadata -> keyMetadata.getKey()).collect( + Collectors.toList()); + assertEquals(1, keyList.size()); + assertEquals("key_two", keyList.get(0)); + } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java index b7dc219e4a5..635f377b846 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java @@ -19,18 +19,11 @@ package org.apache.hadoop.ozone.recon.api; import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerStateManager; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import 
org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -38,9 +31,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.recon.ReconTestInjector; -import org.apache.hadoop.ozone.recon.api.types.ContainerMetadata; import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; -import org.apache.hadoop.ozone.recon.api.types.KeysResponse; import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.apache.hadoop.ozone.recon.scm.ReconContainerManager; @@ -51,7 +42,6 @@ import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; import org.apache.hadoop.ozone.recon.tasks.ContainerKeyMapperTask; -import org.junit.Assert; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -59,14 +49,10 @@ import org.junit.rules.TemporaryFolder; import javax.ws.rs.core.Response; -import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.Random; -import java.util.Set; -import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getBucketLayout; @@ -75,8 +61,6 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDataToOm; 
-import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -91,14 +75,11 @@ public class TestOmDBInsightEndPoint { private ReconContainerMetadataManager reconContainerMetadataManager; private OMMetadataManager omMetadataManager; private ReconContainerManager reconContainerManager; - private ContainerStateManager containerStateManager; private ReconPipelineManager reconPipelineManager; private ReconOMMetadataManager reconOMMetadataManager; private OMDBInsightEndpoint omdbInsightEndpoint; private Pipeline pipeline; - private PipelineID pipelineID; private Random random = new Random(); - private long keyCount = 5L; @Before public void setUp() throws Exception { @@ -129,12 +110,9 @@ public void setUp() throws Exception { reconTestInjector.getInstance(OzoneStorageContainerManager.class); reconContainerManager = (ReconContainerManager) ozoneStorageContainerManager.getContainerManager(); - containerStateManager = reconContainerManager - .getContainerStateManager(); reconPipelineManager = (ReconPipelineManager) ozoneStorageContainerManager.getPipelineManager(); pipeline = getRandomPipeline(); - pipelineID = pipeline.getId(); reconPipelineManager.addPipeline(pipeline); setUpOmData(); } @@ -411,147 +389,4 @@ private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, .setDataSize(random.nextLong()) .build(); } - - @Test - public void testGetDeletedContainerKeysInfo() throws Exception { - Map omContainers = - reconContainerMetadataManager.getContainers(-1, 0); - putContainerInfos(2); - List scmContainers = reconContainerManager.getContainers(); - assertEquals(3, omContainers.size()); - assertEquals(2, scmContainers.size()); - // Update container state of Container Id 1 to CLOSING to CLOSED - // and then to DELETED - reconContainerManager.updateContainerState(ContainerID.valueOf(1), - HddsProtos.LifeCycleEvent.FINALIZE); - 
reconContainerManager.updateContainerState(ContainerID.valueOf(1), - HddsProtos.LifeCycleEvent.CLOSE); - reconContainerManager - .updateContainerState(ContainerID.valueOf(1), - HddsProtos.LifeCycleEvent.DELETE); - Set containerIDs = containerStateManager - .getContainerIDs(HddsProtos.LifeCycleState.DELETING); - Assert.assertEquals(1, containerIDs.size()); - - reconContainerManager - .updateContainerState(ContainerID.valueOf(1), - HddsProtos.LifeCycleEvent.CLEANUP); - containerIDs = containerStateManager - .getContainerIDs(HddsProtos.LifeCycleState.DELETED); - Assert.assertEquals(1, containerIDs.size()); - - List deletedSCMContainers = - reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED); - assertEquals(1, deletedSCMContainers.size()); - - Response deletedContainerKeysInfo = - omdbInsightEndpoint.getDeletedContainerKeysInfo(-1, ""); - assertNotNull(deletedContainerKeysInfo); - List keysResponseList = - (List) deletedContainerKeysInfo.getEntity(); - assertEquals(2, keysResponseList.get(0).getKeys().size()); - assertEquals(2, keysResponseList.get(0).getTotalCount()); - assertEquals(1, keysResponseList.size()); - } - - @Test - public void testGetDeletedContainerKeysInfoLimitParam() throws Exception { - Map omContainers = - reconContainerMetadataManager.getContainers(-1, 0); - putContainerInfos(3); - List scmContainers = reconContainerManager.getContainers(); - assertEquals(omContainers.size(), scmContainers.size()); - // Update container state of Container Id 1 to CLOSING to CLOSED - // and then to DELETED - updateContainerStateToDeleted(1); - updateContainerStateToDeleted(2); - - Set containerIDs = containerStateManager - .getContainerIDs(HddsProtos.LifeCycleState.DELETED); - Assert.assertEquals(2, containerIDs.size()); - - List deletedSCMContainers = - reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED); - assertEquals(2, deletedSCMContainers.size()); - - Response deletedContainerKeysInfo = - 
omdbInsightEndpoint.getDeletedContainerKeysInfo(1, ""); - assertNotNull(deletedContainerKeysInfo); - List keysResponseList = - (List) deletedContainerKeysInfo.getEntity(); - assertEquals(1, keysResponseList.get(0).getKeys().size()); - assertEquals(2, keysResponseList.get(0).getTotalCount()); - assertEquals(2, keysResponseList.size()); - } - - private void updateContainerStateToDeleted(long containerId) - throws IOException, InvalidStateTransitionException, TimeoutException { - reconContainerManager.updateContainerState(ContainerID.valueOf(containerId), - HddsProtos.LifeCycleEvent.FINALIZE); - reconContainerManager.updateContainerState(ContainerID.valueOf(containerId), - HddsProtos.LifeCycleEvent.CLOSE); - reconContainerManager - .updateContainerState(ContainerID.valueOf(containerId), - HddsProtos.LifeCycleEvent.DELETE); - reconContainerManager - .updateContainerState(ContainerID.valueOf(containerId), - HddsProtos.LifeCycleEvent.CLEANUP); - } - - @Test - public void testGetDeletedContainerKeysInfoPrevKeyParam() throws Exception { - Map omContainers = - reconContainerMetadataManager.getContainers(-1, 0); - putContainerInfos(3); - List scmContainers = reconContainerManager.getContainers(); - assertEquals(omContainers.size(), scmContainers.size()); - // Update container state of Container Id 1 to CLOSING to CLOSED - // and then to DELETED - updateContainerStateToDeleted(1); - updateContainerStateToDeleted(2); - - Set containerIDs = containerStateManager - .getContainerIDs(HddsProtos.LifeCycleState.DELETED); - Assert.assertEquals(2, containerIDs.size()); - - List deletedSCMContainers = - reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED); - assertEquals(2, deletedSCMContainers.size()); - - Response deletedContainerKeysInfo = - omdbInsightEndpoint.getDeletedContainerKeysInfo(2, - "/sampleVol/bucketOne/key_one"); - assertNotNull(deletedContainerKeysInfo); - List keysResponseList = - (List) deletedContainerKeysInfo.getEntity(); - assertEquals(1, 
keysResponseList.get(0).getKeys().size()); - assertEquals(2, keysResponseList.get(0).getTotalCount()); - assertEquals(2, keysResponseList.size()); - List keyList = keysResponseList.get(0).getKeys().stream() - .map(keyMetadata -> keyMetadata.getKey()).collect( - Collectors.toList()); - assertEquals(1, keyList.size()); - assertEquals("key_two", keyList.get(0)); - } - - ContainerInfo newContainerInfo(long containerId) { - return new ContainerInfo.Builder() - .setContainerID(containerId) - .setReplicationConfig( - RatisReplicationConfig.getInstance( - HddsProtos.ReplicationFactor.THREE)) - .setState(HddsProtos.LifeCycleState.OPEN) - .setOwner("owner1") - .setNumberOfKeys(keyCount) - .setPipelineID(pipelineID) - .build(); - } - - void putContainerInfos(int num) throws IOException, TimeoutException { - for (int i = 1; i <= num; i++) { - final ContainerInfo info = newContainerInfo(i); - reconContainerManager.addNewContainer( - new ContainerWithPipeline(info, pipeline)); - } - } } From 72f6c40de45c815f235ed9757735d24f5cf8ab02 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Wed, 19 Apr 2023 15:58:48 +0530 Subject: [PATCH 14/21] HDDS-8214. Recon - OM DB Insights - Key Level Info. 
--- .../apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java index 635f377b846..19f08e71f01 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java @@ -74,7 +74,6 @@ public class TestOmDBInsightEndPoint { private OzoneStorageContainerManager ozoneStorageContainerManager; private ReconContainerMetadataManager reconContainerMetadataManager; private OMMetadataManager omMetadataManager; - private ReconContainerManager reconContainerManager; private ReconPipelineManager reconPipelineManager; private ReconOMMetadataManager reconOMMetadataManager; private OMDBInsightEndpoint omdbInsightEndpoint; @@ -108,8 +107,6 @@ public void setUp() throws Exception { OMDBInsightEndpoint.class); ozoneStorageContainerManager = reconTestInjector.getInstance(OzoneStorageContainerManager.class); - reconContainerManager = (ReconContainerManager) - ozoneStorageContainerManager.getContainerManager(); reconPipelineManager = (ReconPipelineManager) ozoneStorageContainerManager.getPipelineManager(); pipeline = getRandomPipeline(); From 18abcb909be4d07e2d4a8b873fc5e6133dfe2c7d Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Wed, 19 Apr 2023 16:00:00 +0530 Subject: [PATCH 15/21] HDDS-8214. Recon - OM DB Insights - Key Level Info. 
--- .../apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java index 19f08e71f01..91f11eb53ef 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java @@ -34,7 +34,6 @@ import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.scm.ReconContainerManager; import org.apache.hadoop.ozone.recon.scm.ReconPipelineManager; import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; From 410b8466ddd6dc084d8fb97c319e0bed741e9bc3 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Wed, 19 Apr 2023 18:11:39 +0530 Subject: [PATCH 16/21] HDDS-8214. Recon - OM DB Insights - Key Level Info. 
--- .../apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java index 91f11eb53ef..9fd2b09375b 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java @@ -290,9 +290,6 @@ public void testGetDeletedKeyInfoLimitParam() throws Exception { reconOMMetadataManager.getDeletedTable() .put("/sampleVol/bucketOne/key_one", repeatedOmKeyInfo1); - RepeatedOmKeyInfo repeatedOmKeyInfoCopy1 = - reconOMMetadataManager.getDeletedTable() - .get("/sampleVol/bucketOne/key_one"); Assertions.assertEquals("key_one", repeatedOmKeyInfo1.getOmKeyInfoList().get(0).getKeyName()); From 25171b526215a1c9a3f9dbb3ecb6e0d007e372bd Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Thu, 11 May 2023 14:43:26 +0530 Subject: [PATCH 17/21] HDDS-8214. Recon - OM DB Insights - Key Level Info. 
--- .../ozone/recon/api/OMDBInsightEndpoint.java | 41 +++++++++++-------- .../api/types/KeyInsightInfoResponse.java | 13 ++++++ 2 files changed, 38 insertions(+), 16 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 230fe1f5193..5176c8968a7 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -135,13 +135,14 @@ public Response getOpenKeyInfo( @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) int limit, @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) - String prevKeyPrefix) { + String prevKey) { KeyInsightInfoResponse openKeyInsightInfo = new KeyInsightInfoResponse(); List nonFSOKeyInfoList = openKeyInsightInfo.getNonFSOKeyInfoList(); boolean skipPrevKeyDone = false; boolean isLegacyBucketLayout = true; boolean recordsFetchedLimitReached = false; + String lastKey = ""; List fsoKeyInfoList = openKeyInsightInfo.getFsoKeyInfoList(); for (BucketLayout layout : Arrays.asList(BucketLayout.LEGACY, BucketLayout.FILE_SYSTEM_OPTIMIZED)) { @@ -152,8 +153,8 @@ public Response getOpenKeyInfo( TableIterator> keyIter = openKeyTable.iterator()) { boolean skipPrevKey = false; - String seekKey = prevKeyPrefix; - if (!skipPrevKeyDone && StringUtils.isNotBlank(prevKeyPrefix)) { + String seekKey = prevKey; + if (!skipPrevKeyDone && StringUtils.isNotBlank(prevKey)) { skipPrevKey = true; Table.KeyValue seekKeyValue = keyIter.seek(seekKey); @@ -161,17 +162,18 @@ public Response getOpenKeyInfo( // if not, then return empty result // In case of an empty prevKeyPrefix, all the keys are returned if (seekKeyValue == null || - (StringUtils.isNotBlank(prevKeyPrefix) && - !seekKeyValue.getKey().equals(prevKeyPrefix))) { - return 
Response.ok(openKeyInsightInfo).build(); + (StringUtils.isNotBlank(prevKey) && + !seekKeyValue.getKey().equals(prevKey))) { + continue; } } while (keyIter.hasNext()) { Table.KeyValue kv = keyIter.next(); String key = kv.getKey(); + lastKey = key; OmKeyInfo omKeyInfo = kv.getValue(); // skip the prev key if prev key is present - if (skipPrevKey && key.equals(prevKeyPrefix)) { + if (skipPrevKey && key.equals(prevKey)) { skipPrevKeyDone = true; continue; } @@ -209,6 +211,7 @@ public Response getOpenKeyInfo( break; } } + openKeyInsightInfo.setLastKey(lastKey); return Response.ok(openKeyInsightInfo).build(); } @@ -263,12 +266,12 @@ public Response getDeletedKeyInfo( @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) int limit, @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) - String prevKeyPrefix) { + String prevKey) { KeyInsightInfoResponse deletedKeyAndDirInsightInfo = new KeyInsightInfoResponse(); - getPendingForDeletionKeyInfo(limit, prevKeyPrefix, + getPendingForDeletionKeyInfo(limit, prevKey, deletedKeyAndDirInsightInfo); - getPendingForDeletionDirInfo(limit, prevKeyPrefix, + getPendingForDeletionDirInfo(limit, prevKey, deletedKeyAndDirInsightInfo); return Response.ok(deletedKeyAndDirInsightInfo).build(); } @@ -287,6 +290,7 @@ private void getPendingForDeletionDirInfo( keyIter = deletedDirTable.iterator()) { boolean skipPrevKey = false; String seekKey = prevKeyPrefix; + String lastKey = ""; if (StringUtils.isNotBlank(prevKeyPrefix)) { skipPrevKey = true; Table.KeyValue seekKeyValue = @@ -303,6 +307,7 @@ private void getPendingForDeletionDirInfo( while (keyIter.hasNext()) { Table.KeyValue kv = keyIter.next(); String key = kv.getKey(); + lastKey = key; OmKeyInfo omKeyInfo = kv.getValue(); // skip the prev key if prev key is present if (skipPrevKey && key.equals(prevKeyPrefix)) { @@ -326,6 +331,7 @@ private void getPendingForDeletionDirInfo( break; } } + pendingForDeletionKeyInfo.setLastKey(lastKey); } catch (IOException ex) { throw 
new WebApplicationException(ex, Response.Status.INTERNAL_SERVER_ERROR); @@ -339,7 +345,7 @@ private void getPendingForDeletionDirInfo( private void getPendingForDeletionKeyInfo( int limit, - String prevKeyPrefix, + String prevKey, KeyInsightInfoResponse deletedKeyAndDirInsightInfo) { List repeatedOmKeyInfoList = deletedKeyAndDirInsightInfo.getRepeatedOmKeyInfoList(); @@ -350,8 +356,9 @@ private void getPendingForDeletionKeyInfo( RepeatedOmKeyInfo>> keyIter = deletedTable.iterator()) { boolean skipPrevKey = false; - String seekKey = prevKeyPrefix; - if (StringUtils.isNotBlank(prevKeyPrefix)) { + String seekKey = prevKey; + String lastKey = ""; + if (StringUtils.isNotBlank(prevKey)) { skipPrevKey = true; Table.KeyValue seekKeyValue = keyIter.seek(seekKey); @@ -359,17 +366,18 @@ private void getPendingForDeletionKeyInfo( // if not, then return empty result // In case of an empty prevKeyPrefix, all the keys are returned if (seekKeyValue == null || - (StringUtils.isNotBlank(prevKeyPrefix) && - !seekKeyValue.getKey().equals(prevKeyPrefix))) { + (StringUtils.isNotBlank(prevKey) && + !seekKeyValue.getKey().equals(prevKey))) { return; } } while (keyIter.hasNext()) { Table.KeyValue kv = keyIter.next(); String key = kv.getKey(); + lastKey = key; RepeatedOmKeyInfo repeatedOmKeyInfo = kv.getValue(); // skip the prev key if prev key is present - if (skipPrevKey && key.equals(prevKeyPrefix)) { + if (skipPrevKey && key.equals(prevKey)) { continue; } updateReplicatedAndUnReplicatedTotal(deletedKeyAndDirInsightInfo, @@ -379,6 +387,7 @@ private void getPendingForDeletionKeyInfo( break; } } + deletedKeyAndDirInsightInfo.setLastKey(lastKey); } catch (IOException ex) { throw new WebApplicationException(ex, Response.Status.INTERNAL_SERVER_ERROR); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResponse.java index 
d4ffb6a1f5e..18da6b438e6 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResponse.java @@ -30,6 +30,10 @@ */ public class KeyInsightInfoResponse { + /** last key sent. */ + @JsonProperty("lastKey") + private String lastKey; + /** Amount of data mapped to all keys and files in * a cluster across all DNs. */ @JsonProperty("replicatedTotal") @@ -64,6 +68,7 @@ public class KeyInsightInfoResponse { public KeyInsightInfoResponse() { responseCode = ResponseStatus.OK; + lastKey = ""; replicatedTotal = 0L; unreplicatedTotal = 0L; nonFSOKeyInfoList = new ArrayList<>(); @@ -72,6 +77,14 @@ public KeyInsightInfoResponse() { deletedDirInfoList = new ArrayList<>(); } + public String getLastKey() { + return lastKey; + } + + public void setLastKey(String lastKey) { + this.lastKey = lastKey; + } + public long getReplicatedTotal() { return replicatedTotal; } From 90c5a156bccd7630cd9e60559211f3f33e366fd3 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Mon, 15 May 2023 13:09:07 +0530 Subject: [PATCH 18/21] HDDS-8214. Recon - OM DB Insights - Key Level Info. 
--- .../apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 1b68534a9fd..a507778500e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -282,6 +282,8 @@ private void getPendingForDeletionDirInfo( List deletedDirInfoList = pendingForDeletionKeyInfo.getDeletedDirInfoList(); + List repeatedOmKeyInfoList = + pendingForDeletionKeyInfo.getRepeatedOmKeyInfoList(); Table deletedDirTable = omMetadataManager.getDeletedDirTable(); @@ -327,7 +329,7 @@ private void getPendingForDeletionDirInfo( pendingForDeletionKeyInfo.getReplicatedTotal() + keyEntityInfo.getReplicatedSize()); deletedDirInfoList.add(keyEntityInfo); - if (deletedDirInfoList.size() == limit) { + if (deletedDirInfoList.size() + repeatedOmKeyInfoList.size() == limit) { break; } } From 6135735dace0ccd1a6b1ee74c65edebc318391d5 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Mon, 15 May 2023 14:28:23 +0530 Subject: [PATCH 19/21] HDDS-8214. Recon - OM DB Insights - Key Level Info. 
--- .../ozone/recon/api/ContainerEndpoint.java | 95 ---------------- .../recon/api/TestContainerEndpoint.java | 106 ------------------ 2 files changed, 201 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java index 289a143bbe5..6ad01af5ce6 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java @@ -566,99 +566,4 @@ public Response getContainerMisMatchInsights() { } return Response.ok(containerDiscrepancyInfoList).build(); } - - /** This API retrieves set of keys/files/dirs which are mapped to - * containers in DELETED state in SCM. - * limit - limits the number of keys per container. - * API Response: - * [ - * { - * "totalCount": 3, - * "keys": [ - * { - * "volume": "sampleVol", - * "bucket": "bucketOne", - * "key": "key_one", - * "dataSize": 0, - * "versions": [ - * 0 - * ], - * "blockIds": { - * "0": [ - * { - * "containerID": 1, - * "localID": 101 - * } - * ] - * } - * }, - * { - * "volume": "sampleVol", - * "bucket": "bucketOne", - * "key": "key_two", - * "dataSize": 0, - * "versions": [ - * 0, - * 1 - * ], - * "blockIds": { - * "0": [ - * { - * "containerID": 1, - * "localID": 103 - * } - * ], - * "1": [ - * { - * "containerID": 1, - * "localID": 104 - * } - * ] - * } - * } - * ] - * } - * ] - */ - @GET - @Path("/mismatch/keys") - public Response getDeletedContainerKeysInfo( - @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) - int limit, - @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) - String prevKeyPrefix) { - List keysResponseList = new ArrayList<>(); - try { - Map omContainers = - reconContainerMetadataManager.getContainers(-1, 0); - List deletedStateSCMContainers = - 
containerManager.getContainers(HddsProtos.LifeCycleState.DELETED); - List deletedStateSCMContainerIds = - deletedStateSCMContainers.stream() - .map(containerInfo -> containerInfo.getContainerID()).collect( - Collectors.toList()); - - List omContainerIdsMappedToDeletedSCMContainers = - omContainers.entrySet().stream() - .filter( - map -> deletedStateSCMContainerIds.contains(map.getKey())) - .map(map -> map.getKey()).collect(Collectors.toList()); - - omContainerIdsMappedToDeletedSCMContainers.forEach(containerId -> { - Response keysForContainer = getKeysForContainer(containerId, limit, - prevKeyPrefix); - KeysResponse keysResponse = (KeysResponse) keysForContainer.getEntity(); - keysResponseList.add(keysResponse); - }); - } catch (IOException ex) { - throw new WebApplicationException(ex, - Response.Status.INTERNAL_SERVER_ERROR); - } catch (IllegalArgumentException e) { - throw new WebApplicationException(e, Response.Status.BAD_REQUEST); - } catch (Exception ex) { - throw new WebApplicationException(ex, - Response.Status.INTERNAL_SERVER_ERROR); - } - return Response.ok(keysResponseList).build(); - } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java index af0673690c2..5f55570ff2d 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java @@ -1236,110 +1236,4 @@ public void testGetContainerInsightsNonOMContainers() assertEquals(1, containerDiscrepancyInfoList.size()); assertEquals("SCM", containerDiscrepancyInfo.getExistsAt()); } - - @Test - public void testGetDeletedContainerKeysInfo() throws Exception { - Map omContainers = - reconContainerMetadataManager.getContainers(-1, 0); - putContainerInfos(2); - List scmContainers = reconContainerManager.getContainers(); - 
assertEquals(2, omContainers.size()); - assertEquals(2, scmContainers.size()); - // Update container state of Container Id 1 to CLOSING to CLOSED - // and then to DELETED - reconContainerManager.updateContainerState(ContainerID.valueOf(1), - HddsProtos.LifeCycleEvent.FINALIZE); - reconContainerManager.updateContainerState(ContainerID.valueOf(1), - HddsProtos.LifeCycleEvent.CLOSE); - reconContainerManager - .updateContainerState(ContainerID.valueOf(1), - HddsProtos.LifeCycleEvent.DELETE); - Set containerIDs = containerStateManager - .getContainerIDs(HddsProtos.LifeCycleState.DELETING); - Assert.assertEquals(1, containerIDs.size()); - - reconContainerManager - .updateContainerState(ContainerID.valueOf(1), - HddsProtos.LifeCycleEvent.CLEANUP); - containerIDs = containerStateManager - .getContainerIDs(HddsProtos.LifeCycleState.DELETED); - Assert.assertEquals(1, containerIDs.size()); - - List deletedSCMContainers = - reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED); - assertEquals(1, deletedSCMContainers.size()); - - Response deletedContainerKeysInfo = - containerEndpoint.getDeletedContainerKeysInfo(-1, ""); - assertNotNull(deletedContainerKeysInfo); - List keysResponseList = - (List) deletedContainerKeysInfo.getEntity(); - assertEquals(2, keysResponseList.get(0).getKeys().size()); - assertEquals(3, keysResponseList.get(0).getTotalCount()); - assertEquals(1, keysResponseList.size()); - } - - @Test - public void testGetDeletedContainerKeysInfoLimitParam() throws Exception { - Map omContainers = - reconContainerMetadataManager.getContainers(-1, 0); - putContainerInfos(2); - List scmContainers = reconContainerManager.getContainers(); - assertEquals(omContainers.size(), scmContainers.size()); - // Update container state of Container Id 1 to CLOSING to CLOSED - // and then to DELETED - updateContainerStateToDeleted(1); - - Set containerIDs = containerStateManager - .getContainerIDs(HddsProtos.LifeCycleState.DELETED); - Assert.assertEquals(1, 
containerIDs.size()); - - List deletedSCMContainers = - reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED); - assertEquals(1, deletedSCMContainers.size()); - - Response deletedContainerKeysInfo = - containerEndpoint.getDeletedContainerKeysInfo(1, ""); - assertNotNull(deletedContainerKeysInfo); - List keysResponseList = - (List) deletedContainerKeysInfo.getEntity(); - assertEquals(1, keysResponseList.get(0).getKeys().size()); - assertEquals(3, keysResponseList.get(0).getTotalCount()); - assertEquals(1, keysResponseList.size()); - } - - @Test - public void testGetDeletedContainerKeysInfoPrevKeyParam() throws Exception { - Map omContainers = - reconContainerMetadataManager.getContainers(-1, 0); - putContainerInfos(2); - List scmContainers = reconContainerManager.getContainers(); - assertEquals(omContainers.size(), scmContainers.size()); - // Update container state of Container Id 1 to CLOSING to CLOSED - // and then to DELETED - updateContainerStateToDeleted(1); - - Set containerIDs = containerStateManager - .getContainerIDs(HddsProtos.LifeCycleState.DELETED); - Assert.assertEquals(1, containerIDs.size()); - - List deletedSCMContainers = - reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED); - assertEquals(1, deletedSCMContainers.size()); - - Response deletedContainerKeysInfo = - containerEndpoint.getDeletedContainerKeysInfo(2, - "/sampleVol/bucketOne/key_one"); - assertNotNull(deletedContainerKeysInfo); - List keysResponseList = - (List) deletedContainerKeysInfo.getEntity(); - assertEquals(1, keysResponseList.get(0).getKeys().size()); - assertEquals(3, keysResponseList.get(0).getTotalCount()); - assertEquals(1, keysResponseList.size()); - List keyList = keysResponseList.get(0).getKeys().stream() - .map(keyMetadata -> keyMetadata.getKey()).collect( - Collectors.toList()); - assertEquals(1, keyList.size()); - assertEquals("key_two", keyList.get(0)); - } } From 5ae1ee46858d6da07983b01f76c34a2e5803b28d Mon Sep 17 00:00:00 2001 From: 
deveshsingh Date: Mon, 15 May 2023 19:34:18 +0530 Subject: [PATCH 20/21] HDDS-8214. Recon - OM DB Insights - Key Level Info. --- .../ozone/recon/api/ContainerEndpoint.java | 72 +++++++ .../ozone/recon/api/OMDBInsightEndpoint.java | 189 ++++++++++++------ .../api/types/ContainerDiscrepancyInfo.java | 2 + .../recon/api/TestContainerEndpoint.java | 107 ++++++++++ .../recon/api/TestOmDBInsightEndPoint.java | 143 +++++++++++-- 5 files changed, 435 insertions(+), 78 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java index 6ad01af5ce6..7371a235ae1 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java @@ -566,4 +566,76 @@ public Response getContainerMisMatchInsights() { } return Response.ok(containerDiscrepancyInfoList).build(); } + + /** This API retrieves set of deleted containers in SCM which are present + * in OM to find out list of keys mapped to such DELETED state containers. + * + * limit - limits the number of such SCM DELETED containers present in OM. + * prevKey - Skip containers till it seeks correctly to the previous + * containerId. 
+ * Sample API Response: + * [ + * { + * "containerId": 2, + * "numberOfKeys": 2, + * "pipelines": [] + * } + * ] + */ + @GET + @Path("/mismatch/deleted") + public Response getOmContainersDeletedInSCM( + @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) + int limit, + @DefaultValue(PREV_CONTAINER_ID_DEFAULT_VALUE) + @QueryParam(RECON_QUERY_PREVKEY) long prevKey) { + if (prevKey < 0) { + // Send back an empty response + return Response.status(Response.Status.NOT_ACCEPTABLE).build(); + } + List containerDiscrepancyInfoList = + new ArrayList<>(); + try { + Map omContainers = + reconContainerMetadataManager.getContainers(limit, prevKey); + + List deletedStateSCMContainerIds = + containerManager.getContainers().stream() + .filter(containerInfo -> (containerInfo.getState() == + HddsProtos.LifeCycleState.DELETED)) + .map(containerInfo -> containerInfo.getContainerID()).collect( + Collectors.toList()); + + List> + omContainersDeletedInSCM = + omContainers.entrySet().stream().filter(containerMetadataEntry -> + (deletedStateSCMContainerIds.contains( + containerMetadataEntry.getKey()))) + .collect( + Collectors.toList()); + + omContainersDeletedInSCM.forEach( + containerMetadataEntry -> { + ContainerDiscrepancyInfo containerDiscrepancyInfo = + new ContainerDiscrepancyInfo(); + containerDiscrepancyInfo.setContainerID( + containerMetadataEntry.getKey()); + containerDiscrepancyInfo.setNumberOfKeys( + containerMetadataEntry.getValue().getNumberOfKeys()); + containerDiscrepancyInfo.setPipelines( + containerMetadataEntry.getValue() + .getPipelines()); + containerDiscrepancyInfoList.add(containerDiscrepancyInfo); + }); + } catch (IOException ex) { + throw new WebApplicationException(ex, + Response.Status.INTERNAL_SERVER_ERROR); + } catch (IllegalArgumentException e) { + throw new WebApplicationException(e, Response.Status.BAD_REQUEST); + } catch (Exception ex) { + throw new WebApplicationException(ex, + Response.Status.INTERNAL_SERVER_ERROR); + } + return 
Response.ok(containerDiscrepancyInfoList).build(); + } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index a507778500e..6ff38bb1a82 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -215,7 +215,67 @@ public Response getOpenKeyInfo( return Response.ok(openKeyInsightInfo).build(); } - /** This method retrieves set of keys/files/dirs pending for deletion. + private void getPendingForDeletionKeyInfo( + int limit, + String prevKey, + KeyInsightInfoResponse deletedKeyAndDirInsightInfo) { + List repeatedOmKeyInfoList = + deletedKeyAndDirInsightInfo.getRepeatedOmKeyInfoList(); + Table deletedTable = + omMetadataManager.getDeletedTable(); + try ( + TableIterator> + keyIter = deletedTable.iterator()) { + boolean skipPrevKey = false; + String seekKey = prevKey; + String lastKey = ""; + if (StringUtils.isNotBlank(prevKey)) { + skipPrevKey = true; + Table.KeyValue seekKeyValue = + keyIter.seek(seekKey); + // check if RocksDB was able to seek correctly to the given key prefix + // if not, then return empty result + // In case of an empty prevKeyPrefix, all the keys are returned + if (seekKeyValue == null || + (StringUtils.isNotBlank(prevKey) && + !seekKeyValue.getKey().equals(prevKey))) { + return; + } + } + while (keyIter.hasNext()) { + Table.KeyValue kv = keyIter.next(); + String key = kv.getKey(); + lastKey = key; + RepeatedOmKeyInfo repeatedOmKeyInfo = kv.getValue(); + // skip the prev key if prev key is present + if (skipPrevKey && key.equals(prevKey)) { + continue; + } + updateReplicatedAndUnReplicatedTotal(deletedKeyAndDirInsightInfo, + repeatedOmKeyInfo); + repeatedOmKeyInfoList.add(repeatedOmKeyInfo); + if ((repeatedOmKeyInfoList.size()) == limit) { + break; + } + 
} + deletedKeyAndDirInsightInfo.setLastKey(lastKey); + } catch (IOException ex) { + throw new WebApplicationException(ex, + Response.Status.INTERNAL_SERVER_ERROR); + } catch (IllegalArgumentException e) { + throw new WebApplicationException(e, Response.Status.BAD_REQUEST); + } catch (Exception ex) { + throw new WebApplicationException(ex, + Response.Status.INTERNAL_SERVER_ERROR); + } + } + + /** This method retrieves set of keys/files pending for deletion. + * + * limit - limits the number of key/files returned. + * prevKey - E.g. /vol1/bucket1/key1, this will skip keys till it + * seeks correctly to the given prevKey. * Sample API Response: * { * "replicatedTotal": -1530804718628866300, @@ -261,19 +321,17 @@ public Response getOpenKeyInfo( * } */ @GET - @Path("/deletePending") + @Path("/deletePending/keys") public Response getDeletedKeyInfo( @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) int limit, @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) String prevKey) { KeyInsightInfoResponse - deletedKeyAndDirInsightInfo = new KeyInsightInfoResponse(); + deletedKeyInsightInfo = new KeyInsightInfoResponse(); getPendingForDeletionKeyInfo(limit, prevKey, - deletedKeyAndDirInsightInfo); - getPendingForDeletionDirInfo(limit, prevKey, - deletedKeyAndDirInsightInfo); - return Response.ok(deletedKeyAndDirInsightInfo).build(); + deletedKeyInsightInfo); + return Response.ok(deletedKeyInsightInfo).build(); } private void getPendingForDeletionDirInfo( @@ -282,8 +340,6 @@ private void getPendingForDeletionDirInfo( List deletedDirInfoList = pendingForDeletionKeyInfo.getDeletedDirInfoList(); - List repeatedOmKeyInfoList = - pendingForDeletionKeyInfo.getRepeatedOmKeyInfoList(); Table deletedDirTable = omMetadataManager.getDeletedDirTable(); @@ -329,7 +385,7 @@ private void getPendingForDeletionDirInfo( pendingForDeletionKeyInfo.getReplicatedTotal() + keyEntityInfo.getReplicatedSize()); deletedDirInfoList.add(keyEntityInfo); - if 
(deletedDirInfoList.size() + repeatedOmKeyInfoList.size() == limit) { + if (deletedDirInfoList.size() == limit) { break; } } @@ -345,60 +401,67 @@ private void getPendingForDeletionDirInfo( } } - private void getPendingForDeletionKeyInfo( + /** This method retrieves set of directories pending for deletion. + * + * limit - limits the number of directories returned. + * prevKey - E.g. /vol1/bucket1/bucket1/dir1, this will skip dirs till it + * seeks correctly to the given prevKey. + * Sample API Response: + * { + * "replicatedTotal": -1530804718628866300, + * "unreplicatedTotal": -1530804718628866300, + * "deletedkeyinfo": [ + * { + * "omKeyInfoList": [ + * { + * "metadata": {}, + * "objectID": 0, + * "updateID": 0, + * "parentObjectID": 0, + * "volumeName": "sampleVol", + * "bucketName": "bucketOne", + * "keyName": "key_one", + * "dataSize": -1530804718628866300, + * "keyLocationVersions": [], + * "creationTime": 0, + * "modificationTime": 0, + * "replicationConfig": { + * "replicationFactor": "ONE", + * "requiredNodes": 1, + * "replicationType": "STANDALONE" + * }, + * "fileChecksum": null, + * "fileName": "key_one", + * "acls": [], + * "path": "0/key_one", + * "file": false, + * "latestVersionLocations": null, + * "replicatedSize": -1530804718628866300, + * "fileEncryptionInfo": null, + * "objectInfo": "OMKeyInfo{volume='sampleVol', bucket='bucketOne', + * key='key_one', dataSize='-1530804718628866186', creationTime='0', + * objectID='0', parentID='0', replication='STANDALONE/ONE', + * fileChecksum='null}", + * "updateIDset": false + * } + * ] + * } + * ], + * "status": "OK" + * } + */ + @GET + @Path("/deletePending/dirs") + public Response getDeletedDirInfo( + @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) int limit, - String prevKey, - KeyInsightInfoResponse deletedKeyAndDirInsightInfo) { - List repeatedOmKeyInfoList = - deletedKeyAndDirInsightInfo.getRepeatedOmKeyInfoList(); - Table deletedTable = - omMetadataManager.getDeletedTable(); - try 
( - TableIterator> - keyIter = deletedTable.iterator()) { - boolean skipPrevKey = false; - String seekKey = prevKey; - String lastKey = ""; - if (StringUtils.isNotBlank(prevKey)) { - skipPrevKey = true; - Table.KeyValue seekKeyValue = - keyIter.seek(seekKey); - // check if RocksDB was able to seek correctly to the given key prefix - // if not, then return empty result - // In case of an empty prevKeyPrefix, all the keys are returned - if (seekKeyValue == null || - (StringUtils.isNotBlank(prevKey) && - !seekKeyValue.getKey().equals(prevKey))) { - return; - } - } - while (keyIter.hasNext()) { - Table.KeyValue kv = keyIter.next(); - String key = kv.getKey(); - lastKey = key; - RepeatedOmKeyInfo repeatedOmKeyInfo = kv.getValue(); - // skip the prev key if prev key is present - if (skipPrevKey && key.equals(prevKey)) { - continue; - } - updateReplicatedAndUnReplicatedTotal(deletedKeyAndDirInsightInfo, - repeatedOmKeyInfo); - repeatedOmKeyInfoList.add(repeatedOmKeyInfo); - if ((repeatedOmKeyInfoList.size()) == limit) { - break; - } - } - deletedKeyAndDirInsightInfo.setLastKey(lastKey); - } catch (IOException ex) { - throw new WebApplicationException(ex, - Response.Status.INTERNAL_SERVER_ERROR); - } catch (IllegalArgumentException e) { - throw new WebApplicationException(e, Response.Status.BAD_REQUEST); - } catch (Exception ex) { - throw new WebApplicationException(ex, - Response.Status.INTERNAL_SERVER_ERROR); - } + @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY) + String prevKey) { + KeyInsightInfoResponse + deletedDirInsightInfo = new KeyInsightInfoResponse(); + getPendingForDeletionDirInfo(limit, prevKey, + deletedDirInsightInfo); + return Response.ok(deletedDirInsightInfo).build(); } private void updateReplicatedAndUnReplicatedTotal( diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerDiscrepancyInfo.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerDiscrepancyInfo.java 
index 33702bf3a05..91bfb8630d9 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerDiscrepancyInfo.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerDiscrepancyInfo.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.recon.api.types; +import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonProperty; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -37,6 +38,7 @@ public class ContainerDiscrepancyInfo { private List pipelines; @JsonProperty("existsAt") + @JsonInclude(JsonInclude.Include.NON_EMPTY) private String existsAt; public ContainerDiscrepancyInfo() { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java index 5f55570ff2d..cd6e31ec1ba 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java @@ -1236,4 +1236,111 @@ public void testGetContainerInsightsNonOMContainers() assertEquals(1, containerDiscrepancyInfoList.size()); assertEquals("SCM", containerDiscrepancyInfo.getExistsAt()); } + + @Test + public void testGetOmContainersDeletedInSCM() throws Exception { + Map omContainers = + reconContainerMetadataManager.getContainers(-1, 0); + putContainerInfos(2); + List scmContainers = reconContainerManager.getContainers(); + assertEquals(2, omContainers.size()); + assertEquals(2, scmContainers.size()); + // Update container state of Container Id 1 to CLOSING to CLOSED + // and then to DELETED + reconContainerManager.updateContainerState(ContainerID.valueOf(1), + HddsProtos.LifeCycleEvent.FINALIZE); + reconContainerManager.updateContainerState(ContainerID.valueOf(1), + HddsProtos.LifeCycleEvent.CLOSE); + reconContainerManager + 
.updateContainerState(ContainerID.valueOf(1), + HddsProtos.LifeCycleEvent.DELETE); + Set containerIDs = containerStateManager + .getContainerIDs(HddsProtos.LifeCycleState.DELETING); + Assert.assertEquals(1, containerIDs.size()); + + reconContainerManager + .updateContainerState(ContainerID.valueOf(1), + HddsProtos.LifeCycleEvent.CLEANUP); + containerIDs = containerStateManager + .getContainerIDs(HddsProtos.LifeCycleState.DELETED); + Assert.assertEquals(1, containerIDs.size()); + + List deletedSCMContainers = + reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED); + assertEquals(1, deletedSCMContainers.size()); + + Response omContainersDeletedInSCMResponse = + containerEndpoint.getOmContainersDeletedInSCM(-1, 0); + assertNotNull(omContainersDeletedInSCMResponse); + List containerDiscrepancyInfoList = + (List) + omContainersDeletedInSCMResponse.getEntity(); + assertEquals(3, containerDiscrepancyInfoList.get(0) + .getNumberOfKeys()); + assertEquals(1, containerDiscrepancyInfoList.size()); + } + + @Test + public void testGetOmContainersDeletedInSCMLimitParam() throws Exception { + Map omContainers = + reconContainerMetadataManager.getContainers(-1, 0); + putContainerInfos(2); + List scmContainers = reconContainerManager.getContainers(); + assertEquals(omContainers.size(), scmContainers.size()); + // Update container state of Container Id 1 to CLOSING to CLOSED + // and then to DELETED + updateContainerStateToDeleted(1); + + Set containerIDs = containerStateManager + .getContainerIDs(HddsProtos.LifeCycleState.DELETED); + Assert.assertEquals(1, containerIDs.size()); + + List deletedSCMContainers = + reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED); + assertEquals(1, deletedSCMContainers.size()); + + Response omContainersDeletedInSCMResponse = + containerEndpoint.getOmContainersDeletedInSCM(1, 0); + assertNotNull(omContainersDeletedInSCMResponse); + List containerDiscrepancyInfoList = + (List) + 
omContainersDeletedInSCMResponse.getEntity(); + assertEquals(3, containerDiscrepancyInfoList.get(0) + .getNumberOfKeys()); + assertEquals(1, containerDiscrepancyInfoList.size()); + } + + @Test + public void testGetOmContainersDeletedInSCMPrevContainerParam() + throws Exception { + Map omContainers = + reconContainerMetadataManager.getContainers(-1, 0); + putContainerInfos(2); + List scmContainers = reconContainerManager.getContainers(); + assertEquals(omContainers.size(), scmContainers.size()); + // Update container state of Container Id 1 to CLOSING to CLOSED + // and then to DELETED + updateContainerStateToDeleted(1); + updateContainerStateToDeleted(2); + + Set containerIDs = containerStateManager + .getContainerIDs(HddsProtos.LifeCycleState.DELETED); + Assert.assertEquals(2, containerIDs.size()); + + List deletedSCMContainers = + reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED); + assertEquals(2, deletedSCMContainers.size()); + + Response omContainersDeletedInSCMResponse = + containerEndpoint.getOmContainersDeletedInSCM(2, + 1); + assertNotNull(omContainersDeletedInSCMResponse); + List containerDiscrepancyInfoList = + (List) + omContainersDeletedInSCMResponse.getEntity(); + assertEquals(2, containerDiscrepancyInfoList.get(0) + .getNumberOfKeys()); + assertEquals(1, containerDiscrepancyInfoList.size()); + assertEquals(2, containerDiscrepancyInfoList.get(0).getContainerID()); + } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java index 9fd2b09375b..23b94e4ccfd 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java @@ -188,7 +188,8 @@ private void setUpOmData() throws Exception { @Test public void testGetOpenKeyInfo() throws Exception 
{ - OmKeyInfo omKeyInfo = getOmKeyInfo("sampleVol", "bucketOne", "key_one"); + OmKeyInfo omKeyInfo = + getOmKeyInfo("sampleVol", "bucketOne", "key_one", true); reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) .put("/sampleVol/bucketOne/key_one", omKeyInfo); @@ -206,9 +207,12 @@ public void testGetOpenKeyInfo() throws Exception { @Test public void testGetOpenKeyInfoLimitParam() throws Exception { - OmKeyInfo omKeyInfo1 = getOmKeyInfo("sampleVol", "bucketOne", "key_one"); - OmKeyInfo omKeyInfo2 = getOmKeyInfo("sampleVol", "bucketOne", "key_two"); - OmKeyInfo omKeyInfo3 = getOmKeyInfo("sampleVol", "bucketOne", "key_three"); + OmKeyInfo omKeyInfo1 = + getOmKeyInfo("sampleVol", "bucketOne", "key_one", true); + OmKeyInfo omKeyInfo2 = + getOmKeyInfo("sampleVol", "bucketOne", "key_two", true); + OmKeyInfo omKeyInfo3 = + getOmKeyInfo("sampleVol", "bucketOne", "key_three", true); reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) .put("/sampleVol/bucketOne/key_one", omKeyInfo1); @@ -243,9 +247,12 @@ public void testGetOpenKeyInfoLimitParam() throws Exception { @Test public void testGetOpenKeyInfoPrevKeyParam() throws Exception { - OmKeyInfo omKeyInfo1 = getOmKeyInfo("sampleVol", "bucketOne", "key_one"); - OmKeyInfo omKeyInfo2 = getOmKeyInfo("sampleVol", "bucketOne", "key_two"); - OmKeyInfo omKeyInfo3 = getOmKeyInfo("sampleVol", "bucketOne", "key_three"); + OmKeyInfo omKeyInfo1 = + getOmKeyInfo("sampleVol", "bucketOne", "key_one", true); + OmKeyInfo omKeyInfo2 = + getOmKeyInfo("sampleVol", "bucketOne", "key_two", true); + OmKeyInfo omKeyInfo3 = + getOmKeyInfo("sampleVol", "bucketOne", "key_three", true); reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) .put("/sampleVol/bucketOne/key_one", omKeyInfo1); @@ -271,9 +278,12 @@ public void testGetOpenKeyInfoPrevKeyParam() throws Exception { @Test public void testGetDeletedKeyInfoLimitParam() throws Exception { - OmKeyInfo omKeyInfo1 = getOmKeyInfo("sampleVol", "bucketOne", "key_one"); - OmKeyInfo omKeyInfo2 
= getOmKeyInfo("sampleVol", "bucketOne", "key_two"); - OmKeyInfo omKeyInfo3 = getOmKeyInfo("sampleVol", "bucketOne", "key_three"); + OmKeyInfo omKeyInfo1 = + getOmKeyInfo("sampleVol", "bucketOne", "key_one", true); + OmKeyInfo omKeyInfo2 = + getOmKeyInfo("sampleVol", "bucketOne", "key_two", true); + OmKeyInfo omKeyInfo3 = + getOmKeyInfo("sampleVol", "bucketOne", "key_three", true); reconOMMetadataManager.getKeyTable(getBucketLayout()) .put("/sampleVol/bucketOne/key_one", omKeyInfo1); @@ -313,9 +323,12 @@ public void testGetDeletedKeyInfoLimitParam() throws Exception { @Test public void testGetDeletedKeyInfoPrevKeyParam() throws Exception { - OmKeyInfo omKeyInfo1 = getOmKeyInfo("sampleVol", "bucketOne", "key_one"); - OmKeyInfo omKeyInfo2 = getOmKeyInfo("sampleVol", "bucketOne", "key_two"); - OmKeyInfo omKeyInfo3 = getOmKeyInfo("sampleVol", "bucketOne", "key_three"); + OmKeyInfo omKeyInfo1 = + getOmKeyInfo("sampleVol", "bucketOne", "key_one", true); + OmKeyInfo omKeyInfo2 = + getOmKeyInfo("sampleVol", "bucketOne", "key_two", true); + OmKeyInfo omKeyInfo3 = + getOmKeyInfo("sampleVol", "bucketOne", "key_three", true); RepeatedOmKeyInfo repeatedOmKeyInfo1 = new RepeatedOmKeyInfo(omKeyInfo1); RepeatedOmKeyInfo repeatedOmKeyInfo2 = new RepeatedOmKeyInfo(omKeyInfo2); @@ -347,7 +360,8 @@ public void testGetDeletedKeyInfoPrevKeyParam() throws Exception { @Test public void testGetDeletedKeyInfo() throws Exception { - OmKeyInfo omKeyInfo = getOmKeyInfo("sampleVol", "bucketOne", "key_one"); + OmKeyInfo omKeyInfo = + getOmKeyInfo("sampleVol", "bucketOne", "key_one", true); reconOMMetadataManager.getKeyTable(getBucketLayout()) .put("/sampleVol/bucketOne/key_one", omKeyInfo); @@ -372,14 +386,113 @@ public void testGetDeletedKeyInfo() throws Exception { } private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, - String keyName) { + String keyName, boolean isFile) { return new OmKeyInfo.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) 
+ .setFile(isFile) .setReplicationConfig(StandaloneReplicationConfig .getInstance(HddsProtos.ReplicationFactor.ONE)) .setDataSize(random.nextLong()) .build(); } + + @Test + public void testGetDeletedDirInfoLimitParam() throws Exception { + OmKeyInfo omKeyInfo1 = + getOmKeyInfo("sampleVol", "bucketOne", "dir_one", false); + OmKeyInfo omKeyInfo2 = + getOmKeyInfo("sampleVol", "bucketOne", "dir_two", false); + OmKeyInfo omKeyInfo3 = + getOmKeyInfo("sampleVol", "bucketOne", "dir_three", false); + + reconOMMetadataManager.getDeletedDirTable() + .put("/sampleVol/bucketOne/dir_one", omKeyInfo1); + reconOMMetadataManager.getDeletedDirTable() + .put("/sampleVol/bucketOne/dir_two", omKeyInfo2); + reconOMMetadataManager.getDeletedDirTable() + .put("/sampleVol/bucketOne/dir_three", omKeyInfo3); + + OmKeyInfo omKeyInfoCopy = + reconOMMetadataManager.getDeletedDirTable() + .get("/sampleVol/bucketOne/dir_one"); + Assertions.assertEquals("dir_one", omKeyInfoCopy.getKeyName()); + + Response deletedDirInfo = omdbInsightEndpoint.getDeletedDirInfo(2, ""); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) deletedDirInfo.getEntity(); + Assertions.assertNotNull(keyInsightInfoResp); + Assertions.assertEquals(2, + keyInsightInfoResp.getDeletedDirInfoList().size()); + Assertions.assertEquals("/sampleVol/bucketOne/dir_one", + keyInsightInfoResp.getDeletedDirInfoList().get(0).getKey()); + } + + @Test + public void testGetDeletedDirInfoPrevKeyParam() throws Exception { + OmKeyInfo omKeyInfo1 = + getOmKeyInfo("sampleVol", "bucketOne", "dir_one", false); + OmKeyInfo omKeyInfo2 = + getOmKeyInfo("sampleVol", "bucketOne", "dir_two", false); + OmKeyInfo omKeyInfo3 = + getOmKeyInfo("sampleVol", "bucketOne", "dir_three", false); + + reconOMMetadataManager.getDeletedDirTable() + .put("/sampleVol/bucketOne/dir_one", omKeyInfo1); + reconOMMetadataManager.getDeletedDirTable() + .put("/sampleVol/bucketOne/dir_two", omKeyInfo2); + reconOMMetadataManager.getDeletedDirTable() + 
.put("/sampleVol/bucketOne/dir_three", omKeyInfo3); + + OmKeyInfo omKeyInfoCopy = + reconOMMetadataManager.getDeletedDirTable() + .get("/sampleVol/bucketOne/dir_one"); + Assertions.assertEquals("dir_one", omKeyInfoCopy.getKeyName()); + + Response deletedDirInfo = omdbInsightEndpoint.getDeletedDirInfo(2, + "/sampleVol/bucketOne/dir_one"); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) deletedDirInfo.getEntity(); + Assertions.assertNotNull(keyInsightInfoResp); + Assertions.assertEquals(2, + keyInsightInfoResp.getDeletedDirInfoList().size()); + Assertions.assertEquals("/sampleVol/bucketOne/dir_three", + keyInsightInfoResp.getDeletedDirInfoList().get(0).getKey()); + Assertions.assertEquals("/sampleVol/bucketOne/dir_two", + keyInsightInfoResp.getLastKey()); + } + + @Test + public void testGetDeletedDirInfo() throws Exception { + OmKeyInfo omKeyInfo1 = + getOmKeyInfo("sampleVol", "bucketOne", "dir_one", false); + OmKeyInfo omKeyInfo2 = + getOmKeyInfo("sampleVol", "bucketOne", "dir_two", false); + OmKeyInfo omKeyInfo3 = + getOmKeyInfo("sampleVol", "bucketOne", "dir_three", false); + + reconOMMetadataManager.getDeletedDirTable() + .put("/sampleVol/bucketOne/dir_one", omKeyInfo1); + reconOMMetadataManager.getDeletedDirTable() + .put("/sampleVol/bucketOne/dir_two", omKeyInfo2); + reconOMMetadataManager.getDeletedDirTable() + .put("/sampleVol/bucketOne/dir_three", omKeyInfo3); + + OmKeyInfo omKeyInfoCopy = + reconOMMetadataManager.getDeletedDirTable() + .get("/sampleVol/bucketOne/dir_one"); + Assertions.assertEquals("dir_one", omKeyInfoCopy.getKeyName()); + + Response deletedDirInfo = omdbInsightEndpoint.getDeletedDirInfo(-1, ""); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) deletedDirInfo.getEntity(); + Assertions.assertNotNull(keyInsightInfoResp); + Assertions.assertEquals(3, + keyInsightInfoResp.getDeletedDirInfoList().size()); + Assertions.assertEquals("/sampleVol/bucketOne/dir_one", + 
keyInsightInfoResp.getDeletedDirInfoList().get(0).getKey()); + Assertions.assertEquals("/sampleVol/bucketOne/dir_two", + keyInsightInfoResp.getLastKey()); + } } From 76dd31ac169d908409769743dcec7e2abfeebcd4 Mon Sep 17 00:00:00 2001 From: deveshsingh Date: Wed, 17 May 2023 11:29:17 +0530 Subject: [PATCH 21/21] HDDS-8214. Recon - OM DB Insights - Key Level Info. --- .../apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 6ff38bb1a82..51fba63b73d 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -278,6 +278,7 @@ private void getPendingForDeletionKeyInfo( * seeks correctly to the given prevKey. * Sample API Response: * { + * "lastKey": "vol1/bucket1/key1", * "replicatedTotal": -1530804718628866300, * "unreplicatedTotal": -1530804718628866300, * "deletedkeyinfo": [ @@ -321,7 +322,7 @@ private void getPendingForDeletionKeyInfo( * } */ @GET - @Path("/deletePending/keys") + @Path("/deletePending") public Response getDeletedKeyInfo( @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT) int limit, @@ -408,6 +409,7 @@ private void getPendingForDeletionDirInfo( * seeks correctly to the given prevKey. * Sample API Response: * { + * "lastKey": "vol1/bucket1/bucket1/dir1", + * "replicatedTotal": -1530804718628866300, * "unreplicatedTotal": -1530804718628866300, * "deletedkeyinfo": [