diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
index 3ad53f6972188..000d78d11e248 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
@@ -19,15 +19,18 @@
 package org.apache.hadoop.hdfs.server.federation.resolver;
 
 import java.io.IOException;
-import java.util.List;
+import java.util.Collection;
+import java.util.IdentityHashMap;
 import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
 import java.util.Set;
+import java.util.SortedMap;
 import java.util.TreeSet;
-import java.util.Collection;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 
 /**
  * Interface to map a file path in the global name space to a specific
@@ -74,6 +77,20 @@ public interface FileSubclusterResolver {
    */
   List<String> getMountPoints(String path) throws IOException;
 
+  /**
+   * Get an IdentityHashMap (child, mountTable source path) of mount points for a path.
+   * Results are from the mount table cache.
+   *
+   * @param path Path to get the mount points under.
+   * @return IdentityHashMap of mount points present at this path. Return zero-length
+   *         IdentityHashMap if the path is a mount point but there are no mount points
+   *         under the path. Return null if the path is not a mount point
+   *         and there are no mount points under the path.
+   * @throws IOException Throws exception if the data is not available.
+   */
+  IdentityHashMap<String, String> getMountPointsWithSrc(String path) throws IOException;
+
+
   /**
    * Get the default namespace for the cluster.
    *
@@ -127,4 +144,52 @@ static List<String> getMountPoints(String path,
     }
     return new LinkedList<>(children);
   }
+
+  /**
+   * Get an IdentityHashMap (child, mountTable source path) for a path. The child can be repetitive.
+   *
+   * @param path Path to get the mount points under.
+   * @param tree the mount points tree.
+   * @return Return empty IdentityHashMap if the path is a mount point but there are no
+   *         mount points under the path. Return null if the path is not a mount
+   *         point and there are no mount points under the path.
+   */
+  static IdentityHashMap<String, String> getMountPointsWithSrc(
+      String path, SortedMap<String, MountTable> tree) {
+    IdentityHashMap<String, String> childWithSourcePaths = new IdentityHashMap<>();
+    boolean exists = false;
+    for (Map.Entry<String, MountTable> record : tree.entrySet()) {
+      String subPath = record.getKey();
+      String child = subPath;
+
+      // Special case for /
+      if (!path.equals(Path.SEPARATOR)) {
+        // Get the children
+        int ini = path.length();
+        child = subPath.substring(ini);
+      }
+
+      if (child.isEmpty()) {
+        // This is a mount point but without children
+        exists = true;
+      } else if (child.startsWith(Path.SEPARATOR)) {
+        // This is a mount point with children
+        exists = true;
+        child = child.substring(1);
+
+        // We only return immediate children
+        int fin = child.indexOf(Path.SEPARATOR);
+        if (fin > -1) {
+          child = child.substring(0, fin);
+        }
+        if (!child.isEmpty()) {
+          childWithSourcePaths.put(child, record.getValue().getSourcePath());
+        }
+      }
+    }
+    if (!exists) {
+      return null;
+    }
+    return childWithSourcePaths;
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index a5ca8acfb4b3b..12fcec13d707e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -29,6 +29,7 @@
 import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.IdentityHashMap;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -46,6 +47,7 @@
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import java.util.ArrayList;
@@ -383,6 +385,17 @@ public static String subtractTrashCurrentPath(String path)
         getTrashRoot() + TRASH_PATTERN, "");
   }
 
+  public static String getTrashCurrentPath(String path)
+      throws IOException {
+    Pattern pattern = Pattern.compile(
+        "^" + getTrashRoot() + TRASH_PATTERN);
+    Matcher matcher = pattern.matcher(path);
+    if (matcher.find()) {
+      return matcher.group();
+    }
+    return "/";
+  }
+
   /**
    * If path is a path related to the trash can,
    * subtract TrashCurrent to return a new path.
@@ -541,6 +554,25 @@ public List<String> getMountPoints(final String str) throws IOException {
     }
   }
 
+  @Override
+  public IdentityHashMap<String, String> getMountPointsWithSrc(final String str)
+      throws IOException {
+    verifyMountTable();
+    String path = RouterAdmin.normalizeFileSystemPath(str);
+    if (isTrashPath(path)) {
+      path = subtractTrashCurrentPath(path);
+    }
+    readLock.lock();
+    try {
+      String from = path;
+      String to = path + Character.MAX_VALUE;
+      SortedMap<String, MountTable> subMap = this.tree.subMap(from, to);
+      return FileSubclusterResolver.getMountPointsWithSrc(path, subMap);
+    } finally {
+      readLock.unlock();
+    }
+  }
+
   /**
    * Get all the mount records at or beneath a given path.
    * @param path Path to get the mount points from.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
index a85c0320470d8..418e6975be4d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
@@ -106,9 +106,11 @@
 import java.net.ConnectException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.Comparator;
 import java.util.EnumSet;
 import java.util.HashMap;
+import java.util.IdentityHashMap;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.LinkedList;
@@ -902,13 +904,31 @@ public DirectoryListing getListing(String src, byte[] startAfter,
     }
     // Add mount points at this level in the tree
-    final List<String> children = subclusterResolver.getMountPoints(src);
+    IdentityHashMap<String, String> childrenMountTableWithSrc =
+        subclusterResolver.getMountPointsWithSrc(src);
+    List<String> children = null;
+    // Sort the list as the entries from subcluster are also sorted
+    if (childrenMountTableWithSrc != null) {
+      children = new ArrayList<>(childrenMountTableWithSrc.keySet());
+    }
     if (children != null) {
       // Get the dates for each mount point
       Map<String, Long> dates = getMountPointDates(src);
       // Create virtual folder with the mount name
-      for (String child : children) {
+      boolean isTrashPath = MountTableResolver.isTrashPath(src);
+      for (int i = 0; i < children.size(); i++) {
+        String child = children.get(i);
+        if (isTrashPath) {
+          HdfsFileStatus dir = getFileInfo(
+              MountTableResolver.getTrashCurrentPath(src) + childrenMountTableWithSrc.get(child),
+              false);
+          if (dir == null) {
+            children.remove(child);
+            i--;
+            continue;
+          }
+        }
         long date = 0;
         if (dates != null && dates.containsKey(child)) {
           date = dates.get(child);
         }
@@ -964,6 +984,10 @@ public BatchedDirectoryListing getBatchedListing(String[] srcs,
 
   @Override
   public HdfsFileStatus getFileInfo(String src) throws IOException {
+    return getFileInfo(src, true);
+  }
+
+  public HdfsFileStatus getFileInfo(String src, boolean withMountTable) throws IOException {
     rpcServer.checkOperation(NameNode.OperationCategory.READ);
 
     HdfsFileStatus ret = null;
@@ -984,6 +1008,10 @@ public HdfsFileStatus getFileInfo(String src) throws IOException {
       noLocationException = e;
     }
 
+    if (!withMountTable) {
+      return ret;
+    }
+
     // If there is no real path, check mount points
     if (ret == null) {
       List<String> children = subclusterResolver.getMountPoints(src);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
index 554879856ac1b..83c908bf1f101 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
@@ -24,12 +24,13 @@
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.IdentityHashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.TreeMap;
 import java.util.TreeSet;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
@@ -42,6 +43,7 @@
 import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
 import org.apache.hadoop.hdfs.server.federation.router.Router;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.util.Time;
 
 /**
@@ -393,6 +395,22 @@ public List<String> getMountPoints(String path) throws IOException {
     return FileSubclusterResolver.getMountPoints(path, mountPoints);
   }
 
+  @Override
+  public IdentityHashMap<String, String> getMountPointsWithSrc(String path) throws IOException {
+    TreeMap<String, MountTable> sortedMap = new TreeMap<>();
+    for (Map.Entry<String, List<RemoteLocation>> record : this.locations.entrySet()) {
+      String mp = record.getKey();
+      if (mp.startsWith(path)) {
+        Map<String, String> map = new HashMap<>();
+        for (RemoteLocation remoteLocation : record.getValue()) {
+          map.put(remoteLocation.getNameserviceId(), remoteLocation.getDest());
+        }
+        sortedMap.put(mp, MountTable.newInstance(mp, map));
+      }
+    }
+    return FileSubclusterResolver.getMountPointsWithSrc(path, sortedMap);
+  }
+
   @Override
   public void setRouterId(String router) {
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterTrash.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterTrash.java
index 8ec61b6ee737e..2d8d10e636f8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterTrash.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterTrash.java
@@ -276,7 +276,7 @@ public void testMultipleMountPoint() throws IOException,
     // Client user see global trash view, wo should see all three mount point.
     FileStatus[] fileStatuses = fs.listStatus(
         new Path("/user/test-trash/.Trash/Current/"));
-    assertEquals(3, fileStatuses.length);
+    assertEquals(2, fileStatuses.length);
     // This should return empty fileStatuses rather than NotFound Exception.
     fileStatuses = fs.listStatus(
         new Path("/user/test-trash/.Trash/Current/" + MOUNT_POINT2));