/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import static org.apache.hadoop.util.Time.now;

import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root;
import org.apache.hadoop.hdfs.util.ByteArray;
import org.apache.hadoop.hdfs.util.ChunkedArrayList;
import org.apache.hadoop.hdfs.util.ReadOnlyList;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;

/*************************************************
 * FSDirectory stores the filesystem directory state.
 * It handles writing/loading values to disk, and logging
 * changes as we go.
 *
 * It keeps the filename->blockset mapping always-current
 * and logged to disk.
 *
 *************************************************/
public class FSDirectory implements Closeable {
  private static INodeDirectoryWithQuota createRoot(FSNamesystem namesystem) {
    final INodeDirectoryWithQuota r = new INodeDirectoryWithQuota(
        INodeId.ROOT_INODE_ID,
        INodeDirectory.ROOT_NAME,
        namesystem.createFsOwnerPermissions(new FsPermission((short) 0755)));
    final INodeDirectorySnapshottable s = new INodeDirectorySnapshottable(r);
    s.setSnapshotQuota(0);
    return s;
  }

  @VisibleForTesting
  static boolean CHECK_RESERVED_FILE_NAMES = true;
  public final static String DOT_RESERVED_STRING = ".reserved";
  public final static String DOT_RESERVED_PATH_PREFIX = Path.SEPARATOR
      + DOT_RESERVED_STRING;
  public final static byte[] DOT_RESERVED =
      DFSUtil.string2Bytes(DOT_RESERVED_STRING);
  public final static String DOT_INODES_STRING = ".inodes";
  public final static byte[] DOT_INODES =
      DFSUtil.string2Bytes(DOT_INODES_STRING);
  INodeDirectoryWithQuota rootDir;
  FSImage fsImage;
  private final FSNamesystem namesystem;
  private volatile boolean ready = false;
  private final int maxComponentLength;
  private final int maxDirItems;
  private final int lsLimit;  // max list limit
  private final int contentCountLimit; // max content summary counts per run
  private final INodeMap inodeMap; // Synchronized by dirLock
  private long yieldCount = 0; // keep track of lock yield count.

  // lock to protect the directory and BlockMap
  private ReentrantReadWriteLock dirLock;
  private Condition cond;

  // utility methods to acquire and release read lock and write lock
  void readLock() {
    this.dirLock.readLock().lock();
  }

  void readUnlock() {
    this.dirLock.readLock().unlock();
  }

  void writeLock() {
    this.dirLock.writeLock().lock();
  }

  void writeUnlock() {
    this.dirLock.writeLock().unlock();
  }

  boolean hasWriteLock() {
    return this.dirLock.isWriteLockedByCurrentThread();
  }

  boolean hasReadLock() {
    return this.dirLock.getReadHoldCount() > 0;
  }

  public int getReadHoldCount() {
    return this.dirLock.getReadHoldCount();
  }

  public int getWriteHoldCount() {
    return this.dirLock.getWriteHoldCount();
  }

  /**
   * Caches frequently used file names used in {@link INode} to reuse
   * byte[] objects and reduce heap usage.
   */
  private final NameCache<ByteArray> nameCache;

  FSDirectory(FSImage fsImage, FSNamesystem ns, Configuration conf) {
    this.dirLock = new ReentrantReadWriteLock(true); // fair
    this.cond = dirLock.writeLock().newCondition();
    rootDir = createRoot(ns);
    inodeMap = INodeMap.newInstance(rootDir);
    this.fsImage = fsImage;
    int configuredLimit = conf.getInt(
        DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);
    this.lsLimit = configuredLimit > 0 ?
        configuredLimit : DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT;
    this.contentCountLimit = conf.getInt(
        DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,
        DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_DEFAULT);

    // filesystem limits
    this.maxComponentLength = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY,
        DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_DEFAULT);
    this.maxDirItems = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY,
        DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT);

    int threshold = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY,
        DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT);
    NameNode.LOG.info("Caching file names occurring more than " + threshold
        + " times");
    nameCache = new NameCache<ByteArray>(threshold);
    namesystem = ns;
  }

  private FSNamesystem getFSNamesystem() {
    return namesystem;
  }

  private BlockManager getBlockManager() {
    return getFSNamesystem().getBlockManager();
  }

  /** @return the root directory inode. */
  public INodeDirectoryWithQuota getRoot() {
    return rootDir;
  }

  /**
   * Notify that loading of this FSDirectory is complete, and
   * it is ready for use
   */
  void imageLoadComplete() {
    Preconditions.checkState(!ready, "FSDirectory already loaded");
    setReady();
  }

  void setReady() {
    if (ready) return;
    writeLock();
    try {
      setReady(true);
      this.nameCache.initialized();
      cond.signalAll();
    } finally {
      writeUnlock();
    }
  }

  // This is for testing purposes only
  @VisibleForTesting
  boolean isReady() {
    return ready;
  }

  // exposed for unit tests
  protected void setReady(boolean flag) {
    ready = flag;
  }

  private void incrDeletedFileCount(long count) {
    if (getFSNamesystem() != null)
      NameNode.getNameNodeMetrics().incrFilesDeleted(count);
  }

  /**
   * Shutdown the filestore
   */
  @Override
  public void close() throws IOException {
    fsImage.close();
  }

  /**
   * Block until the object is ready to be used.
   */
  void waitForReady() {
    if (!ready) {
      writeLock();
      try {
        while (!ready) {
          try {
            cond.await(5000, TimeUnit.MILLISECONDS);
          } catch (InterruptedException ie) {
          }
        }
      } finally {
        writeUnlock();
      }
    }
  }

  /**
   * Add the given filename to the fs.
   * @throws FileAlreadyExistsException
   * @throws QuotaExceededException
   * @throws UnresolvedLinkException
   * @throws SnapshotAccessControlException
   */
  INodeFileUnderConstruction addFile(String path,
                PermissionStatus permissions,
                short replication,
                long preferredBlockSize,
                String clientName,
                String clientMachine,
                DatanodeDescriptor clientNode)
      throws FileAlreadyExistsException, QuotaExceededException,
      UnresolvedLinkException, SnapshotAccessControlException {
    waitForReady();

    // Always do an implicit mkdirs for parent directory tree.
    long modTime = now();

    Path parent = new Path(path).getParent();
    if (parent == null) {
      // Trying to add "/" as a file - this path has no
      // parent -- avoids an NPE below.
      return null;
    }

    if (!mkdirs(parent.toString(), permissions, true, modTime)) {
      return null;
    }
    INodeFileUnderConstruction newNode = new INodeFileUnderConstruction(
        namesystem.allocateNewInodeId(),
        permissions, replication,
        preferredBlockSize, modTime, clientName,
        clientMachine, clientNode);
    boolean added = false;
    writeLock();
    try {
      added = addINode(path, newNode);
    } finally {
      writeUnlock();
    }
    if (!added) {
      NameNode.stateChangeLog.info("DIR* addFile: failed to add " + path);
      return null;
    }

    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* addFile: " + path + " is added");
    }
    return newNode;
  }

  INodeFile unprotectedAddFile(long id,
                            String path,
                            PermissionStatus permissions,
                            short replication,
                            long modificationTime,
                            long atime,
                            long preferredBlockSize,
                            boolean underConstruction,
                            String clientName,
                            String clientMachine) {
    final INodeFile newNode;
    assert hasWriteLock();
    if (underConstruction) {
      newNode = new INodeFileUnderConstruction(id, permissions, replication,
          preferredBlockSize, modificationTime, clientName, clientMachine, null);
    } else {
      newNode = new INodeFile(id, null, permissions, modificationTime, atime,
          BlockInfo.EMPTY_ARRAY, replication, preferredBlockSize);
    }

    try {
      if (addINode(path, newNode)) {
        return newNode;
      }
    } catch (IOException e) {
      if (NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug(
            "DIR* FSDirectory.unprotectedAddFile: exception when adding " + path
            + " to the file system", e);
      }
    }
    return null;
  }

  /**
   * Add a block to the file. Returns a reference to the added block.
   */
  BlockInfo addBlock(String path, INodesInPath inodesInPath, Block block,
      DatanodeStorageInfo[] targets) throws IOException {
    waitForReady();

    writeLock();
    try {
      final INodeFileUnderConstruction fileINode =
          INodeFileUnderConstruction.valueOf(inodesInPath.getLastINode(), path);

      // check quota limits and update space consumed
      updateCount(inodesInPath, 0, fileINode.getBlockDiskspace(), true);

      // associate new last block for the file
      BlockInfoUnderConstruction blockInfo =
          new BlockInfoUnderConstruction(
              block,
              fileINode.getFileReplication(),
              BlockUCState.UNDER_CONSTRUCTION,
              targets);
      getBlockManager().addBlockCollection(blockInfo, fileINode);
      fileINode.addBlock(blockInfo);

      if (NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug("DIR* FSDirectory.addBlock: "
            + path + " with " + block
            + " block is added to the in-memory "
            + "file system");
      }
      return blockInfo;
    } finally {
      writeUnlock();
    }
  }

  /**
   * Persist the block list for the inode.
   */
  void persistBlocks(String path, INodeFileUnderConstruction file,
      boolean logRetryCache) {
    waitForReady();

    writeLock();
    try {
      fsImage.getEditLog().logUpdateBlocks(path, file, logRetryCache);
      if (NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug("DIR* FSDirectory.persistBlocks: "
            + path + " with " + file.getBlocks().length
            + " blocks is persisted to the file system");
      }
    } finally {
      writeUnlock();
    }
  }

  /**
   * Persist the new block (the last block of the given file).
   */
  void persistNewBlock(String path, INodeFileUnderConstruction file) {
    waitForReady();

    writeLock();
    try {
      fsImage.getEditLog().logAddBlock(path, file);
    } finally {
      writeUnlock();
    }
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* FSDirectory.persistNewBlock: "
          + path + " with new block " + file.getLastBlock().toString()
          + ", current total block count is " + file.getBlocks().length);
    }
  }

  /**
   * Close file.
   */
  void closeFile(String path, INodeFile file) {
    waitForReady();
    writeLock();
    try {
      // file is closed
      fsImage.getEditLog().logCloseFile(path, file);
      if (NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug("DIR* FSDirectory.closeFile: "
            + path + " with " + file.getBlocks().length
            + " blocks is persisted to the file system");
      }
    } finally {
      writeUnlock();
    }
  }

  /**
   * Remove a block from the file.
   * @return Whether the block exists in the corresponding file
   */
  boolean removeBlock(String path, INodeFileUnderConstruction fileNode,
      Block block) throws IOException {
    waitForReady();

    writeLock();
    try {
      return unprotectedRemoveBlock(path, fileNode, block);
    } finally {
      writeUnlock();
    }
  }

  boolean unprotectedRemoveBlock(String path,
      INodeFileUnderConstruction fileNode, Block block) throws IOException {
    // modify file-> block and blocksMap
    boolean removed = fileNode.removeLastBlock(block);
    if (!removed) {
      return false;
    }
    getBlockManager().removeBlockFromMap(block);

    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* FSDirectory.removeBlock: "
          + path + " with " + block
          + " block is removed from the file system");
    }

    // update space consumed
    final INodesInPath iip = rootDir.getINodesInPath4Write(path, true);
    updateCount(iip, 0, -fileNode.getBlockDiskspace(), true);
    return true;
  }

  /**
   * @throws SnapshotAccessControlException
   * @see #unprotectedRenameTo(String, String, long)
   * @deprecated Use {@link #renameTo(String, String, Rename...)} instead.
   */
  @Deprecated
  boolean renameTo(String src, String dst, boolean logRetryCache)
      throws QuotaExceededException, UnresolvedLinkException,
      FileAlreadyExistsException, SnapshotAccessControlException, IOException {
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: "
          + src + " to " + dst);
    }
    waitForReady();
    long now = now();
    writeLock();
    try {
      if (!unprotectedRenameTo(src, dst, now))
        return false;
    } finally {
      writeUnlock();
    }
    fsImage.getEditLog().logRename(src, dst, now, logRetryCache);
    return true;
  }

  /**
   * @see #unprotectedRenameTo(String, String, long, Options.Rename...)
   */
  void renameTo(String src, String dst, boolean logRetryCache,
      Options.Rename... options)
      throws FileAlreadyExistsException, FileNotFoundException,
      ParentNotDirectoryException, QuotaExceededException,
      UnresolvedLinkException, IOException {
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: " + src
          + " to " + dst);
    }
    waitForReady();
    long now = now();
    writeLock();
    try {
      if (unprotectedRenameTo(src, dst, now, options)) {
        incrDeletedFileCount(1);
      }
    } finally {
      writeUnlock();
    }
    fsImage.getEditLog().logRename(src, dst, now, logRetryCache, options);
  }

  /**
   * Change a path name
   *
   * @param src source path
   * @param dst destination path
   * @return true if rename succeeds; false otherwise
   * @throws QuotaExceededException if the operation violates any quota limit
   * @throws FileAlreadyExistsException if the src is a symlink that points to dst
   * @throws SnapshotAccessControlException if path is in RO snapshot
   * @deprecated See {@link #renameTo(String, String)}
   */
  @Deprecated
  boolean unprotectedRenameTo(String src, String dst, long timestamp)
      throws QuotaExceededException, UnresolvedLinkException,
      FileAlreadyExistsException, SnapshotAccessControlException, IOException {
    assert hasWriteLock();
    INodesInPath srcIIP = rootDir.getINodesInPath4Write(src, false);
    final INode srcInode = srcIIP.getLastINode();

    // validate the source
    if (srcInode == null) {
      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
          + "failed to rename " + src + " to " + dst
          + " because source does not exist");
      return false;
    }
    if (srcIIP.getINodes().length == 1) {
      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
          + "failed to rename " + src + " to " + dst
          + " because source is the root");
      return false;
    }

    // srcInode and its subtree cannot contain snapshottable directories with
    // snapshots
    List<INodeDirectorySnapshottable> snapshottableDirs =
        new ArrayList<INodeDirectorySnapshottable>();
    checkSnapshot(srcInode, snapshottableDirs);

    if (isDir(dst)) {
      dst += Path.SEPARATOR + new Path(src).getName();
    }

    // check the validity of the destination
    if (dst.equals(src)) {
      return true;
    }
    if (srcInode.isSymlink() &&
        dst.equals(srcInode.asSymlink().getSymlinkString())) {
      throw new FileAlreadyExistsException(
          "Cannot rename symlink " + src + " to its target " + dst);
    }

    // dst cannot be a directory or a file under src
    if (dst.startsWith(src) &&
        dst.charAt(src.length()) == Path.SEPARATOR_CHAR) {
      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
          + "failed to rename " + src + " to " + dst
          + " because destination starts with src");
      return false;
    }

    byte[][] dstComponents = INode.getPathComponents(dst);
    INodesInPath dstIIP = getExistingPathINodes(dstComponents);
    if (dstIIP.isSnapshot()) {
      throw new SnapshotAccessControlException(
          "Modification on RO snapshot is disallowed");
    }
    if (dstIIP.getLastINode() != null) {
      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
          + "failed to rename " + src + " to " + dst
          + " because destination exists");
      return false;
    }
    INode dstParent = dstIIP.getINode(-2);
    if (dstParent == null) {
      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
          + "failed to rename " + src + " to " + dst
          + " because destination's parent does not exist");
      return false;
    }

    // Ensure dst has quota to accommodate rename
    verifyQuotaForRename(srcIIP.getINodes(), dstIIP.getINodes());

    boolean added = false;
    INode srcChild = srcIIP.getLastINode();
    final byte[] srcChildName = srcChild.getLocalNameBytes();
    final boolean isSrcInSnapshot = srcChild.isInLatestSnapshot(
        srcIIP.getLatestSnapshot());
    final boolean srcChildIsReference = srcChild.isReference();

    // Record the snapshot on srcChild. After the rename, before any new
    // snapshot is taken on the dst tree, changes will be recorded in the latest
    // snapshot of the src tree.
    if (isSrcInSnapshot) {
      srcChild = srcChild.recordModification(srcIIP.getLatestSnapshot(),
          inodeMap);
      srcIIP.setLastINode(srcChild);
    }

    // check srcChild for reference
    final INodeReference.WithCount withCount;
    Quota.Counts oldSrcCounts = Quota.Counts.newInstance();
    int srcRefDstSnapshot = srcChildIsReference ? srcChild.asReference()
        .getDstSnapshotId() : Snapshot.INVALID_ID;
    if (isSrcInSnapshot) {
      final INodeReference.WithName withName =
          srcIIP.getINode(-2).asDirectory().replaceChild4ReferenceWithName(
              srcChild, srcIIP.getLatestSnapshot());
      withCount = (INodeReference.WithCount) withName.getReferredINode();
      srcChild = withName;
      srcIIP.setLastINode(srcChild);
      // get the counts before rename
      withCount.getReferredINode().computeQuotaUsage(oldSrcCounts, true,
          Snapshot.INVALID_ID);
    } else if (srcChildIsReference) {
      // srcChild is reference but srcChild is not in latest snapshot
      withCount = (WithCount) srcChild.asReference().getReferredINode();
    } else {
      withCount = null;
    }

    try {
      // remove src
      final long removedSrc = removeLastINode(srcIIP);
      if (removedSrc == -1) {
        NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
            + "failed to rename " + src + " to " + dst
            + " because the source can not be removed");
        return false;
      }

      if (dstParent.getParent() == null) {
        // src and dst file/dir are in the same directory, and the dstParent has
        // been replaced when we removed the src. Refresh the dstIIP and
        // dstParent.
        dstIIP = getExistingPathINodes(dstComponents);
        dstParent = dstIIP.getINode(-2);
      }

      // add src to the destination

      srcChild = srcIIP.getLastINode();
      final byte[] dstChildName = dstIIP.getLastLocalName();
      final INode toDst;
      if (withCount == null) {
        srcChild.setLocalName(dstChildName);
        toDst = srcChild;
      } else {
        withCount.getReferredINode().setLocalName(dstChildName);
        Snapshot dstSnapshot = dstIIP.getLatestSnapshot();
        final INodeReference.DstReference ref = new INodeReference.DstReference(
            dstParent.asDirectory(), withCount,
            dstSnapshot == null ? Snapshot.INVALID_ID : dstSnapshot.getId());
        toDst = ref;
      }

      added = addLastINodeNoQuotaCheck(dstIIP, toDst);
      if (added) {
        if (NameNode.stateChangeLog.isDebugEnabled()) {
          NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedRenameTo: "
              + src + " is renamed to " + dst);
        }
        // update modification time of dst and the parent of src
        final INode srcParent = srcIIP.getINode(-2);
        srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshot(),
            inodeMap);
        dstParent = dstIIP.getINode(-2); // refresh dstParent
        dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshot(),
            inodeMap);
        // update moved leases with new filename
        getFSNamesystem().unprotectedChangeLease(src, dst);

        // update the quota usage in src tree
        if (isSrcInSnapshot) {
          // get the counts after rename
          Quota.Counts newSrcCounts = srcChild.computeQuotaUsage(
              Quota.Counts.newInstance(), false, Snapshot.INVALID_ID);
          newSrcCounts.subtract(oldSrcCounts);
          srcParent.addSpaceConsumed(newSrcCounts.get(Quota.NAMESPACE),
              newSrcCounts.get(Quota.DISKSPACE), false);
        }

        return true;
      }
    } finally {
      if (!added) {
        final INodeDirectory srcParent = srcIIP.getINode(-2).asDirectory();
        final INode oldSrcChild = srcChild;
        // put it back
        if (withCount == null) {
          srcChild.setLocalName(srcChildName);
        } else if (!srcChildIsReference) { // src must be in snapshot
          // the withCount node will no longer be used thus no need to update
          // its reference number here
          final INode originalChild = withCount.getReferredINode();
          srcChild = originalChild;
          srcChild.setLocalName(srcChildName);
        } else {
          withCount.removeReference(oldSrcChild.asReference());
          final INodeReference originalRef = new INodeReference.DstReference(
              srcParent, withCount, srcRefDstSnapshot);
          srcChild = originalRef;
          withCount.getReferredINode().setLocalName(srcChildName);
        }

        if (isSrcInSnapshot) {
          // srcParent must be an INodeDirectoryWithSnapshot instance since
          // isSrcInSnapshot is true and src node has been removed from
          // srcParent
          ((INodeDirectoryWithSnapshot) srcParent).undoRename4ScrParent(
              oldSrcChild.asReference(), srcChild, srcIIP.getLatestSnapshot());
        } else {
          // original srcChild is not in latest snapshot, we only need to add
          // the srcChild back
          addLastINodeNoQuotaCheck(srcIIP, srcChild);
        }
      }
    }
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
        + "failed to rename " + src + " to " + dst);
    return false;
  }

  /**
   * Rename src to dst.
   * See {@link DistributedFileSystem#rename(Path, Path, Options.Rename...)}
   * for details related to rename semantics and exceptions.
   *
   * @param src source path
   * @param dst destination path
   * @param timestamp modification time
   * @param options Rename options
   */
  boolean unprotectedRenameTo(String src, String dst, long timestamp,
      Options.Rename... options) throws FileAlreadyExistsException,
      FileNotFoundException, ParentNotDirectoryException,
      QuotaExceededException, UnresolvedLinkException, IOException {
    assert hasWriteLock();
    boolean overwrite = false;
    if (null != options) {
      for (Rename option : options) {
        if (option == Rename.OVERWRITE) {
          overwrite = true;
        }
      }
    }
    String error = null;
    final INodesInPath srcIIP = rootDir.getINodesInPath4Write(src, false);
    final INode srcInode = srcIIP.getLastINode();
    // validate source
    if (srcInode == null) {
      error = "rename source " + src + " is not found.";
      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
          + error);
      throw new FileNotFoundException(error);
    }
    if (srcIIP.getINodes().length == 1) {
      error = "rename source cannot be the root";
      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
          + error);
      throw new IOException(error);
    }
    // srcInode and its subtree cannot contain snapshottable directories with
    // snapshots
    checkSnapshot(srcInode, null);

    // validate the destination
    if (dst.equals(src)) {
      throw new FileAlreadyExistsException(
          "The source " + src + " and destination " + dst + " are the same");
    }
    if (srcInode.isSymlink() &&
        dst.equals(srcInode.asSymlink().getSymlinkString())) {
      throw new FileAlreadyExistsException(
          "Cannot rename symlink " + src + " to its target " + dst);
    }
    // dst cannot be a directory or a file under src
    if (dst.startsWith(src) &&
        dst.charAt(src.length()) == Path.SEPARATOR_CHAR) {
      error = "Rename destination " + dst
          + " is a directory or file under source " + src;
      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
          + error);
      throw new IOException(error);
    }
    INodesInPath dstIIP = rootDir.getINodesInPath4Write(dst, false);
    if (dstIIP.getINodes().length == 1) {
      error = "rename destination cannot be the root";
      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
          + error);
      throw new IOException(error);
    }

    final INode dstInode = dstIIP.getLastINode();
    List<INodeDirectorySnapshottable> snapshottableDirs =
        new ArrayList<INodeDirectorySnapshottable>();
    if (dstInode != null) { // Destination exists
      // It's OK to rename a file to a symlink and vice versa
      if (dstInode.isDirectory() != srcInode.isDirectory()) {
        error = "Source " + src + " and destination " + dst
            + " must both be directories";
        NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
            + error);
        throw new IOException(error);
      }
      if (!overwrite) { // If destination exists, overwrite flag must be true
        error = "rename destination " + dst + " already exists";
        NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
            + error);
        throw new FileAlreadyExistsException(error);
      }
      if (dstInode.isDirectory()) {
        final ReadOnlyList<INode> children = dstInode.asDirectory()
            .getChildrenList(null);
        if (!children.isEmpty()) {
          error = "rename destination directory is not empty: " + dst;
          NameNode.stateChangeLog.warn(
              "DIR* FSDirectory.unprotectedRenameTo: " + error);
          throw new IOException(error);
        }
      }
      checkSnapshot(dstInode, snapshottableDirs);
    }

    INode dstParent = dstIIP.getINode(-2);
    if (dstParent == null) {
      error = "rename destination parent " + dst + " not found.";
      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
          + error);
      throw new FileNotFoundException(error);
    }
    if (!dstParent.isDirectory()) {
      error = "rename destination parent " + dst + " is a file.";
      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
          + error);
      throw new ParentNotDirectoryException(error);
    }

    // Ensure dst has quota to accommodate rename
    verifyQuotaForRename(srcIIP.getINodes(), dstIIP.getINodes());

    INode srcChild = srcIIP.getLastINode();
    final byte[] srcChildName = srcChild.getLocalNameBytes();
    final boolean isSrcInSnapshot = srcChild.isInLatestSnapshot(
        srcIIP.getLatestSnapshot());
    final boolean srcChildIsReference = srcChild.isReference();

    // Record the snapshot on srcChild. After the rename, before any new
    // snapshot is taken on the dst tree, changes will be recorded in the latest
    // snapshot of the src tree.
    if (isSrcInSnapshot) {
      srcChild = srcChild.recordModification(srcIIP.getLatestSnapshot(),
          inodeMap);
      srcIIP.setLastINode(srcChild);
    }

    // check srcChild for reference
    final INodeReference.WithCount withCount;
    int srcRefDstSnapshot = srcChildIsReference ? srcChild.asReference()
        .getDstSnapshotId() : Snapshot.INVALID_ID;
    Quota.Counts oldSrcCounts = Quota.Counts.newInstance();
    if (isSrcInSnapshot) {
      final INodeReference.WithName withName = srcIIP.getINode(-2).asDirectory()
          .replaceChild4ReferenceWithName(srcChild, srcIIP.getLatestSnapshot());
      withCount = (INodeReference.WithCount) withName.getReferredINode();
      srcChild = withName;
      srcIIP.setLastINode(srcChild);
      // get the counts before rename
      withCount.getReferredINode().computeQuotaUsage(oldSrcCounts, true,
          Snapshot.INVALID_ID);
    } else if (srcChildIsReference) {
      // srcChild is reference but srcChild is not in latest snapshot
      withCount = (WithCount) srcChild.asReference().getReferredINode();
    } else {
      withCount = null;
    }

    boolean undoRemoveSrc = true;
    final long removedSrc = removeLastINode(srcIIP);
    if (removedSrc == -1) {
      error = "Failed to rename " + src + " to " + dst
          + " because the source can not be removed";
      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
          + error);
      throw new IOException(error);
    }

    if (dstParent.getParent() == null) {
      // src and dst file/dir are in the same directory, and the dstParent has
      // been replaced when we removed the src. Refresh the dstIIP and
      // dstParent.
      dstIIP = rootDir.getINodesInPath4Write(dst, false);
    }

    boolean undoRemoveDst = false;
    INode removedDst = null;
    try {
      if (dstInode != null) { // dst exists remove it
        if (removeLastINode(dstIIP) != -1) {
          removedDst = dstIIP.getLastINode();
          undoRemoveDst = true;
        }
      }

      srcChild = srcIIP.getLastINode();

      final byte[] dstChildName = dstIIP.getLastLocalName();
      final INode toDst;
      if (withCount == null) {
        srcChild.setLocalName(dstChildName);
        toDst = srcChild;
      } else {
        withCount.getReferredINode().setLocalName(dstChildName);
        Snapshot dstSnapshot = dstIIP.getLatestSnapshot();
        final INodeReference.DstReference ref = new INodeReference.DstReference(
            dstIIP.getINode(-2).asDirectory(), withCount,
            dstSnapshot == null ? Snapshot.INVALID_ID : dstSnapshot.getId());
        toDst = ref;
      }

      // add src as dst to complete rename
      if (addLastINodeNoQuotaCheck(dstIIP, toDst)) {
        undoRemoveSrc = false;
        if (NameNode.stateChangeLog.isDebugEnabled()) {
          NameNode.stateChangeLog.debug(
              "DIR* FSDirectory.unprotectedRenameTo: " + src
              + " is renamed to " + dst);
        }

        final INode srcParent = srcIIP.getINode(-2);
        srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshot(),
            inodeMap);
        dstParent = dstIIP.getINode(-2);
        dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshot(),
            inodeMap);
        // update moved lease with new filename
        getFSNamesystem().unprotectedChangeLease(src, dst);

        // Collect the blocks and remove the lease for previous dst
        long filesDeleted = -1;
        if (removedDst != null) {
          undoRemoveDst = false;
          BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
          List<INode> removedINodes = new ChunkedArrayList<INode>();
          filesDeleted = removedDst.cleanSubtree(null,
              dstIIP.getLatestSnapshot(), collectedBlocks, removedINodes, true)
              .get(Quota.NAMESPACE);
          getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
              removedINodes);
        }

        if (snapshottableDirs.size() > 0) {
          // There are snapshottable directories (without snapshots) to be
          // deleted. Need to update the SnapshotManager.
          namesystem.removeSnapshottableDirs(snapshottableDirs);
        }

        // update the quota usage in src tree
        if (isSrcInSnapshot) {
          // get the counts after rename
          Quota.Counts newSrcCounts = srcChild.computeQuotaUsage(
              Quota.Counts.newInstance(), false, Snapshot.INVALID_ID);
          newSrcCounts.subtract(oldSrcCounts);
          srcParent.addSpaceConsumed(newSrcCounts.get(Quota.NAMESPACE),
              newSrcCounts.get(Quota.DISKSPACE), false);
        }

        return filesDeleted >= 0;
      }
    } finally {
      if (undoRemoveSrc) {
        // Rename failed - restore src
        final INodeDirectory srcParent = srcIIP.getINode(-2).asDirectory();
        final INode oldSrcChild = srcChild;
        // put it back
        if (withCount == null) {
          srcChild.setLocalName(srcChildName);
        } else if (!srcChildIsReference) { // src must be in snapshot
          // the withCount node will no longer be used thus no need to update
          // its reference number here
          final INode originalChild = withCount.getReferredINode();
          srcChild = originalChild;
          srcChild.setLocalName(srcChildName);
        } else {
          withCount.removeReference(oldSrcChild.asReference());
          final INodeReference originalRef = new INodeReference.DstReference(
              srcParent, withCount, srcRefDstSnapshot);
          srcChild = originalRef;
          withCount.getReferredINode().setLocalName(srcChildName);
        }

        if (srcParent instanceof INodeDirectoryWithSnapshot) {
          ((INodeDirectoryWithSnapshot) srcParent).undoRename4ScrParent(
              oldSrcChild.asReference(), srcChild, srcIIP.getLatestSnapshot());
        } else {
          // srcParent is not an INodeDirectoryWithSnapshot, we only need to add
          // the srcChild back
          addLastINodeNoQuotaCheck(srcIIP, srcChild);
        }
      }
      if (undoRemoveDst) {
        // Rename failed - restore dst
        if (dstParent instanceof INodeDirectoryWithSnapshot) {
          ((INodeDirectoryWithSnapshot) dstParent).undoRename4DstParent(
              removedDst, dstIIP.getLatestSnapshot());
        } else {
          addLastINodeNoQuotaCheck(dstIIP, removedDst);
        }
        if (removedDst.isReference()) {
          final INodeReference removedDstRef = removedDst.asReference();
          final INodeReference.WithCount wc =
              (WithCount) removedDstRef.getReferredINode().asReference();
          wc.addReference(removedDstRef);
        }
      }
    }
    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
        + "failed to rename " + src + " to " + dst);
    throw new IOException("rename from " + src + " to " + dst + " failed.");
  }

  /**
   * Set file replication
   *
   * @param src file name
   * @param replication new replication
   * @param blockRepls block replications - output parameter
   * @return array of file blocks
   * @throws QuotaExceededException
   * @throws SnapshotAccessControlException
   */
  Block[] setReplication(String src, short replication, short[] blockRepls)
      throws QuotaExceededException, UnresolvedLinkException,
      SnapshotAccessControlException {
    waitForReady();
    writeLock();
    try {
      final Block[] fileBlocks = unprotectedSetReplication(
          src, replication, blockRepls);
      if (fileBlocks != null)  // log replication change
        fsImage.getEditLog().logSetReplication(src, replication);
      return fileBlocks;
    } finally {
      writeUnlock();
    }
  }

  Block[] unprotectedSetReplication(String src, short replication,
      short[] blockRepls) throws QuotaExceededException,
      UnresolvedLinkException, SnapshotAccessControlException {
    assert hasWriteLock();

    final INodesInPath iip = rootDir.getINodesInPath4Write(src, true);
    final INode inode = iip.getLastINode();
    if (inode == null || !inode.isFile()) {
      return null;
    }
    INodeFile file = inode.asFile();
    final short oldBR = file.getBlockReplication();

    // before setFileReplication, check for increasing block replication.
    // if replication > oldBR, then newBR == replication.
    // if replication < oldBR, we don't know newBR yet.
    if (replication > oldBR) {
      long dsDelta = (replication - oldBR) * (file.diskspaceConsumed() / oldBR);
      updateCount(iip, 0, dsDelta, true);
    }

    file = file.setFileReplication(replication, iip.getLatestSnapshot(),
        inodeMap);

    final short newBR = file.getBlockReplication();
    // check newBR < oldBR case.
    if (newBR < oldBR) {
      long dsDelta = (newBR - oldBR) * (file.diskspaceConsumed() / newBR);
      updateCount(iip, 0, dsDelta, true);
    }

    if (blockRepls != null) {
      blockRepls[0] = oldBR;
      blockRepls[1] = newBR;
    }
    return file.getBlocks();
  }

  /**
   * @param path the file path
   * @return the block size of the file.
   */
  long getPreferredBlockSize(String path) throws UnresolvedLinkException,
      FileNotFoundException, IOException {
    readLock();
    try {
      return INodeFile.valueOf(rootDir.getNode(path, false), path
          ).getPreferredBlockSize();
    } finally {
      readUnlock();
    }
  }

  boolean exists(String src) throws UnresolvedLinkException {
    src = normalizePath(src);
    readLock();
    try {
      INode inode = rootDir.getNode(src, false);
      if (inode == null) {
        return false;
      }
      return !inode.isFile() || inode.asFile().getBlocks() != null;
    } finally {
      readUnlock();
    }
  }

  void setPermission(String src, FsPermission permission)
      throws FileNotFoundException, UnresolvedLinkException,
      QuotaExceededException, SnapshotAccessControlException {
    writeLock();
    try {
      unprotectedSetPermission(src, permission);
    } finally {
      writeUnlock();
    }
    fsImage.getEditLog().logSetPermissions(src, permission);
  }

  void unprotectedSetPermission(String src, FsPermission permissions)
      throws FileNotFoundException, UnresolvedLinkException,
      QuotaExceededException, SnapshotAccessControlException {
    assert hasWriteLock();
    final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(src, true);
    final INode inode = inodesInPath.getLastINode();
    if (inode == null) {
      throw new FileNotFoundException("File does not exist: " + src);
    }
    inode.setPermission(permissions, inodesInPath.getLatestSnapshot(),
        inodeMap);
  }

  void setOwner(String src, String username, String groupname)
      throws FileNotFoundException, UnresolvedLinkException,
      QuotaExceededException, SnapshotAccessControlException {
    writeLock();
    try {
      unprotectedSetOwner(src, username, groupname);
    } finally {
      writeUnlock();
    }
    fsImage.getEditLog().logSetOwner(src, username, groupname);
  }

  void unprotectedSetOwner(String src, String username, String groupname)
      throws FileNotFoundException, UnresolvedLinkException,
      QuotaExceededException, SnapshotAccessControlException {
    assert hasWriteLock();
    final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(src, true);
    INode inode = inodesInPath.getLastINode();
    if (inode == null) {
      throw new FileNotFoundException("File does not exist: " + src);
    }
    if (username != null) {
      inode = inode.setUser(username, inodesInPath.getLatestSnapshot(),
          inodeMap);
    }
    if (groupname != null) {
      inode.setGroup(groupname, inodesInPath.getLatestSnapshot(), inodeMap);
    }
  }

  /**
   * Concat all the blocks from srcs to trg and delete the srcs files
   */
  void concat(String target, String[] srcs, boolean supportRetryCache)
      throws UnresolvedLinkException, QuotaExceededException,
      SnapshotAccessControlException, SnapshotException {
    writeLock();
    try {
      // actual move
      waitForReady();
      long timestamp = now();
      unprotectedConcat(target, srcs, timestamp);
      // do the commit
      fsImage.getEditLog().logConcat(target, srcs, timestamp,
          supportRetryCache);
    } finally {
      writeUnlock();
    }
  }

  /**
   * Concat all the blocks from srcs to trg and delete the srcs files
   * @param target target file to move the blocks to
   * @param srcs list of files to move the blocks from
   */
  void unprotectedConcat(String target, String[] srcs, long timestamp)
      throws UnresolvedLinkException, QuotaExceededException,
      SnapshotAccessControlException, SnapshotException {
    assert hasWriteLock();
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* FSNamesystem.concat to " + target);
    }
    // do the move

    final INodesInPath trgIIP = rootDir.getINodesInPath4Write(target, true);
    final INode[] trgINodes = trgIIP.getINodes();
    final INodeFile trgInode = trgIIP.getLastINode().asFile();
    INodeDirectory trgParent = trgINodes[trgINodes.length-2].asDirectory();
    final Snapshot trgLatestSnapshot = trgIIP.getLatestSnapshot();

    final INodeFile[] allSrcInodes = new INodeFile[srcs.length];
    for (int i = 0; i < srcs.length; i++) {
      final INodesInPath iip = getINodesInPath4Write(srcs[i]);
      final Snapshot latest = iip.getLatestSnapshot();
      final INode inode = iip.getLastINode();

      // check if the file is in the latest snapshot
      if (inode.isInLatestSnapshot(latest)) {
        throw new SnapshotException("Concat: the source file " + srcs[i]
            + " is in snapshot " + latest);
      }

      // check if the file has other references.
      if (inode.isReference() && ((INodeReference.WithCount)
          inode.asReference().getReferredINode()).getReferenceCount() > 1) {
        throw new SnapshotException("Concat: the source file " + srcs[i]
            + " is referred by some other reference in some snapshot.");
      }

      allSrcInodes[i] = inode.asFile();
    }
    trgInode.concatBlocks(allSrcInodes);

    // since we are in the same dir - we can use same parent to remove files
    int count = 0;
    for (INodeFile nodeToRemove : allSrcInodes) {
      if (nodeToRemove == null) continue;

      nodeToRemove.setBlocks(null);
      trgParent.removeChild(nodeToRemove, trgLatestSnapshot, null);
      inodeMap.remove(nodeToRemove);
      count++;
    }

    // update inodeMap
    removeFromInodeMap(Arrays.asList(allSrcInodes));

    trgInode.setModificationTime(timestamp, trgLatestSnapshot, inodeMap);
    trgParent.updateModificationTime(timestamp, trgLatestSnapshot, inodeMap);
    // update quota on the parent directory ('count' files removed, 0 space)
    unprotectedUpdateCount(trgIIP, trgINodes.length-1, -count, 0);
  }

  /**
   * Delete the target directory and collect the blocks under it
   *
   * @param src Path of a directory to delete
   * @param collectedBlocks Blocks under the deleted directory
   * @param removedINodes INodes that should be removed from {@link #inodeMap}
   * @param logRetryCache Whether to record RPC IDs in editlog to support retry
   *          cache rebuilding.
   * @return true on successful deletion; else false
   */
  boolean delete(String src, BlocksMapUpdateInfo collectedBlocks,
      List<INode> removedINodes, boolean logRetryCache) throws IOException {
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + src);
    }
    waitForReady();
    long now = now();
    final long filesRemoved;
    writeLock();
    try {
      final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(
          normalizePath(src), false);
      if (!deleteAllowed(inodesInPath, src)) {
        filesRemoved = -1;
      } else {
        // Before removing the node, first check if the targetNode is for a
        // snapshottable dir with snapshots, or its descendants have
        // snapshottable dir with snapshots
        final INode targetNode = inodesInPath.getLastINode();
        List<INodeDirectorySnapshottable> snapshottableDirs =
            new ArrayList<INodeDirectorySnapshottable>();
        checkSnapshot(targetNode, snapshottableDirs);
        filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
            removedINodes, now);
        if (snapshottableDirs.size() > 0) {
          // There are some snapshottable directories without snapshots to be
          // deleted. Need to update the SnapshotManager.
          namesystem.removeSnapshottableDirs(snapshottableDirs);
        }
      }
    } finally {
      writeUnlock();
    }
    if (filesRemoved < 0) {
      return false;
    }
    fsImage.getEditLog().logDelete(src, now, logRetryCache);
    incrDeletedFileCount(filesRemoved);
    // Blocks/INodes will be handled later by the caller of this method
    getFSNamesystem().removePathAndBlocks(src, null, null);
    return true;
  }

  private static boolean deleteAllowed(final INodesInPath iip,
      final String src) {
    final INode[] inodes = iip.getINodes();
    if (inodes == null || inodes.length == 0
        || inodes[inodes.length - 1] == null) {
      if (NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
            + "failed to remove " + src + " because it does not exist");
      }
      return false;
    } else if (inodes.length == 1) { // src is the root
      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: "
          + "failed to remove " + src
          + " because the root is not allowed to be deleted");
      return false;
    }
    return true;
  }

  /**
   * @return true if the path is a non-empty directory; otherwise, return false.
   */
  boolean isNonEmptyDirectory(String path) throws UnresolvedLinkException {
    readLock();
    try {
      final INodesInPath inodesInPath = rootDir.getLastINodeInPath(path, false);
      final INode inode = inodesInPath.getINode(0);
      if (inode == null || !inode.isDirectory()) {
        // not found or not a directory
        return false;
      }
      final Snapshot s = inodesInPath.getPathSnapshot();
      return !inode.asDirectory().getChildrenList(s).isEmpty();
    } finally {
      readUnlock();
    }
  }

  /**
   * Delete a path from the name space
   * Update the count at each ancestor directory with quota
   * <br>
   * Note: This is to be used by {@link FSEditLog} only.
   * <br>
   * @param src a string representation of a path to an inode
   * @param mtime the time the inode is removed
   * @throws SnapshotAccessControlException if path is in RO snapshot
   */
  void unprotectedDelete(String src, long mtime) throws UnresolvedLinkException,
      QuotaExceededException, SnapshotAccessControlException {
    assert hasWriteLock();
    BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
    List<INode> removedINodes = new ChunkedArrayList<INode>();

    final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(
        normalizePath(src), false);
    final long filesRemoved = deleteAllowed(inodesInPath, src) ?
        unprotectedDelete(inodesInPath, collectedBlocks,
            removedINodes, mtime) : -1;
    if (filesRemoved >= 0) {
      getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
          removedINodes);
    }
  }

  /**
   * Delete a path from the name space
   * Update the count at each ancestor directory with quota
   * @param iip the inodes resolved from the path
   * @param collectedBlocks blocks collected from the deleted path
   * @param removedINodes inodes that should be removed from {@link #inodeMap}
   * @param mtime the time the inode is removed
   * @return the number of inodes deleted; 0 if no inodes are deleted.
   */
  long unprotectedDelete(INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
      List<INode> removedINodes, long mtime) throws QuotaExceededException {
    assert hasWriteLock();

    // check if target node exists
    INode targetNode = iip.getLastINode();
    if (targetNode == null) {
      return -1;
    }

    // record modification
    final Snapshot latestSnapshot = iip.getLatestSnapshot();
    targetNode = targetNode.recordModification(latestSnapshot, inodeMap);
    iip.setLastINode(targetNode);

    // Remove the node from the namespace
    long removed = removeLastINode(iip);
    if (removed == -1) {
      return -1;
    }

    // set the parent's modification time
    final INodeDirectory parent = targetNode.getParent();
    parent.updateModificationTime(mtime, latestSnapshot, inodeMap);
    if (removed == 0) {
      return 0;
    }

    // collect block
    if (!targetNode.isInLatestSnapshot(latestSnapshot)) {
      targetNode.destroyAndCollectBlocks(collectedBlocks, removedINodes);
    } else {
      Quota.Counts counts = targetNode.cleanSubtree(null, latestSnapshot,
          collectedBlocks, removedINodes, true);
      parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
          -counts.get(Quota.DISKSPACE), true);
      removed = counts.get(Quota.NAMESPACE);
    }
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
          + targetNode.getFullPathName() + " is removed");
    }
    return removed;
  }

  /**
   * Check if the given INode (or one of its descendants) is snapshottable and
   * already has snapshots.
   *
   * @param target The given INode
   * @param snapshottableDirs The list of directories that are snapshottable
   *                          but do not have snapshots yet
   */
  private static void checkSnapshot(INode target,
      List<INodeDirectorySnapshottable> snapshottableDirs) throws IOException {
    if (target.isDirectory()) {
      INodeDirectory targetDir = target.asDirectory();
      if (targetDir.isSnapshottable()) {
        INodeDirectorySnapshottable ssTargetDir =
            (INodeDirectorySnapshottable) targetDir;
        if (ssTargetDir.getNumSnapshots() > 0) {
          throw new IOException("The directory " + ssTargetDir.getFullPathName()
              + " cannot be deleted since " + ssTargetDir.getFullPathName()
              + " is snapshottable and already has snapshots");
        } else {
          if (snapshottableDirs != null) {
            snapshottableDirs.add(ssTargetDir);
          }
        }
      }
      for (INode child : targetDir.getChildrenList(null)) {
        checkSnapshot(child, snapshottableDirs);
      }
    }
  }

  /**
   * Replaces the specified INodeFile with the specified one.
   */
  void replaceINodeFile(String path, INodeFile oldnode,
      INodeFile newnode) throws IOException {
    writeLock();
    try {
      unprotectedReplaceINodeFile(path, oldnode, newnode);
    } finally {
      writeUnlock();
    }
  }

  /** Replace an INodeFile and record modification for the latest snapshot. */
  void unprotectedReplaceINodeFile(final String path, final INodeFile oldnode,
      final INodeFile newnode) {
    Preconditions.checkState(hasWriteLock());

    oldnode.getParent().replaceChild(oldnode, newnode, inodeMap);
    oldnode.clear();

    /* Currently oldnode and newnode are assumed to contain the same
     * blocks. Otherwise, blocks need to be removed from the blocksMap.
     */
    int index = 0;
    for (BlockInfo b : newnode.getBlocks()) {
      BlockInfo info = getBlockManager().addBlockCollection(b, newnode);
      newnode.setBlock(index, info); // inode refers to the block in BlocksMap
      index++;
    }
  }

  /**
   * Get a partial listing of the indicated directory
   *
   * We will stop when any of the following conditions is met:
   * 1) this.lsLimit files have been added
   * 2) needLocation is true AND enough files have been added such
   * that at least this.lsLimit block locations are in the response
   *
   * @param src the directory name
   * @param startAfter the name to start listing after
   * @param needLocation if block locations are returned
   * @return a partial listing starting after startAfter
   */
  DirectoryListing getListing(String src, byte[] startAfter,
      boolean needLocation) throws UnresolvedLinkException, IOException {
    String srcs = normalizePath(src);

    readLock();
    try {
      if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
        return getSnapshotsListing(srcs, startAfter);
      }
      final INodesInPath inodesInPath = rootDir.getLastINodeInPath(srcs, true);
      final Snapshot snapshot = inodesInPath.getPathSnapshot();
      final INode targetNode = inodesInPath.getINode(0);
      if (targetNode == null)
        return null;

      if (!targetNode.isDirectory()) {
        return new DirectoryListing(
            new HdfsFileStatus[]{createFileStatus(HdfsFileStatus.EMPTY_NAME,
                targetNode, needLocation, snapshot)}, 0);
      }

      final INodeDirectory dirInode = targetNode.asDirectory();
      final ReadOnlyList<INode> contents = dirInode.getChildrenList(snapshot);
      int startChild = INodeDirectory.nextChild(contents, startAfter);
      int totalNumChildren = contents.size();
      int numOfListing = Math.min(totalNumChildren - startChild, this.lsLimit);
      int locationBudget = this.lsLimit;
      int listingCnt = 0;
      HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
      for (int i = 0; i < numOfListing && locationBudget > 0; i++) {
        INode cur = contents.get(startChild + i);
        listing[i] = createFileStatus(cur.getLocalNameBytes(), cur,
            needLocation, snapshot);
        listingCnt++;
        if (needLocation) {
          // Once we hit lsLimit locations, stop.
          // This helps to prevent excessively large response payloads.
          // Approximate #locations with locatedBlockCount() * repl_factor
          LocatedBlocks blks =
              ((HdfsLocatedFileStatus)listing[i]).getBlockLocations();
          locationBudget -= (blks == null) ? 0 :
              blks.locatedBlockCount() * listing[i].getReplication();
0 : 1585 blks.locatedBlockCount() * listing[i].getReplication(); 1586 } 1587 } 1588 // truncate return array if necessary 1589 if (listingCnt < numOfListing) { 1590 listing = Arrays.copyOf(listing, listingCnt); 1591 } 1592 return new DirectoryListing( 1593 listing, totalNumChildren-startChild-listingCnt); 1594 } finally { 1595 readUnlock(); 1596 } 1597 } 1598 1599 /** 1600 * Get a listing of all the snapshots of a snapshottable directory 1601 */ 1602 private DirectoryListing getSnapshotsListing(String src, byte[] startAfter) 1603 throws UnresolvedLinkException, IOException { 1604 Preconditions.checkState(hasReadLock()); 1605 Preconditions.checkArgument( 1606 src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), 1607 "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR); 1608 1609 final String dirPath = normalizePath(src.substring(0, 1610 src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length())); 1611 1612 final INode node = this.getINode(dirPath); 1613 final INodeDirectorySnapshottable dirNode = INodeDirectorySnapshottable 1614 .valueOf(node, dirPath); 1615 final ReadOnlyList<Snapshot> snapshots = dirNode.getSnapshotList(); 1616 int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter); 1617 skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1; 1618 int numOfListing = Math.min(snapshots.size() - skipSize, this.lsLimit); 1619 final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing]; 1620 for (int i = 0; i < numOfListing; i++) { 1621 Root sRoot = snapshots.get(i + skipSize).getRoot(); 1622 listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot, null); 1623 } 1624 return new DirectoryListing( 1625 listing, snapshots.size() - skipSize - numOfListing); 1626 } 1627 1628 /** Get the file info for a specific file. 1629 * @param src The string representation of the path to the file 1630 * @param resolveLink whether to throw UnresolvedLinkException 1631 * @return object containing information regarding the file 1632 * or null if file not found 1633 */ 1634 HdfsFileStatus getFileInfo(String src, boolean resolveLink) 1635 throws UnresolvedLinkException { 1636 String srcs = normalizePath(src); 1637 readLock(); 1638 try { 1639 if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) { 1640 return getFileInfo4DotSnapshot(srcs); 1641 } 1642 final INodesInPath inodesInPath = rootDir.getLastINodeInPath(srcs, resolveLink); 1643 final INode i = inodesInPath.getINode(0); 1644 return i == null? null: createFileStatus(HdfsFileStatus.EMPTY_NAME, i, 1645 inodesInPath.getPathSnapshot()); 1646 } finally { 1647 readUnlock(); 1648 } 1649 } 1650 1651 /** 1652 * Currently we only support "ls /xxx/.snapshot" which will return all the 1653 * snapshots of a directory. The FSCommand Ls will first call getFileInfo to 1654 * make sure the file/directory exists (before the real getListing call). 1655 * Since we do not have a real INode for ".snapshot", we return an empty 1656 * non-null HdfsFileStatus here. 
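 * <p>
 * Illustrative example (the path /foo is hypothetical, not from the codebase),
 * assuming /foo is a snapshottable directory:
 * <pre>{@code
 * // returns a dummy directory status: length 0, empty name, inode id -1
 * HdfsFileStatus status = getFileInfo4DotSnapshot("/foo/.snapshot");
 * // returns null if /foo exists but is not snapshottable
 * }</pre>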
1657 */ 1658 private HdfsFileStatus getFileInfo4DotSnapshot(String src) 1659 throws UnresolvedLinkException { 1660 Preconditions.checkArgument( 1661 src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), 1662 "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR); 1663 1664 final String dirPath = normalizePath(src.substring(0, 1665 src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length())); 1666 1667 final INode node = this.getINode(dirPath); 1668 if (node != null 1669 && node.isDirectory() 1670 && node.asDirectory() instanceof INodeDirectorySnapshottable) { 1671 return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null, 1672 HdfsFileStatus.EMPTY_NAME, -1L, 0); 1673 } 1674 return null; 1675 } 1676 1677 /** 1678 * Get the blocks associated with the file. 1679 */ 1680 Block[] getFileBlocks(String src) throws UnresolvedLinkException { 1681 waitForReady(); 1682 readLock(); 1683 try { 1684 final INode i = rootDir.getNode(src, false); 1685 return i != null && i.isFile()? i.asFile().getBlocks(): null; 1686 } finally { 1687 readUnlock(); 1688 } 1689 } 1690 1691 1692 INodesInPath getExistingPathINodes(byte[][] components) 1693 throws UnresolvedLinkException { 1694 return INodesInPath.resolve(rootDir, components); 1695 } 1696 1697 /** 1698 * Get {@link INode} associated with the file / directory. 1699 */ 1700 public INode getINode(String src) throws UnresolvedLinkException { 1701 return getLastINodeInPath(src).getINode(0); 1702 } 1703 1704 /** 1705 * Get {@link INode} associated with the file / directory. 1706 */ 1707 public INodesInPath getLastINodeInPath(String src) 1708 throws UnresolvedLinkException { 1709 readLock(); 1710 try { 1711 return rootDir.getLastINodeInPath(src, true); 1712 } finally { 1713 readUnlock(); 1714 } 1715 } 1716 1717 /** 1718 * Get {@link INode} associated with the file / directory. 1719 */ 1720 public INodesInPath getINodesInPath4Write(String src 1721 ) throws UnresolvedLinkException, SnapshotAccessControlException { 1722 readLock(); 1723 try { 1724 return rootDir.getINodesInPath4Write(src, true); 1725 } finally { 1726 readUnlock(); 1727 } 1728 } 1729 1730 /** 1731 * Get {@link INode} associated with the file / directory. 
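 * Unlike {@link #getINode(String)}, the path is resolved for write access, so a
 * path that refers to a read-only snapshot is rejected.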
1732 * @throws SnapshotAccessControlException if path is in RO snapshot 1733 */ 1734 public INode getINode4Write(String src) throws UnresolvedLinkException, 1735 SnapshotAccessControlException { 1736 readLock(); 1737 try { 1738 return rootDir.getINode4Write(src, true); 1739 } finally { 1740 readUnlock(); 1741 } 1742 } 1743 1744 /** 1745 * Check whether the filepath could be created 1746 * @throws SnapshotAccessControlException if path is in RO snapshot 1747 */ 1748 boolean isValidToCreate(String src) throws UnresolvedLinkException, 1749 SnapshotAccessControlException { 1750 String srcs = normalizePath(src); 1751 readLock(); 1752 try { 1753 if (srcs.startsWith("/") && !srcs.endsWith("/") 1754 && rootDir.getINode4Write(srcs, false) == null) { 1755 return true; 1756 } else { 1757 return false; 1758 } 1759 } finally { 1760 readUnlock(); 1761 } 1762 } 1763 1764 /** 1765 * Check whether the path specifies a directory 1766 */ 1767 boolean isDir(String src) throws UnresolvedLinkException { 1768 src = normalizePath(src); 1769 readLock(); 1770 try { 1771 INode node = rootDir.getNode(src, false); 1772 return node != null && node.isDirectory(); 1773 } finally { 1774 readUnlock(); 1775 } 1776 } 1777 1778 /** 1779 * Check whether the path specifies a directory 1780 * @throws SnapshotAccessControlException if path is in RO snapshot 1781 */ 1782 boolean isDirMutable(String src) throws UnresolvedLinkException, 1783 SnapshotAccessControlException { 1784 src = normalizePath(src); 1785 readLock(); 1786 try { 1787 INode node = rootDir.getINode4Write(src, false); 1788 return node != null && node.isDirectory(); 1789 } finally { 1790 readUnlock(); 1791 } 1792 } 1793 1794 /** Updates namespace and diskspace consumed for all 1795 * directories until the parent directory of file represented by path. 1796 * 1797 * @param path path for the file. 1798 * @param nsDelta the delta change of namespace 1799 * @param dsDelta the delta change of diskspace 1800 * @throws QuotaExceededException if the new count violates any quota limit 1801 * @throws FileNotFoundException if path does not exist. 1802 */ 1803 void updateSpaceConsumed(String path, long nsDelta, long dsDelta) 1804 throws QuotaExceededException, FileNotFoundException, 1805 UnresolvedLinkException, SnapshotAccessControlException { 1806 writeLock(); 1807 try { 1808 final INodesInPath iip = rootDir.getINodesInPath4Write(path, false); 1809 if (iip.getLastINode() == null) { 1810 throw new FileNotFoundException("Path not found: " + path); 1811 } 1812 updateCount(iip, nsDelta, dsDelta, true); 1813 } finally { 1814 writeUnlock(); 1815 } 1816 } 1817 1818 private void updateCount(INodesInPath iip, long nsDelta, long dsDelta, 1819 boolean checkQuota) throws QuotaExceededException { 1820 updateCount(iip, iip.getINodes().length - 1, nsDelta, dsDelta, checkQuota); 1821 } 1822 1823 /** update count of each inode with quota 1824 * 1825 * @param iip inodes in a path 1826 * @param numOfINodes the number of inodes to update starting from index 0 1827 * @param nsDelta the delta change of namespace 1828 * @param dsDelta the delta change of diskspace 1829 * @param checkQuota if true then check if quota is exceeded 1830 * @throws QuotaExceededException if the new count violates any quota limit 1831 */ 1832 private void updateCount(INodesInPath iip, int numOfINodes, 1833 long nsDelta, long dsDelta, boolean checkQuota) 1834 throws QuotaExceededException { 1835 assert hasWriteLock(); 1836 if (!ready) { 1837 //still initializing. do not check or update quotas. 
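 // (quota usage is established once the image and edit log finish loading,
 // so it is safe to skip the check and the cached-count update here)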
1838 return;
1839 }
1840 final INode[] inodes = iip.getINodes();
1841 if (numOfINodes > inodes.length) {
1842 numOfINodes = inodes.length;
1843 }
1844 if (checkQuota) {
1845 verifyQuota(inodes, numOfINodes, nsDelta, dsDelta, null);
1846 }
1847 unprotectedUpdateCount(iip, numOfINodes, nsDelta, dsDelta);
1848 }
1849
1850 /**
1851 * Update the count of each inode with quota in the same way as
1852 * {@link #updateCount(INodesInPath, int, long, long, boolean)}, but log instead of throw if a quota is exceeded.
1853 */
1854 private void updateCountNoQuotaCheck(INodesInPath inodesInPath,
1855 int numOfINodes, long nsDelta, long dsDelta) {
1856 assert hasWriteLock();
1857 try {
1858 updateCount(inodesInPath, numOfINodes, nsDelta, dsDelta, false);
1859 } catch (QuotaExceededException e) {
1860 NameNode.LOG.error("BUG: unexpected exception ", e);
1861 }
1862 }
1863
1864 /**
1865 * Updates the quota usage without verification;
1866 * it is the caller's responsibility to make sure the quota is not exceeded.
1867 */
1868 private static void unprotectedUpdateCount(INodesInPath inodesInPath,
1869 int numOfINodes, long nsDelta, long dsDelta) {
1870 final INode[] inodes = inodesInPath.getINodes();
1871 for(int i=0; i < numOfINodes; i++) {
1872 if (inodes[i].isQuotaSet()) { // a directory with quota
1873 INodeDirectoryWithQuota node = (INodeDirectoryWithQuota) inodes[i]
1874 .asDirectory();
1875 node.addSpaceConsumed2Cache(nsDelta, dsDelta);
1876 }
1877 }
1878 }
1879
1880 /** Return the name of the path represented by inodes at [0, pos] */
1881 static String getFullPathName(INode[] inodes, int pos) {
1882 StringBuilder fullPathName = new StringBuilder();
1883 if (inodes[0].isRoot()) {
1884 if (pos == 0) return Path.SEPARATOR;
1885 } else {
1886 fullPathName.append(inodes[0].getLocalName());
1887 }
1888
1889 for (int i=1; i<=pos; i++) {
1890 fullPathName.append(Path.SEPARATOR_CHAR).append(inodes[i].getLocalName());
1891 }
1892 return fullPathName.toString();
1893 }
1894
1895 /**
1896 * @return the relative path of an inode from one of its ancestors,
1897 * represented by an array of inodes.
1898 */
1899 private static INode[] getRelativePathINodes(INode inode, INode ancestor) {
1900 // calculate the depth of this inode from the ancestor
1901 int depth = 0;
1902 for (INode i = inode; i != null && !i.equals(ancestor); i = i.getParent()) {
1903 depth++;
1904 }
1905 INode[] inodes = new INode[depth];
1906
1907 // fill up the inodes in the path from this inode to root
1908 for (int i = 0; i < depth; i++) {
1909 if (inode == null) {
1910 NameNode.stateChangeLog.warn("Could not get full path."
1911 + " Corresponding file might have been deleted already.");
1912 return null;
1913 }
1914 inodes[depth-i-1] = inode;
1915 inode = inode.getParent();
1916 }
1917 return inodes;
1918 }
1919
1920 private static INode[] getFullPathINodes(INode inode) {
1921 return getRelativePathINodes(inode, null);
1922 }
1923
1924 /** Return the full path name of the specified inode */
1925 static String getFullPathName(INode inode) {
1926 INode[] inodes = getFullPathINodes(inode);
1927 // inodes can be null only when it is called without holding the lock
1928 return inodes == null ? "" : getFullPathName(inodes, inodes.length - 1);
1929 }
1930
1931 /**
1932 * Create a directory.
1933 * If ancestor directories do not exist, automatically create them.
1934 *
1935 * @param src string representation of the path to the directory
1936 * @param permissions the permission of the directory
1937 * @param inheritPermission whether the permission of the directory should be
1938 * inherited from its parent or not.
u+wx is implicitly added to
1939 * the automatically created directories, and to the
1940 * given directory if inheritPermission is true
1941 * @param now creation time
1942 * @return true if the operation succeeds, false otherwise
1943 * @throws FileAlreadyExistsException if an ancestor or the path itself is a file
1944 * @throws QuotaExceededException if directory creation violates
1945 * any quota limit
1946 * @throws UnresolvedLinkException if a symlink is encountered in src.
1947 * @throws SnapshotAccessControlException if path is in RO snapshot
1948 */
1949 boolean mkdirs(String src, PermissionStatus permissions,
1950 boolean inheritPermission, long now)
1951 throws FileAlreadyExistsException, QuotaExceededException,
1952 UnresolvedLinkException, SnapshotAccessControlException {
1953 src = normalizePath(src);
1954 String[] names = INode.getPathNames(src);
1955 byte[][] components = INode.getPathComponents(names);
1956 final int lastInodeIndex = components.length - 1;
1957
1958 writeLock();
1959 try {
1960 INodesInPath iip = getExistingPathINodes(components);
1961 if (iip.isSnapshot()) {
1962 throw new SnapshotAccessControlException(
1963 "Modification on RO snapshot is disallowed");
1964 }
1965 INode[] inodes = iip.getINodes();
1966
1967 // find the index of the first null in inodes[]
1968 StringBuilder pathbuilder = new StringBuilder();
1969 int i = 1;
1970 for(; i < inodes.length && inodes[i] != null; i++) {
1971 pathbuilder.append(Path.SEPARATOR).append(names[i]);
1972 if (!inodes[i].isDirectory()) {
1973 throw new FileAlreadyExistsException("Parent path is not a directory: "
1974 + pathbuilder+ " "+inodes[i].getLocalName());
1975 }
1976 }
1977
1978 // default to creating parent dirs with the given perms
1979 PermissionStatus parentPermissions = permissions;
1980
1981 // if not inheriting and it's the last inode, there's no use in
1982 // computing perms that won't be used
1983 if (inheritPermission || (i < lastInodeIndex)) {
1984 // if inheriting (i.e. creating a file or symlink), use the parent dir,
1985 // else the supplied permissions
1986 // NOTE: the permissions of the auto-created directories violate posix
1987 FsPermission parentFsPerm = inheritPermission
1988 ? inodes[i-1].getFsPermission() : permissions.getPermission();
1989
1990 // ensure that the permissions allow user write+execute
1991 if (!parentFsPerm.getUserAction().implies(FsAction.WRITE_EXECUTE)) {
1992 parentFsPerm = new FsPermission(
1993 parentFsPerm.getUserAction().or(FsAction.WRITE_EXECUTE),
1994 parentFsPerm.getGroupAction(),
1995 parentFsPerm.getOtherAction()
1996 );
1997 }
1998
1999 if (!parentPermissions.getPermission().equals(parentFsPerm)) {
2000 parentPermissions = new PermissionStatus(
2001 parentPermissions.getUserName(),
2002 parentPermissions.getGroupName(),
2003 parentFsPerm
2004 );
2005 // when inheriting, use same perms for entire path
2006 if (inheritPermission) permissions = parentPermissions;
2007 }
2008 }
2009
2010 // create directories beginning from the first null index
2011 for(; i < inodes.length; i++) {
2012 pathbuilder.append(Path.SEPARATOR + names[i]);
2013 unprotectedMkdir(namesystem.allocateNewInodeId(), iip, i,
2014 components[i], (i < lastInodeIndex) ? parentPermissions
2015 : permissions, now);
2016 if (inodes[i] == null) {
2017 return false;
2018 }
2019 // Directory creation also counts towards FilesCreated
2020 // to match count of FilesDeleted metric.
2021 if (getFSNamesystem() != null) 2022 NameNode.getNameNodeMetrics().incrFilesCreated(); 2023 2024 final String cur = pathbuilder.toString(); 2025 fsImage.getEditLog().logMkDir(cur, inodes[i]); 2026 if(NameNode.stateChangeLog.isDebugEnabled()) { 2027 NameNode.stateChangeLog.debug( 2028 "DIR* FSDirectory.mkdirs: created directory " + cur); 2029 } 2030 } 2031 } finally { 2032 writeUnlock(); 2033 } 2034 return true; 2035 } 2036 2037 INode unprotectedMkdir(long inodeId, String src, PermissionStatus permissions, 2038 long timestamp) throws QuotaExceededException, 2039 UnresolvedLinkException { 2040 assert hasWriteLock(); 2041 byte[][] components = INode.getPathComponents(src); 2042 INodesInPath iip = getExistingPathINodes(components); 2043 INode[] inodes = iip.getINodes(); 2044 final int pos = inodes.length - 1; 2045 unprotectedMkdir(inodeId, iip, pos, components[pos], permissions, 2046 timestamp); 2047 return inodes[pos]; 2048 } 2049 2050 /** create a directory at index pos. 2051 * The parent path to the directory is at [0, pos-1]. 2052 * All ancestors exist. Newly created one stored at index pos. 2053 */ 2054 private void unprotectedMkdir(long inodeId, INodesInPath inodesInPath, 2055 int pos, byte[] name, PermissionStatus permission, long timestamp) 2056 throws QuotaExceededException { 2057 assert hasWriteLock(); 2058 final INodeDirectory dir = new INodeDirectory(inodeId, name, permission, 2059 timestamp); 2060 if (addChild(inodesInPath, pos, dir, true)) { 2061 inodesInPath.setINode(pos, dir); 2062 } 2063 } 2064 2065 /** 2066 * Add the given child to the namespace. 2067 * @param src The full path name of the child node. 2068 * @throw QuotaExceededException is thrown if it violates quota limit 2069 */ 2070 private boolean addINode(String src, INode child 2071 ) throws QuotaExceededException, UnresolvedLinkException { 2072 byte[][] components = INode.getPathComponents(src); 2073 child.setLocalName(components[components.length-1]); 2074 cacheName(child); 2075 writeLock(); 2076 try { 2077 return addLastINode(getExistingPathINodes(components), child, true); 2078 } finally { 2079 writeUnlock(); 2080 } 2081 } 2082 2083 /** 2084 * Verify quota for adding or moving a new INode with required 2085 * namespace and diskspace to a given position. 2086 * 2087 * @param inodes INodes corresponding to a path 2088 * @param pos position where a new INode will be added 2089 * @param nsDelta needed namespace 2090 * @param dsDelta needed diskspace 2091 * @param commonAncestor Last node in inodes array that is a common ancestor 2092 * for a INode that is being moved from one location to the other. 2093 * Pass null if a node is not being moved. 2094 * @throws QuotaExceededException if quota limit is exceeded. 2095 */ 2096 private static void verifyQuota(INode[] inodes, int pos, long nsDelta, 2097 long dsDelta, INode commonAncestor) throws QuotaExceededException { 2098 if (nsDelta <= 0 && dsDelta <= 0) { 2099 // if quota is being freed or not being consumed 2100 return; 2101 } 2102 2103 // check existing components in the path 2104 for(int i = (pos > inodes.length? 
inodes.length: pos) - 1; i >= 0; i--) {
2105 if (commonAncestor == inodes[i]) {
2106 // Stop checking for quota when common ancestor is reached
2107 return;
2108 }
2109 if (inodes[i].isQuotaSet()) { // a directory with quota
2110 try {
2111 ((INodeDirectoryWithQuota) inodes[i].asDirectory()).verifyQuota(
2112 nsDelta, dsDelta);
2113 } catch (QuotaExceededException e) {
2114 e.setPathName(getFullPathName(inodes, i));
2115 throw e;
2116 }
2117 }
2118 }
2119 }
2120
2121 /**
2122 * Verify quota for a rename operation in which srcInodes[srcInodes.length-1] is
2123 * moved to dstInodes[dstInodes.length-1].
2124 *
2125 * @param src inodes in the path of the directory from which the node is being moved.
2126 * @param dst inodes in the path of the directory to which the node is being moved.
2127 * @throws QuotaExceededException if quota limit is exceeded.
2128 */
2129 private void verifyQuotaForRename(INode[] src, INode[] dst)
2130 throws QuotaExceededException {
2131 if (!ready) {
2132 // Do not check quota if edits log is still being processed
2133 return;
2134 }
2135 int i = 0;
2136 for(; src[i] == dst[i]; i++);
2137 // src[i - 1] is the last common ancestor.
2138
2139 final Quota.Counts delta = src[src.length - 1].computeQuotaUsage();
2140
2141 // Reduce the required quota by dst that is being removed
2142 final int dstIndex = dst.length - 1;
2143 if (dst[dstIndex] != null) {
2144 delta.subtract(dst[dstIndex].computeQuotaUsage());
2145 }
2146 verifyQuota(dst, dstIndex, delta.get(Quota.NAMESPACE),
2147 delta.get(Quota.DISKSPACE), src[i - 1]);
2148 }
2149
2150 /** Verify if the snapshot name is legal. */
2151 void verifySnapshotName(String snapshotName, String path)
2152 throws PathComponentTooLongException {
2153 if (snapshotName.contains(Path.SEPARATOR)) {
2154 throw new HadoopIllegalArgumentException(
2155 "Snapshot name cannot contain \"" + Path.SEPARATOR + "\"");
2156 }
2157 final byte[] bytes = DFSUtil.string2Bytes(snapshotName);
2158 verifyINodeName(bytes);
2159 verifyMaxComponentLength(bytes, path, 0);
2160 }
2161
2162 /** Verify if the inode name is legal. */
2163 void verifyINodeName(byte[] childName) throws HadoopIllegalArgumentException {
2164 if (Arrays.equals(HdfsConstants.DOT_SNAPSHOT_DIR_BYTES, childName)) {
2165 String s = "\"" + HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name.";
2166 if (!ready) {
2167 s += " Please rename it before upgrade.";
2168 }
2169 throw new HadoopIllegalArgumentException(s);
2170 }
2171 }
2172
2173 /**
2174 * Verify child's name for fs limit.
2175 * @throws PathComponentTooLongException if the child's name is too long.
2176 */
2177 void verifyMaxComponentLength(byte[] childName, Object parentPath, int pos)
2178 throws PathComponentTooLongException {
2179 if (maxComponentLength == 0) {
2180 return;
2181 }
2182
2183 final int length = childName.length;
2184 if (length > maxComponentLength) {
2185 final String p = parentPath instanceof INode[]?
2186 getFullPathName((INode[])parentPath, pos - 1): (String)parentPath;
2187 final PathComponentTooLongException e = new PathComponentTooLongException(
2188 maxComponentLength, length, p, DFSUtil.bytes2String(childName));
2189 if (ready) {
2190 throw e;
2191 } else {
2192 // Do not throw if edits log is still being processed
2193 NameNode.LOG.error("ERROR in FSDirectory.verifyMaxComponentLength", e);
2194 }
2195 }
2196 }
2197
2198 /**
2199 * Verify children size for fs limit.
2200 * @throws MaxDirectoryItemsExceededException if the directory has too many children.
2201 */
2202 void verifyMaxDirItems(INode[] pathComponents, int pos)
2203 throws MaxDirectoryItemsExceededException {
2204 if (maxDirItems == 0) {
2205 return;
2206 }
2207
2208 final INodeDirectory parent = pathComponents[pos-1].asDirectory();
2209 final int count = parent.getChildrenList(null).size();
2210 if (count >= maxDirItems) {
2211 final MaxDirectoryItemsExceededException e
2212 = new MaxDirectoryItemsExceededException(maxDirItems, count);
2213 if (ready) {
2214 e.setPathName(getFullPathName(pathComponents, pos - 1));
2215 throw e;
2216 } else {
2217 // Do not throw if edits log is still being processed
2218 NameNode.LOG.error("FSDirectory.verifyMaxDirItems: "
2219 + e.getLocalizedMessage());
2220 }
2221 }
2222 }
2223
2224 /**
2225 * The same as {@link #addChild(INodesInPath, int, INode, boolean)}
2226 * with pos = length - 1.
2227 */
2228 private boolean addLastINode(INodesInPath inodesInPath,
2229 INode inode, boolean checkQuota) throws QuotaExceededException {
2230 final int pos = inodesInPath.getINodes().length - 1;
2231 return addChild(inodesInPath, pos, inode, checkQuota);
2232 }
2233
2234 /** Add a node child to the inodes at index pos.
2235 * Its ancestors are stored at [0, pos-1].
2236 * @return false if the child with this name already exists;
2237 * otherwise return true.
2238 * @throws QuotaExceededException if it violates the quota limit
2239 */
2240 private boolean addChild(INodesInPath iip, int pos,
2241 INode child, boolean checkQuota) throws QuotaExceededException {
2242 final INode[] inodes = iip.getINodes();
2243 // Disallow creation of /.reserved. This may be created when loading
2244 // editlog/fsimage during upgrade since /.reserved was a valid name in an older
2245 // release. This may also be called when a user tries to create a file
2246 // or directory /.reserved.
2247 if (pos == 1 && inodes[0] == rootDir && isReservedName(child)) {
2248 throw new HadoopIllegalArgumentException(
2249 "File name \"" + child.getLocalName() + "\" is reserved and cannot "
2250 + "be created. If this is during upgrade change the name of the "
2251 + "existing file or directory to another name before upgrading "
2252 + "to the new release.");
2253 }
2254 // The filesystem limits are not really quotas, so this check may appear
2255 // odd. It's because a rename operation deletes the src, tries to add
2256 // it to the dest and, if that fails, re-adds the src from whence it came.
2257 // The rename code disables the quota when it's restoring to the
2258 // original location because a quota violation would cause the item
2259 // to go "poof". The fs limits must be bypassed for the same reason.
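 // For example, if a rename fails at the destination, the source entry is
 // restored with checkQuota == false so that the restore cannot itself be
 // rejected by these checks.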
2260 if (checkQuota) { 2261 verifyMaxComponentLength(child.getLocalNameBytes(), inodes, pos); 2262 verifyMaxDirItems(inodes, pos); 2263 } 2264 // always verify inode name 2265 verifyINodeName(child.getLocalNameBytes()); 2266 2267 final Quota.Counts counts = child.computeQuotaUsage(); 2268 updateCount(iip, pos, 2269 counts.get(Quota.NAMESPACE), counts.get(Quota.DISKSPACE), checkQuota); 2270 final INodeDirectory parent = inodes[pos-1].asDirectory(); 2271 boolean added = false; 2272 try { 2273 added = parent.addChild(child, true, iip.getLatestSnapshot(), 2274 inodeMap); 2275 } catch (QuotaExceededException e) { 2276 updateCountNoQuotaCheck(iip, pos, 2277 -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE)); 2278 throw e; 2279 } 2280 if (!added) { 2281 updateCountNoQuotaCheck(iip, pos, 2282 -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE)); 2283 } else { 2284 iip.setINode(pos - 1, child.getParent()); 2285 addToInodeMap(child); 2286 } 2287 return added; 2288 } 2289 2290 private boolean addLastINodeNoQuotaCheck(INodesInPath inodesInPath, INode i) { 2291 try { 2292 return addLastINode(inodesInPath, i, false); 2293 } catch (QuotaExceededException e) { 2294 NameNode.LOG.warn("FSDirectory.addChildNoQuotaCheck - unexpected", e); 2295 } 2296 return false; 2297 } 2298 2299 /** 2300 * Remove the last inode in the path from the namespace. 2301 * Count of each ancestor with quota is also updated. 2302 * @return -1 for failing to remove; 2303 * 0 for removing a reference whose referred inode has other 2304 * reference nodes; 2305 * >0 otherwise. 2306 */ 2307 private long removeLastINode(final INodesInPath iip) 2308 throws QuotaExceededException { 2309 final Snapshot latestSnapshot = iip.getLatestSnapshot(); 2310 final INode last = iip.getLastINode(); 2311 final INodeDirectory parent = iip.getINode(-2).asDirectory(); 2312 if (!parent.removeChild(last, latestSnapshot, inodeMap)) { 2313 return -1; 2314 } 2315 INodeDirectory newParent = last.getParent(); 2316 if (parent != newParent) { 2317 iip.setINode(-2, newParent); 2318 } 2319 2320 if (!last.isInLatestSnapshot(latestSnapshot)) { 2321 final Quota.Counts counts = last.computeQuotaUsage(); 2322 updateCountNoQuotaCheck(iip, iip.getINodes().length - 1, 2323 -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE)); 2324 2325 if (INodeReference.tryRemoveReference(last) > 0) { 2326 return 0; 2327 } else { 2328 return counts.get(Quota.NAMESPACE); 2329 } 2330 } 2331 return 1; 2332 } 2333 2334 /** 2335 */ 2336 String normalizePath(String src) { 2337 if (src.length() > 1 && src.endsWith("/")) { 2338 src = src.substring(0, src.length() - 1); 2339 } 2340 return src; 2341 } 2342 2343 ContentSummary getContentSummary(String src) 2344 throws FileNotFoundException, UnresolvedLinkException { 2345 String srcs = normalizePath(src); 2346 readLock(); 2347 try { 2348 INode targetNode = rootDir.getNode(srcs, false); 2349 if (targetNode == null) { 2350 throw new FileNotFoundException("File does not exist: " + srcs); 2351 } 2352 else { 2353 // Make it relinquish locks everytime contentCountLimit entries are 2354 // processed. 0 means disabled. I.e. blocking for the entire duration. 
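 // (each yield briefly releases and re-acquires the FSDirectory and
 // FSNamesystem locks; the number of yields is added to yieldCount below)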
2355 ContentSummaryComputationContext cscc =
2356
2357 new ContentSummaryComputationContext(this, getFSNamesystem(),
2358 contentCountLimit);
2359 ContentSummary cs = targetNode.computeAndConvertContentSummary(cscc);
2360 yieldCount += cscc.getYieldCount();
2361 return cs;
2362 }
2363 } finally {
2364 readUnlock();
2365 }
2366 }
2367
2368 @VisibleForTesting
2369 public long getYieldCount() {
2370 return yieldCount;
2371 }
2372
2373 public INodeMap getINodeMap() {
2374 return inodeMap;
2375 }
2376
2377 /**
2378 * This method is always called with writeLock of FSDirectory held.
2379 */
2380 public final void addToInodeMap(INode inode) {
2381 if (inode instanceof INodeWithAdditionalFields) {
2382 inodeMap.put((INodeWithAdditionalFields)inode);
2383 }
2384 }
2385
2386
2387 /**
2388 * This method is always called with writeLock of FSDirectory held.
2389 */
2390 public final void removeFromInodeMap(List<? extends INode> inodes) {
2391 if (inodes != null) {
2392 for (INode inode : inodes) {
2393 if (inode != null && inode instanceof INodeWithAdditionalFields) {
2394 inodeMap.remove(inode);
2395 }
2396 }
2397 }
2398 }
2399
2400 /**
2401 * Get the inode from inodeMap based on its inode id.
2402 * @param id The given id
2403 * @return The inode associated with the given id
2404 */
2405 public INode getInode(long id) {
2406 readLock();
2407 try {
2408 return inodeMap.get(id);
2409 } finally {
2410 readUnlock();
2411 }
2412 }
2413
2414 @VisibleForTesting
2415 int getInodeMapSize() {
2416 return inodeMap.size();
2417 }
2418
2419 /**
2420 * See {@link ClientProtocol#setQuota(String, long, long)} for the contract.
2421 * Sets quota for a directory.
2422 * @return the INodeDirectory if any of the quotas have changed; null otherwise.
2423 * @throws FileNotFoundException if the path does not exist.
2424 * @throws PathIsNotDirectoryException if the path is not a directory.
2425 * @throws QuotaExceededException if the directory tree size is
2426 * greater than the given quota
2427 * @throws UnresolvedLinkException if a symlink is encountered in src.
2428 * @throws SnapshotAccessControlException if path is in RO snapshot 2429 */ 2430 INodeDirectory unprotectedSetQuota(String src, long nsQuota, long dsQuota) 2431 throws FileNotFoundException, PathIsNotDirectoryException, 2432 QuotaExceededException, UnresolvedLinkException, 2433 SnapshotAccessControlException { 2434 assert hasWriteLock(); 2435 // sanity check 2436 if ((nsQuota < 0 && nsQuota != HdfsConstants.QUOTA_DONT_SET && 2437 nsQuota < HdfsConstants.QUOTA_RESET) || 2438 (dsQuota < 0 && dsQuota != HdfsConstants.QUOTA_DONT_SET && 2439 dsQuota < HdfsConstants.QUOTA_RESET)) { 2440 throw new IllegalArgumentException("Illegal value for nsQuota or " + 2441 "dsQuota : " + nsQuota + " and " + 2442 dsQuota); 2443 } 2444 2445 String srcs = normalizePath(src); 2446 final INodesInPath iip = rootDir.getINodesInPath4Write(srcs, true); 2447 INodeDirectory dirNode = INodeDirectory.valueOf(iip.getLastINode(), srcs); 2448 if (dirNode.isRoot() && nsQuota == HdfsConstants.QUOTA_RESET) { 2449 throw new IllegalArgumentException("Cannot clear namespace quota on root."); 2450 } else { // a directory inode 2451 long oldNsQuota = dirNode.getNsQuota(); 2452 long oldDsQuota = dirNode.getDsQuota(); 2453 if (nsQuota == HdfsConstants.QUOTA_DONT_SET) { 2454 nsQuota = oldNsQuota; 2455 } 2456 if (dsQuota == HdfsConstants.QUOTA_DONT_SET) { 2457 dsQuota = oldDsQuota; 2458 } 2459 2460 final Snapshot latest = iip.getLatestSnapshot(); 2461 if (dirNode instanceof INodeDirectoryWithQuota) { 2462 INodeDirectoryWithQuota quotaNode = (INodeDirectoryWithQuota) dirNode; 2463 Quota.Counts counts = null; 2464 if (!quotaNode.isQuotaSet()) { 2465 // dirNode must be an INodeDirectoryWithSnapshot whose quota has not 2466 // been set yet 2467 counts = quotaNode.computeQuotaUsage(); 2468 } 2469 // a directory with quota; so set the quota to the new value 2470 quotaNode.setQuota(nsQuota, dsQuota); 2471 if (quotaNode.isQuotaSet() && counts != null) { 2472 quotaNode.setSpaceConsumed(counts.get(Quota.NAMESPACE), 2473 counts.get(Quota.DISKSPACE)); 2474 } else if (!quotaNode.isQuotaSet() && latest == null) { 2475 // do not replace the node if the node is a snapshottable directory 2476 // without snapshots 2477 if (!(quotaNode instanceof INodeDirectoryWithSnapshot)) { 2478 // will not come here for root because root is snapshottable and 2479 // root's nsQuota is always set 2480 return quotaNode.replaceSelf4INodeDirectory(inodeMap); 2481 } 2482 } 2483 } else { 2484 // a non-quota directory; so replace it with a directory with quota 2485 return dirNode.replaceSelf4Quota(latest, nsQuota, dsQuota, inodeMap); 2486 } 2487 return (oldNsQuota != nsQuota || oldDsQuota != dsQuota) ? dirNode : null; 2488 } 2489 } 2490 2491 /** 2492 * See {@link ClientProtocol#setQuota(String, long, long)} for the contract. 
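 * <p>
 * A minimal usage sketch (the path is only an example; the constants are from
 * {@link HdfsConstants}):
 * <pre>{@code
 * // set only the namespace quota, leaving the diskspace quota unchanged
 * setQuota("/user/alice", 10000, HdfsConstants.QUOTA_DONT_SET);
 * // remove both quotas
 * setQuota("/user/alice", HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
 * }</pre>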
2493 * @throws SnapshotAccessControlException if path is in RO snapshot 2494 * @see #unprotectedSetQuota(String, long, long) 2495 */ 2496 void setQuota(String src, long nsQuota, long dsQuota) 2497 throws FileNotFoundException, PathIsNotDirectoryException, 2498 QuotaExceededException, UnresolvedLinkException, 2499 SnapshotAccessControlException { 2500 writeLock(); 2501 try { 2502 INodeDirectory dir = unprotectedSetQuota(src, nsQuota, dsQuota); 2503 if (dir != null) { 2504 fsImage.getEditLog().logSetQuota(src, dir.getNsQuota(), 2505 dir.getDsQuota()); 2506 } 2507 } finally { 2508 writeUnlock(); 2509 } 2510 } 2511 2512 long totalInodes() { 2513 readLock(); 2514 try { 2515 return rootDir.numItemsInTree(); 2516 } finally { 2517 readUnlock(); 2518 } 2519 } 2520 2521 /** 2522 * Sets the access time on the file/directory. Logs it in the transaction log. 2523 */ 2524 void setTimes(String src, INode inode, long mtime, long atime, boolean force, 2525 Snapshot latest) throws QuotaExceededException { 2526 boolean status = false; 2527 writeLock(); 2528 try { 2529 status = unprotectedSetTimes(inode, mtime, atime, force, latest); 2530 } finally { 2531 writeUnlock(); 2532 } 2533 if (status) { 2534 fsImage.getEditLog().logTimes(src, mtime, atime); 2535 } 2536 } 2537 2538 boolean unprotectedSetTimes(String src, long mtime, long atime, boolean force) 2539 throws UnresolvedLinkException, QuotaExceededException { 2540 assert hasWriteLock(); 2541 final INodesInPath i = getLastINodeInPath(src); 2542 return unprotectedSetTimes(i.getLastINode(), mtime, atime, force, 2543 i.getLatestSnapshot()); 2544 } 2545 2546 private boolean unprotectedSetTimes(INode inode, long mtime, 2547 long atime, boolean force, Snapshot latest) throws QuotaExceededException { 2548 assert hasWriteLock(); 2549 boolean status = false; 2550 if (mtime != -1) { 2551 inode = inode.setModificationTime(mtime, latest, inodeMap); 2552 status = true; 2553 } 2554 if (atime != -1) { 2555 long inodeTime = inode.getAccessTime(null); 2556 2557 // if the last access time update was within the last precision interval, then 2558 // no need to store access time 2559 if (atime <= inodeTime + getFSNamesystem().getAccessTimePrecision() && !force) { 2560 status = false; 2561 } else { 2562 inode.setAccessTime(atime, latest, inodeMap); 2563 status = true; 2564 } 2565 } 2566 return status; 2567 } 2568 2569 /** 2570 * Reset the entire namespace tree. 
2571 */ 2572 void reset() { 2573 writeLock(); 2574 try { 2575 setReady(false); 2576 rootDir = createRoot(getFSNamesystem()); 2577 inodeMap.clear(); 2578 addToInodeMap(rootDir); 2579 nameCache.reset(); 2580 } finally { 2581 writeUnlock(); 2582 } 2583 } 2584 2585 /** 2586 * create an hdfs file status from an inode 2587 * 2588 * @param path the local name 2589 * @param node inode 2590 * @param needLocation if block locations need to be included or not 2591 * @return a file status 2592 * @throws IOException if any error occurs 2593 */ 2594 private HdfsFileStatus createFileStatus(byte[] path, INode node, 2595 boolean needLocation, Snapshot snapshot) throws IOException { 2596 if (needLocation) { 2597 return createLocatedFileStatus(path, node, snapshot); 2598 } else { 2599 return createFileStatus(path, node, snapshot); 2600 } 2601 } 2602 /** 2603 * Create FileStatus by file INode 2604 */ 2605 HdfsFileStatus createFileStatus(byte[] path, INode node, 2606 Snapshot snapshot) { 2607 long size = 0; // length is zero for directories 2608 short replication = 0; 2609 long blocksize = 0; 2610 if (node.isFile()) { 2611 final INodeFile fileNode = node.asFile(); 2612 size = fileNode.computeFileSize(snapshot); 2613 replication = fileNode.getFileReplication(snapshot); 2614 blocksize = fileNode.getPreferredBlockSize(); 2615 } 2616 int childrenNum = node.isDirectory() ? 2617 node.asDirectory().getChildrenNum(snapshot) : 0; 2618 2619 return new HdfsFileStatus( 2620 size, 2621 node.isDirectory(), 2622 replication, 2623 blocksize, 2624 node.getModificationTime(snapshot), 2625 node.getAccessTime(snapshot), 2626 node.getFsPermission(snapshot), 2627 node.getUserName(snapshot), 2628 node.getGroupName(snapshot), 2629 node.isSymlink() ? node.asSymlink().getSymlink() : null, 2630 path, 2631 node.getId(), 2632 childrenNum); 2633 } 2634 2635 /** 2636 * Create FileStatus with location info by file INode 2637 */ 2638 private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path, 2639 INode node, Snapshot snapshot) throws IOException { 2640 assert hasReadLock(); 2641 long size = 0; // length is zero for directories 2642 short replication = 0; 2643 long blocksize = 0; 2644 LocatedBlocks loc = null; 2645 if (node.isFile()) { 2646 final INodeFile fileNode = node.asFile(); 2647 size = fileNode.computeFileSize(snapshot); 2648 replication = fileNode.getFileReplication(snapshot); 2649 blocksize = fileNode.getPreferredBlockSize(); 2650 2651 final boolean inSnapshot = snapshot != null; 2652 final boolean isUc = inSnapshot ? false : fileNode.isUnderConstruction(); 2653 final long fileSize = !inSnapshot && isUc ? 2654 fileNode.computeFileSizeNotIncludingLastUcBlock() : size; 2655 loc = getFSNamesystem().getBlockManager().createLocatedBlocks( 2656 fileNode.getBlocks(), fileSize, isUc, 0L, size, false, 2657 inSnapshot); 2658 if (loc == null) { 2659 loc = new LocatedBlocks(); 2660 } 2661 } 2662 int childrenNum = node.isDirectory() ? 2663 node.asDirectory().getChildrenNum(snapshot) : 0; 2664 2665 HdfsLocatedFileStatus status = 2666 new HdfsLocatedFileStatus(size, node.isDirectory(), replication, 2667 blocksize, node.getModificationTime(snapshot), 2668 node.getAccessTime(snapshot), node.getFsPermission(snapshot), 2669 node.getUserName(snapshot), node.getGroupName(snapshot), 2670 node.isSymlink() ? node.asSymlink().getSymlink() : null, path, 2671 node.getId(), loc, childrenNum); 2672 // Set caching information for the located blocks. 
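 // (replicas that are cached on DataNodes are marked here so that clients can
 // prefer reading from cached locations)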
2673 if (loc != null) {
2674 CacheManager cacheManager = namesystem.getCacheManager();
2675 for (LocatedBlock lb: loc.getLocatedBlocks()) {
2676 cacheManager.setCachedLocations(lb);
2677 }
2678 }
2679 return status;
2680 }
2681
2682
2683 /**
2684 * Add the given symbolic link to the fs. Record it in the edits log.
2685 */
2686 INodeSymlink addSymlink(String path, String target,
2687 PermissionStatus dirPerms, boolean createParent, boolean logRetryCache)
2688 throws UnresolvedLinkException, FileAlreadyExistsException,
2689 QuotaExceededException, SnapshotAccessControlException {
2690 waitForReady();
2691
2692 final long modTime = now();
2693 if (createParent) {
2694 final String parent = new Path(path).getParent().toString();
2695 if (!mkdirs(parent, dirPerms, true, modTime)) {
2696 return null;
2697 }
2698 }
2699 final String userName = dirPerms.getUserName();
2700 INodeSymlink newNode = null;
2701 long id = namesystem.allocateNewInodeId();
2702 writeLock();
2703 try {
2704 newNode = unprotectedAddSymlink(id, path, target, modTime, modTime,
2705 new PermissionStatus(userName, null, FsPermission.getDefault()));
2706 } finally {
2707 writeUnlock();
2708 }
2709 if (newNode == null) {
2710 NameNode.stateChangeLog.info("DIR* addSymlink: failed to add " + path);
2711 return null;
2712 }
2713 fsImage.getEditLog().logSymlink(path, target, modTime, modTime, newNode,
2714 logRetryCache);
2715
2716 if(NameNode.stateChangeLog.isDebugEnabled()) {
2717 NameNode.stateChangeLog.debug("DIR* addSymlink: " + path + " is added");
2718 }
2719 return newNode;
2720 }
2721
2722 /**
2723 * Add the specified path into the namespace. Invoked from edit log processing.
2724 */
2725 INodeSymlink unprotectedAddSymlink(long id, String path, String target,
2726 long mtime, long atime, PermissionStatus perm)
2727 throws UnresolvedLinkException, QuotaExceededException {
2728 assert hasWriteLock();
2729 final INodeSymlink symlink = new INodeSymlink(id, null, perm, mtime, atime,
2730 target);
2731 return addINode(path, symlink) ? symlink : null;
2732 }
2733
2734 /**
2735 * Caches frequently used file names to reuse file name objects and
2736 * reduce heap size.
2737 */
2738 void cacheName(INode inode) {
2739 // Name is cached only for files
2740 if (!inode.isFile()) {
2741 return;
2742 }
2743 ByteArray name = new ByteArray(inode.getLocalNameBytes());
2744 name = nameCache.put(name);
2745 if (name != null) {
2746 inode.setLocalName(name.getBytes());
2747 }
2748 }
2749
2750 void shutdown() {
2751 nameCache.reset();
2752 inodeMap.clear();
2753 }
2754
2755 /**
2756 * Given an INode, get all the path components leading to it from the root.
2757 * If an INode corresponding to C is given in /A/B/C, the returned
2758 * path components will be {root, A, B, C}
2759 */
2760 static byte[][] getPathComponents(INode inode) {
2761 List<byte[]> components = new ArrayList<byte[]>();
2762 components.add(0, inode.getLocalNameBytes());
2763 while(inode.getParent() != null) {
2764 components.add(0, inode.getParent().getLocalNameBytes());
2765 inode = inode.getParent();
2766 }
2767 return components.toArray(new byte[components.size()][]);
2768 }
2769
2770 /**
2771 * @return path components for reserved path, else null.
2772 */
2773 static byte[][] getPathComponentsForReservedPath(String src) {
2774 return !isReservedName(src) ? null : INode.getPathComponents(src);
2775 }
2776
2777 /**
2778 * Resolve the path of /.reserved/.inodes/<inodeid>/...
to a regular path 2779 * 2780 * @param src path that is being processed 2781 * @param pathComponents path components corresponding to the path 2782 * @param fsd FSDirectory 2783 * @return if the path indicates an inode, return path after replacing upto 2784 * <inodeid> with the corresponding path of the inode, else the path 2785 * in {@code src} as is. 2786 * @throws FileNotFoundException if inodeid is invalid 2787 */ 2788 static String resolvePath(String src, byte[][] pathComponents, FSDirectory fsd) 2789 throws FileNotFoundException { 2790 if (pathComponents == null || pathComponents.length <= 3) { 2791 return src; 2792 } 2793 // Not /.reserved/.inodes 2794 if (!Arrays.equals(DOT_RESERVED, pathComponents[1]) 2795 || !Arrays.equals(DOT_INODES, pathComponents[2])) { // Not .inodes path 2796 return src; 2797 } 2798 final String inodeId = DFSUtil.bytes2String(pathComponents[3]); 2799 long id = 0; 2800 try { 2801 id = Long.valueOf(inodeId); 2802 } catch (NumberFormatException e) { 2803 throw new FileNotFoundException("Invalid inode path: " + src); 2804 } 2805 if (id == INodeId.ROOT_INODE_ID && pathComponents.length == 4) { 2806 return Path.SEPARATOR; 2807 } 2808 INode inode = fsd.getInode(id); 2809 if (inode == null) { 2810 throw new FileNotFoundException( 2811 "File for given inode path does not exist: " + src); 2812 } 2813 2814 // Handle single ".." for NFS lookup support. 2815 if ((pathComponents.length > 4) 2816 && DFSUtil.bytes2String(pathComponents[4]).equals("..")) { 2817 INode parent = inode.getParent(); 2818 if (parent == null || parent.getId() == INodeId.ROOT_INODE_ID) { 2819 // inode is root, or its parent is root. 2820 return Path.SEPARATOR; 2821 } else { 2822 return parent.getFullPathName(); 2823 } 2824 } 2825 2826 StringBuilder path = id == INodeId.ROOT_INODE_ID ? new StringBuilder() 2827 : new StringBuilder(inode.getFullPathName()); 2828 for (int i = 4; i < pathComponents.length; i++) { 2829 path.append(Path.SEPARATOR).append(DFSUtil.bytes2String(pathComponents[i])); 2830 } 2831 if (NameNode.LOG.isDebugEnabled()) { 2832 NameNode.LOG.debug("Resolved path is " + path); 2833 } 2834 return path.toString(); 2835 } 2836 2837 /** Check if a given inode name is reserved */ 2838 public static boolean isReservedName(INode inode) { 2839 return CHECK_RESERVED_FILE_NAMES 2840 && Arrays.equals(inode.getLocalNameBytes(), DOT_RESERVED); 2841 } 2842 2843 /** Check if a given path is reserved */ 2844 public static boolean isReservedName(String src) { 2845 return src.startsWith(DOT_RESERVED_PATH_PREFIX); 2846 } 2847}
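// Illustrative sketch only (not part of the class): how an inode-id path under
// /.reserved/.inodes is resolved, assuming a file /a/b/c whose inode id is 16388
// and an FSDirectory instance fsd:
//
//   byte[][] comps = FSDirectory.getPathComponentsForReservedPath(
//       "/.reserved/.inodes/16388");
//   String p = FSDirectory.resolvePath("/.reserved/.inodes/16388", comps, fsd);
//   // p is "/a/b/c"; a trailing "/.." component instead resolves to the
//   // parent directory "/a/b".
//
// A non-reserved path is returned unchanged, and an unknown inode id results in
// a FileNotFoundException.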