/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.ReadOnlyList;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.protobuf.ByteString;

/**
 * Loads and saves the INode sections of the protobuf-based FSImage:
 * the {@code INodeSection}, the {@code INodeDirectorySection} (parent/child
 * links), and the {@code FilesUnderConstructionSection}.
 *
 * Several fixed-width bit-packed encodings are used to keep the image
 * compact; the masks/offsets below define those layouts. See the comments
 * in fsimage.proto for the authoritative description of each encoding.
 */
@InterfaceAudience.Private
public final class FSImageFormatPBINode {
  // Permission encoding (a single long):
  //   bits  0..15  FsPermission short
  //   bits 16..39  group name id in the image string table (24 bits)
  //   bits 40..63  user name id in the image string table (24 bits)
  private final static long USER_GROUP_STRID_MASK = (1 << 24) - 1;
  private final static int USER_STRID_OFFSET = 40;
  private final static int GROUP_STRID_OFFSET = 16;
  private static final Log LOG = LogFactory.getLog(FSImageFormatPBINode.class);

  // ACL entry encoding (a single int):
  //   bits 0..2  FsAction ordinal (permission)
  //   bits 3..4  AclEntryType ordinal
  //   bit  5     AclEntryScope ordinal
  //   bits 6..29 entry name id in the image string table (24 bits)
  private static final int ACL_ENTRY_NAME_MASK = (1 << 24) - 1;
  private static final int ACL_ENTRY_NAME_OFFSET = 6;
  private static final int ACL_ENTRY_TYPE_OFFSET = 3;
  private static final int ACL_ENTRY_SCOPE_OFFSET = 5;
  private static final int ACL_ENTRY_PERM_MASK = 7;
  private static final int ACL_ENTRY_TYPE_MASK = 3;
  private static final int ACL_ENTRY_SCOPE_MASK = 1;
  // Cached enum value arrays so decoding does not call values() per entry.
  private static final FsAction[] FSACTION_VALUES = FsAction.values();
  private static final AclEntryScope[] ACL_ENTRY_SCOPE_VALUES = AclEntryScope
      .values();
  private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES = AclEntryType
      .values();

  // XAttr compact "name" field encoding (a single int):
  //   bits  6..29 xattr name id in the image string table (24 bits)
  //   bits 30..31 low 2 bits of the namespace ordinal
  //   bit   5     3rd (extension) bit of the namespace ordinal
  private static final int XATTR_NAMESPACE_MASK = 3;
  private static final int XATTR_NAMESPACE_OFFSET = 30;
  private static final int XATTR_NAME_MASK = (1 << 24) - 1;
  private static final int XATTR_NAME_OFFSET = 6;

  /* See the comments in fsimage.proto for an explanation of the following. */
  private static final int XATTR_NAMESPACE_EXT_OFFSET = 5;
  private static final int XATTR_NAMESPACE_EXT_MASK = 1;

  private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES =
      XAttr.NameSpace.values();


  /**
   * Deserializes INode-related sections of an fsimage into the in-memory
   * namespace ({@link FSDirectory}) and block map.
   */
  public final static class Loader {
    /**
     * Decodes the bit-packed permission long (see layout above) into a
     * {@link PermissionStatus}, resolving user/group ids via the image's
     * string table.
     */
    public static PermissionStatus loadPermission(long id,
        final String[] stringTable) {
      short perm = (short) (id & ((1 << GROUP_STRID_OFFSET) - 1));
      int gsid = (int) ((id >> GROUP_STRID_OFFSET) & USER_GROUP_STRID_MASK);
      int usid = (int) ((id >> USER_STRID_OFFSET) & USER_GROUP_STRID_MASK);
      return new PermissionStatus(stringTable[usid], stringTable[gsid],
          new FsPermission(perm));
    }

    /**
     * Decodes each bit-packed int in the proto (see ACL layout above) into
     * an {@link AclEntry}. Inverse of {@link Saver#buildAclEntries}.
     */
    public static ImmutableList<AclEntry> loadAclEntries(
        AclFeatureProto proto, final String[] stringTable) {
      ImmutableList.Builder<AclEntry> b = ImmutableList.builder();
      for (int v : proto.getEntriesList()) {
        int p = v & ACL_ENTRY_PERM_MASK;
        int t = (v >> ACL_ENTRY_TYPE_OFFSET) & ACL_ENTRY_TYPE_MASK;
        int s = (v >> ACL_ENTRY_SCOPE_OFFSET) & ACL_ENTRY_SCOPE_MASK;
        int nid = (v >> ACL_ENTRY_NAME_OFFSET) & ACL_ENTRY_NAME_MASK;
        String name = stringTable[nid];
        b.add(new AclEntry.Builder().setName(name)
            .setPermission(FSACTION_VALUES[p])
            .setScope(ACL_ENTRY_SCOPE_VALUES[s])
            .setType(ACL_ENTRY_TYPE_VALUES[t]).build());
      }
      return b.build();
    }

    /**
     * Decodes the compact xattr entries (see XAttr layout above) into
     * {@link XAttr} objects. Inverse of {@link Saver#buildXAttrs}.
     */
    public static ImmutableList<XAttr> loadXAttrs(
        XAttrFeatureProto proto, final String[] stringTable) {
      ImmutableList.Builder<XAttr> b = ImmutableList.builder();
      for (XAttrCompactProto xAttrCompactProto : proto.getXAttrsList()) {
        int v = xAttrCompactProto.getName();
        int nid = (v >> XATTR_NAME_OFFSET) & XATTR_NAME_MASK;
        int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
        // Recombine the extension bit to rebuild the full 3-bit namespace
        // ordinal (allows more than 4 namespaces).
        ns |=
            ((v >> XATTR_NAMESPACE_EXT_OFFSET) & XATTR_NAMESPACE_EXT_MASK) << 2;
        String name = stringTable[nid];
        byte[] value = null;
        if (xAttrCompactProto.getValue() != null) {
          value = xAttrCompactProto.getValue().toByteArray();
        }
        b.add(new XAttr.Builder().setNameSpace(XATTR_NAMESPACE_VALUES[ns])
            .setName(name).setValue(value).build());
      }

      return b.build();
    }

    /**
     * Materializes an {@link INodeDirectory} (with quota, ACL and xattr
     * features when present in the image) from its proto representation.
     */
    public static INodeDirectory loadINodeDirectory(INodeSection.INode n,
        LoaderContext state) {
      assert n.getType() == INodeSection.INode.Type.DIRECTORY;
      INodeSection.INodeDirectory d = n.getDirectory();

      final PermissionStatus permissions = loadPermission(d.getPermission(),
          state.getStringTable());
      final INodeDirectory dir = new INodeDirectory(n.getId(), n.getName()
          .toByteArray(), permissions, d.getModificationTime());

      // A negative quota means "no quota"; only attach the feature when at
      // least one quota is actually set.
      final long nsQuota = d.getNsQuota(), dsQuota = d.getDsQuota();
      if (nsQuota >= 0 || dsQuota >= 0) {
        dir.addDirectoryWithQuotaFeature(nsQuota, dsQuota);
      }

      if (d.hasAcl()) {
        dir.addAclFeature(new AclFeature(loadAclEntries(d.getAcl(),
            state.getStringTable())));
      }
      if (d.hasXAttrs()) {
        dir.addXAttrFeature(new XAttrFeature(
            loadXAttrs(d.getXAttrs(), state.getStringTable())));
      }
      return dir;
    }

    /**
     * Registers each of the file's blocks with the block manager and stores
     * back the (possibly deduplicated) BlockInfo returned by it.
     */
    public static void updateBlocksMap(INodeFile file, BlockManager bm) {
      // Add file->block mapping
      final BlockInfo[] blocks = file.getBlocks();
      if (blocks != null) {
        for (int i = 0; i < blocks.length; i++) {
          file.setBlock(i, bm.addBlockCollection(blocks[i], file));
        }
      }
    }

    private final FSDirectory dir;
    private final FSNamesystem fsn;
    private final FSImageFormatProtobuf.Loader parent;
    // Files loaded with an under-construction feature; leases are added for
    // them in loadFilesUnderConstructionSection().
    private final List<INodeFile> ucFiles;

    Loader(FSNamesystem fsn, final FSImageFormatProtobuf.Loader parent) {
      this.fsn = fsn;
      this.dir = fsn.dir;
      this.parent = parent;
      this.ucFiles = new ArrayList<INodeFile>();
    }

    /**
     * Reads the INodeDirectorySection and wires each already-loaded child
     * (by inode id, or by index into the shared INodeReference list) into
     * its parent directory. Must run after {@link #loadINodeSection}.
     */
    void loadINodeDirectorySection(InputStream in) throws IOException {
      final List<INodeReference> refList = parent.getLoaderContext()
          .getRefList();
      while (true) {
        INodeDirectorySection.DirEntry e = INodeDirectorySection.DirEntry
            .parseDelimitedFrom(in);
        // note that in is a LimitedInputStream
        if (e == null) {
          break;
        }
        INodeDirectory p = dir.getInode(e.getParent()).asDirectory();
        for (long id : e.getChildrenList()) {
          INode child = dir.getInode(id);
          addToParent(p, child);
        }
        for (int refId : e.getRefChildrenList()) {
          INodeReference ref = refList.get(refId);
          addToParent(p, ref);
        }
      }
    }

    /**
     * Reads the INodeSection: restores the last allocated inode id, then
     * loads every inode and registers it in the inode map (the root inode
     * is merged into the existing root instead).
     */
    void loadINodeSection(InputStream in) throws IOException {
      INodeSection s = INodeSection.parseDelimitedFrom(in);
      fsn.resetLastInodeId(s.getLastInodeId());
      LOG.info("Loading " + s.getNumInodes() + " INodes.");
      for (int i = 0; i < s.getNumInodes(); ++i) {
        INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
        if (p.getId() == INodeId.ROOT_INODE_ID) {
          loadRootINode(p);
        } else {
          INode n = loadINode(p);
          dir.addToInodeMap(n);
        }
      }
    }

    /**
     * Load the under-construction files section, and update the lease map
     */
    void loadFilesUnderConstructionSection(InputStream in) throws IOException {
      // This section is consumed, but not actually used for restoring leases.
      while (true) {
        FileUnderConstructionEntry entry = FileUnderConstructionEntry
            .parseDelimitedFrom(in);
        if (entry == null) {
          break;
        }
      }

      // Add a lease for each and every file under construction.
      for (INodeFile file : ucFiles) {
        FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
        Preconditions.checkState(uc != null); // file must be under-construction
        String path = file.getFullPathName();
        // Skip the deleted files in snapshot. This leaks UC inodes that are
        // deleted from the current view.
        if (path.startsWith("/")) {
          fsn.leaseManager.addLease(uc.getClientName(), path);
        }
      }
    }

    /**
     * Attaches a loaded child inode to its parent, rejecting reserved names
     * at the root, and registers file blocks with the block manager.
     */
    private void addToParent(INodeDirectory parent, INode child) {
      if (parent == dir.rootDir && FSDirectory.isReservedName(child)) {
        throw new HadoopIllegalArgumentException("File name \""
            + child.getLocalName() + "\" is reserved. Please "
            + " change the name of the existing file or directory to another "
            + "name before upgrading to this release.");
      }
      // NOTE: This does not update space counts for parents
      if (!parent.addChild(child)) {
        return;
      }
      dir.cacheName(child);

      if (child.isFile()) {
        updateBlocksMap(child.asFile(), fsn.getBlockManager());
      }
    }

    /**
     * Dispatches on the inode type. Returns null for unknown types —
     * NOTE(review): a null result is then passed to dir.addToInodeMap by
    * loadINodeSection; presumably that tolerates null — confirm.
     */
    private INode loadINode(INodeSection.INode n) {
      switch (n.getType()) {
      case FILE:
        return loadINodeFile(n);
      case DIRECTORY:
        return loadINodeDirectory(n, parent.getLoaderContext());
      case SYMLINK:
        return loadINodeSymlink(n);
      default:
        break;
      }
      return null;
    }

    /**
     * Materializes an {@link INodeFile}, including its block list, optional
     * ACL/xattr features, and — when the image marks it under construction —
     * converts it to UC state and replaces its last block with a
     * {@link BlockInfoUnderConstruction}.
     */
    private INodeFile loadINodeFile(INodeSection.INode n) {
      assert n.getType() == INodeSection.INode.Type.FILE;
      INodeSection.INodeFile f = n.getFile();
      List<BlockProto> bp = f.getBlocksList();
      short replication = (short) f.getReplication();
      LoaderContext state = parent.getLoaderContext();

      BlockInfo[] blocks = new BlockInfo[bp.size()];
      for (int i = 0, e = bp.size(); i < e; ++i) {
        blocks[i] = new BlockInfo(PBHelper.convert(bp.get(i)), replication);
      }
      final PermissionStatus permissions = loadPermission(f.getPermission(),
          parent.getLoaderContext().getStringTable());

      final INodeFile file = new INodeFile(n.getId(),
          n.getName().toByteArray(), permissions, f.getModificationTime(),
          f.getAccessTime(), blocks, replication, f.getPreferredBlockSize(),
          (byte)f.getStoragePolicyID());

      if (f.hasAcl()) {
        file.addAclFeature(new AclFeature(loadAclEntries(f.getAcl(),
            state.getStringTable())));
      }

      if (f.hasXAttrs()) {
        file.addXAttrFeature(new XAttrFeature(
            loadXAttrs(f.getXAttrs(), state.getStringTable())));
      }

      // under-construction information
      if (f.hasFileUC()) {
        ucFiles.add(file);
        INodeSection.FileUnderConstructionFeature uc = f.getFileUC();
        file.toUnderConstruction(uc.getClientName(), uc.getClientMachine());
        if (blocks.length > 0) {
          BlockInfo lastBlk = file.getLastBlock();
          // replace the last block of file
          file.setBlock(file.numBlocks() - 1, new BlockInfoUnderConstruction(
              lastBlk, replication));
        }
      }
      return file;
    }


    /** Materializes an {@link INodeSymlink} from its proto representation. */
    private INodeSymlink loadINodeSymlink(INodeSection.INode n) {
      assert n.getType() == INodeSection.INode.Type.SYMLINK;
      INodeSection.INodeSymlink s = n.getSymlink();
      final PermissionStatus permissions = loadPermission(s.getPermission(),
          parent.getLoaderContext().getStringTable());
      INodeSymlink sym = new INodeSymlink(n.getId(), n.getName().toByteArray(),
          permissions, s.getModificationTime(), s.getAccessTime(),
          s.getTarget().toStringUtf8());
      return sym;
    }

    /**
     * Merges the image's root inode into the pre-existing in-memory root
     * directory: quota, times, permissions, ACLs and xattrs are copied over
     * rather than replacing the root object itself.
     */
    private void loadRootINode(INodeSection.INode p) {
      INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext());
      final Quota.Counts q = root.getQuotaCounts();
      final long nsQuota = q.get(Quota.NAMESPACE);
      final long dsQuota = q.get(Quota.DISKSPACE);
      if (nsQuota != -1 || dsQuota != -1) {
        dir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota);
      }
      dir.rootDir.cloneModificationTime(root);
      dir.rootDir.clonePermissionStatus(root);
      final AclFeature af = root.getFeature(AclFeature.class);
      if (af != null) {
        dir.rootDir.addAclFeature(af);
      }
      // root dir supports having extended attributes according to POSIX
      final XAttrFeature f = root.getXAttrFeature();
      if (f != null) {
        dir.rootDir.addXAttrFeature(f);
      }
      dir.addRootDirToEncryptionZone(f);
    }
  }

  /**
   * Serializes the in-memory namespace into the INode-related fsimage
   * sections. The static build* helpers produce the bit-packed encodings
   * decoded by {@link Loader}.
   */
  public final static class Saver {
    /**
     * Packs user id, group id and permission bits into one long (see the
     * layout comment at the top of this class). String ids come from the
     * deduplicating string map.
     */
    private static long buildPermissionStatus(INodeAttributes n,
        final SaverContext.DeduplicationMap<String> stringMap) {
      long userId = stringMap.getId(n.getUserName());
      long groupId = stringMap.getId(n.getGroupName());
      return ((userId & USER_GROUP_STRID_MASK) << USER_STRID_OFFSET)
          | ((groupId & USER_GROUP_STRID_MASK) << GROUP_STRID_OFFSET)
          | n.getFsPermissionShort();
    }

    /**
     * Packs each ACL entry into one int (see the ACL layout comment).
     * Inverse of {@link Loader#loadAclEntries}.
     */
    private static AclFeatureProto.Builder buildAclEntries(AclFeature f,
        final SaverContext.DeduplicationMap<String> map) {
      AclFeatureProto.Builder b = AclFeatureProto.newBuilder();
      for (AclEntry e : f.getEntries()) {
        int v = ((map.getId(e.getName()) & ACL_ENTRY_NAME_MASK) << ACL_ENTRY_NAME_OFFSET)
            | (e.getType().ordinal() << ACL_ENTRY_TYPE_OFFSET)
            | (e.getScope().ordinal() << ACL_ENTRY_SCOPE_OFFSET)
            | (e.getPermission().ordinal());
        b.addEntries(v);
      }
      return b;
    }

    /**
     * Packs each xattr's namespace ordinal and name id into the compact
     * "name" int (see the XAttr layout comment). Inverse of
     * {@link Loader#loadXAttrs}.
     */
    private static XAttrFeatureProto.Builder buildXAttrs(XAttrFeature f,
        final SaverContext.DeduplicationMap<String> stringMap) {
      XAttrFeatureProto.Builder b = XAttrFeatureProto.newBuilder();
      for (XAttr a : f.getXAttrs()) {
        XAttrCompactProto.Builder xAttrCompactBuilder = XAttrCompactProto.
            newBuilder();
        int nsOrd = a.getNameSpace().ordinal();
        // Only 3 bits (2 regular + 1 extension) are available for the
        // namespace in the compact encoding.
        Preconditions.checkArgument(nsOrd < 8, "Too many namespaces.");
        int v = ((nsOrd & XATTR_NAMESPACE_MASK) << XATTR_NAMESPACE_OFFSET)
            | ((stringMap.getId(a.getName()) & XATTR_NAME_MASK) <<
                XATTR_NAME_OFFSET);
        v |= (((nsOrd >> 2) & XATTR_NAMESPACE_EXT_MASK) <<
            XATTR_NAMESPACE_EXT_OFFSET);
        xAttrCompactBuilder.setName(v);
        if (a.getValue() != null) {
          xAttrCompactBuilder.setValue(PBHelper.getByteString(a.getValue()));
        }
        b.addXAttrs(xAttrCompactBuilder.build());
      }

      return b;
    }

    /**
     * Builds the proto for a file inode's attributes (times, permission,
     * block size, replication, storage policy, optional ACL/xattrs).
     * Blocks and UC state are appended separately by save(OutputStream,
     * INodeFile).
     */
    public static INodeSection.INodeFile.Builder buildINodeFile(
        INodeFileAttributes file, final SaverContext state) {
      INodeSection.INodeFile.Builder b = INodeSection.INodeFile.newBuilder()
          .setAccessTime(file.getAccessTime())
          .setModificationTime(file.getModificationTime())
          .setPermission(buildPermissionStatus(file, state.getStringMap()))
          .setPreferredBlockSize(file.getPreferredBlockSize())
          .setReplication(file.getFileReplication())
          .setStoragePolicyID(file.getLocalStoragePolicyID());

      AclFeature f = file.getAclFeature();
      if (f != null) {
        b.setAcl(buildAclEntries(f, state.getStringMap()));
      }
      XAttrFeature xAttrFeature = file.getXAttrFeature();
      if (xAttrFeature != null) {
        b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
      }
      return b;
    }

    /**
     * Builds the proto for a directory inode's attributes (modification
     * time, quotas, permission, optional ACL/xattrs).
     */
    public static INodeSection.INodeDirectory.Builder buildINodeDirectory(
        INodeDirectoryAttributes dir, final SaverContext state) {
      Quota.Counts quota = dir.getQuotaCounts();
      INodeSection.INodeDirectory.Builder b = INodeSection.INodeDirectory
          .newBuilder().setModificationTime(dir.getModificationTime())
          .setNsQuota(quota.get(Quota.NAMESPACE))
          .setDsQuota(quota.get(Quota.DISKSPACE))
          .setPermission(buildPermissionStatus(dir, state.getStringMap()));

      AclFeature f = dir.getAclFeature();
      if (f != null) {
        b.setAcl(buildAclEntries(f, state.getStringMap()));
      }
      XAttrFeature xAttrFeature = dir.getXAttrFeature();
      if (xAttrFeature != null) {
        b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
      }
      return b;
    }

    private final FSNamesystem fsn;
    private final FileSummary.Builder summary;
    private final SaveNamespaceContext context;
    private final FSImageFormatProtobuf.Saver parent;

    Saver(FSImageFormatProtobuf.Saver parent, FileSummary.Builder summary) {
      this.parent = parent;
      this.summary = summary;
      this.context = parent.getContext();
      this.fsn = context.getSourceNamesystem();
    }

    /**
     * Writes one DirEntry per non-empty directory: plain children by inode
     * id, reference children by their index in the shared reference list.
     * Checks for cancellation periodically.
     */
    void serializeINodeDirectorySection(OutputStream out) throws IOException {
      Iterator<INodeWithAdditionalFields> iter = fsn.getFSDirectory()
          .getINodeMap().getMapIterator();
      final ArrayList<INodeReference> refList = parent.getSaverContext()
          .getRefList();
      int i = 0;
      while (iter.hasNext()) {
        INodeWithAdditionalFields n = iter.next();
        if (!n.isDirectory()) {
          continue;
        }

        ReadOnlyList<INode> children = n.asDirectory().getChildrenList(
            Snapshot.CURRENT_STATE_ID);
        if (children.size() > 0) {
          INodeDirectorySection.DirEntry.Builder b = INodeDirectorySection.
              DirEntry.newBuilder().setParent(n.getId());
          for (INode inode : children) {
            if (!inode.isReference()) {
              b.addChildren(inode.getId());
            } else {
              // References are serialized by position in the shared list;
              // the index just appended is size - 1.
              refList.add(inode.asReference());
              b.addRefChildren(refList.size() - 1);
            }
          }
          INodeDirectorySection.DirEntry e = b.build();
          e.writeDelimitedTo(out);
        }

        ++i;
        if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
          context.checkCancelled();
        }
      }
      parent.commitSection(summary,
          FSImageFormatProtobuf.SectionName.INODE_DIR);
    }

    /**
     * Writes the section header (last inode id, inode count) followed by
     * one record per inode. Checks for cancellation periodically.
     */
    void serializeINodeSection(OutputStream out) throws IOException {
      INodeMap inodesMap = fsn.dir.getINodeMap();

      INodeSection.Builder b = INodeSection.newBuilder()
          .setLastInodeId(fsn.getLastInodeId()).setNumInodes(inodesMap.size());
      INodeSection s = b.build();
      s.writeDelimitedTo(out);

      int i = 0;
      Iterator<INodeWithAdditionalFields> iter = inodesMap.getMapIterator();
      while (iter.hasNext()) {
        INodeWithAdditionalFields n = iter.next();
        save(out, n);
        ++i;
        if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
          context.checkCancelled();
        }
      }
      parent.commitSection(summary, FSImageFormatProtobuf.SectionName.INODE);
    }

    /**
     * Writes one entry (inode id + full path) per file currently under
     * construction, taken from the namesystem's lease bookkeeping.
     */
    void serializeFilesUCSection(OutputStream out) throws IOException {
      Map<String, INodeFile> ucMap = fsn.getFilesUnderConstruction();
      for (Map.Entry<String, INodeFile> entry : ucMap.entrySet()) {
        String path = entry.getKey();
        INodeFile file = entry.getValue();
        FileUnderConstructionEntry.Builder b = FileUnderConstructionEntry
            .newBuilder().setInodeId(file.getId()).setFullPath(path);
        FileUnderConstructionEntry e = b.build();
        e.writeDelimitedTo(out);
      }
      parent.commitSection(summary,
          FSImageFormatProtobuf.SectionName.FILES_UNDERCONSTRUCTION);
    }

    /** Dispatches to the type-specific save overload. */
    private void save(OutputStream out, INode n) throws IOException {
      if (n.isDirectory()) {
        save(out, n.asDirectory());
      } else if (n.isFile()) {
        save(out, n.asFile());
      } else if (n.isSymlink()) {
        save(out, n.asSymlink());
      }
    }

    /** Serializes one directory inode record. */
    private void save(OutputStream out, INodeDirectory n) throws IOException {
      INodeSection.INodeDirectory.Builder b = buildINodeDirectory(n,
          parent.getSaverContext());
      INodeSection.INode r = buildINodeCommon(n)
          .setType(INodeSection.INode.Type.DIRECTORY).setDirectory(b).build();
      r.writeDelimitedTo(out);
    }

    /**
     * Serializes one file inode record, including its blocks and, if the
     * file is under construction, its client name/machine.
     */
    private void save(OutputStream out, INodeFile n) throws IOException {
      INodeSection.INodeFile.Builder b = buildINodeFile(n,
          parent.getSaverContext());

      if (n.getBlocks() != null) {
        for (Block block : n.getBlocks()) {
          b.addBlocks(PBHelper.convert(block));
        }
      }

      FileUnderConstructionFeature uc = n.getFileUnderConstructionFeature();
      if (uc != null) {
        INodeSection.FileUnderConstructionFeature f =
            INodeSection.FileUnderConstructionFeature
            .newBuilder().setClientName(uc.getClientName())
            .setClientMachine(uc.getClientMachine()).build();
        b.setFileUC(f);
      }

      INodeSection.INode r = buildINodeCommon(n)
          .setType(INodeSection.INode.Type.FILE).setFile(b).build();
      r.writeDelimitedTo(out);
    }

    /** Serializes one symlink inode record. */
    private void save(OutputStream out, INodeSymlink n) throws IOException {
      SaverContext state = parent.getSaverContext();
      INodeSection.INodeSymlink.Builder b = INodeSection.INodeSymlink
          .newBuilder()
          .setPermission(buildPermissionStatus(n, state.getStringMap()))
          .setTarget(ByteString.copyFrom(n.getSymlink()))
          .setModificationTime(n.getModificationTime())
          .setAccessTime(n.getAccessTime());

      INodeSection.INode r = buildINodeCommon(n)
          .setType(INodeSection.INode.Type.SYMLINK).setSymlink(b).build();
      r.writeDelimitedTo(out);
    }

    /** Builds the fields common to every inode record: id and local name. */
    private final INodeSection.INode.Builder buildINodeCommon(INode n) {
      return INodeSection.INode.newBuilder()
          .setId(n.getId())
          .setName(ByteString.copyFrom(n.getLocalNameBytes()));
    }
  }

  // Static utility holder — not instantiable.
  private FSImageFormatPBINode() {
  }
}