001/**
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hdfs.server.blockmanagement;
019
020import java.util.ArrayList;
021import java.util.BitSet;
022import java.util.Collection;
023import java.util.Collections;
024import java.util.HashMap;
025import java.util.HashSet;
026import java.util.Iterator;
027import java.util.LinkedList;
028import java.util.List;
029import java.util.Map;
030import java.util.Queue;
031import java.util.Set;
032
033import com.google.common.annotations.VisibleForTesting;
034
035import com.google.common.collect.ImmutableList;
036import org.apache.commons.logging.Log;
037import org.apache.commons.logging.LogFactory;
038import org.apache.hadoop.classification.InterfaceAudience;
039import org.apache.hadoop.classification.InterfaceStability;
040import org.apache.hadoop.hdfs.StorageType;
041import org.apache.hadoop.hdfs.protocol.Block;
042import org.apache.hadoop.hdfs.protocol.DatanodeID;
043import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
044import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
045import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
046import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
047import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
048import org.apache.hadoop.hdfs.server.protocol.StorageReport;
049import org.apache.hadoop.hdfs.util.EnumCounters;
050import org.apache.hadoop.hdfs.util.LightWeightHashSet;
051import org.apache.hadoop.util.IntrusiveCollection;
052import org.apache.hadoop.util.Time;
053
054import com.google.common.annotations.VisibleForTesting;
055
056/**
057 * This class extends the DatanodeInfo class with ephemeral information (eg
058 * health, capacity, what blocks are associated with the Datanode) that is
059 * private to the Namenode, ie this class is not exposed to clients.
060 */
061@InterfaceAudience.Private
062@InterfaceStability.Evolving
063public class DatanodeDescriptor extends DatanodeInfo {
064  public static final Log LOG = LogFactory.getLog(DatanodeDescriptor.class);
065  public static final DatanodeDescriptor[] EMPTY_ARRAY = {};
066
067  // Stores status of decommissioning.
068  // If node is not decommissioning, do not use this object for anything.
069  public final DecommissioningStatus decommissioningStatus = new DecommissioningStatus();
070
071  private long curBlockReportId = 0;
072
073  private BitSet curBlockReportRpcsSeen = null;
074
075  public int updateBlockReportContext(BlockReportContext context) {
076    if (curBlockReportId != context.getReportId()) {
077      curBlockReportId = context.getReportId();
078      curBlockReportRpcsSeen = new BitSet(context.getTotalRpcs());
079    }
080    curBlockReportRpcsSeen.set(context.getCurRpc());
081    return curBlockReportRpcsSeen.cardinality();
082  }
083
084  public void clearBlockReportContext() {
085    curBlockReportId = 0;
086    curBlockReportRpcsSeen = null;
087  }
088
089  /** Block and targets pair */
090  @InterfaceAudience.Private
091  @InterfaceStability.Evolving
092  public static class BlockTargetPair {
093    public final Block block;
094    public final DatanodeStorageInfo[] targets;    
095
096    BlockTargetPair(Block block, DatanodeStorageInfo[] targets) {
097      this.block = block;
098      this.targets = targets;
099    }
100  }
101
102  /** A BlockTargetPair queue. */
103  private static class BlockQueue<E> {
104    private final Queue<E> blockq = new LinkedList<E>();
105
106    /** Size of the queue */
107    synchronized int size() {return blockq.size();}
108
109    /** Enqueue */
110    synchronized boolean offer(E e) { 
111      return blockq.offer(e);
112    }
113
114    /** Dequeue */
115    synchronized List<E> poll(int numBlocks) {
116      if (numBlocks <= 0 || blockq.isEmpty()) {
117        return null;
118      }
119
120      List<E> results = new ArrayList<E>();
121      for(; !blockq.isEmpty() && numBlocks > 0; numBlocks--) {
122        results.add(blockq.poll());
123      }
124      return results;
125    }
126
127    /**
128     * Returns <tt>true</tt> if the queue contains the specified element.
129     */
130    boolean contains(E e) {
131      return blockq.contains(e);
132    }
133
134    synchronized void clear() {
135      blockq.clear();
136    }
137  }
138
  // Maps storage ID -> per-storage state. All access synchronizes on the map.
  private final Map<String, DatanodeStorageInfo> storageMap = 
      new HashMap<String, DatanodeStorageInfo>();
141
142  /**
143   * A list of CachedBlock objects on this datanode.
144   */
145  public static class CachedBlocksList extends IntrusiveCollection<CachedBlock> {
146    public enum Type {
147      PENDING_CACHED,
148      CACHED,
149      PENDING_UNCACHED
150    }
151
152    private final DatanodeDescriptor datanode;
153
154    private final Type type;
155
156    CachedBlocksList(DatanodeDescriptor datanode, Type type) {
157      this.datanode = datanode;
158      this.type = type;
159    }
160
161    public DatanodeDescriptor getDatanode() {
162      return datanode;
163    }
164
165    public Type getType() {
166      return type;
167    }
168  }
169
170  /**
171   * The blocks which we want to cache on this DataNode.
172   */
173  private final CachedBlocksList pendingCached = 
174      new CachedBlocksList(this, CachedBlocksList.Type.PENDING_CACHED);
175
176  /**
177   * The blocks which we know are cached on this datanode.
178   * This list is updated by periodic cache reports.
179   */
180  private final CachedBlocksList cached = 
181      new CachedBlocksList(this, CachedBlocksList.Type.CACHED);
182
183  /**
184   * The blocks which we want to uncache on this DataNode.
185   */
186  private final CachedBlocksList pendingUncached = 
187      new CachedBlocksList(this, CachedBlocksList.Type.PENDING_UNCACHED);
188
189  public CachedBlocksList getPendingCached() {
190    return pendingCached;
191  }
192
193  public CachedBlocksList getCached() {
194    return cached;
195  }
196
197  public CachedBlocksList getPendingUncached() {
198    return pendingUncached;
199  }
200
201  /**
202   * The time when the last batch of caching directives was sent, in
203   * monotonic milliseconds.
204   */
205  private long lastCachingDirectiveSentTimeMs;
206
207  // isAlive == heartbeats.contains(this)
208  // This is an optimization, because contains takes O(n) time on Arraylist
209  public boolean isAlive = false;
210  public boolean needKeyUpdate = false;
211
212  
213  // A system administrator can tune the balancer bandwidth parameter
214  // (dfs.balance.bandwidthPerSec) dynamically by calling
215  // "dfsadmin -setBalanacerBandwidth <newbandwidth>", at which point the
216  // following 'bandwidth' variable gets updated with the new value for each
217  // node. Once the heartbeat command is issued to update the value on the
218  // specified datanode, this value will be set back to 0.
219  private long bandwidth;
220
221  /** A queue of blocks to be replicated by this datanode */
222  private final BlockQueue<BlockTargetPair> replicateBlocks = new BlockQueue<BlockTargetPair>();
223  /** A queue of blocks to be recovered by this datanode */
224  private final BlockQueue<BlockInfoUnderConstruction> recoverBlocks =
225                                new BlockQueue<BlockInfoUnderConstruction>();
226  /** A set of blocks to be invalidated by this datanode */
227  private final LightWeightHashSet<Block> invalidateBlocks = new LightWeightHashSet<Block>();
228
229  /* Variables for maintaining number of blocks scheduled to be written to
230   * this storage. This count is approximate and might be slightly bigger
231   * in case of errors (e.g. datanode does not report if an error occurs
232   * while writing the block).
233   */
234  private EnumCounters<StorageType> currApproxBlocksScheduled
235      = new EnumCounters<StorageType>(StorageType.class);
236  private EnumCounters<StorageType> prevApproxBlocksScheduled
237      = new EnumCounters<StorageType>(StorageType.class);
238  private long lastBlocksScheduledRollTime = 0;
239  private static final int BLOCKS_SCHEDULED_ROLL_INTERVAL = 600*1000; //10min
240  private int volumeFailures = 0;
241  
242  /** 
243   * When set to true, the node is not in include list and is not allowed
244   * to communicate with the namenode
245   */
246  private boolean disallowed = false;
247
248  // HB processing can use it to tell if it is the first HB since DN restarted
249  private boolean heartbeatedSinceRegistration = false;
250
251  // The number of replication work pending before targets are determined
252  private int PendingReplicationWithoutTargets = 0;
253
254  /**
255   * DatanodeDescriptor constructor
256   * @param nodeID id of the data node
257   */
258  public DatanodeDescriptor(DatanodeID nodeID) {
259    super(nodeID);
260    updateHeartbeatState(StorageReport.EMPTY_ARRAY, 0L, 0L, 0, 0);
261  }
262
263  /**
264   * DatanodeDescriptor constructor
265   * @param nodeID id of the data node
266   * @param networkLocation location of the data node in network
267   */
268  public DatanodeDescriptor(DatanodeID nodeID, 
269                            String networkLocation) {
270    super(nodeID, networkLocation);
271    updateHeartbeatState(StorageReport.EMPTY_ARRAY, 0L, 0L, 0, 0);
272  }
273
274  @VisibleForTesting
275  public DatanodeStorageInfo getStorageInfo(String storageID) {
276    synchronized (storageMap) {
277      return storageMap.get(storageID);
278    }
279  }
280  DatanodeStorageInfo[] getStorageInfos() {
281    synchronized (storageMap) {
282      final Collection<DatanodeStorageInfo> storages = storageMap.values();
283      return storages.toArray(new DatanodeStorageInfo[storages.size()]);
284    }
285  }
286
287  public StorageReport[] getStorageReports() {
288    final DatanodeStorageInfo[] infos = getStorageInfos();
289    final StorageReport[] reports = new StorageReport[infos.length];
290    for(int i = 0; i < infos.length; i++) {
291      reports[i] = infos[i].toStorageReport();
292    }
293    return reports;
294  }
295
296  boolean hasStaleStorages() {
297    synchronized (storageMap) {
298      for (DatanodeStorageInfo storage : storageMap.values()) {
299        if (storage.areBlockContentsStale()) {
300          return true;
301        }
302      }
303      return false;
304    }
305  }
306
  static final private List<DatanodeStorageInfo> EMPTY_STORAGE_INFO_LIST =
      ImmutableList.of();

  /**
   * Remove storages that did not take part in the most recent block report
   * (identified by curBlockReportId) from the storage map.
   *
   * @return the removed ("zombie") storages; empty list if none.
   */
  List<DatanodeStorageInfo> removeZombieStorages() {
    List<DatanodeStorageInfo> zombies = null;
    synchronized (storageMap) {
      Iterator<Map.Entry<String, DatanodeStorageInfo>> iter =
          storageMap.entrySet().iterator();
      while (iter.hasNext()) {
        Map.Entry<String, DatanodeStorageInfo> entry = iter.next();
        DatanodeStorageInfo storageInfo = entry.getValue();
        // A mismatched report id means this storage was absent from the
        // latest block report.
        if (storageInfo.getLastBlockReportId() != curBlockReportId) {
          LOG.info(storageInfo.getStorageID() + " had lastBlockReportId 0x" +
              Long.toHexString(storageInfo.getLastBlockReportId()) +
              ", but curBlockReportId = 0x" +
              Long.toHexString(curBlockReportId));
          iter.remove();
          if (zombies == null) {
            zombies = new LinkedList<DatanodeStorageInfo>();
          }
          zombies.add(storageInfo);
        }
        // Reset so a stale id is never carried into the next report cycle.
        storageInfo.setLastBlockReportId(0);
      }
    }
    return zombies == null ? EMPTY_STORAGE_INFO_LIST : zombies;
  }
334
335  /**
336   * Remove block from the list of blocks belonging to the data-node. Remove
337   * data-node from the block.
338   */
339  boolean removeBlock(BlockInfo b) {
340    final DatanodeStorageInfo s = b.findStorageInfo(this);
341    // if block exists on this datanode
342    if (s != null) {
343      return s.removeBlock(b);
344    }
345    return false;
346  }
347  
348  /**
349   * Remove block from the list of blocks belonging to the data-node. Remove
350   * data-node from the block.
351   */
352  boolean removeBlock(String storageID, BlockInfo b) {
353    DatanodeStorageInfo s = getStorageInfo(storageID);
354    if (s != null) {
355      return s.removeBlock(b);
356    }
357    return false;
358  }
359
  /** Reset aggregate stats and per-node block bookkeeping to empty. */
  public void resetBlocks() {
    setCapacity(0);
    setRemaining(0);
    setBlockPoolUsed(0);
    setDfsUsed(0);
    setXceiverCount(0);
    this.invalidateBlocks.clear();
    this.volumeFailures = 0;
    // pendingCached, cached, and pendingUncached are protected by the
    // FSN lock.
    this.pendingCached.clear();
    this.cached.clear();
    this.pendingUncached.clear();
  }
  
  /** Clear all pending replication, recovery, invalidation and caching work. */
  public void clearBlockQueues() {
    synchronized (invalidateBlocks) {
      this.invalidateBlocks.clear();
      this.recoverBlocks.clear();
      this.replicateBlocks.clear();
    }
    // pendingCached, cached, and pendingUncached are protected by the
    // FSN lock.
    this.pendingCached.clear();
    this.cached.clear();
    this.pendingUncached.clear();
  }
387
388  public int numBlocks() {
389    int blocks = 0;
390    for (DatanodeStorageInfo entry : getStorageInfos()) {
391      blocks += entry.numBlocks();
392    }
393    return blocks;
394  }
395
396  /**
397   * Updates stats from datanode heartbeat.
398   */
399  public void updateHeartbeat(StorageReport[] reports, long cacheCapacity,
400      long cacheUsed, int xceiverCount, int volFailures) {
401    updateHeartbeatState(reports, cacheCapacity, cacheUsed, xceiverCount,
402        volFailures);
403    heartbeatedSinceRegistration = true;
404  }
405
406  /**
407   * process datanode heartbeat or stats initialization.
408   */
409  public void updateHeartbeatState(StorageReport[] reports, long cacheCapacity,
410      long cacheUsed, int xceiverCount, int volFailures) {
411    long totalCapacity = 0;
412    long totalRemaining = 0;
413    long totalBlockPoolUsed = 0;
414    long totalDfsUsed = 0;
415    Set<DatanodeStorageInfo> failedStorageInfos = null;
416
417    // Decide if we should check for any missing StorageReport and mark it as
418    // failed. There are different scenarios.
419    // 1. When DN is running, a storage failed. Given the current DN
420    //    implementation doesn't add recovered storage back to its storage list
421    //    until DN restart, we can assume volFailures won't decrease
422    //    during the current DN registration session.
423    //    When volumeFailures == this.volumeFailures, it implies there is no
424    //    state change. No need to check for failed storage. This is an
425    //    optimization.
426    // 2. After DN restarts, volFailures might not increase and it is possible
427    //    we still have new failed storage. For example, admins reduce
428    //    available storages in configuration. Another corner case
429    //    is the failed volumes might change after restart; a) there
430    //    is one good storage A, one restored good storage B, so there is
431    //    one element in storageReports and that is A. b) A failed. c) Before
432    //    DN sends HB to NN to indicate A has failed, DN restarts. d) After DN
433    //    restarts, storageReports has one element which is B.
434    boolean checkFailedStorages = (volFailures > this.volumeFailures) ||
435        !heartbeatedSinceRegistration;
436
437    if (checkFailedStorages) {
438      LOG.info("Number of failed storage changes from "
439          + this.volumeFailures + " to " + volFailures);
440      failedStorageInfos = new HashSet<DatanodeStorageInfo>(
441          storageMap.values());
442    }
443
444    setCacheCapacity(cacheCapacity);
445    setCacheUsed(cacheUsed);
446    setXceiverCount(xceiverCount);
447    setLastUpdate(Time.now());    
448    this.volumeFailures = volFailures;
449    for (StorageReport report : reports) {
450      DatanodeStorageInfo storage = updateStorage(report.getStorage());
451      if (checkFailedStorages) {
452        failedStorageInfos.remove(storage);
453      }
454
455      storage.receivedHeartbeat(report);
456      totalCapacity += report.getCapacity();
457      totalRemaining += report.getRemaining();
458      totalBlockPoolUsed += report.getBlockPoolUsed();
459      totalDfsUsed += report.getDfsUsed();
460    }
461    rollBlocksScheduled(getLastUpdate());
462
463    // Update total metrics for the node.
464    setCapacity(totalCapacity);
465    setRemaining(totalRemaining);
466    setBlockPoolUsed(totalBlockPoolUsed);
467    setDfsUsed(totalDfsUsed);
468    if (checkFailedStorages) {
469      updateFailedStorage(failedStorageInfos);
470    }
471
472    if (storageMap.size() != reports.length) {
473      pruneStorageMap(reports);
474    }
475  }
476
477  /**
478   * Remove stale storages from storageMap. We must not remove any storages
479   * as long as they have associated block replicas.
480   */
481  private void pruneStorageMap(final StorageReport[] reports) {
482    if (LOG.isDebugEnabled()) {
483      LOG.debug("Number of storages reported in heartbeat=" + reports.length +
484                    "; Number of storages in storageMap=" + storageMap.size());
485    }
486
487    HashMap<String, DatanodeStorageInfo> excessStorages;
488
489    synchronized (storageMap) {
490      // Init excessStorages with all known storages.
491      excessStorages = new HashMap<String, DatanodeStorageInfo>(storageMap);
492
493      // Remove storages that the DN reported in the heartbeat.
494      for (final StorageReport report : reports) {
495        excessStorages.remove(report.getStorage().getStorageID());
496      }
497
498      // For each remaining storage, remove it if there are no associated
499      // blocks.
500      for (final DatanodeStorageInfo storageInfo : excessStorages.values()) {
501        if (storageInfo.numBlocks() == 0) {
502          storageMap.remove(storageInfo.getStorageID());
503          LOG.info("Removed storage " + storageInfo + " from DataNode" + this);
504        } else if (LOG.isDebugEnabled()) {
505          // This can occur until all block reports are received.
506          LOG.debug("Deferring removal of stale storage " + storageInfo +
507                        " with " + storageInfo.numBlocks() + " blocks");
508        }
509      }
510    }
511  }
512
513  private void updateFailedStorage(
514      Set<DatanodeStorageInfo> failedStorageInfos) {
515    for (DatanodeStorageInfo storageInfo : failedStorageInfos) {
516      if (storageInfo.getState() != DatanodeStorage.State.FAILED) {
517        LOG.info(storageInfo + " failed.");
518        storageInfo.setState(DatanodeStorage.State.FAILED);
519      }
520    }
521  }
522
  /**
   * Iterates over all blocks across a set of storages by chaining each
   * storage's block iterator in turn.
   */
  private static class BlockIterator implements Iterator<BlockInfo> {
    // Index of the per-storage iterator currently being consumed.
    private int index = 0;
    private final List<Iterator<BlockInfo>> iterators;
    
    private BlockIterator(final DatanodeStorageInfo... storages) {
      List<Iterator<BlockInfo>> iterators = new ArrayList<Iterator<BlockInfo>>();
      for (DatanodeStorageInfo e : storages) {
        iterators.add(e.getBlockIterator());
      }
      this.iterators = Collections.unmodifiableList(iterators);
    }

    @Override
    public boolean hasNext() {
      update();
      return !iterators.isEmpty() && iterators.get(index).hasNext();
    }

    @Override
    public BlockInfo next() {
      update();
      return iterators.get(index).next();
    }
    
    @Override
    public void remove() {
      throw new UnsupportedOperationException("Remove unsupported.");
    }
    
    // Skip past exhausted iterators, never advancing beyond the last one.
    private void update() {
      while(index < iterators.size() - 1 && !iterators.get(index).hasNext()) {
        index++;
      }
    }
  }

  /** @return an iterator over all blocks on all of this node's storages. */
  Iterator<BlockInfo> getBlockIterator() {
    return new BlockIterator(getStorageInfos());
  }
  /** @return an iterator over the blocks on the given storage only. */
  Iterator<BlockInfo> getBlockIterator(final String storageID) {
    return new BlockIterator(getStorageInfo(storageID));
  }
565
  /** Note one more replication task whose targets are not yet chosen. */
  void incrementPendingReplicationWithoutTargets() {
    PendingReplicationWithoutTargets++;
  }

  /** Note that a pending replication task has had its targets chosen. */
  void decrementPendingReplicationWithoutTargets() {
    PendingReplicationWithoutTargets--;
  }

  /**
   * Store block replication work.
   */
  void addBlockToBeReplicated(Block block, DatanodeStorageInfo[] targets) {
    assert(block != null && targets != null && targets.length > 0);
    replicateBlocks.offer(new BlockTargetPair(block, targets));
  }

  /**
   * Store block recovery work.
   */
  void addBlockToBeRecovered(BlockInfoUnderConstruction block) {
    if(recoverBlocks.contains(block)) {
      // this prevents adding the same block twice to the recovery queue
      BlockManager.LOG.info(block + " is already in the recovery queue");
      return;
    }
    recoverBlocks.offer(block);
  }

  /**
   * Store block invalidation work.
   */
  void addBlocksToBeInvalidated(List<Block> blocklist) {
    assert(blocklist != null && blocklist.size() > 0);
    synchronized (invalidateBlocks) {
      for(Block blk : blocklist) {
        invalidateBlocks.add(blk);
      }
    }
  }

  /**
   * The number of work items that are pending to be replicated
   */
  int getNumberOfBlocksToBeReplicated() {
    // Queued work plus work still awaiting target selection.
    return PendingReplicationWithoutTargets + replicateBlocks.size();
  }

  /**
   * The number of block invalidation items that are pending to 
   * be sent to the datanode
   */
  int getNumberOfBlocksToBeInvalidated() {
    synchronized (invalidateBlocks) {
      return invalidateBlocks.size();
    }
  }

  /** Dequeue up to maxTransfers replication tasks; null when none queued. */
  public List<BlockTargetPair> getReplicationCommand(int maxTransfers) {
    return replicateBlocks.poll(maxTransfers);
  }
626
627  public BlockInfoUnderConstruction[] getLeaseRecoveryCommand(int maxTransfers) {
628    List<BlockInfoUnderConstruction> blocks = recoverBlocks.poll(maxTransfers);
629    if(blocks == null)
630      return null;
631    return blocks.toArray(new BlockInfoUnderConstruction[blocks.size()]);
632  }
633
634  /**
635   * Remove the specified number of blocks to be invalidated
636   */
637  public Block[] getInvalidateBlocks(int maxblocks) {
638    synchronized (invalidateBlocks) {
639      Block[] deleteList = invalidateBlocks.pollToArray(new Block[Math.min(
640          invalidateBlocks.size(), maxblocks)]);
641      return deleteList.length == 0 ? null : deleteList;
642    }
643  }
644
645  /**
646   * Return the sum of remaining spaces of the specified type. If the remaining
647   * space of a storage is less than minSize, it won't be counted toward the
648   * sum.
649   *
650   * @param t The storage type. If null, the type is ignored.
651   * @param minSize The minimum free space required.
652   * @return the sum of remaining spaces that are bigger than minSize.
653   */
654  public long getRemaining(StorageType t, long minSize) {
655    long remaining = 0;
656    for (DatanodeStorageInfo s : getStorageInfos()) {
657      if (s.getState() == State.NORMAL &&
658          (t == null || s.getStorageType() == t)) {
659        long r = s.getRemaining();
660        if (r >= minSize) {
661          remaining += r;
662        }
663      }
664    }
665    return remaining;
666  }
667
668  /**
669   * @return Approximate number of blocks currently scheduled to be written 
670   * to the given storage type of this datanode.
671   */
672  public int getBlocksScheduled(StorageType t) {
673    return (int)(currApproxBlocksScheduled.get(t)
674        + prevApproxBlocksScheduled.get(t));
675  }
676
677  /**
678   * @return Approximate number of blocks currently scheduled to be written 
679   * to this datanode.
680   */
681  public int getBlocksScheduled() {
682    return (int)(currApproxBlocksScheduled.sum()
683        + prevApproxBlocksScheduled.sum());
684  }
685
686  /** Increment the number of blocks scheduled. */
687  void incrementBlocksScheduled(StorageType t) {
688    currApproxBlocksScheduled.add(t, 1);;
689  }
690  
  /** Decrement the number of blocks scheduled. */
  void decrementBlocksScheduled(StorageType t) {
    // Drain the previous (older) counting window before the current one.
    if (prevApproxBlocksScheduled.get(t) > 0) {
      prevApproxBlocksScheduled.subtract(t, 1);
    } else if (currApproxBlocksScheduled.get(t) > 0) {
      currApproxBlocksScheduled.subtract(t, 1);
    } 
    // its ok if both counters are zero.
  }
  
  /** Adjusts curr and prev number of blocks scheduled every few minutes. */
  private void rollBlocksScheduled(long now) {
    if (now - lastBlocksScheduledRollTime > BLOCKS_SCHEDULED_ROLL_INTERVAL) {
      prevApproxBlocksScheduled.set(currApproxBlocksScheduled);
      currApproxBlocksScheduled.reset();
      lastBlocksScheduledRollTime = now;
    }
  }
709  
  @Override
  public int hashCode() {
    // Super implementation is sufficient; kept consistent with equals(),
    // which also delegates to the superclass (DatanodeID identity).
    return super.hashCode();
  }
  
  @Override
  public boolean equals(Object obj) {
    // Sufficient to use super equality as datanodes are uniquely identified
    // by DatanodeID
    return (this == obj) || super.equals(obj);
  }
722
723  /** Decommissioning status */
724  public class DecommissioningStatus {
725    private int underReplicatedBlocks;
726    private int decommissionOnlyReplicas;
727    private int underReplicatedInOpenFiles;
728    private long startTime;
729    
730    synchronized void set(int underRep,
731        int onlyRep, int underConstruction) {
732      if (isDecommissionInProgress() == false) {
733        return;
734      }
735      underReplicatedBlocks = underRep;
736      decommissionOnlyReplicas = onlyRep;
737      underReplicatedInOpenFiles = underConstruction;
738    }
739
740    /** @return the number of under-replicated blocks */
741    public synchronized int getUnderReplicatedBlocks() {
742      if (isDecommissionInProgress() == false) {
743        return 0;
744      }
745      return underReplicatedBlocks;
746    }
747    /** @return the number of decommission-only replicas */
748    public synchronized int getDecommissionOnlyReplicas() {
749      if (isDecommissionInProgress() == false) {
750        return 0;
751      }
752      return decommissionOnlyReplicas;
753    }
754    /** @return the number of under-replicated blocks in open files */
755    public synchronized int getUnderReplicatedInOpenFiles() {
756      if (isDecommissionInProgress() == false) {
757        return 0;
758      }
759      return underReplicatedInOpenFiles;
760    }
761    /** Set start time */
762    public synchronized void setStartTime(long time) {
763      startTime = time;
764    }
765    /** @return start time */
766    public synchronized long getStartTime() {
767      if (isDecommissionInProgress() == false) {
768        return 0;
769      }
770      return startTime;
771    }
772  }  // End of class DecommissioningStatus
773
774  /**
775   * Set the flag to indicate if this datanode is disallowed from communicating
776   * with the namenode.
777   */
778  public void setDisallowed(boolean flag) {
779    disallowed = flag;
780  }
781  /** Is the datanode disallowed from communicating with the namenode? */
782  public boolean isDisallowed() {
783    return disallowed;
784  }
785
786  /**
787   * @return number of failed volumes in the datanode.
788   */
789  public int getVolumeFailures() {
790    return volumeFailures;
791  }
792
793  /**
794   * @param nodeReg DatanodeID to update registration for.
795   */
796  @Override
797  public void updateRegInfo(DatanodeID nodeReg) {
798    super.updateRegInfo(nodeReg);
799    
800    // must re-process IBR after re-registration
801    for(DatanodeStorageInfo storage : getStorageInfos()) {
802      storage.setBlockReportCount(0);
803    }
804    heartbeatedSinceRegistration = false;
805  }
806
807  /**
808   * @return balancer bandwidth in bytes per second for this datanode
809   */
810  public long getBalancerBandwidth() {
811    return this.bandwidth;
812  }
813
814  /**
815   * @param bandwidth balancer bandwidth in bytes per second for this datanode
816   */
817  public void setBalancerBandwidth(long bandwidth) {
818    this.bandwidth = bandwidth;
819  }
820
821  @Override
822  public String dumpDatanode() {
823    StringBuilder sb = new StringBuilder(super.dumpDatanode());
824    int repl = replicateBlocks.size();
825    if (repl > 0) {
826      sb.append(" ").append(repl).append(" blocks to be replicated;");
827    }
828    int inval = invalidateBlocks.size();
829    if (inval > 0) {
830      sb.append(" ").append(inval).append(" blocks to be invalidated;");      
831    }
832    int recover = recoverBlocks.size();
833    if (recover > 0) {
834      sb.append(" ").append(recover).append(" blocks to be recovered;");
835    }
836    return sb.toString();
837  }
838
  /**
   * Look up (or create) the DatanodeStorageInfo for a reported storage,
   * refreshing its type/state if the report differs.
   *
   * @param s the storage reported by the datanode
   * @return the up-to-date storage info; never null.
   */
  DatanodeStorageInfo updateStorage(DatanodeStorage s) {
    synchronized (storageMap) {
      DatanodeStorageInfo storage = storageMap.get(s.getStorageID());
      if (storage == null) {
        LOG.info("Adding new storage ID " + s.getStorageID() +
                 " for DN " + getXferAddr());
        storage = new DatanodeStorageInfo(this, s);
        storageMap.put(s.getStorageID(), storage);
      } else if (storage.getState() != s.getState() ||
                 storage.getStorageType() != s.getStorageType()) {
        // For backwards compatibility, make sure that the type and
        // state are updated. Some reports from older datanodes do
        // not include these fields so we may have assumed defaults.
        storage.updateFromStorage(s);
        storageMap.put(storage.getStorageID(), storage);
      }
      return storage;
    }
  }
858
859  /**
860   * @return   The time at which we last sent caching directives to this 
861   *           DataNode, in monotonic milliseconds.
862   */
863  public long getLastCachingDirectiveSentTimeMs() {
864    return this.lastCachingDirectiveSentTimeMs;
865  }
866
867  /**
868   * @param time  The time at which we last sent caching directives to this 
869   *              DataNode, in monotonic milliseconds.
870   */
871  public void setLastCachingDirectiveSentTimeMs(long time) {
872    this.lastCachingDirectiveSentTimeMs = time;
873  }
874  
875  /**
876   * checks whether atleast first block report has been received
877   * @return
878   */
879  public boolean checkBlockReportReceived() {
880    if(this.getStorageInfos().length == 0) {
881      return false;
882    }
883    for(DatanodeStorageInfo storageInfo: this.getStorageInfos()) {
884      if(storageInfo.getBlockReportCount() == 0 )
885        return false;
886    }
887    return true;
888 }
889}
890