001/**
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018
019package org.apache.hadoop.hdfs;
020
021import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
022import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
023import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY;
024import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
025import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
026import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
027import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT;
028import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
029import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
030import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
031import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
032import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
033import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
034import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
035import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
036import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYPASSWORD_KEY;
037import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY;
038import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY;
039
import java.io.IOException;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.security.SecureRandom;
import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Random;
import java.util.Set;
060
061import javax.net.SocketFactory;
062
063import com.google.common.collect.Sets;
064import org.apache.commons.cli.CommandLine;
065import org.apache.commons.cli.CommandLineParser;
066import org.apache.commons.cli.Option;
067import org.apache.commons.cli.Options;
068import org.apache.commons.cli.ParseException;
069import org.apache.commons.cli.PosixParser;
070import org.apache.commons.logging.Log;
071import org.apache.commons.logging.LogFactory;
072import org.apache.hadoop.HadoopIllegalArgumentException;
073import org.apache.hadoop.classification.InterfaceAudience;
074import org.apache.hadoop.conf.Configuration;
075import org.apache.hadoop.crypto.key.KeyProvider;
076import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
077import org.apache.hadoop.crypto.key.KeyProviderFactory;
078import org.apache.hadoop.fs.BlockLocation;
079import org.apache.hadoop.fs.CommonConfigurationKeys;
080import org.apache.hadoop.fs.FileSystem;
081import org.apache.hadoop.fs.Path;
082import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
083import org.apache.hadoop.hdfs.protocol.DatanodeID;
084import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
085import org.apache.hadoop.hdfs.protocol.HdfsConstants;
086import org.apache.hadoop.hdfs.protocol.LocatedBlock;
087import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
088import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
089import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
090import org.apache.hadoop.hdfs.server.namenode.NameNode;
091import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
092import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
093import org.apache.hadoop.http.HttpConfig;
094import org.apache.hadoop.http.HttpServer2;
095import org.apache.hadoop.ipc.ProtobufRpcEngine;
096import org.apache.hadoop.ipc.RPC;
097import org.apache.hadoop.net.NetUtils;
098import org.apache.hadoop.net.NodeBase;
099import org.apache.hadoop.security.SecurityUtil;
100import org.apache.hadoop.security.UserGroupInformation;
101import org.apache.hadoop.security.authorize.AccessControlList;
102import org.apache.hadoop.util.StringUtils;
103import org.apache.hadoop.util.ToolRunner;
104
105import com.google.common.annotations.VisibleForTesting;
106import com.google.common.base.Charsets;
107import com.google.common.base.Joiner;
108import com.google.common.base.Preconditions;
109import com.google.common.collect.Lists;
110import com.google.common.collect.Maps;
111import com.google.common.primitives.SignedBytes;
112import com.google.protobuf.BlockingService;
113
114@InterfaceAudience.Private
115public class DFSUtil {
  public static final Log LOG = LogFactory.getLog(DFSUtil.class.getName());
  
  // Shared zero-length byte array; also substituted for null arguments in
  // compareBytes() and returned by byteArray2bytes() for empty input.
  public static final byte[] EMPTY_BYTES = {};
119
120  /** Compare two byte arrays by lexicographical order. */
121  public static int compareBytes(byte[] left, byte[] right) {
122    if (left == null) {
123      left = EMPTY_BYTES;
124    }
125    if (right == null) {
126      right = EMPTY_BYTES;
127    }
128    return SignedBytes.lexicographicalComparator().compare(left, right);
129  }
130
  private DFSUtil() { /* Hidden constructor */ }
  // One java.util.Random per thread so callers never share a generator
  // instance across threads.
  private static final ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
    @Override
    protected Random initialValue() {
      return new Random();
    }
  };
  
  // Per-thread SecureRandom for callers that need stronger randomness.
  private static final ThreadLocal<SecureRandom> SECURE_RANDOM = new ThreadLocal<SecureRandom>() {
    @Override
    protected SecureRandom initialValue() {
      return new SecureRandom();
    }
  };

  /** @return a pseudo random number generator. */
  public static Random getRandom() {
    return RANDOM.get();
  }
  
  /** @return a pseudo secure random number generator. */
  public static SecureRandom getSecureRandom() {
    return SECURE_RANDOM.get();
  }
155
156  /** Shuffle the elements in the given array. */
157  public static <T> T[] shuffle(final T[] array) {
158    if (array != null && array.length > 0) {
159      final Random random = getRandom();
160      for (int n = array.length; n > 1; ) {
161        final int randomIndex = random.nextInt(n);
162        n--;
163        if (n != randomIndex) {
164          final T tmp = array[randomIndex];
165          array[randomIndex] = array[n];
166          array[n] = tmp;
167        }
168      }
169    }
170    return array;
171  }
172
173  /**
174   * Compartor for sorting DataNodeInfo[] based on decommissioned states.
175   * Decommissioned nodes are moved to the end of the array on sorting with
176   * this compartor.
177   */
178  public static final Comparator<DatanodeInfo> DECOM_COMPARATOR = 
179    new Comparator<DatanodeInfo>() {
180      @Override
181      public int compare(DatanodeInfo a, DatanodeInfo b) {
182        return a.isDecommissioned() == b.isDecommissioned() ? 0 : 
183          a.isDecommissioned() ? 1 : -1;
184      }
185    };
186    
187      
188  /**
189   * Comparator for sorting DataNodeInfo[] based on decommissioned/stale states.
190   * Decommissioned/stale nodes are moved to the end of the array on sorting
191   * with this comparator.
192   */ 
193  @InterfaceAudience.Private 
194  public static class DecomStaleComparator implements Comparator<DatanodeInfo> {
195    private final long staleInterval;
196
197    /**
198     * Constructor of DecomStaleComparator
199     * 
200     * @param interval
201     *          The time interval for marking datanodes as stale is passed from
202     *          outside, since the interval may be changed dynamically
203     */
204    public DecomStaleComparator(long interval) {
205      this.staleInterval = interval;
206    }
207
208    @Override
209    public int compare(DatanodeInfo a, DatanodeInfo b) {
210      // Decommissioned nodes will still be moved to the end of the list
211      if (a.isDecommissioned()) {
212        return b.isDecommissioned() ? 0 : 1;
213      } else if (b.isDecommissioned()) {
214        return -1;
215      }
216      // Stale nodes will be moved behind the normal nodes
217      boolean aStale = a.isStale(staleInterval);
218      boolean bStale = b.isStale(staleInterval);
219      return aStale == bStale ? 0 : (aStale ? 1 : -1);
220    }
221  }    
222    
223  /**
224   * Address matcher for matching an address to local address
225   */
226  static final AddressMatcher LOCAL_ADDRESS_MATCHER = new AddressMatcher() {
227    @Override
228    public boolean match(InetSocketAddress s) {
229      return NetUtils.isLocalAddress(s.getAddress());
230    };
231  };
232  
233  /**
234   * Whether the pathname is valid.  Currently prohibits relative paths, 
235   * names which contain a ":" or "//", or other non-canonical paths.
236   */
237  public static boolean isValidName(String src) {
238    // Path must be absolute.
239    if (!src.startsWith(Path.SEPARATOR)) {
240      return false;
241    }
242      
243    // Check for ".." "." ":" "/"
244    String[] components = StringUtils.split(src, '/');
245    for (int i = 0; i < components.length; i++) {
246      String element = components[i];
247      if (element.equals(".")  ||
248          (element.indexOf(":") >= 0)  ||
249          (element.indexOf("/") >= 0)) {
250        return false;
251      }
252      // ".." is allowed in path starting with /.reserved/.inodes
253      if (element.equals("..")) {
254        if (components.length > 4
255            && components[1].equals(FSDirectory.DOT_RESERVED_STRING)
256            && components[2].equals(FSDirectory.DOT_INODES_STRING)) {
257          continue;
258        }
259        return false;
260      }
261      // The string may start or end with a /, but not have
262      // "//" in the middle.
263      if (element.isEmpty() && i != components.length - 1 &&
264          i != 0) {
265        return false;
266      }
267    }
268    return true;
269  }
270
271  /**
272   * Checks if a string is a valid path component. For instance, components
273   * cannot contain a ":" or "/", and cannot be equal to a reserved component
274   * like ".snapshot".
275   * <p>
276   * The primary use of this method is for validating paths when loading the
277   * FSImage. During normal NN operation, paths are sometimes allowed to
278   * contain reserved components.
279   * 
280   * @return If component is valid
281   */
282  public static boolean isValidNameForComponent(String component) {
283    if (component.equals(".") ||
284        component.equals("..") ||
285        component.indexOf(":") >= 0 ||
286        component.indexOf("/") >= 0) {
287      return false;
288    }
289    return !isReservedPathComponent(component);
290  }
291
292
293  /**
294   * Returns if the component is reserved.
295   * 
296   * <p>
297   * Note that some components are only reserved under certain directories, e.g.
298   * "/.reserved" is reserved, while "/hadoop/.reserved" is not.
299   * @return true, if the component is reserved
300   */
301  public static boolean isReservedPathComponent(String component) {
302    for (String reserved : HdfsConstants.RESERVED_PATH_COMPONENTS) {
303      if (component.equals(reserved)) {
304        return true;
305      }
306    }
307    return false;
308  }
309
  /**
   * Converts a byte array to a string using UTF8 encoding.
   * Convenience overload that decodes the entire array; see
   * {@link #bytes2String(byte[], int, int)}.
   */
  public static String bytes2String(byte[] bytes) {
    return bytes2String(bytes, 0, bytes.length);
  }
316  
317  /**
318   * Decode a specific range of bytes of the given byte array to a string
319   * using UTF8.
320   * 
321   * @param bytes The bytes to be decoded into characters
322   * @param offset The index of the first byte to decode
323   * @param length The number of bytes to decode
324   * @return The decoded string
325   */
326  public static String bytes2String(byte[] bytes, int offset, int length) {
327    try {
328      return new String(bytes, offset, length, "UTF8");
329    } catch(UnsupportedEncodingException e) {
330      assert false : "UTF8 encoding is not supported ";
331    }
332    return null;
333  }
334
335  /**
336   * Converts a string to a byte array using UTF8 encoding.
337   */
338  public static byte[] string2Bytes(String str) {
339    return str.getBytes(Charsets.UTF_8);
340  }
341
342  /**
343   * Given a list of path components returns a path as a UTF8 String
344   */
345  public static String byteArray2PathString(byte[][] pathComponents) {
346    if (pathComponents.length == 0) {
347      return "";
348    } else if (pathComponents.length == 1
349        && (pathComponents[0] == null || pathComponents[0].length == 0)) {
350      return Path.SEPARATOR;
351    }
352    StringBuilder result = new StringBuilder();
353    for (int i = 0; i < pathComponents.length; i++) {
354      result.append(new String(pathComponents[i], Charsets.UTF_8));
355      if (i < pathComponents.length - 1) {
356        result.append(Path.SEPARATOR_CHAR);
357      }
358    }
359    return result.toString();
360  }
361
362  /**
363   * Converts a list of path components into a path using Path.SEPARATOR.
364   * 
365   * @param components Path components
366   * @return Combined path as a UTF-8 string
367   */
368  public static String strings2PathString(String[] components) {
369    if (components.length == 0) {
370      return "";
371    }
372    if (components.length == 1) {
373      if (components[0] == null || components[0].isEmpty()) {
374        return Path.SEPARATOR;
375      }
376    }
377    return Joiner.on(Path.SEPARATOR).join(components);
378  }
379
380  /**
381   * Given a list of path components returns a byte array
382   */
383  public static byte[] byteArray2bytes(byte[][] pathComponents) {
384    if (pathComponents.length == 0) {
385      return EMPTY_BYTES;
386    } else if (pathComponents.length == 1
387        && (pathComponents[0] == null || pathComponents[0].length == 0)) {
388      return new byte[]{(byte) Path.SEPARATOR_CHAR};
389    }
390    int length = 0;
391    for (int i = 0; i < pathComponents.length; i++) {
392      length += pathComponents[i].length;
393      if (i < pathComponents.length - 1) {
394        length++; // for SEPARATOR
395      }
396    }
397    byte[] path = new byte[length];
398    int index = 0;
399    for (int i = 0; i < pathComponents.length; i++) {
400      System.arraycopy(pathComponents[i], 0, path, index,
401          pathComponents[i].length);
402      index += pathComponents[i].length;
403      if (i < pathComponents.length - 1) {
404        path[index] = (byte) Path.SEPARATOR_CHAR;
405        index++;
406      }
407    }
408    return path;
409  }
410
411  /** Convert an object representing a path to a string. */
412  public static String path2String(final Object path) {
413    return path == null? null
414        : path instanceof String? (String)path
415        : path instanceof byte[][]? byteArray2PathString((byte[][])path)
416        : path.toString();
417  }
418
  /**
   * Convert a UTF8 string to an array of byte arrays.
   * Splits the path on the separator character while working directly on
   * the encoded bytes.
   *
   * @param path the path string to split
   * @return the path components, each as a UTF-8 byte array
   */
  public static byte[][] getPathComponents(String path) {
    // avoid intermediate split to String[]
    final byte[] bytes = string2Bytes(path);
    return bytes2byteArray(bytes, bytes.length, (byte)Path.SEPARATOR_CHAR);
  }
427
  /**
   * Splits the array of bytes into array of arrays of bytes
   * on byte separator.
   * Convenience overload that processes the whole array; see
   * {@link #bytes2byteArray(byte[], int, byte)}.
   * @param bytes the array of bytes to split
   * @param separator the delimiting byte
   */
  public static byte[][] bytes2byteArray(byte[] bytes, byte separator) {
    return bytes2byteArray(bytes, bytes.length, separator);
  }
437
438  /**
439   * Splits first len bytes in bytes to array of arrays of bytes
440   * on byte separator
441   * @param bytes the byte array to split
442   * @param len the number of bytes to split
443   * @param separator the delimiting byte
444   */
445  public static byte[][] bytes2byteArray(byte[] bytes,
446                                         int len,
447                                         byte separator) {
448    assert len <= bytes.length;
449    int splits = 0;
450    if (len == 0) {
451      return new byte[][]{null};
452    }
453    // Count the splits. Omit multiple separators and the last one
454    for (int i = 0; i < len; i++) {
455      if (bytes[i] == separator) {
456        splits++;
457      }
458    }
459    int last = len - 1;
460    while (last > -1 && bytes[last--] == separator) {
461      splits--;
462    }
463    if (splits == 0 && bytes[0] == separator) {
464      return new byte[][]{null};
465    }
466    splits++;
467    byte[][] result = new byte[splits][];
468    int startIndex = 0;
469    int nextIndex = 0;
470    int index = 0;
471    // Build the splits
472    while (index < splits) {
473      while (nextIndex < len && bytes[nextIndex] != separator) {
474        nextIndex++;
475      }
476      result[index] = new byte[nextIndex - startIndex];
477      System.arraycopy(bytes, startIndex, result[index], 0, nextIndex
478              - startIndex);
479      index++;
480      startIndex = nextIndex + 1;
481      nextIndex = startIndex;
482    }
483    return result;
484  }
485  
486  /**
487   * Convert a LocatedBlocks to BlockLocations[]
488   * @param blocks a LocatedBlocks
489   * @return an array of BlockLocations
490   */
491  public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
492    if (blocks == null) {
493      return new BlockLocation[0];
494    }
495    return locatedBlocks2Locations(blocks.getLocatedBlocks());
496  }
497  
498  /**
499   * Convert a List<LocatedBlock> to BlockLocation[]
500   * @param blocks A List<LocatedBlock> to be converted
501   * @return converted array of BlockLocation
502   */
503  public static BlockLocation[] locatedBlocks2Locations(List<LocatedBlock> blocks) {
504    if (blocks == null) {
505      return new BlockLocation[0];
506    }
507    int nrBlocks = blocks.size();
508    BlockLocation[] blkLocations = new BlockLocation[nrBlocks];
509    if (nrBlocks == 0) {
510      return blkLocations;
511    }
512    int idx = 0;
513    for (LocatedBlock blk : blocks) {
514      assert idx < nrBlocks : "Incorrect index";
515      DatanodeInfo[] locations = blk.getLocations();
516      String[] hosts = new String[locations.length];
517      String[] xferAddrs = new String[locations.length];
518      String[] racks = new String[locations.length];
519      for (int hCnt = 0; hCnt < locations.length; hCnt++) {
520        hosts[hCnt] = locations[hCnt].getHostName();
521        xferAddrs[hCnt] = locations[hCnt].getXferAddr();
522        NodeBase node = new NodeBase(xferAddrs[hCnt], 
523                                     locations[hCnt].getNetworkLocation());
524        racks[hCnt] = node.toString();
525      }
526      DatanodeInfo[] cachedLocations = blk.getCachedLocations();
527      String[] cachedHosts = new String[cachedLocations.length];
528      for (int i=0; i<cachedLocations.length; i++) {
529        cachedHosts[i] = cachedLocations[i].getHostName();
530      }
531      blkLocations[idx] = new BlockLocation(xferAddrs, hosts, cachedHosts,
532                                            racks,
533                                            blk.getStartOffset(),
534                                            blk.getBlockSize(),
535                                            blk.isCorrupt());
536      idx++;
537    }
538    return blkLocations;
539  }
540
  /**
   * Returns collection of nameservice Ids from the configuration.
   * Reads the DFS_NAMESERVICES key as a trimmed string collection.
   * @param conf configuration
   * @return collection of nameservice Ids, or null if not specified
   */
  public static Collection<String> getNameServiceIds(Configuration conf) {
    return conf.getTrimmedStringCollection(DFS_NAMESERVICES);
  }
549
550  /**
551   * @return <code>coll</code> if it is non-null and non-empty. Otherwise,
552   * returns a list with a single null value.
553   */
554  private static Collection<String> emptyAsSingletonNull(Collection<String> coll) {
555    if (coll == null || coll.isEmpty()) {
556      return Collections.singletonList(null);
557    } else {
558      return coll;
559    }
560  }
561  
  /**
   * Namenode HighAvailability related configuration.
   * Returns collection of namenode Ids from the configuration. One logical id
   * for each namenode in the HA setup.
   * 
   * @param conf configuration
   * @param nsId the nameservice ID to look at, or null for non-federated 
   * @return collection of namenode Ids
   */
  public static Collection<String> getNameNodeIds(Configuration conf, String nsId) {
    // The key is DFS_HA_NAMENODES_KEY_PREFIX, suffixed with ".<nsId>" when
    // a nameservice ID is given.
    String key = addSuffix(DFS_HA_NAMENODES_KEY_PREFIX, nsId);
    return conf.getTrimmedStringCollection(key);
  }
575  
576  /**
577   * Given a list of keys in the order of preference, returns a value
578   * for the key in the given order from the configuration.
579   * @param defaultValue default value to return, when key was not found
580   * @param keySuffix suffix to add to the key, if it is not null
581   * @param conf Configuration
582   * @param keys list of keys in the order of preference
583   * @return value of the key or default if a key was not found in configuration
584   */
585  private static String getConfValue(String defaultValue, String keySuffix,
586      Configuration conf, String... keys) {
587    String value = null;
588    for (String key : keys) {
589      key = addSuffix(key, keySuffix);
590      value = conf.get(key);
591      if (value != null) {
592        break;
593      }
594    }
595    if (value == null) {
596      value = defaultValue;
597    }
598    return value;
599  }
600  
601  /** Add non empty and non null suffix to a key */
602  private static String addSuffix(String key, String suffix) {
603    if (suffix == null || suffix.isEmpty()) {
604      return key;
605    }
606    assert !suffix.startsWith(".") :
607      "suffix '" + suffix + "' should not already have '.' prepended.";
608    return key + "." + suffix;
609  }
610  
  /**
   * Concatenate list of suffix strings '.' separated.
   * Null entries are skipped by the joiner.
   * @return the joined suffixes, or null when the array itself is null
   */
  private static String concatSuffixes(String... suffixes) {
    if (suffixes == null) {
      return null;
    }
    return Joiner.on(".").skipNulls().join(suffixes);
  }
618  
  /**
   * Return configuration key of format key.suffix1.suffix2...suffixN
   * @param key base configuration key
   * @param suffixes suffixes to append; null entries are skipped
   * @return the key with the non-null suffixes appended, '.'-separated
   */
  public static String addKeySuffixes(String key, String... suffixes) {
    String keySuffix = concatSuffixes(suffixes);
    return addSuffix(key, keySuffix);
  }
626
  /**
   * Returns the configured address for all NameNodes in the cluster.
   * @param conf configuration
   * @param defaultAddress default address to return in case key is not found.
   * @param keys Set of keys to look for in the order of preference
   * @return a map(nameserviceId to map(namenodeId to InetSocketAddress))
   */
  private static Map<String, Map<String, InetSocketAddress>>
    getAddresses(Configuration conf, String defaultAddress, String... keys) {
    // Resolve addresses for every nameservice listed in the configuration.
    Collection<String> nameserviceIds = getNameServiceIds(conf);
    return getAddressesForNsIds(conf, nameserviceIds, defaultAddress, keys);
  }
639
640  /**
641   * Returns the configured address for all NameNodes in the cluster.
642   * @param conf configuration
643   * @param nsIds
644   *@param defaultAddress default address to return in case key is not found.
645   * @param keys Set of keys to look for in the order of preference   @return a map(nameserviceId to map(namenodeId to InetSocketAddress))
646   */
647  private static Map<String, Map<String, InetSocketAddress>>
648    getAddressesForNsIds(Configuration conf, Collection<String> nsIds,
649                         String defaultAddress, String... keys) {
650    // Look for configurations of the form <key>[.<nameserviceId>][.<namenodeId>]
651    // across all of the configured nameservices and namenodes.
652    Map<String, Map<String, InetSocketAddress>> ret = Maps.newLinkedHashMap();
653    for (String nsId : emptyAsSingletonNull(nsIds)) {
654      Map<String, InetSocketAddress> isas =
655        getAddressesForNameserviceId(conf, nsId, defaultAddress, keys);
656      if (!isas.isEmpty()) {
657        ret.put(nsId, isas);
658      }
659    }
660    return ret;
661  }
662  
  /**
   * Get all of the RPC addresses of the individual NNs in a given nameservice.
   * 
   * @param conf Configuration
   * @param nsId the nameservice whose NNs addresses we want.
   * @param defaultValue default address to return in case key is not found.
   * @return A map from nnId -> RPC address of each NN in the nameservice.
   */
  public static Map<String, InetSocketAddress> getRpcAddressesForNameserviceId(
      Configuration conf, String nsId, String defaultValue) {
    // Only the client-facing RPC address key is consulted here.
    return getAddressesForNameserviceId(conf, nsId, defaultValue,
        DFS_NAMENODE_RPC_ADDRESS_KEY);
  }
676
677  private static Map<String, InetSocketAddress> getAddressesForNameserviceId(
678      Configuration conf, String nsId, String defaultValue,
679      String... keys) {
680    Collection<String> nnIds = getNameNodeIds(conf, nsId);
681    Map<String, InetSocketAddress> ret = Maps.newHashMap();
682    for (String nnId : emptyAsSingletonNull(nnIds)) {
683      String suffix = concatSuffixes(nsId, nnId);
684      String address = getConfValue(defaultValue, suffix, conf, keys);
685      if (address != null) {
686        InetSocketAddress isa = NetUtils.createSocketAddr(address);
687        if (isa.isUnresolved()) {
688          LOG.warn("Namenode for " + nsId +
689                   " remains unresolved for ID " + nnId +
690                   ".  Check your hdfs-site.xml file to " +
691                   "ensure namenodes are configured properly.");
692        }
693        ret.put(nnId, isa);
694      }
695    }
696    return ret;
697  }
698
699  /**
700   * @return a collection of all configured NN Kerberos principals.
701   */
702  public static Set<String> getAllNnPrincipals(Configuration conf) throws IOException {
703    Set<String> principals = new HashSet<String>();
704    for (String nsId : DFSUtil.getNameServiceIds(conf)) {
705      if (HAUtil.isHAEnabled(conf, nsId)) {
706        for (String nnId : DFSUtil.getNameNodeIds(conf, nsId)) {
707          Configuration confForNn = new Configuration(conf);
708          NameNode.initializeGenericKeys(confForNn, nsId, nnId);
709          String principal = SecurityUtil.getServerPrincipal(confForNn
710              .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
711              NameNode.getAddress(confForNn).getHostName());
712          principals.add(principal);
713        }
714      } else {
715        Configuration confForNn = new Configuration(conf);
716        NameNode.initializeGenericKeys(confForNn, nsId, null);
717        String principal = SecurityUtil.getServerPrincipal(confForNn
718            .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
719            NameNode.getAddress(confForNn).getHostName());
720        principals.add(principal);
721      }
722    }
723
724    return principals;
725  }
726
  /**
   * Returns the HA NN RPC addresses from the configuration, keyed first by
   * nameservice ID and then by namenode ID.
   * 
   * @param conf configuration
   * @return map of nameserviceId to map of namenodeId to InetSocketAddress
   */
  public static Map<String, Map<String, InetSocketAddress>> getHaNnRpcAddresses(
      Configuration conf) {
    return getAddresses(conf, null, DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
  }
738
739  /**
740   * Returns list of InetSocketAddress corresponding to HA NN HTTP addresses from
741   * the configuration.
742   *
743   * @return list of InetSocketAddresses
744   */
745  public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresses(
746      Configuration conf, String scheme) {
747    if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
748      return getAddresses(conf, null,
749          DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
750    } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
751      return getAddresses(conf, null,
752          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
753    } else {
754      throw new IllegalArgumentException("Unsupported scheme: " + scheme);
755    }
756  }
757
758  /**
759   * Returns list of InetSocketAddress corresponding to  backup node rpc 
760   * addresses from the configuration.
761   * 
762   * @param conf configuration
763   * @return list of InetSocketAddresses
764   * @throws IOException on error
765   */
766  public static Map<String, Map<String, InetSocketAddress>> getBackupNodeAddresses(
767      Configuration conf) throws IOException {
768    Map<String, Map<String, InetSocketAddress>> addressList = getAddresses(conf,
769        null, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
770    if (addressList.isEmpty()) {
771      throw new IOException("Incorrect configuration: backup node address "
772          + DFS_NAMENODE_BACKUP_ADDRESS_KEY + " is not configured.");
773    }
774    return addressList;
775  }
776
777  /**
778   * Returns list of InetSocketAddresses of corresponding to secondary namenode
779   * http addresses from the configuration.
780   * 
781   * @param conf configuration
782   * @return list of InetSocketAddresses
783   * @throws IOException on error
784   */
785  public static Map<String, Map<String, InetSocketAddress>> getSecondaryNameNodeAddresses(
786      Configuration conf) throws IOException {
787    Map<String, Map<String, InetSocketAddress>> addressList = getAddresses(conf, null,
788        DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
789    if (addressList.isEmpty()) {
790      throw new IOException("Incorrect configuration: secondary namenode address "
791          + DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY + " is not configured.");
792    }
793    return addressList;
794  }
795
796  /**
797   * Returns list of InetSocketAddresses corresponding to namenodes from the
798   * configuration.
799   * 
800   * Returns namenode address specifically configured for datanodes (using
801   * service ports), if found. If not, regular RPC address configured for other
802   * clients is returned.
803   * 
804   * @param conf configuration
805   * @return list of InetSocketAddress
806   * @throws IOException on error
807   */
808  public static Map<String, Map<String, InetSocketAddress>> getNNServiceRpcAddresses(
809      Configuration conf) throws IOException {
810    // Use default address as fall back
811    String defaultAddress;
812    try {
813      defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
814    } catch (IllegalArgumentException e) {
815      defaultAddress = null;
816    }
817    
818    Map<String, Map<String, InetSocketAddress>> addressList =
819      getAddresses(conf, defaultAddress,
820        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
821    if (addressList.isEmpty()) {
822      throw new IOException("Incorrect configuration: namenode address "
823          + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "  
824          + DFS_NAMENODE_RPC_ADDRESS_KEY
825          + " is not configured.");
826    }
827    return addressList;
828  }
829
830  /**
831   * Returns list of InetSocketAddresses corresponding to the namenode
832   * that manages this cluster. Note this is to be used by datanodes to get
833   * the list of namenode addresses to talk to.
834   *
835   * Returns namenode address specifically configured for datanodes (using
836   * service ports), if found. If not, regular RPC address configured for other
837   * clients is returned.
838   *
839   * @param conf configuration
840   * @return list of InetSocketAddress
841   * @throws IOException on error
842   */
843  public static Map<String, Map<String, InetSocketAddress>>
844    getNNServiceRpcAddressesForCluster(Configuration conf) throws IOException {
845    // Use default address as fall back
846    String defaultAddress;
847    try {
848      defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
849    } catch (IllegalArgumentException e) {
850      defaultAddress = null;
851    }
852
853    Collection<String> parentNameServices = conf.getTrimmedStringCollection
854            (DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY);
855
856    if (parentNameServices.isEmpty()) {
857      parentNameServices = conf.getTrimmedStringCollection
858              (DFSConfigKeys.DFS_NAMESERVICES);
859    } else {
860      // Ensure that the internal service is ineed in the list of all available
861      // nameservices.
862      Set<String> availableNameServices = Sets.newHashSet(conf
863              .getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES));
864      for (String nsId : parentNameServices) {
865        if (!availableNameServices.contains(nsId)) {
866          throw new IOException("Unknown nameservice: " + nsId);
867        }
868      }
869    }
870
871    Map<String, Map<String, InetSocketAddress>> addressList =
872            getAddressesForNsIds(conf, parentNameServices, defaultAddress,
873                    DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
874    if (addressList.isEmpty()) {
875      throw new IOException("Incorrect configuration: namenode address "
876              + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "
877              + DFS_NAMENODE_RPC_ADDRESS_KEY
878              + " is not configured.");
879    }
880    return addressList;
881  }
882
883  /**
884   * Flatten the given map, as returned by other functions in this class,
885   * into a flat list of {@link ConfiguredNNAddress} instances.
886   */
887  public static List<ConfiguredNNAddress> flattenAddressMap(
888      Map<String, Map<String, InetSocketAddress>> map) {
889    List<ConfiguredNNAddress> ret = Lists.newArrayList();
890    
891    for (Map.Entry<String, Map<String, InetSocketAddress>> entry :
892      map.entrySet()) {
893      String nsId = entry.getKey();
894      Map<String, InetSocketAddress> nnMap = entry.getValue();
895      for (Map.Entry<String, InetSocketAddress> e2 : nnMap.entrySet()) {
896        String nnId = e2.getKey();
897        InetSocketAddress addr = e2.getValue();
898        
899        ret.add(new ConfiguredNNAddress(nsId, nnId, addr));
900      }
901    }
902    return ret;
903  }
904
905  /**
906   * Format the given map, as returned by other functions in this class,
907   * into a string suitable for debugging display. The format of this string
908   * should not be considered an interface, and is liable to change.
909   */
910  public static String addressMapToString(
911      Map<String, Map<String, InetSocketAddress>> map) {
912    StringBuilder b = new StringBuilder();
913    for (Map.Entry<String, Map<String, InetSocketAddress>> entry :
914         map.entrySet()) {
915      String nsId = entry.getKey();
916      Map<String, InetSocketAddress> nnMap = entry.getValue();
917      b.append("Nameservice <").append(nsId).append(">:").append("\n");
918      for (Map.Entry<String, InetSocketAddress> e2 : nnMap.entrySet()) {
919        b.append("  NN ID ").append(e2.getKey())
920          .append(" => ").append(e2.getValue()).append("\n");
921      }
922    }
923    return b.toString();
924  }
925  
926  public static String nnAddressesAsString(Configuration conf) {
927    Map<String, Map<String, InetSocketAddress>> addresses =
928      getHaNnRpcAddresses(conf);
929    return addressMapToString(addresses);
930  }
931
932  /**
933   * Represent one of the NameNodes configured in the cluster.
934   */
935  public static class ConfiguredNNAddress {
936    private final String nameserviceId;
937    private final String namenodeId;
938    private final InetSocketAddress addr;
939
940    private ConfiguredNNAddress(String nameserviceId, String namenodeId,
941        InetSocketAddress addr) {
942      this.nameserviceId = nameserviceId;
943      this.namenodeId = namenodeId;
944      this.addr = addr;
945    }
946
947    public String getNameserviceId() {
948      return nameserviceId;
949    }
950
951    public String getNamenodeId() {
952      return namenodeId;
953    }
954
955    public InetSocketAddress getAddress() {
956      return addr;
957    }
958    
959    @Override
960    public String toString() {
961      return "ConfiguredNNAddress[nsId=" + nameserviceId + ";" +
962        "nnId=" + namenodeId + ";addr=" + addr + "]";
963    }
964  }
965
966  /** @return Internal name services specified in the conf. */
967  static Collection<String> getInternalNameServices(Configuration conf) {
968    final Collection<String> ids = conf.getTrimmedStringCollection(
969        DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY);
970    return !ids.isEmpty()? ids: getNameServiceIds(conf);
971  }
972
973  /**
974   * Get a URI for each internal nameservice. If a nameservice is
975   * HA-enabled, and the configured failover proxy provider supports logical
976   * URIs, then the logical URI of the nameservice is returned.
977   * Otherwise, a URI corresponding to an RPC address of the single NN for that
978   * nameservice is returned, preferring the service RPC address over the
979   * client RPC address.
980   * 
981   * @param conf configuration
982   * @return a collection of all configured NN URIs, preferring service
983   *         addresses
984   */
985  public static Collection<URI> getInternalNsRpcUris(Configuration conf) {
986    return getNameServiceUris(conf, getInternalNameServices(conf),
987        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
988        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
989  }
990
991  /**
992   * Get a URI for each configured nameservice. If a nameservice is
993   * HA-enabled, and the configured failover proxy provider supports logical
994   * URIs, then the logical URI of the nameservice is returned.
995   * Otherwise, a URI corresponding to the address of the single NN for that
996   * nameservice is returned.
997   * 
998   * @param conf configuration
999   * @param keys configuration keys to try in order to get the URI for non-HA
1000   *        nameservices
1001   * @return a collection of all configured NN URIs
1002   */
1003  static Collection<URI> getNameServiceUris(Configuration conf,
1004      Collection<String> nameServices, String... keys) {
1005    Set<URI> ret = new HashSet<URI>();
1006    
1007    // We're passed multiple possible configuration keys for any given NN or HA
1008    // nameservice, and search the config in order of these keys. In order to
1009    // make sure that a later config lookup (e.g. fs.defaultFS) doesn't add a
1010    // URI for a config key for which we've already found a preferred entry, we
1011    // keep track of non-preferred keys here.
1012    Set<URI> nonPreferredUris = new HashSet<URI>();
1013
1014    for (String nsId : nameServices) {
1015      URI nsUri;
1016      try {
1017        nsUri = new URI(HdfsConstants.HDFS_URI_SCHEME + "://" + nsId);
1018      } catch (URISyntaxException ue) {
1019        throw new IllegalArgumentException(ue);
1020      }
1021      /**
1022       * Determine whether the logical URI of the name service can be resolved
1023       * by the configured failover proxy provider. If not, we should try to
1024       * resolve the URI here
1025       */
1026      boolean useLogicalUri = false;
1027      try {
1028        useLogicalUri = HAUtil.useLogicalUri(conf, nsUri);
1029      } catch (IOException e){
1030        LOG.warn("Getting exception  while trying to determine if nameservice "
1031            + nsId + " can use logical URI: " + e);
1032      }
1033      if (HAUtil.isHAEnabled(conf, nsId) && useLogicalUri) {
1034        // Add the logical URI of the nameservice.
1035        ret.add(nsUri);
1036      } else {
1037        // Add the URI corresponding to the address of the NN.
1038        boolean uriFound = false;
1039        for (String key : keys) {
1040          String addr = conf.get(concatSuffixes(key, nsId));
1041          if (addr != null) {
1042            URI uri = createUri(HdfsConstants.HDFS_URI_SCHEME,
1043                NetUtils.createSocketAddr(addr));
1044            if (!uriFound) {
1045              uriFound = true;
1046              ret.add(uri);
1047            } else {
1048              nonPreferredUris.add(uri);
1049            }
1050          }
1051        }
1052      }
1053    }
1054    
1055    // Add the generic configuration keys.
1056    boolean uriFound = false;
1057    for (String key : keys) {
1058      String addr = conf.get(key);
1059      if (addr != null) {
1060        URI uri = createUri("hdfs", NetUtils.createSocketAddr(addr));
1061        if (!uriFound) {
1062          uriFound = true;
1063          ret.add(uri);
1064        } else {
1065          nonPreferredUris.add(uri);
1066        }
1067      }
1068    }
1069    
1070    // Add the default URI if it is an HDFS URI.
1071    URI defaultUri = FileSystem.getDefaultUri(conf);
1072    // checks if defaultUri is ip:port format
1073    // and convert it to hostname:port format
1074    if (defaultUri != null && (defaultUri.getPort() != -1)) {
1075      defaultUri = createUri(defaultUri.getScheme(),
1076          NetUtils.createSocketAddr(defaultUri.getHost(), 
1077              defaultUri.getPort()));
1078    }
1079    if (defaultUri != null &&
1080        HdfsConstants.HDFS_URI_SCHEME.equals(defaultUri.getScheme()) &&
1081        !nonPreferredUris.contains(defaultUri)) {
1082      ret.add(defaultUri);
1083    }
1084    
1085    return ret;
1086  }
1087
1088  /**
1089   * Given the InetSocketAddress this method returns the nameservice Id
1090   * corresponding to the key with matching address, by doing a reverse 
1091   * lookup on the list of nameservices until it finds a match.
1092   * 
1093   * Since the process of resolving URIs to Addresses is slightly expensive,
1094   * this utility method should not be used in performance-critical routines.
1095   * 
1096   * @param conf - configuration
1097   * @param address - InetSocketAddress for configured communication with NN.
1098   *     Configured addresses are typically given as URIs, but we may have to
1099   *     compare against a URI typed in by a human, or the server name may be
1100   *     aliased, so we compare unambiguous InetSocketAddresses instead of just
1101   *     comparing URI substrings.
1102   * @param keys - list of configured communication parameters that should
1103   *     be checked for matches.  For example, to compare against RPC addresses,
1104   *     provide the list DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
1105   *     DFS_NAMENODE_RPC_ADDRESS_KEY.  Use the generic parameter keys,
1106   *     not the NameServiceId-suffixed keys.
1107   * @return nameserviceId, or null if no match found
1108   */
1109  public static String getNameServiceIdFromAddress(final Configuration conf, 
1110      final InetSocketAddress address, String... keys) {
1111    // Configuration with a single namenode and no nameserviceId
1112    String[] ids = getSuffixIDs(conf, address, keys);
1113    return (ids != null) ? ids[0] : null;
1114  }
1115  
1116  /**
1117   * return server http or https address from the configuration for a
1118   * given namenode rpc address.
1119   * @param namenodeAddr - namenode RPC address
1120   * @param conf configuration
1121   * @param scheme - the scheme (http / https)
1122   * @return server http or https address
1123   * @throws IOException 
1124   */
1125  public static URI getInfoServer(InetSocketAddress namenodeAddr,
1126      Configuration conf, String scheme) throws IOException {
1127    String[] suffixes = null;
1128    if (namenodeAddr != null) {
1129      // if non-default namenode, try reverse look up 
1130      // the nameServiceID if it is available
1131      suffixes = getSuffixIDs(conf, namenodeAddr,
1132          DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
1133          DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
1134    }
1135
1136    String authority;
1137    if ("http".equals(scheme)) {
1138      authority = getSuffixedConf(conf, DFS_NAMENODE_HTTP_ADDRESS_KEY,
1139          DFS_NAMENODE_HTTP_ADDRESS_DEFAULT, suffixes);
1140    } else if ("https".equals(scheme)) {
1141      authority = getSuffixedConf(conf, DFS_NAMENODE_HTTPS_ADDRESS_KEY,
1142          DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT, suffixes);
1143    } else {
1144      throw new IllegalArgumentException("Invalid scheme:" + scheme);
1145    }
1146
1147    if (namenodeAddr != null) {
1148      authority = substituteForWildcardAddress(authority,
1149          namenodeAddr.getHostName());
1150    }
1151    return URI.create(scheme + "://" + authority);
1152  }
1153
1154  /**
1155   * Lookup the HTTP / HTTPS address of the namenode, and replace its hostname
1156   * with defaultHost when it found out that the address is a wildcard / local
1157   * address.
1158   *
1159   * @param defaultHost
1160   *          The default host name of the namenode.
1161   * @param conf
1162   *          The configuration
1163   * @param scheme
1164   *          HTTP or HTTPS
1165   * @throws IOException
1166   */
1167  public static URI getInfoServerWithDefaultHost(String defaultHost,
1168      Configuration conf, final String scheme) throws IOException {
1169    URI configuredAddr = getInfoServer(null, conf, scheme);
1170    String authority = substituteForWildcardAddress(
1171        configuredAddr.getAuthority(), defaultHost);
1172    return URI.create(scheme + "://" + authority);
1173  }
1174
1175  /**
1176   * Determine whether HTTP or HTTPS should be used to connect to the remote
1177   * server. Currently the client only connects to the server via HTTPS if the
1178   * policy is set to HTTPS_ONLY.
1179   *
1180   * @return the scheme (HTTP / HTTPS)
1181   */
1182  public static String getHttpClientScheme(Configuration conf) {
1183    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
1184    return policy == HttpConfig.Policy.HTTPS_ONLY ? "https" : "http";
1185  }
1186
1187  /**
1188   * Substitute a default host in the case that an address has been configured
1189   * with a wildcard. This is used, for example, when determining the HTTP
1190   * address of the NN -- if it's configured to bind to 0.0.0.0, we want to
1191   * substitute the hostname from the filesystem URI rather than trying to
1192   * connect to 0.0.0.0.
1193   * @param configuredAddress the address found in the configuration
1194   * @param defaultHost the host to substitute with, if configuredAddress
1195   * is a local/wildcard address.
1196   * @return the substituted address
1197   * @throws IOException if it is a wildcard address and security is enabled
1198   */
1199  @VisibleForTesting
1200  static String substituteForWildcardAddress(String configuredAddress,
1201    String defaultHost) throws IOException {
1202    InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
1203    InetSocketAddress defaultSockAddr = NetUtils.createSocketAddr(defaultHost
1204        + ":0");
1205    final InetAddress addr = sockAddr.getAddress();
1206    if (addr != null && addr.isAnyLocalAddress()) {
1207      if (UserGroupInformation.isSecurityEnabled() &&
1208          defaultSockAddr.getAddress().isAnyLocalAddress()) {
1209        throw new IOException("Cannot use a wildcard address with security. " +
1210            "Must explicitly set bind address for Kerberos");
1211      }
1212      return defaultHost + ":" + sockAddr.getPort();
1213    } else {
1214      return configuredAddress;
1215    }
1216  }
1217  
1218  private static String getSuffixedConf(Configuration conf,
1219      String key, String defaultVal, String[] suffixes) {
1220    String ret = conf.get(DFSUtil.addKeySuffixes(key, suffixes));
1221    if (ret != null) {
1222      return ret;
1223    }
1224    return conf.get(key, defaultVal);
1225  }
1226  
1227  /**
1228   * Sets the node specific setting into generic configuration key. Looks up
1229   * value of "key.nameserviceId.namenodeId" and if found sets that value into 
1230   * generic key in the conf. If this is not found, falls back to
1231   * "key.nameserviceId" and then the unmodified key.
1232   *
1233   * Note that this only modifies the runtime conf.
1234   * 
1235   * @param conf
1236   *          Configuration object to lookup specific key and to set the value
1237   *          to the key passed. Note the conf object is modified.
1238   * @param nameserviceId
1239   *          nameservice Id to construct the node specific key. Pass null if
1240   *          federation is not configuration.
1241   * @param nnId
1242   *          namenode Id to construct the node specific key. Pass null if
1243   *          HA is not configured.
1244   * @param keys
1245   *          The key for which node specific value is looked up
1246   */
1247  public static void setGenericConf(Configuration conf,
1248      String nameserviceId, String nnId, String... keys) {
1249    for (String key : keys) {
1250      String value = conf.get(addKeySuffixes(key, nameserviceId, nnId));
1251      if (value != null) {
1252        conf.set(key, value);
1253        continue;
1254      }
1255      value = conf.get(addKeySuffixes(key, nameserviceId));
1256      if (value != null) {
1257        conf.set(key, value);
1258      }
1259    }
1260  }
1261  
1262  /** Return used as percentage of capacity */
1263  public static float getPercentUsed(long used, long capacity) {
1264    return capacity <= 0 ? 100 : (used * 100.0f)/capacity; 
1265  }
1266  
1267  /** Return remaining as percentage of capacity */
1268  public static float getPercentRemaining(long remaining, long capacity) {
1269    return capacity <= 0 ? 0 : (remaining * 100.0f)/capacity; 
1270  }
1271
1272  /** Convert percentage to a string. */
1273  public static String percent2String(double percentage) {
1274    return StringUtils.format("%.2f%%", percentage);
1275  }
1276
1277  /**
1278   * Round bytes to GiB (gibibyte)
1279   * @param bytes number of bytes
1280   * @return number of GiB
1281   */
1282  public static int roundBytesToGB(long bytes) {
1283    return Math.round((float)bytes/ 1024 / 1024 / 1024);
1284  }
1285  
1286  /** Create a {@link ClientDatanodeProtocol} proxy */
1287  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
1288      DatanodeID datanodeid, Configuration conf, int socketTimeout,
1289      boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
1290    return new ClientDatanodeProtocolTranslatorPB(datanodeid, conf, socketTimeout,
1291        connectToDnViaHostname, locatedBlock);
1292  }
1293  
1294  /** Create {@link ClientDatanodeProtocol} proxy using kerberos ticket */
1295  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
1296      DatanodeID datanodeid, Configuration conf, int socketTimeout,
1297      boolean connectToDnViaHostname) throws IOException {
1298    return new ClientDatanodeProtocolTranslatorPB(
1299        datanodeid, conf, socketTimeout, connectToDnViaHostname);
1300  }
1301  
1302  /** Create a {@link ClientDatanodeProtocol} proxy */
1303  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
1304      InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
1305      SocketFactory factory) throws IOException {
1306    return new ClientDatanodeProtocolTranslatorPB(addr, ticket, conf, factory);
1307  }
1308
1309  /**
1310   * Get nameservice Id for the {@link NameNode} based on namenode RPC address
1311   * matching the local node address.
1312   */
1313  public static String getNamenodeNameServiceId(Configuration conf) {
1314    return getNameServiceId(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
1315  }
1316  
1317  /**
1318   * Get nameservice Id for the BackupNode based on backup node RPC address
1319   * matching the local node address.
1320   */
1321  public static String getBackupNameServiceId(Configuration conf) {
1322    return getNameServiceId(conf, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
1323  }
1324  
1325  /**
1326   * Get nameservice Id for the secondary node based on secondary http address
1327   * matching the local node address.
1328   */
1329  public static String getSecondaryNameServiceId(Configuration conf) {
1330    return getNameServiceId(conf, DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
1331  }
1332  
1333  /**
1334   * Get the nameservice Id by matching the {@code addressKey} with the
1335   * the address of the local node. 
1336   * 
1337   * If {@link DFSConfigKeys#DFS_NAMESERVICE_ID} is not specifically
1338   * configured, and more than one nameservice Id is configured, this method 
1339   * determines the nameservice Id by matching the local node's address with the
1340   * configured addresses. When a match is found, it returns the nameservice Id
1341   * from the corresponding configuration key.
1342   * 
1343   * @param conf Configuration
1344   * @param addressKey configuration key to get the address.
1345   * @return nameservice Id on success, null if federation is not configured.
1346   * @throws HadoopIllegalArgumentException on error
1347   */
1348  private static String getNameServiceId(Configuration conf, String addressKey) {
1349    String nameserviceId = conf.get(DFS_NAMESERVICE_ID);
1350    if (nameserviceId != null) {
1351      return nameserviceId;
1352    }
1353    Collection<String> nsIds = getNameServiceIds(conf);
1354    if (1 == nsIds.size()) {
1355      return nsIds.toArray(new String[1])[0];
1356    }
1357    String nnId = conf.get(DFS_HA_NAMENODE_ID_KEY);
1358    
1359    return getSuffixIDs(conf, addressKey, null, nnId, LOCAL_ADDRESS_MATCHER)[0];
1360  }
1361  
1362  /**
1363   * Returns nameservice Id and namenode Id when the local host matches the
1364   * configuration parameter {@code addressKey}.<nameservice Id>.<namenode Id>
1365   * 
1366   * @param conf Configuration
1367   * @param addressKey configuration key corresponding to the address.
1368   * @param knownNsId only look at configs for the given nameservice, if not-null
1369   * @param knownNNId only look at configs for the given namenode, if not null
1370   * @param matcher matching criteria for matching the address
1371   * @return Array with nameservice Id and namenode Id on success. First element
1372   *         in the array is nameservice Id and second element is namenode Id.
1373   *         Null value indicates that the configuration does not have the the
1374   *         Id.
1375   * @throws HadoopIllegalArgumentException on error
1376   */
1377  static String[] getSuffixIDs(final Configuration conf, final String addressKey,
1378      String knownNsId, String knownNNId,
1379      final AddressMatcher matcher) {
1380    String nameserviceId = null;
1381    String namenodeId = null;
1382    int found = 0;
1383    
1384    Collection<String> nsIds = getNameServiceIds(conf);
1385    for (String nsId : emptyAsSingletonNull(nsIds)) {
1386      if (knownNsId != null && !knownNsId.equals(nsId)) {
1387        continue;
1388      }
1389      
1390      Collection<String> nnIds = getNameNodeIds(conf, nsId);
1391      for (String nnId : emptyAsSingletonNull(nnIds)) {
1392        if (LOG.isTraceEnabled()) {
1393          LOG.trace(String.format("addressKey: %s nsId: %s nnId: %s",
1394              addressKey, nsId, nnId));
1395        }
1396        if (knownNNId != null && !knownNNId.equals(nnId)) {
1397          continue;
1398        }
1399        String key = addKeySuffixes(addressKey, nsId, nnId);
1400        String addr = conf.get(key);
1401        if (addr == null) {
1402          continue;
1403        }
1404        InetSocketAddress s = null;
1405        try {
1406          s = NetUtils.createSocketAddr(addr);
1407        } catch (Exception e) {
1408          LOG.warn("Exception in creating socket address " + addr, e);
1409          continue;
1410        }
1411        if (!s.isUnresolved() && matcher.match(s)) {
1412          nameserviceId = nsId;
1413          namenodeId = nnId;
1414          found++;
1415        }
1416      }
1417    }
1418    if (found > 1) { // Only one address must match the local address
1419      String msg = "Configuration has multiple addresses that match "
1420          + "local node's address. Please configure the system with "
1421          + DFS_NAMESERVICE_ID + " and "
1422          + DFS_HA_NAMENODE_ID_KEY;
1423      throw new HadoopIllegalArgumentException(msg);
1424    }
1425    return new String[] { nameserviceId, namenodeId };
1426  }
1427  
1428  /**
1429   * For given set of {@code keys} adds nameservice Id and or namenode Id
1430   * and returns {nameserviceId, namenodeId} when address match is found.
1431   * @see #getSuffixIDs(Configuration, String, String, String, AddressMatcher)
1432   */
1433  static String[] getSuffixIDs(final Configuration conf,
1434      final InetSocketAddress address, final String... keys) {
1435    AddressMatcher matcher = new AddressMatcher() {
1436     @Override
1437      public boolean match(InetSocketAddress s) {
1438        return address.equals(s);
1439      } 
1440    };
1441    
1442    for (String key : keys) {
1443      String[] ids = getSuffixIDs(conf, key, null, null, matcher);
1444      if (ids != null && (ids [0] != null || ids[1] != null)) {
1445        return ids;
1446      }
1447    }
1448    return null;
1449  }
1450  
1451  private interface AddressMatcher {
1452    public boolean match(InetSocketAddress s);
1453  }
1454
1455  /** Create a URI from the scheme and address */
1456  public static URI createUri(String scheme, InetSocketAddress address) {
1457    try {
1458      return new URI(scheme, null, address.getHostName(), address.getPort(),
1459          null, null, null);
1460    } catch (URISyntaxException ue) {
1461      throw new IllegalArgumentException(ue);
1462    }
1463  }
1464  
1465  /**
1466   * Add protobuf based protocol to the {@link org.apache.hadoop.ipc.RPC.Server}
1467   * @param conf configuration
1468   * @param protocol Protocol interface
1469   * @param service service that implements the protocol
1470   * @param server RPC server to which the protocol & implementation is added to
1471   * @throws IOException
1472   */
1473  public static void addPBProtocol(Configuration conf, Class<?> protocol,
1474      BlockingService service, RPC.Server server) throws IOException {
1475    RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine.class);
1476    server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocol, service);
1477  }
1478
1479  /**
1480   * Map a logical namenode ID to its service address. Use the given
1481   * nameservice if specified, or the configured one if none is given.
1482   *
1483   * @param conf Configuration
1484   * @param nsId which nameservice nnId is a part of, optional
1485   * @param nnId the namenode ID to get the service addr for
1486   * @return the service addr, null if it could not be determined
1487   */
1488  public static String getNamenodeServiceAddr(final Configuration conf,
1489      String nsId, String nnId) {
1490
1491    if (nsId == null) {
1492      nsId = getOnlyNameServiceIdOrNull(conf);
1493    }
1494
1495    String serviceAddrKey = concatSuffixes(
1496        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nsId, nnId);
1497
1498    String addrKey = concatSuffixes(
1499        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nsId, nnId);
1500
1501    String serviceRpcAddr = conf.get(serviceAddrKey);
1502    if (serviceRpcAddr == null) {
1503      serviceRpcAddr = conf.get(addrKey);
1504    }
1505    return serviceRpcAddr;
1506  }
1507
1508  /**
1509   * If the configuration refers to only a single nameservice, return the
1510   * name of that nameservice. If it refers to 0 or more than 1, return null.
1511   */
1512  public static String getOnlyNameServiceIdOrNull(Configuration conf) {
1513    Collection<String> nsIds = getNameServiceIds(conf);
1514    if (1 == nsIds.size()) {
1515      return nsIds.toArray(new String[1])[0];
1516    } else {
1517      // No nameservice ID was given and more than one is configured
1518      return null;
1519    }
1520  }
1521  
  // Shared commons-cli option set used by parseHelpArgument below.
  public static final Options helpOptions = new Options();
  // The "-h" / "--help" flag recognized by all HDFS command-line tools.
  public static final Option helpOpt = new Option("h", "help", false,
      "get help information");

  static {
    // Register the help flag once at class load.
    helpOptions.addOption(helpOpt);
  }
1529
1530  /**
1531   * Parse the arguments for commands
1532   * 
1533   * @param args the argument to be parsed
1534   * @param helpDescription help information to be printed out
1535   * @param out Printer
1536   * @param printGenericCommandUsage whether to print the 
1537   *              generic command usage defined in ToolRunner
1538   * @return true when the argument matches help option, false if not
1539   */
1540  public static boolean parseHelpArgument(String[] args,
1541      String helpDescription, PrintStream out, boolean printGenericCommandUsage) {
1542    if (args.length == 1) {
1543      try {
1544        CommandLineParser parser = new PosixParser();
1545        CommandLine cmdLine = parser.parse(helpOptions, args);
1546        if (cmdLine.hasOption(helpOpt.getOpt())
1547            || cmdLine.hasOption(helpOpt.getLongOpt())) {
1548          // should print out the help information
1549          out.println(helpDescription + "\n");
1550          if (printGenericCommandUsage) {
1551            ToolRunner.printGenericCommandUsage(out);
1552          }
1553          return true;
1554        }
1555      } catch (ParseException pe) {
1556        return false;
1557      }
1558    }
1559    return false;
1560  }
1561  
1562  /**
1563   * Get DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION from configuration.
1564   * 
1565   * @param conf Configuration
1566   * @return Value of DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION
1567   */
1568  public static float getInvalidateWorkPctPerIteration(Configuration conf) {
1569    float blocksInvalidateWorkPct = conf.getFloat(
1570        DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
1571        DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION_DEFAULT);
1572    Preconditions.checkArgument(
1573        (blocksInvalidateWorkPct > 0 && blocksInvalidateWorkPct <= 1.0f),
1574        DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION +
1575        " = '" + blocksInvalidateWorkPct + "' is invalid. " +
1576        "It should be a positive, non-zero float value, not greater than 1.0f, " +
1577        "to indicate a percentage.");
1578    return blocksInvalidateWorkPct;
1579  }
1580
1581  /**
1582   * Get DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION from
1583   * configuration.
1584   * 
1585   * @param conf Configuration
1586   * @return Value of DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION
1587   */
1588  public static int getReplWorkMultiplier(Configuration conf) {
1589    int blocksReplWorkMultiplier = conf.getInt(
1590            DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
1591            DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION_DEFAULT);
1592    Preconditions.checkArgument(
1593        (blocksReplWorkMultiplier > 0),
1594        DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION +
1595        " = '" + blocksReplWorkMultiplier + "' is invalid. " +
1596        "It should be a positive, non-zero integer value.");
1597    return blocksReplWorkMultiplier;
1598  }
1599  
1600  /**
1601   * Get SPNEGO keytab Key from configuration
1602   * 
1603   * @param conf Configuration
1604   * @param defaultKey default key to be used for config lookup
1605   * @return DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY if the key is not empty
1606   *         else return defaultKey
1607   */
1608  public static String getSpnegoKeytabKey(Configuration conf, String defaultKey) {
1609    String value = 
1610        conf.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
1611    return (value == null || value.isEmpty()) ?
1612        defaultKey : DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY;
1613  }
1614
1615  /**
1616   * Get http policy. Http Policy is chosen as follows:
1617   * <ol>
1618   * <li>If hadoop.ssl.enabled is set, http endpoints are not started. Only
1619   * https endpoints are started on configured https ports</li>
1620   * <li>This configuration is overridden by dfs.https.enable configuration, if
1621   * it is set to true. In that case, both http and https endpoints are stared.</li>
1622   * <li>All the above configurations are overridden by dfs.http.policy
1623   * configuration. With this configuration you can set http-only, https-only
1624   * and http-and-https endpoints.</li>
1625   * </ol>
1626   * See hdfs-default.xml documentation for more details on each of the above
1627   * configuration settings.
1628   */
1629  public static HttpConfig.Policy getHttpPolicy(Configuration conf) {
1630    String policyStr = conf.get(DFSConfigKeys.DFS_HTTP_POLICY_KEY);
1631    if (policyStr == null) {
1632      boolean https = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY,
1633          DFSConfigKeys.DFS_HTTPS_ENABLE_DEFAULT);
1634
1635      boolean hadoopSsl = conf.getBoolean(
1636          CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY,
1637          CommonConfigurationKeys.HADOOP_SSL_ENABLED_DEFAULT);
1638
1639      if (hadoopSsl) {
1640        LOG.warn(CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY
1641            + " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
1642            + ".");
1643      }
1644      if (https) {
1645        LOG.warn(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY
1646            + " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
1647            + ".");
1648      }
1649
1650      return (hadoopSsl || https) ? HttpConfig.Policy.HTTP_AND_HTTPS
1651          : HttpConfig.Policy.HTTP_ONLY;
1652    }
1653
1654    HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr);
1655    if (policy == null) {
1656      throw new HadoopIllegalArgumentException("Unregonized value '"
1657          + policyStr + "' for " + DFSConfigKeys.DFS_HTTP_POLICY_KEY);
1658    }
1659
1660    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
1661    return policy;
1662  }
1663
1664  public static HttpServer2.Builder loadSslConfToHttpServerBuilder(HttpServer2.Builder builder,
1665      Configuration sslConf) {
1666    return builder
1667        .needsClientAuth(
1668            sslConf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
1669                DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT))
1670        .keyPassword(getPassword(sslConf, DFS_SERVER_HTTPS_KEYPASSWORD_KEY))
1671        .keyStore(sslConf.get("ssl.server.keystore.location"),
1672            getPassword(sslConf, DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY),
1673            sslConf.get("ssl.server.keystore.type", "jks"))
1674        .trustStore(sslConf.get("ssl.server.truststore.location"),
1675            getPassword(sslConf, DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY),
1676            sslConf.get("ssl.server.truststore.type", "jks"));
1677  }
1678
1679  /**
1680   * Load HTTPS-related configuration.
1681   */
1682  public static Configuration loadSslConfiguration(Configuration conf) {
1683    Configuration sslConf = new Configuration(false);
1684
1685    sslConf.addResource(conf.get(
1686        DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
1687        DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
1688
1689    boolean requireClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
1690        DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
1691    sslConf.setBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, requireClientAuth);
1692    return sslConf;
1693  }
1694
1695  /**
1696   * Return a HttpServer.Builder that the journalnode / namenode / secondary
1697   * namenode can use to initialize their HTTP / HTTPS server.
1698   *
1699   */
1700  public static HttpServer2.Builder httpServerTemplateForNNAndJN(
1701      Configuration conf, final InetSocketAddress httpAddr,
1702      final InetSocketAddress httpsAddr, String name, String spnegoUserNameKey,
1703      String spnegoKeytabFileKey) throws IOException {
1704    HttpConfig.Policy policy = getHttpPolicy(conf);
1705
1706    HttpServer2.Builder builder = new HttpServer2.Builder().setName(name)
1707        .setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
1708        .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
1709        .setUsernameConfKey(spnegoUserNameKey)
1710        .setKeytabConfKey(getSpnegoKeytabKey(conf, spnegoKeytabFileKey));
1711
1712    // initialize the webserver for uploading/downloading files.
1713    if (UserGroupInformation.isSecurityEnabled()) {
1714      LOG.info("Starting web server as: "
1715          + SecurityUtil.getServerPrincipal(conf.get(spnegoUserNameKey),
1716              httpAddr.getHostName()));
1717    }
1718
1719    if (policy.isHttpEnabled()) {
1720      if (httpAddr.getPort() == 0) {
1721        builder.setFindPort(true);
1722      }
1723
1724      URI uri = URI.create("http://" + NetUtils.getHostPortString(httpAddr));
1725      builder.addEndpoint(uri);
1726      LOG.info("Starting Web-server for " + name + " at: " + uri);
1727    }
1728
1729    if (policy.isHttpsEnabled() && httpsAddr != null) {
1730      Configuration sslConf = loadSslConfiguration(conf);
1731      loadSslConfToHttpServerBuilder(builder, sslConf);
1732
1733      if (httpsAddr.getPort() == 0) {
1734        builder.setFindPort(true);
1735      }
1736
1737      URI uri = URI.create("https://" + NetUtils.getHostPortString(httpsAddr));
1738      builder.addEndpoint(uri);
1739      LOG.info("Starting Web-server for " + name + " at: " + uri);
1740    }
1741    return builder;
1742  }
1743
1744  /**
1745   * Leverages the Configuration.getPassword method to attempt to get
1746   * passwords from the CredentialProvider API before falling back to
1747   * clear text in config - if falling back is allowed.
1748   * @param conf Configuration instance
1749   * @param alias name of the credential to retreive
1750   * @return String credential value or null
1751   */
1752  static String getPassword(Configuration conf, String alias) {
1753    String password = null;
1754    try {
1755      char[] passchars = conf.getPassword(alias);
1756      if (passchars != null) {
1757        password = new String(passchars);
1758      }
1759    }
1760    catch (IOException ioe) {
1761      password = null;
1762    }
1763    return password;
1764  }
1765
1766  /**
1767   * Converts a Date into an ISO-8601 formatted datetime string.
1768   */
1769  public static String dateToIso8601String(Date date) {
1770    SimpleDateFormat df =
1771        new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ", Locale.ENGLISH);
1772    return df.format(date);
1773  }
1774
1775  /**
1776   * Converts a time duration in milliseconds into DDD:HH:MM:SS format.
1777   */
1778  public static String durationToString(long durationMs) {
1779    boolean negative = false;
1780    if (durationMs < 0) {
1781      negative = true;
1782      durationMs = -durationMs;
1783    }
1784    // Chop off the milliseconds
1785    long durationSec = durationMs / 1000;
1786    final int secondsPerMinute = 60;
1787    final int secondsPerHour = 60*60;
1788    final int secondsPerDay = 60*60*24;
1789    final long days = durationSec / secondsPerDay;
1790    durationSec -= days * secondsPerDay;
1791    final long hours = durationSec / secondsPerHour;
1792    durationSec -= hours * secondsPerHour;
1793    final long minutes = durationSec / secondsPerMinute;
1794    durationSec -= minutes * secondsPerMinute;
1795    final long seconds = durationSec;
1796    final long milliseconds = durationMs % 1000;
1797    String format = "%03d:%02d:%02d:%02d.%03d";
1798    if (negative)  {
1799      format = "-" + format;
1800    }
1801    return String.format(format, days, hours, minutes, seconds, milliseconds);
1802  }
1803
1804  /**
1805   * Converts a relative time string into a duration in milliseconds.
1806   */
1807  public static long parseRelativeTime(String relTime) throws IOException {
1808    if (relTime.length() < 2) {
1809      throw new IOException("Unable to parse relative time value of " + relTime
1810          + ": too short");
1811    }
1812    String ttlString = relTime.substring(0, relTime.length()-1);
1813    long ttl;
1814    try {
1815      ttl = Long.parseLong(ttlString);
1816    } catch (NumberFormatException e) {
1817      throw new IOException("Unable to parse relative time value of " + relTime
1818          + ": " + ttlString + " is not a number");
1819    }
1820    if (relTime.endsWith("s")) {
1821      // pass
1822    } else if (relTime.endsWith("m")) {
1823      ttl *= 60;
1824    } else if (relTime.endsWith("h")) {
1825      ttl *= 60*60;
1826    } else if (relTime.endsWith("d")) {
1827      ttl *= 60*60*24;
1828    } else {
1829      throw new IOException("Unable to parse relative time value of " + relTime
1830          + ": unknown time unit " + relTime.charAt(relTime.length() - 1));
1831    }
1832    return ttl*1000;
1833  }
1834
1835  /**
1836   * Assert that all objects in the collection are equal. Returns silently if
1837   * so, throws an AssertionError if any object is not equal. All null values
1838   * are considered equal.
1839   * 
1840   * @param objects the collection of objects to check for equality.
1841   */
1842  public static void assertAllResultsEqual(Collection<?> objects)
1843      throws AssertionError {
1844    if (objects.size() == 0 || objects.size() == 1)
1845      return;
1846    
1847    Object[] resultsArray = objects.toArray();
1848    for (int i = 1; i < resultsArray.length; i++) {
1849      Object currElement = resultsArray[i];
1850      Object lastElement = resultsArray[i - 1];
1851      if ((currElement == null && currElement != lastElement) ||
1852          (currElement != null && !currElement.equals(lastElement))) {
1853        throw new AssertionError("Not all elements match in results: " +
1854          Arrays.toString(resultsArray));
1855      }
1856    }
1857  }
1858
1859  /**
1860   * Creates a new KeyProvider from the given Configuration.
1861   *
1862   * @param conf Configuration
1863   * @return new KeyProvider, or null if no provider was found.
1864   * @throws IOException if the KeyProvider is improperly specified in
1865   *                             the Configuration
1866   */
1867  public static KeyProvider createKeyProvider(
1868      final Configuration conf) throws IOException {
1869    final String providerUriStr =
1870        conf.get(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, null);
1871    // No provider set in conf
1872    if (providerUriStr == null) {
1873      return null;
1874    }
1875    final URI providerUri;
1876    try {
1877      providerUri = new URI(providerUriStr);
1878    } catch (URISyntaxException e) {
1879      throw new IOException(e);
1880    }
1881    KeyProvider keyProvider = KeyProviderFactory.get(providerUri, conf);
1882    if (keyProvider == null) {
1883      throw new IOException("Could not instantiate KeyProvider from " + 
1884          DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI + " setting of '" + 
1885          providerUriStr +"'");
1886    }
1887    if (keyProvider.isTransient()) {
1888      throw new IOException("KeyProvider " + keyProvider.toString()
1889          + " was found but it is a transient provider.");
1890    }
1891    return keyProvider;
1892  }
1893
1894  /**
1895   * Creates a new KeyProviderCryptoExtension by wrapping the
1896   * KeyProvider specified in the given Configuration.
1897   *
1898   * @param conf Configuration
1899   * @return new KeyProviderCryptoExtension, or null if no provider was found.
1900   * @throws IOException if the KeyProvider is improperly specified in
1901   *                             the Configuration
1902   */
1903  public static KeyProviderCryptoExtension createKeyProviderCryptoExtension(
1904      final Configuration conf) throws IOException {
1905    KeyProvider keyProvider = createKeyProvider(conf);
1906    if (keyProvider == null) {
1907      return null;
1908    }
1909    KeyProviderCryptoExtension cryptoProvider = KeyProviderCryptoExtension
1910        .createKeyProviderCryptoExtension(keyProvider);
1911    return cryptoProvider;
1912  }
1913}