Snap for 7924970 from 0f54b3300ea7902266ae501b604e0d916a6d8f0c to androidx-preference-release

Change-Id: I7da3be49f324ca0e49102d24129a326bb89e6b41
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
index 6871e1b..81f505d 100644
--- a/PREUPLOAD.cfg
+++ b/PREUPLOAD.cfg
@@ -1,17 +1,14 @@
 [Hook Scripts]
 # Only list fast unittests here.
-py2_config_unittest = python2 ./rh/config_unittest.py
-py3_config_unittest = python3 ./rh/config_unittest.py
-py2_hooks_unittest  = python2 ./rh/hooks_unittest.py
-py3_hooks_unittest  = python3 ./rh/hooks_unittest.py
-py2_shell_unittest  = python2 ./rh/shell_unittest.py
-py3_shell_unittest  = python3 ./rh/shell_unittest.py
-py2_android_test_mapping_format_unittest = python2 ./tools/android_test_mapping_format_unittest.py
-py3_android_test_mapping_format_unittest = python3 ./tools/android_test_mapping_format_unittest.py
+config_unittest = ./rh/config_unittest.py
+hooks_unittest  = ./rh/hooks_unittest.py
+shell_unittest  = ./rh/shell_unittest.py
+utils_unittest  = ./rh/utils_unittest.py
+android_test_mapping_format_unittest = ./tools/android_test_mapping_format_unittest.py
+config_test = ./rh/config_test.py --check-env --commit-id ${PREUPLOAD_COMMIT} --commit-msg ${PREUPLOAD_COMMIT_MESSAGE} --repo-root ${REPO_ROOT} -- ${PREUPLOAD_FILES}
 
 [Builtin Hooks]
 commit_msg_bug_field = true
 commit_msg_changeid_field = true
 commit_msg_test_field = true
-pylint2 = true
 pylint3 = true
diff --git a/README.md b/README.md
index 20e4d5a..1954d0a 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,4 @@
-# AOSP Presubmit Hooks
-
-[TOC]
+# AOSP Preupload Hooks
 
 This repo holds hooks that get run by repo during the upload phase.  They
 perform various checks automatically such as running linters on your code.
@@ -8,6 +6,8 @@
 Note: Currently all hooks are disabled by default.  Each repo must explicitly
 turn on any hook it wishes to enforce.
 
+[TOC]
+
 ## Usage
 
 Normally these execute automatically when you run `repo upload`.  If you want to
@@ -49,12 +49,15 @@
 
 ## PREUPLOAD.cfg
 
-This file are checked in the top of a specific git repository.  Stacking them
+This file is checked into the top of a specific git repository.  Stacking them
 in subdirectories (to try and override parent settings) is not supported.
 
 ## Example
 
 ```
+# Per-project `repo upload` hook settings.
+# https://android.googlesource.com/platform/tools/repohooks
+
 [Options]
 ignore_merged_commits = true
 
@@ -100,6 +103,10 @@
 force your own quote handling.
 
 * `${PREUPLOAD_FILES}`: List of files to operate on.
+* `${PREUPLOAD_FILES_PREFIXED}`: A list of files to operate on.
+   Any string preceding (attached to) the keyword ${PREUPLOAD_FILES_PREFIXED}
+   is repeated for each file automatically. If no string is attached to
+   the keyword, the previous argument is repeated before each file.
 * `${PREUPLOAD_COMMIT}`: Commit hash.
 * `${PREUPLOAD_COMMIT_MESSAGE}`: Commit message.
 
@@ -110,6 +117,22 @@
 * `${BUILD_OS}`: The string `darwin-x86` for macOS and the string `linux-x86`
   for Linux/x86.
 
+### Examples
+
+Here are some examples of using the placeholders.
+Consider this sample config file.
+```
+[Hook Scripts]
+lister = ls ${PREUPLOAD_FILES}
+checker prefix = check --file=${PREUPLOAD_FILES_PREFIXED}
+checker flag = check --file ${PREUPLOAD_FILES_PREFIXED}
+```
+With a commit that changes `path1/file1` and `path2/file2`, then this will run
+programs with the arguments:
+* ['ls', 'path1/file1', 'path2/file2']
+* ['check', '--file=path1/file1', '--file=path2/file2']
+* ['check', '--file', 'path1/file1', '--file', 'path2/file2']
+
 ## [Options]
 
 This section allows for setting options that affect the overall behavior of the
@@ -126,13 +149,20 @@
 that is executed. The key is used as the name of the hook for reporting purposes,
 so it should be at least somewhat descriptive.
 
+Whitespace in the key name is OK!
+
+The keys must be unique, as duplicates will silently clobber earlier values.
+
+You do not need to send stderr to stdout.  The tooling will take care of
+merging them together for you automatically.
+
 ```
 [Hook Scripts]
-my_first_hook = program --gogog ${PREUPLOAD_FILES}
-another_hook = funtimes --i-need "some space" ${PREUPLOAD_FILES}
-some_fish = linter --ate-a-cat ${PREUPLOAD_FILES}
-some_cat = formatter --cat-commit ${PREUPLOAD_COMMIT}
-some_dog = tool --no-cat-in-commit-message ${PREUPLOAD_COMMIT_MESSAGE}
+my first hook = program --gogog ${PREUPLOAD_FILES}
+another hook = funtimes --i-need "some space" ${PREUPLOAD_FILES}
+some fish = linter --ate-a-cat ${PREUPLOAD_FILES}
+some cat = formatter --cat-commit ${PREUPLOAD_COMMIT}
+some dog = tool --no-cat-in-commit-message ${PREUPLOAD_COMMIT_MESSAGE}
 ```
 
 ## [Builtin Hooks]
@@ -140,6 +170,9 @@
 This section allows for turning on common/builtin hooks.  There are a bunch of
 canned hooks already included geared towards AOSP style guidelines.
 
+* `aidl_format`: Run AIDL files (.aidl) through `aidl-format`.
+* `android_test_mapping_format`: Validate TEST_MAPPING files in Android source
+  code. Refer to go/test-mapping for more details.
 * `bpfmt`: Run Blueprint files (.bp) through `bpfmt`.
 * `checkpatch`: Run commits through the Linux kernel's `checkpatch.pl` script.
 * `clang_format`: Run git-clang-format against the commit. The default style is
@@ -148,6 +181,12 @@
 * `commit_msg_changeid_field`: Require a valid `Change-Id:` Gerrit line.
 * `commit_msg_prebuilt_apk_fields`: Require badging and build information for
   prebuilt APKs.
+* `commit_msg_relnote_field_format`: Check for possible misspellings of the
+  `Relnote:` field and that multiline release notes are properly formatted with
+  quotes.
+* `commit_msg_relnote_for_current_txt`: Check that CLs with changes to
+  current.txt or public_plus_experimental_current.txt also contain a
+  `Relnote:` field in the commit message.
 * `commit_msg_test_field`: Require a `Test:` line.
 * `cpplint`: Run through the cpplint tool (for C++ code).
 * `gofmt`: Run Go code through `gofmt`.
@@ -157,9 +196,8 @@
 * `pylint`: Alias of `pylint2`.  Will change to `pylint3` by end of 2019.
 * `pylint2`: Run Python code through `pylint` using Python 2.
 * `pylint3`: Run Python code through `pylint` using Python 3.
+* `rustfmt`: Run Rust code through `rustfmt`.
 * `xmllint`: Run XML code through `xmllint`.
-* `android_test_mapping_format`: Validate TEST_MAPPING files in Android source
-  code. Refer to go/test-mapping for more details.
 
 Note: Builtin hooks tend to match specific filenames (e.g. `.json`).  If no
 files match in a specific commit, then the hook will be skipped for that commit.
@@ -190,6 +228,34 @@
 cpplint = --filter=-x ${PREUPLOAD_FILES}
 ```
 
+## [Builtin Hooks Exclude Paths]
+
+*** note
+This section can only be added to the repo project-wide settings
+[GLOBAL-PREUPLOAD.cfg].
+***
+
+Used to explicitly exclude some projects when processing a hook. With this
+section, it is possible to define a hook that should apply to the majority of
+projects except a few.
+
+An entry must completely match the project's `REPO_PATH`. The paths can use the
+[shell-style wildcards](https://docs.python.org/library/fnmatch.html) and
+quotes. For advanced cases, it is possible to use a [regular
+expression](https://docs.python.org/howto/regex.html) by using the `^` prefix.
+
+```
+[Builtin Hooks Exclude Paths]
+# Run cpplint on all projects except ones under external/ and vendor/.
+# The "external" and "vendor" projects, if they exist, will still run cpplint.
+cpplint = external/* vendor/*
+
+# Run rustfmt on all projects except ones under external/.  All projects under
+# hardware/ will be excluded except for ones starting with hardware/google (due to
+# the negative regex match).
+rustfmt = external/ ^hardware/(?!google)
+```
+
 ## [Tool Paths]
 
 Some builtin hooks need to call external executables to work correctly.  By
@@ -198,6 +264,9 @@
 provide consistent behavior for developers across different OS and Linux
 distros/versions.  The following tools are recognized:
 
+* `aidl-format`: used for the `aidl_format` builtin hook.
+* `android-test-mapping-format`: used for the `android_test_mapping_format`
+  builtin hook.
 * `bpfmt`: used for the `bpfmt` builtin hook.
 * `clang-format`: used for the `clang_format` builtin hook.
 * `cpplint`: used for the `cpplint` builtin hook.
@@ -206,8 +275,7 @@
 * `google-java-format`: used for the `google_java_format` builtin hook.
 * `google-java-format-diff`: used for the `google_java_format` builtin hook.
 * `pylint`: used for the `pylint` builtin hook.
-* `android-test-mapping-format`: used for the `android_test_mapping_format`
-  builtin hook.
+* `rustfmt`: used for the `rustfmt` builtin hook.
 
 See [Placeholders](#Placeholders) for variables you can expand automatically.
 
@@ -230,13 +298,13 @@
 * New hooks can be added in `rh/hooks.py`.  Be sure to keep the list up-to-date
   with the documentation in this file.
 
-### Warnings
+## Warnings
 
 If the return code of a hook is 77, then it is assumed to be a warning.  The
 output will be printed to the terminal, but uploading will still be allowed
 without a bypass being required.
 
-## TODO/Limitations
+# TODO/Limitations
 
 * `pylint` should support per-directory pylintrc files.
 * Some checkers operate on the files as they exist in the filesystem.  This is
diff --git a/pre-upload.py b/pre-upload.py
index 081b814..7eb11b8 100755
--- a/pre-upload.py
+++ b/pre-upload.py
@@ -1,5 +1,4 @@
-#!/usr/bin/python
-# -*- coding:utf-8 -*-
+#!/usr/bin/env python3
 # Copyright 2016 The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -20,18 +19,18 @@
 when developing.
 """
 
-from __future__ import print_function
-
 import argparse
+import datetime
 import os
 import sys
 
-try:
-    __file__
-except NameError:
-    # Work around repo until it gets fixed.
-    # https://gerrit-review.googlesource.com/75481
-    __file__ = os.path.join(os.getcwd(), 'pre-upload.py')
+
+# Assert some minimum Python versions as we don't test or support any others.
+if sys.version_info < (3, 6):
+    print('repohooks: error: Python-3.6+ is required', file=sys.stderr)
+    sys.exit(1)
+
+
 _path = os.path.dirname(os.path.realpath(__file__))
 if sys.path[0] != _path:
     sys.path.insert(0, _path)
@@ -63,6 +62,9 @@
     FAILED = COLOR.color(COLOR.RED, 'FAILED')
     WARNING = COLOR.color(COLOR.YELLOW, 'WARNING')
 
+    # How long a hook is allowed to run before we warn that it is "too slow".
+    _SLOW_HOOK_DURATION = datetime.timedelta(seconds=30)
+
     def __init__(self, project_name):
         """Create a new Output object for a specified project.
 
@@ -73,6 +75,9 @@
         self.num_hooks = None
         self.hook_index = 0
         self.success = True
+        self.start_time = datetime.datetime.now()
+        self.hook_start_time = None
+        self._curr_hook_name = None
 
     def set_num_hooks(self, num_hooks):
         """Keep track of how many hooks we'll be running.
@@ -99,28 +104,38 @@
         Args:
           hook_name: name of the hook.
         """
+        self._curr_hook_name = hook_name
+        self.hook_start_time = datetime.datetime.now()
         status_line = '[%s %d/%d] %s' % (self.RUNNING, self.hook_index,
                                          self.num_hooks, hook_name)
         self.hook_index += 1
         rh.terminal.print_status_line(status_line)
 
-    def hook_error(self, hook_name, error):
+    def hook_finish(self):
+        """Finish processing any per-hook state."""
+        duration = datetime.datetime.now() - self.hook_start_time
+        if duration >= self._SLOW_HOOK_DURATION:
+            self.hook_warning(
+                'This hook took %s to finish which is fairly slow for '
+                'developers.\nPlease consider moving the check to the '
+                'server/CI system instead.' %
+                (rh.utils.timedelta_str(duration),))
+
+    def hook_error(self, error):
         """Print an error for a single hook.
 
         Args:
-          hook_name: name of the hook.
           error: error string.
         """
-        self.error(hook_name, error)
+        self.error(self._curr_hook_name, error)
 
-    def hook_warning(self, hook_name, warning):
+    def hook_warning(self, warning):
         """Print a warning for a single hook.
 
         Args:
-          hook_name: name of the hook.
           warning: warning string.
         """
-        status_line = '[%s] %s' % (self.WARNING, hook_name)
+        status_line = '[%s] %s' % (self.WARNING, self._curr_hook_name)
         rh.terminal.print_status_line(status_line, print_newline=True)
         print(warning, file=sys.stderr)
 
@@ -138,10 +153,11 @@
 
     def finish(self):
         """Print summary for all the hooks."""
-        status_line = '[%s] repohooks for %s %s' % (
+        status_line = '[%s] repohooks for %s %s in %s' % (
             self.PASSED if self.success else self.FAILED,
             self.project_name,
-            'passed' if self.success else 'failed')
+            'passed' if self.success else 'failed',
+            rh.utils.timedelta_str(datetime.datetime.now() - self.start_time))
         rh.terminal.print_status_line(status_line, print_newline=True)
 
 
@@ -158,6 +174,11 @@
     if not results:
         return (None, None)
 
+    # We track these as dedicated fields in case a hook doesn't output anything.
+    # We want to treat silent non-zero exits as failures too.
+    has_error = False
+    has_warning = False
+
     error_ret = ''
     warning_ret = ''
     for result in results:
@@ -168,11 +189,14 @@
             lines = result.error.splitlines()
             ret += '\n'.join('    %s' % (x,) for x in lines)
             if result.is_warning():
+                has_warning = True
                 warning_ret += ret
             else:
+                has_error = True
                 error_ret += ret
 
-    return (error_ret or None, warning_ret or None)
+    return (error_ret if has_error else None,
+            warning_ret if has_warning else None)
 
 
 def _get_project_config():
@@ -190,7 +214,7 @@
         # Load the config for this git repo.
         '.',
     )
-    return rh.config.PreSubmitConfig(paths=paths, global_paths=global_paths)
+    return rh.config.PreUploadSettings(paths=paths, global_paths=global_paths)
 
 
 def _attempt_fixes(fixup_func_list, commit_list):
@@ -255,22 +279,23 @@
     try:
         remote = rh.git.get_upstream_remote()
         upstream_branch = rh.git.get_upstream_branch()
-    except rh.utils.RunCommandError as e:
+    except rh.utils.CalledProcessError as e:
         output.error('Upstream remote/tracking branch lookup',
                      '%s\nDid you run repo start?  Is your HEAD detached?' %
                      (e,))
         return False
 
+    project = rh.Project(name=project_name, dir=proj_dir, remote=remote)
+    rel_proj_dir = os.path.relpath(proj_dir, rh.git.find_repo_root())
+
     os.environ.update({
         'REPO_LREV': rh.git.get_commit_for_ref(upstream_branch),
-        'REPO_PATH': proj_dir,
+        'REPO_PATH': rel_proj_dir,
         'REPO_PROJECT': project_name,
         'REPO_REMOTE': remote,
         'REPO_RREV': rh.git.get_remote_revision(upstream_branch, remote),
     })
 
-    project = rh.Project(name=project_name, dir=proj_dir, remote=remote)
-
     if not commit_list:
         commit_list = rh.git.get_commits(
             ignore_merged_commits=config.ignore_merged_commits)
@@ -288,16 +313,19 @@
         commit_summary = desc.split('\n', 1)[0]
         output.commit_start(commit=commit, commit_summary=commit_summary)
 
-        for name, hook in hooks:
+        for name, hook, exclusion_scope in hooks:
             output.hook_start(name)
+            if rel_proj_dir in exclusion_scope:
+                break
             hook_results = hook(project, commit, desc, diff)
+            output.hook_finish()
             (error, warning) = _process_hook_results(hook_results)
-            if error or warning:
-                if warning:
-                    output.hook_warning(name, warning)
-                if error:
+            if error is not None or warning is not None:
+                if warning is not None:
+                    output.hook_warning(warning)
+                if error is not None:
                     ret = False
-                    output.hook_error(name, error)
+                    output.hook_error(error)
                 for result in hook_results:
                     if result.fixup_func:
                         fixup_func_list.append((name, commit,
@@ -327,8 +355,8 @@
 
     if proj_dir is None:
         cmd = ['repo', 'forall', project_name, '-c', 'pwd']
-        result = rh.utils.run_command(cmd, capture_output=True)
-        proj_dirs = result.output.split()
+        result = rh.utils.run(cmd, capture_output=True)
+        proj_dirs = result.stdout.split()
         if not proj_dirs:
             print('%s cannot be found.' % project_name, file=sys.stderr)
             print('Please specify a valid project.', file=sys.stderr)
@@ -396,8 +424,7 @@
       a blank string upon failure.
     """
     cmd = ['repo', 'forall', '.', '-c', 'echo ${REPO_PROJECT}']
-    return rh.utils.run_command(cmd, capture_output=True, redirect_stderr=True,
-                                cwd=path).output.strip()
+    return rh.utils.run(cmd, capture_output=True, cwd=path).stdout.strip()
 
 
 def direct_main(argv):
@@ -429,8 +456,7 @@
     # project from CWD.
     if opts.dir is None:
         cmd = ['git', 'rev-parse', '--git-dir']
-        git_dir = rh.utils.run_command(cmd, capture_output=True,
-                                       redirect_stderr=True).output.strip()
+        git_dir = rh.utils.run(cmd, capture_output=True).stdout.strip()
         if not git_dir:
             parser.error('The current directory is not part of a git project.')
         opts.dir = os.path.dirname(os.path.abspath(git_dir))
diff --git a/rh/__init__.py b/rh/__init__.py
index c36cb89..9050fb6 100644
--- a/rh/__init__.py
+++ b/rh/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding:utf-8 -*-
 # Copyright 2016 The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,8 +14,6 @@
 
 """Common repohook objects/constants."""
 
-from __future__ import print_function
-
 import collections
 
 
diff --git a/rh/config.py b/rh/config.py
index 6dbecdf..1eb93a7 100644
--- a/rh/config.py
+++ b/rh/config.py
@@ -1,4 +1,3 @@
-# -*- coding:utf-8 -*-
 # Copyright 2016 The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,9 +14,9 @@
 
 """Manage various config files."""
 
-from __future__ import print_function
-
+import configparser
 import functools
+import itertools
 import os
 import shlex
 import sys
@@ -30,7 +29,6 @@
 # pylint: disable=wrong-import-position
 import rh.hooks
 import rh.shell
-from rh.sixish import configparser
 
 
 class Error(Exception):
@@ -41,70 +39,277 @@
     """Config file has unknown sections/keys or other values."""
 
 
+# Sentinel so we can handle None-vs-unspecified.
+_UNSET = object()
+
+
 class RawConfigParser(configparser.RawConfigParser):
     """Like RawConfigParser but with some default helpers."""
 
-    @staticmethod
-    def _check_args(name, cnt_min, cnt_max, args):
-        cnt = len(args)
-        if cnt not in (0, cnt_max - cnt_min):
-            raise TypeError('%s() takes %i or %i arguments (got %i)' %
-                            (name, cnt_min, cnt_max, cnt,))
-        return cnt
-
-    # pylint can't seem to grok our use of *args here.
+    # pylint doesn't like it when we extend the API.
     # pylint: disable=arguments-differ
 
-    def options(self, section, *args):
-        """Return the options in |section| (with default |args|).
+    def options(self, section, default=_UNSET):
+        """Return the options in |section|.
 
         Args:
           section: The section to look up.
-          args: What to return if |section| does not exist.
+          default: What to return if |section| does not exist.
         """
-        cnt = self._check_args('options', 2, 3, args)
         try:
             return configparser.RawConfigParser.options(self, section)
         except configparser.NoSectionError:
-            if cnt == 1:
-                return args[0]
+            if default is not _UNSET:
+                return default
             raise
 
-    def get(self, section, option, *args):
-        """Return the value for |option| in |section| (with default |args|)."""
-        cnt = self._check_args('get', 3, 4, args)
-        try:
-            return configparser.RawConfigParser.get(self, section, option)
-        except (configparser.NoSectionError, configparser.NoOptionError):
-            if cnt == 1:
-                return args[0]
-            raise
-
-    def items(self, section, *args):
+    def items(self, section=_UNSET, default=_UNSET):
         """Return a list of (key, value) tuples for the options in |section|."""
-        cnt = self._check_args('items', 2, 3, args)
+        if section is _UNSET:
+            return super().items()
+
         try:
             return configparser.RawConfigParser.items(self, section)
         except configparser.NoSectionError:
-            if cnt == 1:
-                return args[0]
+            if default is not _UNSET:
+                return default
             raise
 
 
-class PreSubmitConfig(object):
-    """Config file used for per-project `repo upload` hooks."""
-
-    FILENAME = 'PREUPLOAD.cfg'
-    GLOBAL_FILENAME = 'GLOBAL-PREUPLOAD.cfg'
+class PreUploadConfig(object):
+    """A single (abstract) config used for `repo upload` hooks."""
 
     CUSTOM_HOOKS_SECTION = 'Hook Scripts'
     BUILTIN_HOOKS_SECTION = 'Builtin Hooks'
     BUILTIN_HOOKS_OPTIONS_SECTION = 'Builtin Hooks Options'
+    BUILTIN_HOOKS_EXCLUDE_SECTION = 'Builtin Hooks Exclude Paths'
     TOOL_PATHS_SECTION = 'Tool Paths'
     OPTIONS_SECTION = 'Options'
+    VALID_SECTIONS = {
+        CUSTOM_HOOKS_SECTION,
+        BUILTIN_HOOKS_SECTION,
+        BUILTIN_HOOKS_OPTIONS_SECTION,
+        BUILTIN_HOOKS_EXCLUDE_SECTION,
+        TOOL_PATHS_SECTION,
+        OPTIONS_SECTION,
+    }
 
     OPTION_IGNORE_MERGED_COMMITS = 'ignore_merged_commits'
-    VALID_OPTIONS = (OPTION_IGNORE_MERGED_COMMITS,)
+    VALID_OPTIONS = {OPTION_IGNORE_MERGED_COMMITS}
+
+    def __init__(self, config=None, source=None):
+        """Initialize.
+
+        Args:
+          config: A configparse.ConfigParser instance.
+          source: Where this config came from. This is used in error messages to
+              facilitate debugging. It is not necessarily a valid path.
+        """
+        self.config = config if config else RawConfigParser()
+        self.source = source
+        if config:
+            self._validate()
+
+    @property
+    def custom_hooks(self):
+        """List of custom hooks to run (their keys/names)."""
+        return self.config.options(self.CUSTOM_HOOKS_SECTION, [])
+
+    def custom_hook(self, hook):
+        """The command to execute for |hook|."""
+        return shlex.split(self.config.get(
+            self.CUSTOM_HOOKS_SECTION, hook, fallback=''))
+
+    @property
+    def builtin_hooks(self):
+        """List of all enabled builtin hooks (their keys/names)."""
+        return [k for k, v in self.config.items(self.BUILTIN_HOOKS_SECTION, ())
+                if rh.shell.boolean_shell_value(v, None)]
+
+    def builtin_hook_option(self, hook):
+        """The options to pass to |hook|."""
+        return shlex.split(self.config.get(
+            self.BUILTIN_HOOKS_OPTIONS_SECTION, hook, fallback=''))
+
+    def builtin_hook_exclude_paths(self, hook):
+        """List of paths for which |hook| should not be executed."""
+        return shlex.split(self.config.get(
+            self.BUILTIN_HOOKS_EXCLUDE_SECTION, hook, fallback=''))
+
+    @property
+    def tool_paths(self):
+        """List of all tool paths."""
+        return dict(self.config.items(self.TOOL_PATHS_SECTION, ()))
+
+    def callable_hooks(self):
+        """Yield a CallableHook for each hook to be executed."""
+        scope = rh.hooks.ExclusionScope([])
+        for hook in self.custom_hooks:
+            options = rh.hooks.HookOptions(hook,
+                                           self.custom_hook(hook),
+                                           self.tool_paths)
+            func = functools.partial(rh.hooks.check_custom, options=options)
+            yield rh.hooks.CallableHook(hook, func, scope)
+
+        for hook in self.builtin_hooks:
+            options = rh.hooks.HookOptions(hook,
+                                           self.builtin_hook_option(hook),
+                                           self.tool_paths)
+            func = functools.partial(rh.hooks.BUILTIN_HOOKS[hook],
+                                     options=options)
+            scope = rh.hooks.ExclusionScope(
+                self.builtin_hook_exclude_paths(hook))
+            yield rh.hooks.CallableHook(hook, func, scope)
+
+    @property
+    def ignore_merged_commits(self):
+        """Whether to skip hooks for merged commits."""
+        return rh.shell.boolean_shell_value(
+            self.config.get(self.OPTIONS_SECTION,
+                            self.OPTION_IGNORE_MERGED_COMMITS, fallback=None),
+            False)
+
+    def update(self, preupload_config):
+        """Merge settings from |preupload_config| into ourself."""
+        self.config.read_dict(preupload_config.config)
+
+    def _validate(self):
+        """Run consistency checks on the config settings."""
+        config = self.config
+
+        # Reject unknown sections.
+        bad_sections = set(config.sections()) - self.VALID_SECTIONS
+        if bad_sections:
+            raise ValidationError('%s: unknown sections: %s' %
+                                  (self.source, bad_sections))
+
+        # Reject blank custom hooks.
+        for hook in self.custom_hooks:
+            if not config.get(self.CUSTOM_HOOKS_SECTION, hook):
+                raise ValidationError('%s: custom hook "%s" cannot be blank' %
+                                      (self.source, hook))
+
+        # Reject unknown builtin hooks.
+        valid_builtin_hooks = set(rh.hooks.BUILTIN_HOOKS.keys())
+        if config.has_section(self.BUILTIN_HOOKS_SECTION):
+            hooks = set(config.options(self.BUILTIN_HOOKS_SECTION))
+            bad_hooks = hooks - valid_builtin_hooks
+            if bad_hooks:
+                raise ValidationError('%s: unknown builtin hooks: %s' %
+                                      (self.source, bad_hooks))
+        elif config.has_section(self.BUILTIN_HOOKS_OPTIONS_SECTION):
+            raise ValidationError('Builtin hook options specified, but missing '
+                                  'builtin hook settings')
+
+        if config.has_section(self.BUILTIN_HOOKS_OPTIONS_SECTION):
+            hooks = set(config.options(self.BUILTIN_HOOKS_OPTIONS_SECTION))
+            bad_hooks = hooks - valid_builtin_hooks
+            if bad_hooks:
+                raise ValidationError('%s: unknown builtin hook options: %s' %
+                                      (self.source, bad_hooks))
+
+        # Verify hooks are valid shell strings.
+        for hook in self.custom_hooks:
+            try:
+                self.custom_hook(hook)
+            except ValueError as e:
+                raise ValidationError('%s: hook "%s" command line is invalid: '
+                                      '%s' % (self.source, hook, e)) from e
+
+        # Verify hook options are valid shell strings.
+        for hook in self.builtin_hooks:
+            try:
+                self.builtin_hook_option(hook)
+            except ValueError as e:
+                raise ValidationError('%s: hook options "%s" are invalid: %s' %
+                                      (self.source, hook, e)) from e
+
+        # Reject unknown tools.
+        valid_tools = set(rh.hooks.TOOL_PATHS.keys())
+        if config.has_section(self.TOOL_PATHS_SECTION):
+            tools = set(config.options(self.TOOL_PATHS_SECTION))
+            bad_tools = tools - valid_tools
+            if bad_tools:
+                raise ValidationError('%s: unknown tools: %s' %
+                                      (self.source, bad_tools))
+
+        # Reject unknown options.
+        if config.has_section(self.OPTIONS_SECTION):
+            options = set(config.options(self.OPTIONS_SECTION))
+            bad_options = options - self.VALID_OPTIONS
+            if bad_options:
+                raise ValidationError('%s: unknown options: %s' %
+                                      (self.source, bad_options))
+
+
+class PreUploadFile(PreUploadConfig):
+    """A single config (file) used for `repo upload` hooks.
+
+    This is an abstract class that requires subclasses to define the FILENAME
+    constant.
+
+    Attributes:
+      path: The path of the file.
+    """
+    FILENAME = None
+
+    def __init__(self, path):
+        """Initialize.
+
+        Args:
+          path: The config file to load.
+        """
+        super().__init__(source=path)
+
+        self.path = path
+        try:
+            self.config.read(path)
+        except configparser.ParsingError as e:
+            raise ValidationError('%s: %s' % (path, e)) from e
+
+        self._validate()
+
+    @classmethod
+    def from_paths(cls, paths):
+        """Search for files within paths that matches the class FILENAME.
+
+        Args:
+          paths: List of directories to look for config files.
+
+        Yields:
+          For each valid file found, an instance is created and returned.
+        """
+        for path in paths:
+            path = os.path.join(path, cls.FILENAME)
+            if os.path.exists(path):
+                yield cls(path)
+
+
+class LocalPreUploadFile(PreUploadFile):
+    """A single config file for a project (PREUPLOAD.cfg)."""
+    FILENAME = 'PREUPLOAD.cfg'
+
+    def _validate(self):
+        super()._validate()
+
+        # Reject Exclude Paths section for local config.
+        if self.config.has_section(self.BUILTIN_HOOKS_EXCLUDE_SECTION):
+            raise ValidationError('%s: [%s] is not valid in local files' %
+                                  (self.path,
+                                   self.BUILTIN_HOOKS_EXCLUDE_SECTION))
+
+
+class GlobalPreUploadFile(PreUploadFile):
+    """A single config file for a repo (GLOBAL-PREUPLOAD.cfg)."""
+    FILENAME = 'GLOBAL-PREUPLOAD.cfg'
+
+
+class PreUploadSettings(PreUploadConfig):
+    """Settings for `repo upload` hooks.
+
+    This encompasses multiple config files and provides the final (merged)
+    settings for a particular project.
+    """
 
     def __init__(self, paths=('',), global_paths=()):
         """Initialize.
@@ -115,147 +320,16 @@
           paths: The directories to look for config files.
           global_paths: The directories to look for global config files.
         """
-        config = RawConfigParser()
-
-        def _search(paths, filename):
-            for path in paths:
-                path = os.path.join(path, filename)
-                if os.path.exists(path):
-                    self.paths.append(path)
-                    try:
-                        config.read(path)
-                    except configparser.ParsingError as e:
-                        raise ValidationError('%s: %s' % (path, e))
+        super().__init__()
 
         self.paths = []
-        _search(global_paths, self.GLOBAL_FILENAME)
-        _search(paths, self.FILENAME)
+        for config in itertools.chain(
+                GlobalPreUploadFile.from_paths(global_paths),
+                LocalPreUploadFile.from_paths(paths)):
+            self.paths.append(config.path)
+            self.update(config)
 
-        self.config = config
 
+        # We validated configs in isolation, now do one final pass altogether.
+        self.source = '{%s}' % '|'.join(self.paths)
         self._validate()
-
-    @property
-    def custom_hooks(self):
-        """List of custom hooks to run (their keys/names)."""
-        return self.config.options(self.CUSTOM_HOOKS_SECTION, [])
-
-    def custom_hook(self, hook):
-        """The command to execute for |hook|."""
-        return shlex.split(self.config.get(self.CUSTOM_HOOKS_SECTION, hook, ''))
-
-    @property
-    def builtin_hooks(self):
-        """List of all enabled builtin hooks (their keys/names)."""
-        return [k for k, v in self.config.items(self.BUILTIN_HOOKS_SECTION, ())
-                if rh.shell.boolean_shell_value(v, None)]
-
-    def builtin_hook_option(self, hook):
-        """The options to pass to |hook|."""
-        return shlex.split(self.config.get(self.BUILTIN_HOOKS_OPTIONS_SECTION,
-                                           hook, ''))
-
-    @property
-    def tool_paths(self):
-        """List of all tool paths."""
-        return dict(self.config.items(self.TOOL_PATHS_SECTION, ()))
-
-    def callable_hooks(self):
-        """Yield a name and callback for each hook to be executed."""
-        for hook in self.custom_hooks:
-            options = rh.hooks.HookOptions(hook,
-                                           self.custom_hook(hook),
-                                           self.tool_paths)
-            yield (hook, functools.partial(rh.hooks.check_custom,
-                                           options=options))
-
-        for hook in self.builtin_hooks:
-            options = rh.hooks.HookOptions(hook,
-                                           self.builtin_hook_option(hook),
-                                           self.tool_paths)
-            yield (hook, functools.partial(rh.hooks.BUILTIN_HOOKS[hook],
-                                           options=options))
-
-    @property
-    def ignore_merged_commits(self):
-        """Whether to skip hooks for merged commits."""
-        return rh.shell.boolean_shell_value(
-            self.config.get(self.OPTIONS_SECTION,
-                            self.OPTION_IGNORE_MERGED_COMMITS, None),
-            False)
-
-    def _validate(self):
-        """Run consistency checks on the config settings."""
-        config = self.config
-
-        # Reject unknown sections.
-        valid_sections = set((
-            self.CUSTOM_HOOKS_SECTION,
-            self.BUILTIN_HOOKS_SECTION,
-            self.BUILTIN_HOOKS_OPTIONS_SECTION,
-            self.TOOL_PATHS_SECTION,
-            self.OPTIONS_SECTION,
-        ))
-        bad_sections = set(config.sections()) - valid_sections
-        if bad_sections:
-            raise ValidationError('%s: unknown sections: %s' %
-                                  (self.paths, bad_sections))
-
-        # Reject blank custom hooks.
-        for hook in self.custom_hooks:
-            if not config.get(self.CUSTOM_HOOKS_SECTION, hook):
-                raise ValidationError('%s: custom hook "%s" cannot be blank' %
-                                      (self.paths, hook))
-
-        # Reject unknown builtin hooks.
-        valid_builtin_hooks = set(rh.hooks.BUILTIN_HOOKS.keys())
-        if config.has_section(self.BUILTIN_HOOKS_SECTION):
-            hooks = set(config.options(self.BUILTIN_HOOKS_SECTION))
-            bad_hooks = hooks - valid_builtin_hooks
-            if bad_hooks:
-                raise ValidationError('%s: unknown builtin hooks: %s' %
-                                      (self.paths, bad_hooks))
-        elif config.has_section(self.BUILTIN_HOOKS_OPTIONS_SECTION):
-            raise ValidationError('Builtin hook options specified, but missing '
-                                  'builtin hook settings')
-
-        if config.has_section(self.BUILTIN_HOOKS_OPTIONS_SECTION):
-            hooks = set(config.options(self.BUILTIN_HOOKS_OPTIONS_SECTION))
-            bad_hooks = hooks - valid_builtin_hooks
-            if bad_hooks:
-                raise ValidationError('%s: unknown builtin hook options: %s' %
-                                      (self.paths, bad_hooks))
-
-        # Verify hooks are valid shell strings.
-        for hook in self.custom_hooks:
-            try:
-                self.custom_hook(hook)
-            except ValueError as e:
-                raise ValidationError('%s: hook "%s" command line is invalid: '
-                                      '%s' % (self.paths, hook, e))
-
-        # Verify hook options are valid shell strings.
-        for hook in self.builtin_hooks:
-            try:
-                self.builtin_hook_option(hook)
-            except ValueError as e:
-                raise ValidationError('%s: hook options "%s" are invalid: %s' %
-                                      (self.paths, hook, e))
-
-        # Reject unknown tools.
-        valid_tools = set(rh.hooks.TOOL_PATHS.keys())
-        if config.has_section(self.TOOL_PATHS_SECTION):
-            tools = set(config.options(self.TOOL_PATHS_SECTION))
-            bad_tools = tools - valid_tools
-            if bad_tools:
-                raise ValidationError('%s: unknown tools: %s' %
-                                      (self.paths, bad_tools))
-
-        # Reject unknown options.
-        valid_options = set(self.VALID_OPTIONS)
-        if config.has_section(self.OPTIONS_SECTION):
-            options = set(config.options(self.OPTIONS_SECTION))
-            bad_options = options - valid_options
-            if bad_options:
-                raise ValidationError('%s: unknown options: %s' %
-                                      (self.paths, bad_options))
diff --git a/rh/config_test.py b/rh/config_test.py
new file mode 100755
index 0000000..80fc832
--- /dev/null
+++ b/rh/config_test.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python3
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Integration tests for the config module (via PREUPLOAD.cfg)."""
+
+import argparse
+import os
+import re
+import sys
+
+
+REPOTOOLS = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+REPO_ROOT = os.path.dirname(os.path.dirname(REPOTOOLS))
+
+
+def assertEqual(msg, exp, actual):
+    """Assert |exp| equals |actual|."""
+    assert exp == actual, '%s: expected "%s" but got "%s"' % (msg, exp, actual)
+
+
+def assertEnv(var, value):
+    """Assert |var| is set in the environment as |value|."""
+    assert var in os.environ, '$%s missing in environment' % (var,)
+    assertEqual('env[%s]' % (var,), value, os.environ[var])
+
+
+def check_commit_id(commit):
+    """Check |commit| looks like a git commit id."""
+    assert len(commit) == 40, 'commit "%s" must be 40 chars' % (commit,)
+    assert re.match(r'^[a-f0-9]+$', commit), \
+        'commit "%s" must be all hex' % (commit,)
+
+
+def check_commit_msg(msg):
+    """Check the ${PREUPLOAD_COMMIT_MESSAGE} setting."""
+    assert len(msg) > 1, 'commit message must be at least 2 bytes: %r' % (msg,)
+
+
+def check_repo_root(root):
+    """Check the ${REPO_ROOT} setting."""
+    assertEqual('REPO_ROOT', REPO_ROOT, root)
+
+
+def check_files(files):
+    """Check the ${PREUPLOAD_FILES} setting."""
+    assert files
+
+
+def check_env():
+    """Verify all exported env vars look sane."""
+    assertEnv('REPO_PROJECT', 'platform/tools/repohooks')
+    assertEnv('REPO_PATH', 'tools/repohooks')
+    assertEnv('REPO_REMOTE', 'aosp')
+    check_commit_id(os.environ['REPO_LREV'])
+    print(os.environ['REPO_RREV'])
+    check_commit_id(os.environ['PREUPLOAD_COMMIT'])
+
+
+def get_parser():
+    """Return a command line parser."""
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument('--check-env', action='store_true',
+                        help='Check all exported env vars.')
+    parser.add_argument('--commit-id',
+                        help='${PREUPLOAD_COMMIT} setting.')
+    parser.add_argument('--commit-msg',
+                        help='${PREUPLOAD_COMMIT_MESSAGE} setting.')
+    parser.add_argument('--repo-root',
+                        help='${REPO_ROOT} setting.')
+    parser.add_argument('files', nargs='+',
+                        help='${PREUPLOAD_FILES} paths.')
+    return parser
+
+
+def main(argv):
+    """The main entry."""
+    parser = get_parser()
+    opts = parser.parse_args(argv)
+
+    try:
+        if opts.check_env:
+            check_env()
+        if opts.commit_id is not None:
+            check_commit_id(opts.commit_id)
+        if opts.commit_msg is not None:
+            check_commit_msg(opts.commit_msg)
+        if opts.repo_root is not None:
+            check_repo_root(opts.repo_root)
+        check_files(opts.files)
+    except AssertionError as e:
+        print('error: %s' % (e,), file=sys.stderr)
+        return 1
+
+    return 0
+
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv[1:]))
diff --git a/rh/config_unittest.py b/rh/config_unittest.py
index 4cbaea2..3e3e470 100755
--- a/rh/config_unittest.py
+++ b/rh/config_unittest.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding:utf-8 -*-
 # Copyright 2016 The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,8 +15,6 @@
 
 """Unittests for the config module."""
 
-from __future__ import print_function
-
 import os
 import shutil
 import sys
@@ -36,8 +33,16 @@
 import rh.config
 
 
-class PreSubmitConfigTests(unittest.TestCase):
-    """Tests for the PreSubmitConfig class."""
+class PreUploadConfigTests(unittest.TestCase):
+    """Tests for the PreUploadConfig class."""
+
+    def testMissing(self):
+        """Instantiating a non-existent config file should be fine."""
+        rh.config.PreUploadConfig()
+
+
+class FileTestCase(unittest.TestCase):
+    """Helper class for test cases to set up configuration files."""
 
     def setUp(self):
         self.tempdir = tempfile.mkdtemp()
@@ -45,31 +50,39 @@
     def tearDown(self):
         shutil.rmtree(self.tempdir)
 
-    def _write_config(self, data, filename=None):
-        """Helper to write out a config file for testing."""
-        if filename is None:
-            filename = rh.config.PreSubmitConfig.FILENAME
+    def _write_config(self, data, filename='temp.cfg'):
+        """Helper to write out a config file for testing.
+
+        Returns:
+          Path to the file where the configuration was written.
+        """
         path = os.path.join(self.tempdir, filename)
         with open(path, 'w') as fp:
             fp.write(data)
+        return path
+
+    def _write_local_config(self, data):
+        """Helper to write out a local config file for testing."""
+        return self._write_config(
+            data, filename=rh.config.LocalPreUploadFile.FILENAME)
 
     def _write_global_config(self, data):
         """Helper to write out a global config file for testing."""
-        self._write_config(
-            data, filename=rh.config.PreSubmitConfig.GLOBAL_FILENAME)
+        return self._write_config(
+            data, filename=rh.config.GlobalPreUploadFile.FILENAME)
 
-    def testMissing(self):
-        """Instantiating a non-existent config file should be fine."""
-        rh.config.PreSubmitConfig()
+
+class PreUploadFileTests(FileTestCase):
+    """Tests for the PreUploadFile class."""
 
     def testEmpty(self):
         """Instantiating an empty config file should be fine."""
-        self._write_config('')
-        rh.config.PreSubmitConfig(paths=(self.tempdir,))
+        path = self._write_config('')
+        rh.config.PreUploadFile(path)
 
     def testValid(self):
         """Verify a fully valid file works."""
-        self._write_config("""# This be a comment me matey.
+        path = self._write_config("""# This be a comment me matey.
 [Hook Scripts]
 name = script --with "some args"
 
@@ -82,39 +95,56 @@
 [Options]
 ignore_merged_commits = true
 """)
-        rh.config.PreSubmitConfig(paths=(self.tempdir,))
+        rh.config.PreUploadFile(path)
 
     def testUnknownSection(self):
         """Reject unknown sections."""
-        self._write_config('[BOOGA]')
-        self.assertRaises(rh.config.ValidationError, rh.config.PreSubmitConfig,
-                          paths=(self.tempdir,))
+        path = self._write_config('[BOOGA]')
+        self.assertRaises(rh.config.ValidationError, rh.config.PreUploadFile,
+                          path)
 
     def testUnknownBuiltin(self):
         """Reject unknown builtin hooks."""
-        self._write_config('[Builtin Hooks]\nbooga = borg!')
-        self.assertRaises(rh.config.ValidationError, rh.config.PreSubmitConfig,
-                          paths=(self.tempdir,))
+        path = self._write_config('[Builtin Hooks]\nbooga = borg!')
+        self.assertRaises(rh.config.ValidationError, rh.config.PreUploadFile,
+                          path)
 
     def testEmptyCustomHook(self):
         """Reject empty custom hooks."""
-        self._write_config('[Hook Scripts]\nbooga = \t \n')
-        self.assertRaises(rh.config.ValidationError, rh.config.PreSubmitConfig,
-                          paths=(self.tempdir,))
+        path = self._write_config('[Hook Scripts]\nbooga = \t \n')
+        self.assertRaises(rh.config.ValidationError, rh.config.PreUploadFile,
+                          path)
 
     def testInvalidIni(self):
         """Reject invalid ini files."""
-        self._write_config('[Hook Scripts]\n =')
-        self.assertRaises(rh.config.ValidationError, rh.config.PreSubmitConfig,
-                          paths=(self.tempdir,))
+        path = self._write_config('[Hook Scripts]\n =')
+        self.assertRaises(rh.config.ValidationError, rh.config.PreUploadFile,
+                          path)
 
     def testInvalidString(self):
         """Catch invalid string quoting."""
-        self._write_config("""[Hook Scripts]
+        path = self._write_config("""[Hook Scripts]
 name = script --'bad-quotes
 """)
-        self.assertRaises(rh.config.ValidationError, rh.config.PreSubmitConfig,
-                          paths=(self.tempdir,))
+        self.assertRaises(rh.config.ValidationError, rh.config.PreUploadFile,
+                          path)
+
+
+class LocalPreUploadFileTests(FileTestCase):
+    """Tests for the LocalPreUploadFile class."""
+
+    def testInvalidSectionConfig(self):
+        """Reject local config that uses invalid sections."""
+        path = self._write_config("""[Builtin Hooks Exclude Paths]
+cpplint = external/ 'test directory' ^vendor/(?!google/)
+""")
+        self.assertRaises(rh.config.ValidationError,
+                          rh.config.LocalPreUploadFile,
+                          path)
+
+
+class PreUploadSettingsTests(FileTestCase):
+    """Tests for the PreUploadSettings class."""
 
     def testGlobalConfigs(self):
         """Verify global configs stack properly."""
@@ -122,14 +152,21 @@
 commit_msg_bug_field = true
 commit_msg_changeid_field = true
 commit_msg_test_field = false""")
-        self._write_config("""[Builtin Hooks]
+        self._write_local_config("""[Builtin Hooks]
 commit_msg_bug_field = false
 commit_msg_test_field = true""")
-        config = rh.config.PreSubmitConfig(paths=(self.tempdir,),
-                                           global_paths=(self.tempdir,))
+        config = rh.config.PreUploadSettings(paths=(self.tempdir,),
+                                             global_paths=(self.tempdir,))
         self.assertEqual(config.builtin_hooks,
                          ['commit_msg_changeid_field', 'commit_msg_test_field'])
 
+    def testGlobalExcludeScope(self):
+        """Verify exclude scope is valid for global config."""
+        self._write_global_config("""[Builtin Hooks Exclude Paths]
+cpplint = external/ 'test directory' ^vendor/(?!google/)
+""")
+        rh.config.PreUploadSettings(global_paths=(self.tempdir,))
+
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/rh/git.py b/rh/git.py
index cbe8c42..ab1e35f 100644
--- a/rh/git.py
+++ b/rh/git.py
@@ -1,4 +1,3 @@
-# -*- coding:utf-8 -*-
 # Copyright 2016 The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,8 +14,6 @@
 
 """Git helper functions."""
 
-from __future__ import print_function
-
 import os
 import re
 import sys
@@ -34,13 +31,13 @@
     """Returns the current upstream remote name."""
     # First get the current branch name.
     cmd = ['git', 'rev-parse', '--abbrev-ref', 'HEAD']
-    result = rh.utils.run_command(cmd, capture_output=True)
-    branch = result.output.strip()
+    result = rh.utils.run(cmd, capture_output=True)
+    branch = result.stdout.strip()
 
     # Then get the remote associated with this branch.
     cmd = ['git', 'config', 'branch.%s.remote' % branch]
-    result = rh.utils.run_command(cmd, capture_output=True)
-    return result.output.strip()
+    result = rh.utils.run(cmd, capture_output=True)
+    return result.stdout.strip()
 
 
 def get_upstream_branch():
@@ -50,21 +47,21 @@
       Error if there is no tracking branch
     """
     cmd = ['git', 'symbolic-ref', 'HEAD']
-    result = rh.utils.run_command(cmd, capture_output=True)
-    current_branch = result.output.strip().replace('refs/heads/', '')
+    result = rh.utils.run(cmd, capture_output=True)
+    current_branch = result.stdout.strip().replace('refs/heads/', '')
     if not current_branch:
         raise ValueError('Need to be on a tracking branch')
 
     cfg_option = 'branch.' + current_branch + '.%s'
     cmd = ['git', 'config', cfg_option % 'merge']
-    result = rh.utils.run_command(cmd, capture_output=True)
-    full_upstream = result.output.strip()
+    result = rh.utils.run(cmd, capture_output=True)
+    full_upstream = result.stdout.strip()
     # If remote is not fully qualified, add an implicit namespace.
     if '/' not in full_upstream:
         full_upstream = 'refs/heads/%s' % full_upstream
     cmd = ['git', 'config', cfg_option % 'remote']
-    result = rh.utils.run_command(cmd, capture_output=True)
-    remote = result.output.strip()
+    result = rh.utils.run(cmd, capture_output=True)
+    remote = result.stdout.strip()
     if not remote or not full_upstream:
         raise ValueError('Need to be on a tracking branch')
 
@@ -74,8 +71,8 @@
 def get_commit_for_ref(ref):
     """Returns the latest commit for this ref."""
     cmd = ['git', 'rev-parse', ref]
-    result = rh.utils.run_command(cmd, capture_output=True)
-    return result.output.strip()
+    result = rh.utils.run(cmd, capture_output=True)
+    return result.stdout.strip()
 
 
 def get_remote_revision(ref, remote):
@@ -89,7 +86,7 @@
 def get_patch(commit):
     """Returns the patch for this commit."""
     cmd = ['git', 'format-patch', '--stdout', '-1', commit]
-    return rh.utils.run_command(cmd, capture_output=True).output
+    return rh.utils.run(cmd, capture_output=True).stdout
 
 
 def get_file_content(commit, path):
@@ -103,7 +100,7 @@
     content will not have any newlines.
     """
     cmd = ['git', 'show', '%s:%s' % (commit, path)]
-    return rh.utils.run_command(cmd, capture_output=True).output
+    return rh.utils.run(cmd, capture_output=True).stdout
 
 
 class RawDiffEntry(object):
@@ -145,7 +142,7 @@
     entries = []
 
     cmd = ['git', 'diff', '--no-ext-diff', '-M', '--raw', target]
-    diff = rh.utils.run_command(cmd, cwd=path, capture_output=True).output
+    diff = rh.utils.run(cmd, cwd=path, capture_output=True).stdout
     diff_lines = diff.strip().splitlines()
     for line in diff_lines:
         match = DIFF_RE.match(line)
@@ -167,21 +164,21 @@
     Returns:
       A list of modified/added (and perhaps deleted) files
     """
-    return raw_diff(os.getcwd(), '%s^!' % commit)
+    return raw_diff(os.getcwd(), '%s^-' % commit)
 
 
 def get_commits(ignore_merged_commits=False):
     """Returns a list of commits for this review."""
-    cmd = ['git', 'log', '%s..' % get_upstream_branch(), '--format=%H']
+    cmd = ['git', 'rev-list', '%s..' % get_upstream_branch()]
     if ignore_merged_commits:
         cmd.append('--first-parent')
-    return rh.utils.run_command(cmd, capture_output=True).output.split()
+    return rh.utils.run(cmd, capture_output=True).stdout.split()
 
 
 def get_commit_desc(commit):
     """Returns the full commit message of a commit."""
-    cmd = ['git', 'log', '--format=%B', commit + '^!']
-    return rh.utils.run_command(cmd, capture_output=True).output
+    cmd = ['git', 'diff-tree', '-s', '--always', '--format=%B', commit]
+    return rh.utils.run(cmd, capture_output=True).stdout
 
 
 def find_repo_root(path=None):
@@ -202,5 +199,5 @@
 def is_git_repository(path):
     """Returns True if the path is a valid git repository."""
     cmd = ['git', 'rev-parse', '--resolve-git-dir', os.path.join(path, '.git')]
-    result = rh.utils.run_command(cmd, quiet=True, error_code_ok=True)
+    result = rh.utils.run(cmd, capture_output=True, check=False)
     return result.returncode == 0
diff --git a/rh/hooks.py b/rh/hooks.py
index e2eed96..0b3bb29 100644
--- a/rh/hooks.py
+++ b/rh/hooks.py
@@ -1,4 +1,3 @@
-# -*- coding:utf-8 -*-
 # Copyright 2016 The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,8 +14,8 @@
 
 """Functions that implement the actual checks."""
 
-from __future__ import print_function
-
+import collections
+import fnmatch
 import json
 import os
 import platform
@@ -29,8 +28,8 @@
 del _path
 
 # pylint: disable=wrong-import-position
-import rh.results
 import rh.git
+import rh.results
 import rh.utils
 
 
@@ -68,26 +67,39 @@
 
         ret = []
         for arg in args:
-            # First scan for exact matches
-            for key, val in replacements.items():
-                var = '${%s}' % (key,)
-                if arg == var:
-                    if isinstance(val, str):
-                        ret.append(val)
-                    else:
-                        ret.extend(val)
-                    # We break on first hit to avoid double expansion.
-                    break
+            if arg.endswith('${PREUPLOAD_FILES_PREFIXED}'):
+                if arg == '${PREUPLOAD_FILES_PREFIXED}':
+                    assert len(ret) > 1, ('PREUPLOAD_FILES_PREFIXED cannot be '
+                                          'the 1st or 2nd argument')
+                    prev_arg = ret[-1]
+                    ret = ret[0:-1]
+                    for file in self.get('PREUPLOAD_FILES'):
+                        ret.append(prev_arg)
+                        ret.append(file)
+                else:
+                    prefix = arg[0:-len('${PREUPLOAD_FILES_PREFIXED}')]
+                    ret.extend(
+                        prefix + file for file in self.get('PREUPLOAD_FILES'))
             else:
-                # If no exact matches, do an inline replacement.
-                def replace(m):
-                    val = self.get(m.group(1))
-                    if isinstance(val, str):
-                        return val
-                    return ' '.join(val)
-                ret.append(re.sub(r'\$\{(%s)\}' % ('|'.join(all_vars),),
-                                  replace, arg))
-
+                # First scan for exact matches
+                for key, val in replacements.items():
+                    var = '${%s}' % (key,)
+                    if arg == var:
+                        if isinstance(val, str):
+                            ret.append(val)
+                        else:
+                            ret.extend(val)
+                        # We break on first hit to avoid double expansion.
+                        break
+                else:
+                    # If no exact matches, do an inline replacement.
+                    def replace(m):
+                        val = self.get(m.group(1))
+                        if isinstance(val, str):
+                            return val
+                        return ' '.join(val)
+                    ret.append(re.sub(r'\$\{(%s)\}' % ('|'.join(all_vars),),
+                                      replace, arg))
         return ret
 
     @classmethod
@@ -127,6 +139,42 @@
         return _get_build_os_name()
 
 
+class ExclusionScope(object):
+    """Exclusion scope for a hook.
+
+    An exclusion scope can be used to determine if a hook has been disabled for
+    a specific project.
+    """
+
+    def __init__(self, scope):
+        """Initialize.
+
+        Args:
+          scope: A list of shell-style wildcards (fnmatch) or regular
+              expressions. Regular expressions must start with the ^ character.
+        """
+        self._scope = []
+        for path in scope:
+            if path.startswith('^'):
+                self._scope.append(re.compile(path))
+            else:
+                self._scope.append(path)
+
+    def __contains__(self, proj_dir):
+        """Checks if |proj_dir| matches the excluded paths.
+
+        Args:
+          proj_dir: The relative path of the project.
+        """
+        for exclusion_path in self._scope:
+            if hasattr(exclusion_path, 'match'):
+                if exclusion_path.match(proj_dir):
+                    return True
+            elif fnmatch.fnmatch(proj_dir, exclusion_path):
+                return True
+        return False
+
+
 class HookOptions(object):
     """Holder class for hook options."""
 
@@ -185,13 +233,19 @@
         return self.expand_vars([tool_path])[0]
 
 
-def _run_command(cmd, **kwargs):
+# A callable hook.
+CallableHook = collections.namedtuple('CallableHook', ('name', 'hook', 'scope'))
+
+
+def _run(cmd, **kwargs):
     """Helper command for checks that tend to gather output."""
-    kwargs.setdefault('redirect_stderr', True)
     kwargs.setdefault('combine_stdout_stderr', True)
     kwargs.setdefault('capture_output', True)
-    kwargs.setdefault('error_code_ok', True)
-    return rh.utils.run_command(cmd, **kwargs)
+    kwargs.setdefault('check', False)
+    # Make sure hooks run with stdin disconnected to avoid accidentally
+    # interactive tools causing pauses.
+    kwargs.setdefault('input', '')
+    return rh.utils.run(cmd, **kwargs)
 
 
 def _match_regex_list(subject, expressions):
@@ -258,9 +312,9 @@
     parameter in HookCommandResult.
     """
     def wrapper():
-        result = _run_command(cmd, **kwargs)
+        result = _run(cmd, **kwargs)
         if result.returncode not in (None, 0):
-            return result.output
+            return result.stdout
         return None
     return wrapper
 
@@ -268,7 +322,7 @@
 def _check_cmd(hook_name, project, commit, cmd, fixup_func=None, **kwargs):
     """Runs |cmd| and returns its result as a HookCommandResult."""
     return [rh.results.HookCommandResult(hook_name, project, commit,
-                                         _run_command(cmd, **kwargs),
+                                         _run(cmd, **kwargs),
                                          fixup_func=fixup_func)]
 
 
@@ -297,10 +351,10 @@
     ret = []
     for d in filtered:
         data = rh.git.get_file_content(commit, d.file)
-        result = _run_command(cmd, input=data)
-        if result.output:
+        result = _run(cmd, input=data)
+        if result.stdout:
             ret.append(rh.results.HookResult(
-                'bpfmt', project, commit, error=result.output,
+                'bpfmt', project, commit, error=result.stdout,
                 files=(d.file,)))
     return ret
 
@@ -503,6 +557,251 @@
                                   project, commit, error=error)]
 
 
+RELNOTE_MISSPELL_MSG = """Commit message contains something that looks
+similar to the "Relnote:" tag.  It must match the regex:
+
+    %s
+
+The Relnote: stanza is free-form and should describe what developers need to
+know about your change.
+
+Some examples below:
+
+Relnote: "Added a new API `Class#isBetter` to determine whether or not the
+class is better"
+Relnote: Fixed an issue where the UI would hang on a double tap.
+
+Check the git history for more examples. It's a free-form field, so we urge
+you to develop conventions that make sense for your project.
+"""
+
+RELNOTE_MISSING_QUOTES_MSG = """Commit message contains something that looks
+similar to the "Relnote:" tag but might be malformatted.  For multiline
+release notes, you need to include a starting and closing quote.
+
+Multi-line Relnote example:
+
+Relnote: "Added a new API `Class#getSize` to get the size of the class.
+This is useful if you need to know the size of the class."
+
+Single-line Relnote example:
+
+Relnote: Added a new API `Class#containsData`
+"""
+
+RELNOTE_INVALID_QUOTES_MSG = """Commit message contains something that looks
+similar to the "Relnote:" tag but might be malformatted.  If you are using
+quotes that do not mark the start or end of a Relnote, you need to escape them
+with a backslash.
+
+Non-starting/non-ending quote Relnote examples:
+
+Relnote: "Fixed an error with `Class#getBar()` where \"foo\" would be returned
+in edge cases."
+Relnote: Added a new API to handle strings like \"foo\"
+"""
+
+def check_commit_msg_relnote_field_format(project, commit, desc, _diff,
+                                          options=None):
+    """Check the commit for one correctly formatted 'Relnote:' line.
+
+    Checks the commit message for three things:
+    (1) Checks for possible misspellings of the 'Relnote:' tag.
+    (2) Ensures that multiline release notes are properly formatted with a
+    starting quote and an ending quote.
+    (3) Checks that release notes that contain non-starting or non-ending
+    quotes are escaped with a backslash.
+    """
+    field = 'Relnote'
+    regex_relnote = r'^%s:.*$' % (field,)
+    check_re_relnote = re.compile(regex_relnote, re.IGNORECASE)
+
+    if options.args():
+        raise ValueError('commit msg %s check takes no options' % (field,))
+
+    # Check 1: Check for possible misspellings of the `Relnote:` field.
+
+    # Regex for misspelled fields.
+    possible_field_misspells = {'Relnotes', 'ReleaseNote',
+                                'Rel-note', 'Rel note',
+                                'rel-notes', 'releasenotes',
+                                'release-note', 'release-notes'}
+    regex_field_misspells = r'^(%s): .*$' % (
+        '|'.join(possible_field_misspells),
+    )
+    check_re_field_misspells = re.compile(regex_field_misspells, re.IGNORECASE)
+
+    ret = []
+    for line in desc.splitlines():
+        if check_re_field_misspells.match(line):
+            error = RELNOTE_MISSPELL_MSG % (regex_relnote, )
+            ret.append(
+                rh.results.HookResult(('commit msg: "%s:" '
+                                       'tag spelling error') % (field,),
+                                      project, commit, error=error))
+
+    # Check 2: Check that multiline Relnotes are quoted.
+
+    check_re_empty_string = re.compile(r'^$')
+
+    # Regex to find other fields that could be used.
+    regex_other_fields = r'^[a-zA-Z0-9-]+:'
+    check_re_other_fields = re.compile(regex_other_fields)
+
+    desc_lines = desc.splitlines()
+    for i, cur_line in enumerate(desc_lines):
+        # Look for a Relnote tag that is before the last line and
+        # lacking any quotes.
+        if (check_re_relnote.match(cur_line) and
+                i < len(desc_lines) - 1 and
+                '"' not in cur_line):
+            next_line = desc_lines[i + 1]
+            # Check that the next line does not contain any other field
+            # and it's not an empty string.
+            if (not check_re_other_fields.findall(next_line) and
+                    not check_re_empty_string.match(next_line)):
+                ret.append(
+                    rh.results.HookResult(('commit msg: "%s:" '
+                                           'tag missing quotes') % (field,),
+                                          project, commit,
+                                          error=RELNOTE_MISSING_QUOTES_MSG))
+                break
+
+    # Check 3: Check that multiline Relnotes contain matching quotes.
+    first_quote_found = False
+    second_quote_found = False
+    for cur_line in desc_lines:
+        contains_quote = '"' in cur_line
+        contains_field = check_re_other_fields.findall(cur_line)
+        # If we have found the first quote and another field, break and fail.
+        if first_quote_found and contains_field:
+            break
+        # If we have found the first quote, this line contains a quote,
+        # and this line is not another field, break and succeed.
+        if first_quote_found and contains_quote:
+            second_quote_found = True
+            break
+        # Check that the `Relnote:` tag exists and it contains a starting quote.
+        if check_re_relnote.match(cur_line) and contains_quote:
+            first_quote_found = True
+            # A single-line Relnote containing a starting and ending triple
+            # quote is valid.
+            if cur_line.count('"""') == 2:
+                second_quote_found = True
+                break
+            # A single-line Relnote containing a starting and ending quote
+            # is valid.
+            if cur_line.count('"') - cur_line.count('\\"') == 2:
+                second_quote_found = True
+                break
+    if first_quote_found != second_quote_found:
+        ret.append(
+            rh.results.HookResult(('commit msg: "%s:" '
+                                   'tag missing closing quote') % (field,),
+                                  project, commit,
+                                  error=RELNOTE_MISSING_QUOTES_MSG))
+
+    # Check 4: Check that non-starting or non-ending quotes are escaped with a
+    # backslash.
+    line_needs_checking = False
+    uses_invalid_quotes = False
+    for cur_line in desc_lines:
+        if check_re_other_fields.findall(cur_line):
+            line_needs_checking = False
+        on_relnote_line = check_re_relnote.match(cur_line)
+        # Determine if we are parsing the base `Relnote:` line.
+        if on_relnote_line and '"' in cur_line:
+            line_needs_checking = True
+            # We don't think anyone will type '"""' and then forget to
+            # escape it, so we're not checking for this.
+            if '"""' in cur_line:
+                break
+        if line_needs_checking:
+            stripped_line = re.sub('^%s:' % field, '', cur_line,
+                                   flags=re.IGNORECASE).strip()
+            for i, character in enumerate(stripped_line):
+                if i == 0:
+                    # Case 1: Valid quote at the beginning of the
+                    # base `Relnote:` line.
+                    if on_relnote_line:
+                        continue
+                    # Case 2: Invalid quote at the beginning of following
+                    # lines, where we are not terminating the release note.
+                    if character == '"' and stripped_line != '"':
+                        uses_invalid_quotes = True
+                        break
+                # Case 3: Check all other cases.
+                if (character == '"'
+                        and 0 < i < len(stripped_line) - 1
+                        and stripped_line[i-1] != '"'
+                        and stripped_line[i-1] != "\\"):
+                    uses_invalid_quotes = True
+                    break
+
+    if uses_invalid_quotes:
+        ret.append(rh.results.HookResult(('commit msg: "%s:" '
+                                          'tag using unescaped '
+                                          'quotes') % (field,),
+                                         project, commit,
+                                         error=RELNOTE_INVALID_QUOTES_MSG))
+    return ret
+
+
+RELNOTE_REQUIRED_CURRENT_TXT_MSG = """\
+Commit contains a change to current.txt or public_plus_experimental_current.txt,
+but the commit message does not contain the required `Relnote:` tag.  It must
+match the regex:
+
+    %s
+
+The Relnote: stanza is free-form and should describe what developers need to
+know about your change.  If you are making infrastructure changes, you
+can set the Relnote: stanza to be "N/A" for the commit to not be included
+in release notes.
+
+Some examples:
+
+Relnote: "Added a new API `Class#isBetter` to determine whether or not the
+class is better"
+Relnote: Fixed an issue where the UI would hang on a double tap.
+Relnote: N/A
+
+Check the git history for more examples.
+"""
+
+def check_commit_msg_relnote_for_current_txt(project, commit, desc, diff,
+                                             options=None):
+    """Check changes to current.txt contain the 'Relnote:' stanza."""
+    field = 'Relnote'
+    regex = r'^%s: .+$' % (field,)
+    check_re = re.compile(regex, re.IGNORECASE)
+
+    if options.args():
+        raise ValueError('commit msg %s check takes no options' % (field,))
+
+    filtered = _filter_diff(
+        diff,
+        [r'(^|/)(public_plus_experimental_current|current)\.txt$']
+    )
+    # If the commit does not contain a change to *current.txt, then this repo
+    # hook check no longer applies.
+    if not filtered:
+        return None
+
+    found = []
+    for line in desc.splitlines():
+        if check_re.match(line):
+            found.append(line)
+
+    if not found:
+        error = RELNOTE_REQUIRED_CURRENT_TXT_MSG % (regex)
+    else:
+        return None
+
+    return [rh.results.HookResult('commit msg: "%s:" check' % (field,),
+                                  project, commit, error=error)]
+
+
 def check_cpplint(project, commit, _desc, diff, options=None):
     """Run cpplint."""
     # This list matches what cpplint expects.  We could run on more (like .cxx),
@@ -527,11 +826,12 @@
     ret = []
     for d in filtered:
         data = rh.git.get_file_content(commit, d.file)
-        result = _run_command(cmd, input=data)
-        if result.output:
+        result = _run(cmd, input=data)
+        if result.stdout:
+            fixup_func = _fixup_func_caller([gofmt, '-w', d.file])
             ret.append(rh.results.HookResult(
-                'gofmt', project, commit, error=result.output,
-                files=(d.file,)))
+                'gofmt', project, commit, error=result.stdout,
+                files=(d.file,), fixup_func=fixup_func))
     return ret
 
 
@@ -581,10 +881,40 @@
 def check_pylint3(project, commit, desc, diff, options=None):
     """Run pylint through Python 3."""
     return _check_pylint(project, commit, desc, diff,
-                         extra_args=['--executable-path=pylint3'],
+                         extra_args=['--py3'],
                          options=options)
 
 
+def check_rustfmt(project, commit, _desc, diff, options=None):
+    """Run "rustfmt --check" on diffed rust files"""
+    filtered = _filter_diff(diff, [r'\.rs$'])
+    if not filtered:
+        return None
+
+    rustfmt = options.tool_path('rustfmt')
+    cmd = [rustfmt] + options.args((), filtered)
+    ret = []
+    for d in filtered:
+        data = rh.git.get_file_content(commit, d.file)
+        result = _run(cmd, input=data)
+        # If the parsing failed, stdout will contain enough details on the
+        # location of the error.
+        if result.returncode:
+            ret.append(rh.results.HookResult(
+                'rustfmt', project, commit, error=result.stdout,
+                files=(d.file,)))
+            continue
+        # TODO(b/164111102): rustfmt stable does not support --check on stdin.
+        # If no error is reported, compare stdin with stdout.
+        if data != result.stdout:
+            msg = ('To fix, please run: %s' %
+                   rh.shell.cmd_to_str(cmd + [d.file]))
+            ret.append(rh.results.HookResult(
+                'rustfmt', project, commit, error=msg,
+                files=(d.file,)))
+    return ret
+
+
 def check_xmllint(project, commit, _desc, diff, options=None):
     """Run xmllint."""
     # XXX: Should we drop most of these and probe for <?xml> tags?
@@ -638,14 +968,36 @@
         return None
 
     testmapping_format = options.tool_path('android-test-mapping-format')
+    testmapping_args = ['--commit', commit]
     cmd = [testmapping_format] + options.args(
-        (project.dir, '${PREUPLOAD_FILES}',), filtered)
+        (project.dir, '${PREUPLOAD_FILES}'), filtered) + testmapping_args
     return _check_cmd('android-test-mapping-format', project, commit, cmd)
 
 
+def check_aidl_format(project, commit, _desc, diff, options=None):
+    """Checks that AIDL files are formatted with aidl-format."""
+    # All *.aidl files except for those under aidl_api directory.
+    filtered = _filter_diff(diff, [r'\.aidl$'], [r'/aidl_api/'])
+    if not filtered:
+        return None
+    aidl_format = options.tool_path('aidl-format')
+    cmd = [aidl_format, '-d'] + options.args((), filtered)
+    ret = []
+    for d in filtered:
+        data = rh.git.get_file_content(commit, d.file)
+        result = _run(cmd, input=data)
+        if result.stdout:
+            fixup_func = _fixup_func_caller([aidl_format, '-w', d.file])
+            ret.append(rh.results.HookResult(
+                'aidl-format', project, commit, error=result.stdout,
+                files=(d.file,), fixup_func=fixup_func))
+    return ret
+
+
 # Hooks that projects can opt into.
 # Note: Make sure to keep the top level README.md up to date when adding more!
 BUILTIN_HOOKS = {
+    'aidl_format': check_aidl_format,
     'android_test_mapping_format': check_android_test_mapping,
     'bpfmt': check_bpfmt,
     'checkpatch': check_checkpatch,
@@ -654,6 +1006,9 @@
     'commit_msg_changeid_field': check_commit_msg_changeid_field,
     'commit_msg_prebuilt_apk_fields': check_commit_msg_prebuilt_apk_fields,
     'commit_msg_test_field': check_commit_msg_test_field,
+    'commit_msg_relnote_field_format': check_commit_msg_relnote_field_format,
+    'commit_msg_relnote_for_current_txt':
+        check_commit_msg_relnote_for_current_txt,
     'cpplint': check_cpplint,
     'gofmt': check_gofmt,
     'google_java_format': check_google_java_format,
@@ -661,12 +1016,14 @@
     'pylint': check_pylint2,
     'pylint2': check_pylint2,
     'pylint3': check_pylint3,
+    'rustfmt': check_rustfmt,
     'xmllint': check_xmllint,
 }
 
 # Additional tools that the hooks can call with their default values.
 # Note: Make sure to keep the top level README.md up to date when adding more!
 TOOL_PATHS = {
+    'aidl-format': 'aidl-format',
     'android-test-mapping-format':
         os.path.join(TOOLS_DIR, 'android_test_mapping_format.py'),
     'bpfmt': 'bpfmt',
@@ -677,4 +1034,5 @@
     'google-java-format': 'google-java-format',
     'google-java-format-diff': 'google-java-format-diff.py',
     'pylint': 'pylint',
+    'rustfmt': 'rustfmt',
 }
diff --git a/rh/hooks_unittest.py b/rh/hooks_unittest.py
index 2864088..8466319 100755
--- a/rh/hooks_unittest.py
+++ b/rh/hooks_unittest.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding:utf-8 -*-
 # Copyright 2016 The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,11 +15,10 @@
 
 """Unittests for the hooks module."""
 
-from __future__ import print_function
-
 import os
 import sys
 import unittest
+from unittest import mock
 
 _path = os.path.realpath(__file__ + '/../..')
 if sys.path[0] != _path:
@@ -31,9 +29,8 @@
 # relative imports because this is an executable program, not a module.
 # pylint: disable=wrong-import-position
 import rh
-import rh.hooks
 import rh.config
-from rh.sixish import mock
+import rh.hooks
 
 
 class HooksDocsTests(unittest.TestCase):
@@ -96,7 +93,9 @@
             'PREUPLOAD_COMMIT_MESSAGE': 'commit message',
             'PREUPLOAD_COMMIT': '5c4c293174bb61f0f39035a71acd9084abfa743d',
         })
-        self.replacer = rh.hooks.Placeholders()
+        self.replacer = rh.hooks.Placeholders(
+            [rh.git.RawDiffEntry(file=x)
+             for x in ['path1/file1', 'path2/file2']])
 
     def tearDown(self):
         os.environ.clear()
@@ -116,9 +115,13 @@
             # We also make sure that things in ${REPO_ROOT} are not double
             # expanded (which is why the return includes ${BUILD_OS}).
             '${REPO_ROOT}/some/prog/REPO_ROOT/ok',
-            # Verify lists are merged rather than inserted.  In this case, the
-            # list is empty, but we'd hit an error still if we saw [] in args.
+            # Verify lists are merged rather than inserted.
             '${PREUPLOAD_FILES}',
+            # Verify each file is preceded with '--file=' prefix.
+            '--file=${PREUPLOAD_FILES_PREFIXED}',
+            # Verify each file is preceded with '--file' argument.
+            '--file',
+            '${PREUPLOAD_FILES_PREFIXED}',
             # Verify values with whitespace don't expand into multiple args.
             '${PREUPLOAD_COMMIT_MESSAGE}',
             # Verify multiple values get replaced.
@@ -129,6 +132,14 @@
         output_args = self.replacer.expand_vars(input_args)
         exp_args = [
             '/ ${BUILD_OS}/some/prog/REPO_ROOT/ok',
+            'path1/file1',
+            'path2/file2',
+            '--file=path1/file1',
+            '--file=path2/file2',
+            '--file',
+            'path1/file1',
+            '--file',
+            'path2/file2',
             'commit message',
             '5c4c293174bb61f0f39035a71acd9084abfa743d^commit message',
             '${THIS_VAR_IS_GOOD}',
@@ -153,7 +164,8 @@
 
     def testPREUPLOAD_FILES(self):
         """Verify handling of PREUPLOAD_FILES."""
-        self.assertEqual(self.replacer.get('PREUPLOAD_FILES'), [])
+        self.assertEqual(self.replacer.get('PREUPLOAD_FILES'),
+                         ['path1/file1', 'path2/file2'])
 
     @mock.patch.object(rh.git, 'find_repo_root', return_value='/repo!')
     def testREPO_ROOT(self, m):
@@ -166,6 +178,28 @@
         self.assertEqual(self.replacer.get('BUILD_OS'), m.return_value)
 
 
+class ExclusionScopeTests(unittest.TestCase):
+    """Verify behavior of ExclusionScope class."""
+
+    def testEmpty(self):
+        """Verify the in operator for an empty scope."""
+        scope = rh.hooks.ExclusionScope([])
+        self.assertNotIn('external/*', scope)
+
+    def testGlob(self):
+        """Verify the in operator for a scope using wildcards."""
+        scope = rh.hooks.ExclusionScope(['vendor/*', 'external/*'])
+        self.assertIn('external/tools', scope)
+
+    def testRegex(self):
+        """Verify the in operator for a scope using regular expressions."""
+        scope = rh.hooks.ExclusionScope(['^vendor/(?!google)',
+                                         'external/*'])
+        self.assertIn('vendor/', scope)
+        self.assertNotIn('vendor/google/', scope)
+        self.assertIn('vendor/other/', scope)
+
+
 class HookOptionsTests(unittest.TestCase):
     """Verify behavior of HookOptions object."""
 
@@ -212,10 +246,10 @@
     """Verify misc utility functions."""
 
     def testRunCommand(self):
-        """Check _run_command behavior."""
+        """Check _run behavior."""
         # Most testing is done against the utils.RunCommand already.
         # pylint: disable=protected-access
-        ret = rh.hooks._run_command(['true'])
+        ret = rh.hooks._run(['true'])
         self.assertEqual(ret.returncode, 0)
 
     def testBuildOs(self):
@@ -235,7 +269,7 @@
 
 
 
-@mock.patch.object(rh.utils, 'run_command')
+@mock.patch.object(rh.utils, 'run')
 @mock.patch.object(rh.hooks, '_check_cmd', return_value=['check_cmd'])
 class BuiltinHooksTests(unittest.TestCase):
     """Verify the builtin hooks."""
@@ -261,11 +295,11 @@
         for desc in msgs:
             ret = func(self.project, 'commit', desc, diff, options=self.options)
             if accept:
-                self.assertEqual(
-                    ret, None, msg='Should have accepted: {{{%s}}}' % (desc,))
+                self.assertFalse(
+                    bool(ret), msg='Should have accepted: {{{%s}}}' % (desc,))
             else:
-                self.assertNotEqual(
-                    ret, None, msg='Should have rejected: {{{%s}}}' % (desc,))
+                self.assertTrue(
+                    bool(ret), msg='Should have rejected: {{{%s}}}' % (desc,))
 
     def _test_file_filter(self, mock_check, func, files):
         """Helper for testing hooks that filter by files and run external tools.
@@ -277,7 +311,7 @@
         """
         # First call should do nothing as there are no files to check.
         ret = func(self.project, 'commit', 'desc', (), options=self.options)
-        self.assertEqual(ret, None)
+        self.assertIsNone(ret)
         self.assertFalse(mock_check.called)
 
         # Second call should include some checks.
@@ -338,6 +372,8 @@
                 'subj',
                 'subj\n\nBUG=1234\n',
                 'subj\n\nBUG: 1234\n',
+                'subj\n\nBug: N/A\n',
+                'subj\n\nBug:\n',
             ))
 
     def test_commit_msg_changeid_field(self, _mock_check, _mock_run):
@@ -478,6 +514,239 @@
                 'subj\n\nTEST: I1234\n',
             ))
 
+    def test_commit_msg_relnote_field_format(self, _mock_check, _mock_run):
+        """Verify the commit_msg_relnote_field_format builtin hook."""
+        # Check some good messages.
+        self._test_commit_messages(
+            rh.hooks.check_commit_msg_relnote_field_format,
+            True,
+            (
+                'subj',
+                'subj\n\nTest: i did done dood it\nBug: 1234',
+                'subj\n\nMore content\n\nTest: i did done dood it\nBug: 1234',
+                'subj\n\nRelnote: This is a release note\nBug: 1234',
+                'subj\n\nRelnote:This is a release note\nBug: 1234',
+                'subj\n\nRelnote: This is a release note.\nBug: 1234',
+                'subj\n\nRelnote: "This is a release note."\nBug: 1234',
+                'subj\n\nRelnote: "This is a \\"release note\\"."\n\nBug: 1234',
+                'subj\n\nRelnote: This is a release note.\nChange-Id: 1234',
+                'subj\n\nRelnote: This is a release note.\n\nChange-Id: 1234',
+                ('subj\n\nRelnote: "This is a release note."\n\n'
+                 'Change-Id: 1234'),
+                ('subj\n\nRelnote: This is a release note.\n\n'
+                 'It has more info, but it is not part of the release note'
+                 '\nChange-Id: 1234'),
+                ('subj\n\nRelnote: "This is a release note.\n'
+                 'It contains a correct second line."'),
+                ('subj\n\nRelnote:"This is a release note.\n'
+                 'It contains a correct second line."'),
+                ('subj\n\nRelnote: "This is a release note.\n'
+                 'It contains a correct second line.\n'
+                 'And even a third line."\n'
+                 'Bug: 1234'),
+                ('subj\n\nRelnote: "This is a release note.\n'
+                 'It contains a correct second line.\n'
+                 '\\"Quotes\\" are even used on the third line."\n'
+                 'Bug: 1234'),
+                ('subj\n\nRelnote: This is release note 1.\n'
+                 'Relnote: This is release note 2.\n'
+                 'Bug: 1234'),
+                ('subj\n\nRelnote: This is release note 1.\n'
+                 'Relnote: "This is release note 2, and it\n'
+                 'contains a correctly formatted third line."\n'
+                 'Bug: 1234'),
+                ('subj\n\nRelnote: "This is release note 1 with\n'
+                 'a correctly formatted second line."\n\n'
+                 'Relnote: "This is release note 2, and it\n'
+                 'contains a correctly formatted second line."\n'
+                 'Bug: 1234'),
+                ('subj\n\nRelnote: "This is a release note with\n'
+                 'a correctly formatted second line."\n\n'
+                 'Bug: 1234'
+                 'Here is some extra "quoted" content.'),
+                ('subj\n\nRelnote: """This is a release note.\n\n'
+                 'This relnote contains an empty line.\n'
+                 'Then a non-empty line.\n\n'
+                 'And another empty line."""\n\n'
+                 'Bug: 1234'),
+                ('subj\n\nRelnote: """This is a release note.\n\n'
+                 'This relnote contains an empty line.\n'
+                 'Then an acceptable "quoted" line.\n\n'
+                 'And another empty line."""\n\n'
+                 'Bug: 1234'),
+                ('subj\n\nRelnote: """This is a release note."""\n\n'
+                 'Bug: 1234'),
+                ('subj\n\nRelnote: """This is a release note.\n'
+                 'It has a second line."""\n\n'
+                 'Bug: 1234'),
+                ('subj\n\nRelnote: """This is a release note.\n'
+                 'It has a second line, but does not end here.\n'
+                 '"""\n\n'
+                 'Bug: 1234'),
+                ('subj\n\nRelnote: """This is a release note.\n'
+                 '"It" has a second line, but does not end here.\n'
+                 '"""\n\n'
+                 'Bug: 1234'),
+                ('subj\n\nRelnote: "This is a release note.\n'
+                 'It has a second line, but does not end here.\n'
+                 '"\n\n'
+                 'Bug: 1234'),
+            ))
+
+        # Check some bad messages.
+        self._test_commit_messages(
+            rh.hooks.check_commit_msg_relnote_field_format,
+            False,
+            (
+                'subj\n\nReleaseNote: This is a release note.\n',
+                'subj\n\nRelnotes: This is a release note.\n',
+                'subj\n\nRel-note: This is a release note.\n',
+                'subj\n\nrelnoTes: This is a release note.\n',
+                'subj\n\nrel-Note: This is a release note.\n',
+                'subj\n\nRelnote: "This is a "release note"."\nBug: 1234',
+                'subj\n\nRelnote: This is a "release note".\nBug: 1234',
+                ('subj\n\nRelnote: This is a release note.\n'
+                 'It contains an incorrect second line.'),
+                ('subj\n\nRelnote: "This is a release note.\n'
+                 'It contains multiple lines.\n'
+                 'But it does not provide an ending quote.\n'),
+                ('subj\n\nRelnote: "This is a release note.\n'
+                 'It contains multiple lines but no closing quote.\n'
+                 'Test: my test "hello world"\n'),
+                ('subj\n\nRelnote: This is release note 1.\n'
+                 'Relnote: "This is release note 2, and it\n'
+                 'contains an incorrectly formatted third line.\n'
+                 'Bug: 1234'),
+                ('subj\n\nRelnote: This is release note 1 with\n'
+                 'an incorrectly formatted second line.\n\n'
+                 'Relnote: "This is release note 2, and it\n'
+                 'contains a correctly formatted second line."\n'
+                 'Bug: 1234'),
+                ('subj\n\nRelnote: "This is release note 1 with\n'
+                 'a correctly formatted second line."\n\n'
+                 'Relnote: This is release note 2, and it\n'
+                 'contains an incorrectly formatted second line.\n'
+                 'Bug: 1234'),
+                ('subj\n\nRelnote: "This is a release note.\n'
+                 'It contains a correct second line.\n'
+                 'But incorrect "quotes" on the third line."\n'
+                 'Bug: 1234'),
+                ('subj\n\nRelnote: """This is a release note.\n'
+                 'It has a second line, but no closing triple quote.\n\n'
+                 'Bug: 1234'),
+                ('subj\n\nRelnote: "This is a release note.\n'
+                 '"It" has a second line, but does not end here.\n'
+                 '"\n\n'
+                 'Bug: 1234'),
+            ))
+
+    def test_commit_msg_relnote_for_current_txt(self, _mock_check, _mock_run):
+        """Verify the commit_msg_relnote_for_current_txt builtin hook."""
+        diff_without_current_txt = ['bar/foo.txt',
+                                    'foo.cpp',
+                                    'foo.java',
+                                    'foo_current.java',
+                                    'foo_current.txt',
+                                    'baz/current.java',
+                                    'baz/foo_current.txt']
+        diff_with_current_txt = diff_without_current_txt + ['current.txt']
+        diff_with_subdir_current_txt = \
+            diff_without_current_txt + ['foo/current.txt']
+        diff_with_experimental_current_txt = \
+            diff_without_current_txt + ['public_plus_experimental_current.txt']
+        # Check some good messages.
+        self._test_commit_messages(
+            rh.hooks.check_commit_msg_relnote_for_current_txt,
+            True,
+            (
+                'subj\n\nRelnote: This is a release note\n',
+                'subj\n\nRelnote: This is a release note.\n\nChange-Id: 1234',
+                ('subj\n\nRelnote: This is release note 1 with\n'
+                 'an incorrectly formatted second line.\n\n'
+                 'Relnote: "This is release note 2, and it\n'
+                 'contains a correctly formatted second line."\n'
+                 'Bug: 1234'),
+            ),
+            files=diff_with_current_txt,
+        )
+        # Check some good messages.
+        self._test_commit_messages(
+            rh.hooks.check_commit_msg_relnote_for_current_txt,
+            True,
+            (
+                'subj\n\nRelnote: This is a release note\n',
+                'subj\n\nRelnote: This is a release note.\n\nChange-Id: 1234',
+                ('subj\n\nRelnote: This is release note 1 with\n'
+                 'an incorrectly formatted second line.\n\n'
+                 'Relnote: "This is release note 2, and it\n'
+                 'contains a correctly formatted second line."\n'
+                 'Bug: 1234'),
+            ),
+            files=diff_with_experimental_current_txt,
+        )
+        # Check some good messages.
+        self._test_commit_messages(
+            rh.hooks.check_commit_msg_relnote_for_current_txt,
+            True,
+            (
+                'subj\n\nRelnote: This is a release note\n',
+                'subj\n\nRelnote: This is a release note.\n\nChange-Id: 1234',
+                ('subj\n\nRelnote: This is release note 1 with\n'
+                 'an incorrectly formatted second line.\n\n'
+                 'Relnote: "This is release note 2, and it\n'
+                 'contains a correctly formatted second line."\n'
+                 'Bug: 1234'),
+            ),
+            files=diff_with_subdir_current_txt,
+        )
+        # Check some good messages.
+        self._test_commit_messages(
+            rh.hooks.check_commit_msg_relnote_for_current_txt,
+            True,
+            (
+                'subj',
+                'subj\nBug: 12345\nChange-Id: 1234',
+                'subj\n\nRelnote: This is a release note\n',
+                'subj\n\nRelnote: This is a release note.\n\nChange-Id: 1234',
+                ('subj\n\nRelnote: This is release note 1 with\n'
+                 'an incorrectly formatted second line.\n\n'
+                 'Relnote: "This is release note 2, and it\n'
+                 'contains a correctly formatted second line."\n'
+                 'Bug: 1234'),
+            ),
+            files=diff_without_current_txt,
+        )
+        # Check some bad messages.
+        self._test_commit_messages(
+            rh.hooks.check_commit_msg_relnote_for_current_txt,
+            False,
+            (
+                'subj'
+                'subj\nBug: 12345\nChange-Id: 1234',
+            ),
+            files=diff_with_current_txt,
+        )
+        # Check some bad messages.
+        self._test_commit_messages(
+            rh.hooks.check_commit_msg_relnote_for_current_txt,
+            False,
+            (
+                'subj'
+                'subj\nBug: 12345\nChange-Id: 1234',
+            ),
+            files=diff_with_experimental_current_txt,
+        )
+        # Check some bad messages.
+        self._test_commit_messages(
+            rh.hooks.check_commit_msg_relnote_for_current_txt,
+            False,
+            (
+                'subj'
+                'subj\nBug: 12345\nChange-Id: 1234',
+            ),
+            files=diff_with_subdir_current_txt,
+        )
+
     def test_cpplint(self, mock_check, _mock_run):
         """Verify the cpplint builtin hook."""
         self._test_file_filter(mock_check, rh.hooks.check_cpplint,
@@ -488,21 +757,21 @@
         # First call should do nothing as there are no files to check.
         ret = rh.hooks.check_gofmt(
             self.project, 'commit', 'desc', (), options=self.options)
-        self.assertEqual(ret, None)
+        self.assertIsNone(ret)
         self.assertFalse(mock_check.called)
 
         # Second call will have some results.
         diff = [rh.git.RawDiffEntry(file='foo.go')]
         ret = rh.hooks.check_gofmt(
             self.project, 'commit', 'desc', diff, options=self.options)
-        self.assertNotEqual(ret, None)
+        self.assertIsNotNone(ret)
 
     def test_jsonlint(self, mock_check, _mock_run):
         """Verify the jsonlint builtin hook."""
         # First call should do nothing as there are no files to check.
         ret = rh.hooks.check_json(
             self.project, 'commit', 'desc', (), options=self.options)
-        self.assertEqual(ret, None)
+        self.assertIsNone(ret)
         self.assertFalse(mock_check.called)
 
         # TODO: Actually pass some valid/invalid json data down.
@@ -522,6 +791,19 @@
         self._test_file_filter(mock_check, rh.hooks.check_pylint3,
                                ('foo.py',))
 
+    def test_rustfmt(self, mock_check, _mock_run):
+        # First call should do nothing as there are no files to check.
+        ret = rh.hooks.check_rustfmt(
+            self.project, 'commit', 'desc', (), options=self.options)
+        self.assertIsNone(ret)
+        self.assertFalse(mock_check.called)
+
+        # Second call will have some results.
+        diff = [rh.git.RawDiffEntry(file='lib.rs')]
+        ret = rh.hooks.check_rustfmt(
+            self.project, 'commit', 'desc', diff, options=self.options)
+        self.assertIsNotNone(ret)
+
     def test_xmllint(self, mock_check, _mock_run):
         """Verify the xmllint builtin hook."""
         self._test_file_filter(mock_check, rh.hooks.check_xmllint,
@@ -541,6 +823,20 @@
             self.project, 'commit', 'desc', diff, options=self.options)
         self.assertIsNotNone(ret)
 
+    def test_aidl_format(self, mock_check, _mock_run):
+        """Verify the aidl_format builtin hook."""
+        # First call should do nothing as there are no files to check.
+        ret = rh.hooks.check_aidl_format(
+            self.project, 'commit', 'desc', (), options=self.options)
+        self.assertIsNone(ret)
+        self.assertFalse(mock_check.called)
+
+        # Second call will have some results.
+        diff = [rh.git.RawDiffEntry(file='IFoo.aidl')]
+        ret = rh.hooks.check_aidl_format(
+            self.project, 'commit', 'desc', diff, options=self.options)
+        self.assertIsNotNone(ret)
+
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/rh/results.py b/rh/results.py
index 6d85dbc..bdf7626 100644
--- a/rh/results.py
+++ b/rh/results.py
@@ -1,4 +1,3 @@
-# -*- coding:utf-8 -*-
 # Copyright 2016 The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,9 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""Common errors thrown when repo presubmit checks fail."""
-
-from __future__ import print_function
+"""Common errors thrown when repo preupload checks fail."""
 
 import os
 import sys
@@ -64,12 +61,12 @@
 
 
 class HookCommandResult(HookResult):
-    """A single hook result based on a CommandResult."""
+    """A single hook result based on a CompletedProcess."""
 
     def __init__(self, hook, project, commit, result, files=(),
                  fixup_func=None):
         HookResult.__init__(self, hook, project, commit,
-                            result.error if result.error else result.output,
+                            result.stderr if result.stderr else result.stdout,
                             files=files, fixup_func=fixup_func)
         self.result = result
 
diff --git a/rh/shell.py b/rh/shell.py
index 5e23f57..dda3be3 100644
--- a/rh/shell.py
+++ b/rh/shell.py
@@ -1,4 +1,3 @@
-# -*- coding:utf-8 -*-
 # Copyright 2016 The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,8 +14,6 @@
 
 """Functions for working with shell code."""
 
-from __future__ import print_function
-
 import os
 import sys
 
@@ -25,9 +22,6 @@
     sys.path.insert(0, _path)
 del _path
 
-# pylint: disable=wrong-import-position
-from rh.sixish import string_types
-
 
 # For use by ShellQuote.  Match all characters that the shell might treat
 # specially.  This means a number of things:
@@ -156,11 +150,11 @@
     if sval is None:
         return default
 
-    if isinstance(sval, string_types):
+    if isinstance(sval, str):
         s = sval.lower()
         if s in ('yes', 'y', '1', 'true'):
             return True
-        elif s in ('no', 'n', '0', 'false'):
+        if s in ('no', 'n', '0', 'false'):
             return False
 
     raise ValueError('Could not decode as a boolean value: %r' % (sval,))
diff --git a/rh/shell_unittest.py b/rh/shell_unittest.py
index 47182a5..21478cf 100755
--- a/rh/shell_unittest.py
+++ b/rh/shell_unittest.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding:utf-8 -*-
 # Copyright 2016 The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,8 +15,6 @@
 
 """Unittests for the shell module."""
 
-from __future__ import print_function
-
 import difflib
 import os
 import sys
diff --git a/rh/signals.py b/rh/signals.py
index 80e5da7..c8a8d81 100644
--- a/rh/signals.py
+++ b/rh/signals.py
@@ -1,4 +1,3 @@
-# -*- coding:utf-8 -*-
 # Copyright 2016 The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,8 +14,6 @@
 
 """Signal related functionality."""
 
-from __future__ import print_function
-
 import os
 import signal
 import sys
@@ -36,53 +33,9 @@
     """
     if handler in (None, signal.SIG_IGN):
         return True
-    elif handler == signal.SIG_DFL:
+    if handler == signal.SIG_DFL:
         # This scenario is a fairly painful to handle fully, thus we just
         # state we couldn't handle it and leave it to client code.
         return False
     handler(signum, frame)
     return True
-
-
-def signal_module_usable(_signal=signal.signal, _SIGUSR1=signal.SIGUSR1):
-    """Verify that the signal module is usable and won't segfault on us.
-
-    See http://bugs.python.org/issue14173.  This function detects if the
-    signals module is no longer safe to use (which only occurs during
-    final stages of the interpreter shutdown) and heads off a segfault
-    if signal.* was accessed.
-
-    This shouldn't be used by anything other than functionality that is
-    known and unavoidably invoked by finalizer code during python shutdown.
-
-    Finally, the default args here are intentionally binding what we need
-    from the signal module to do the necessary test; invoking code shouldn't
-    pass any options, nor should any developer ever remove those default
-    options.
-
-    Note that this functionality is intended to be removed just as soon
-    as all consuming code installs their own SIGTERM handlers.
-    """
-    # Track any signals we receive while doing the check.
-    received, actual = [], None
-    def handler(signum, frame):
-        received.append([signum, frame])
-    try:
-        # Play with sigusr1, since it's not particularly used.
-        actual = _signal(_SIGUSR1, handler)
-        _signal(_SIGUSR1, actual)
-        return True
-    except (TypeError, AttributeError, SystemError, ValueError):
-        # The first three exceptions can be thrown depending on the state of the
-        # signal module internal Handlers array; we catch all, and interpret it
-        # as if we were invoked during sys.exit cleanup.
-        # The last exception can be thrown if we're trying to be used in a
-        # thread which is not the main one.  This can come up with standard
-        # python modules such as BaseHTTPServer.HTTPServer.
-        return False
-    finally:
-        # And now relay those signals to the original handler.  Not all may
-        # be delivered- the first may throw an exception for example.  Not our
-        # problem however.
-        for signum, frame in received:
-            actual(signum, frame)
diff --git a/rh/sixish.py b/rh/sixish.py
deleted file mode 100644
index 08f3731..0000000
--- a/rh/sixish.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# -*- coding:utf-8 -*-
-# Copyright 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Local version of the standard six module.
-
-Since this module is often unavailable by default on distros (or only available
-for specific versions), manage our own little copy.
-"""
-
-from __future__ import print_function
-
-import os
-import sys
-
-_path = os.path.realpath(__file__ + '/../..')
-if sys.path[0] != _path:
-    sys.path.insert(0, _path)
-del _path
-
-
-# Our attempts to wrap things below for diff versions of python confuse pylint.
-# pylint: disable=import-error,no-name-in-module,unused-import
-
-
-try:
-    import configparser
-except ImportError:
-    import ConfigParser as configparser
-
-
-# We allow mock to be disabled as it's not needed by non-unittest code.
-try:
-    import unittest.mock as mock
-except ImportError:
-    try:
-        import mock
-    except ImportError:
-        pass
-
-
-if sys.version_info.major < 3:
-    # pylint: disable=basestring-builtin,undefined-variable
-    string_types = basestring
-else:
-    string_types = str
diff --git a/rh/terminal.py b/rh/terminal.py
index c549f12..39c96ac 100644
--- a/rh/terminal.py
+++ b/rh/terminal.py
@@ -1,4 +1,3 @@
-# -*- coding:utf-8 -*-
 # Copyright 2016 The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,8 +17,6 @@
 This module handles terminal interaction including ANSI color codes.
 """
 
-from __future__ import print_function
-
 import os
 import sys
 
diff --git a/rh/utils.py b/rh/utils.py
index ae16938..aeab52f 100644
--- a/rh/utils.py
+++ b/rh/utils.py
@@ -1,4 +1,3 @@
-# -*- coding:utf-8 -*-
 # Copyright 2016 The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,8 +14,6 @@
 
 """Various utility functions."""
 
-from __future__ import print_function
-
 import errno
 import functools
 import os
@@ -34,17 +31,48 @@
 # pylint: disable=wrong-import-position
 import rh.shell
 import rh.signals
-from rh.sixish import string_types
 
 
-class CommandResult(object):
-    """An object to store various attributes of a child process."""
+def timedelta_str(delta):
+    """A less noisy timedelta.__str__.
 
-    def __init__(self, cmd=None, error=None, output=None, returncode=None):
-        self.cmd = cmd
-        self.error = error
-        self.output = output
-        self.returncode = returncode
+    The default timedelta stringification contains a lot of leading zeros and
+    uses microsecond resolution.  This makes for noisy output.
+    """
+    total = delta.total_seconds()
+    hours, rem = divmod(total, 3600)
+    mins, secs = divmod(rem, 60)
+    ret = '%i.%03is' % (secs, delta.microseconds // 1000)
+    if mins:
+        ret = '%im%s' % (mins, ret)
+    if hours:
+        ret = '%ih%s' % (hours, ret)
+    return ret
+
+
+class CompletedProcess(getattr(subprocess, 'CompletedProcess', object)):
+    """An object to store various attributes of a child process.
+
+    This is akin to subprocess.CompletedProcess.
+    """
+
+    # The linter is confused by the getattr usage above.
+    # TODO(vapier): Drop this once we're Python 3-only and we drop getattr.
+    # pylint: disable=bad-option-value,super-on-old-class
+    def __init__(self, args=None, returncode=None, stdout=None, stderr=None):
+        if sys.version_info.major < 3:
+            self.args = args
+            self.stdout = stdout
+            self.stderr = stderr
+            self.returncode = returncode
+        else:
+            super().__init__(
+                args=args, returncode=returncode, stdout=stdout, stderr=stderr)
+
+    @property
+    def cmd(self):
+        """Alias to self.args to better match other subprocess APIs."""
+        return self.args
 
     @property
     def cmdstr(self):
@@ -52,54 +80,67 @@
         return rh.shell.cmd_to_str(self.cmd)
 
 
-class RunCommandError(Exception):
-    """Error caught in RunCommand() method."""
+class CalledProcessError(subprocess.CalledProcessError):
+    """Error caught in run() function.
 
-    def __init__(self, msg, result, exception=None):
-        self.msg, self.result, self.exception = msg, result, exception
+    This is akin to subprocess.CalledProcessError.  We do not support |output|,
+    only |stdout|.
+
+    Attributes:
+      returncode: The exit code of the process.
+      cmd: The command that triggered this exception.
+      msg: Short explanation of the error.
+      exception: The underlying Exception if available.
+    """
+
+    def __init__(self, returncode, cmd, stdout=None, stderr=None, msg=None,
+                 exception=None):
         if exception is not None and not isinstance(exception, Exception):
-            raise ValueError('exception must be an exception instance; got %r'
-                             % (exception,))
-        Exception.__init__(self, msg)
-        self.args = (msg, result, exception)
+            raise TypeError('exception must be an exception instance; got %r'
+                            % (exception,))
 
-    def stringify(self, error=True, output=True):
+        super().__init__(returncode, cmd, stdout)
+        # The parent class will set |output|, so delete it.
+        del self.output
+        # TODO(vapier): When we're Python 3-only, delete this assignment as the
+        # parent handles it for us.
+        self.stdout = stdout
+        # TODO(vapier): When we're Python 3-only, move stderr to the init above.
+        self.stderr = stderr
+        self.msg = msg
+        self.exception = exception
+
+    @property
+    def cmdstr(self):
+        """Return self.cmd as a well shell-quoted string for debugging."""
+        return '' if self.cmd is None else rh.shell.cmd_to_str(self.cmd)
+
+    def stringify(self, stdout=True, stderr=True):
         """Custom method for controlling what is included in stringifying this.
 
-        Each individual argument is the literal name of an attribute
-        on the result object; if False, that value is ignored for adding
-        to this string content.  If true, it'll be incorporated.
-
         Args:
-          error: See comment about individual arguments above.
-          output: See comment about individual arguments above.
+          stdout: Whether to include captured stdout in the return value.
+          stderr: Whether to include captured stderr in the return value.
+
+        Returns:
+          A summary string for this result.
         """
         items = [
-            'return code: %s; command: %s' % (
-                self.result.returncode, self.result.cmdstr),
+            'return code: %s; command: %s' % (self.returncode, self.cmdstr),
         ]
-        if error and self.result.error:
-            items.append(self.result.error)
-        if output and self.result.output:
-            items.append(self.result.output)
+        if stderr and self.stderr:
+            items.append(self.stderr)
+        if stdout and self.stdout:
+            items.append(self.stdout)
         if self.msg:
             items.append(self.msg)
         return '\n'.join(items)
 
     def __str__(self):
-        # __str__ needs to return ascii, thus force a conversion to be safe.
-        return self.stringify().decode('utf-8', 'replace').encode(
-            'ascii', 'xmlcharrefreplace')
-
-    def __eq__(self, other):
-        return (isinstance(other, type(self)) and
-                self.args == other.args)
-
-    def __ne__(self, other):
-        return not self.__eq__(other)
+        return self.stringify()
 
 
-class TerminateRunCommandError(RunCommandError):
+class TerminateCalledProcessError(CalledProcessError):
     """We were signaled to shutdown while running a command.
 
     Client code shouldn't generally know, nor care about this class.  It's
@@ -107,63 +148,6 @@
     """
 
 
-def sudo_run_command(cmd, user='root', **kwargs):
-    """Run a command via sudo.
-
-    Client code must use this rather than coming up with their own RunCommand
-    invocation that jams sudo in- this function is used to enforce certain
-    rules in our code about sudo usage, and as a potential auditing point.
-
-    Args:
-      cmd: The command to run.  See RunCommand for rules of this argument-
-          SudoRunCommand purely prefixes it with sudo.
-      user: The user to run the command as.
-      kwargs: See RunCommand options, it's a direct pass thru to it.
-          Note that this supports a 'strict' keyword that defaults to True.
-          If set to False, it'll suppress strict sudo behavior.
-
-    Returns:
-      See RunCommand documentation.
-
-    Raises:
-      This function may immediately raise RunCommandError if we're operating
-      in a strict sudo context and the API is being misused.
-      Barring that, see RunCommand's documentation- it can raise the same things
-      RunCommand does.
-    """
-    sudo_cmd = ['sudo']
-
-    if user == 'root' and os.geteuid() == 0:
-        return run_command(cmd, **kwargs)
-
-    if user != 'root':
-        sudo_cmd += ['-u', user]
-
-    # Pass these values down into the sudo environment, since sudo will
-    # just strip them normally.
-    extra_env = kwargs.pop('extra_env', None)
-    extra_env = {} if extra_env is None else extra_env.copy()
-
-    sudo_cmd.extend('%s=%s' % (k, v) for k, v in extra_env.items())
-
-    # Finally, block people from passing options to sudo.
-    sudo_cmd.append('--')
-
-    if isinstance(cmd, string_types):
-        # We need to handle shell ourselves so the order is correct:
-        #  $ sudo [sudo args] -- bash -c '[shell command]'
-        # If we let RunCommand take care of it, we'd end up with:
-        #  $ bash -c 'sudo [sudo args] -- [shell command]'
-        shell = kwargs.pop('shell', False)
-        if not shell:
-            raise Exception('Cannot run a string command without a shell')
-        sudo_cmd.extend(['/bin/bash', '-c', cmd])
-    else:
-        sudo_cmd.extend(cmd)
-
-    return run_command(sudo_cmd, **kwargs)
-
-
 def _kill_child_process(proc, int_timeout, kill_timeout, cmd, original_handler,
                         signum, frame):
     """Used as a signal handler by RunCommand.
@@ -183,30 +167,29 @@
     # where the Popen instance was created, but no process was generated.
     if proc.returncode is None and proc.pid is not None:
         try:
-            while proc.poll() is None and int_timeout >= 0:
+            while proc.poll_lock_breaker() is None and int_timeout >= 0:
                 time.sleep(0.1)
                 int_timeout -= 0.1
 
             proc.terminate()
-            while proc.poll() is None and kill_timeout >= 0:
+            while proc.poll_lock_breaker() is None and kill_timeout >= 0:
                 time.sleep(0.1)
                 kill_timeout -= 0.1
 
-            if proc.poll() is None:
+            if proc.poll_lock_breaker() is None:
                 # Still doesn't want to die.  Too bad, so sad, time to die.
                 proc.kill()
         except EnvironmentError as e:
             print('Ignoring unhandled exception in _kill_child_process: %s' % e,
                   file=sys.stderr)
 
-        # Ensure our child process has been reaped.
-        proc.wait()
+        # Ensure our child process has been reaped, but don't wait forever.
+        proc.wait_lock_breaker(timeout=60)
 
     if not rh.signals.relay_signal(original_handler, signum, frame):
         # Mock up our own, matching exit code for signaling.
-        cmd_result = CommandResult(cmd=cmd, returncode=signum << 8)
-        raise TerminateRunCommandError('Received signal %i' % signum,
-                                       cmd_result)
+        raise TerminateCalledProcessError(
+            signum << 8, cmd, msg='Received signal %i' % signum)
 
 
 class _Popen(subprocess.Popen):
@@ -235,21 +218,7 @@
         try:
             os.kill(self.pid, signum)
         except EnvironmentError as e:
-            if e.errno == errno.EPERM:
-                # Kill returns either 0 (signal delivered), or 1 (signal wasn't
-                # delivered).  This isn't particularly informative, but we still
-                # need that info to decide what to do, thus error_code_ok=True.
-                ret = sudo_run_command(['kill', '-%i' % signum, str(self.pid)],
-                                       redirect_stdout=True,
-                                       redirect_stderr=True, error_code_ok=True)
-                if ret.returncode == 1:
-                    # The kill binary doesn't distinguish between permission
-                    # denied and the pid is missing.  Denied can only occur
-                    # under weird grsec/selinux policies.  We ignore that
-                    # potential and just assume the pid was already dead and
-                    # try to reap it.
-                    self.poll()
-            elif e.errno == errno.ESRCH:
+            if e.errno == errno.ESRCH:
                 # Since we know the process is dead, reap it now.
                 # Normally Popen would throw this error- we suppress it since
                 # frankly that's a misfeature and we're already overriding
@@ -258,22 +227,44 @@
             else:
                 raise
 
+    def _lock_breaker(self, func, *args, **kwargs):
+        """Helper to manage the waitpid lock.
 
-# pylint: disable=redefined-builtin
-def run_command(cmd, error_message=None, redirect_stdout=False,
-                redirect_stderr=False, cwd=None, input=None,
-                shell=False, env=None, extra_env=None, ignore_sigint=False,
-                combine_stdout_stderr=False, log_stdout_to_file=None,
-                error_code_ok=False, int_timeout=1, kill_timeout=1,
-                stdout_to_pipe=False, capture_output=False,
-                quiet=False, close_fds=True):
+        Workaround https://bugs.python.org/issue25960.
+        """
+        # If the lock doesn't exist, or is not locked, call the func directly.
+        lock = getattr(self, '_waitpid_lock', None)
+        if lock is not None and lock.locked():
+            try:
+                lock.release()
+                return func(*args, **kwargs)
+            finally:
+                if not lock.locked():
+                    lock.acquire()
+        else:
+            return func(*args, **kwargs)
+
+    def poll_lock_breaker(self, *args, **kwargs):
+        """Wrapper around poll() to break locks if needed."""
+        return self._lock_breaker(self.poll, *args, **kwargs)
+
+    def wait_lock_breaker(self, *args, **kwargs):
+        """Wrapper around wait() to break locks if needed."""
+        return self._lock_breaker(self.wait, *args, **kwargs)
+
+
+# We use the keyword arg |input| which trips up pylint checks.
+# pylint: disable=redefined-builtin,input-builtin
+def run(cmd, redirect_stdout=False, redirect_stderr=False, cwd=None, input=None,
+        shell=False, env=None, extra_env=None, combine_stdout_stderr=False,
+        check=True, int_timeout=1, kill_timeout=1, capture_output=False,
+        close_fds=True):
     """Runs a command.
 
     Args:
       cmd: cmd to run.  Should be input to subprocess.Popen.  If a string, shell
           must be true.  Otherwise the command must be an array of arguments,
           and shell must be false.
-      error_message: Prints out this message when an error occurs.
       redirect_stdout: Returns the stdout.
       redirect_stderr: Holds stderr output until input is communicated.
       cwd: The working directory to run this cmd.
@@ -284,56 +275,39 @@
       env: If non-None, this is the environment for the new process.
       extra_env: If set, this is added to the environment for the new process.
           This dictionary is not used to clear any entries though.
-      ignore_sigint: If True, we'll ignore signal.SIGINT before calling the
-          child.  This is the desired behavior if we know our child will handle
-          Ctrl-C.  If we don't do this, I think we and the child will both get
-          Ctrl-C at the same time, which means we'll forcefully kill the child.
       combine_stdout_stderr: Combines stdout and stderr streams into stdout.
-      log_stdout_to_file: If set, redirects stdout to file specified by this
-          path.  If |combine_stdout_stderr| is set to True, then stderr will
-          also be logged to the specified file.
-      error_code_ok: Does not raise an exception when command returns a non-zero
-          exit code.  Instead, returns the CommandResult object containing the
-          exit code.
+      check: Whether to raise an exception when command returns a non-zero exit
+          code, or return the CompletedProcess object containing the exit code.
+          Note: will still raise an exception if the cmd file does not exist.
       int_timeout: If we're interrupted, how long (in seconds) should we give
           the invoked process to clean up before we send a SIGTERM.
       kill_timeout: If we're interrupted, how long (in seconds) should we give
           the invoked process to shutdown from a SIGTERM before we SIGKILL it.
-      stdout_to_pipe: Redirect stdout to pipe.
       capture_output: Set |redirect_stdout| and |redirect_stderr| to True.
-      quiet: Set |stdout_to_pipe| and |combine_stdout_stderr| to True.
       close_fds: Whether to close all fds before running |cmd|.
 
     Returns:
-      A CommandResult object.
+      A CompletedProcess object.
 
     Raises:
-      RunCommandError: Raises exception on error with optional error_message.
+      CalledProcessError: Raises exception on error.
     """
     if capture_output:
         redirect_stdout, redirect_stderr = True, True
 
-    if quiet:
-        stdout_to_pipe, combine_stdout_stderr = True, True
-
     # Set default for variables.
-    stdout = None
-    stderr = None
+    popen_stdout = None
+    popen_stderr = None
     stdin = None
-    cmd_result = CommandResult()
+    result = CompletedProcess()
 
     # Force the timeout to float; in the process, if it's not convertible,
     # a self-explanatory exception will be thrown.
     kill_timeout = float(kill_timeout)
 
     def _get_tempfile():
-        kwargs = {}
-        if sys.version_info.major < 3:
-            kwargs['bufsize'] = 0
-        else:
-            kwargs['buffering'] = 0
         try:
-            return tempfile.TemporaryFile(**kwargs)
+            return tempfile.TemporaryFile(buffering=0)
         except EnvironmentError as e:
             if e.errno != errno.ENOENT:
                 raise
@@ -342,7 +316,7 @@
             # issue in this particular case since our usage gurantees deletion,
             # and since this is primarily triggered during hard cgroups
             # shutdown.
-            return tempfile.TemporaryFile(dir='/tmp', **kwargs)
+            return tempfile.TemporaryFile(dir='/tmp', buffering=0)
 
     # Modify defaults based on parameters.
     # Note that tempfiles must be unbuffered else attempts to read
@@ -350,34 +324,31 @@
     # view of the file.
     # The Popen API accepts either an int or a file handle for stdout/stderr.
     # pylint: disable=redefined-variable-type
-    if log_stdout_to_file:
-        stdout = open(log_stdout_to_file, 'w+')
-    elif stdout_to_pipe:
-        stdout = subprocess.PIPE
-    elif redirect_stdout:
-        stdout = _get_tempfile()
+    if redirect_stdout:
+        popen_stdout = _get_tempfile()
 
     if combine_stdout_stderr:
-        stderr = subprocess.STDOUT
+        popen_stderr = subprocess.STDOUT
     elif redirect_stderr:
-        stderr = _get_tempfile()
+        popen_stderr = _get_tempfile()
     # pylint: enable=redefined-variable-type
 
     # If subprocesses have direct access to stdout or stderr, they can bypass
     # our buffers, so we need to flush to ensure that output is not interleaved.
-    if stdout is None or stderr is None:
+    if popen_stdout is None or popen_stderr is None:
         sys.stdout.flush()
         sys.stderr.flush()
 
     # If input is a string, we'll create a pipe and send it through that.
     # Otherwise we assume it's a file object that can be read from directly.
-    if isinstance(input, string_types):
+    if isinstance(input, str):
         stdin = subprocess.PIPE
+        input = input.encode('utf-8')
     elif input is not None:
         stdin = input
         input = None
 
-    if isinstance(cmd, string_types):
+    if isinstance(cmd, str):
         if not shell:
             raise Exception('Cannot run a string command without a shell')
         cmd = ['/bin/bash', '-c', cmd]
@@ -390,76 +361,88 @@
     env = env.copy() if env is not None else os.environ.copy()
     env.update(extra_env if extra_env else {})
 
-    cmd_result.cmd = cmd
+    def ensure_text(s):
+        """Make sure |s| is a string if it's bytes."""
+        if isinstance(s, bytes):
+            s = s.decode('utf-8', 'replace')
+        return s
+
+    result.args = cmd
 
     proc = None
-    # Verify that the signals modules is actually usable, and won't segfault
-    # upon invocation of getsignal.  See signals.SignalModuleUsable for the
-    # details and upstream python bug.
-    use_signals = rh.signals.signal_module_usable()
     try:
-        proc = _Popen(cmd, cwd=cwd, stdin=stdin, stdout=stdout,
-                      stderr=stderr, shell=False, env=env,
+        proc = _Popen(cmd, cwd=cwd, stdin=stdin, stdout=popen_stdout,
+                      stderr=popen_stderr, shell=False, env=env,
                       close_fds=close_fds)
 
-        if use_signals:
-            old_sigint = signal.getsignal(signal.SIGINT)
-            if ignore_sigint:
-                handler = signal.SIG_IGN
-            else:
-                handler = functools.partial(
-                    _kill_child_process, proc, int_timeout, kill_timeout, cmd,
-                    old_sigint)
-            signal.signal(signal.SIGINT, handler)
+        old_sigint = signal.getsignal(signal.SIGINT)
+        handler = functools.partial(_kill_child_process, proc, int_timeout,
+                                    kill_timeout, cmd, old_sigint)
+        signal.signal(signal.SIGINT, handler)
 
-            old_sigterm = signal.getsignal(signal.SIGTERM)
-            handler = functools.partial(_kill_child_process, proc, int_timeout,
-                                        kill_timeout, cmd, old_sigterm)
-            signal.signal(signal.SIGTERM, handler)
+        old_sigterm = signal.getsignal(signal.SIGTERM)
+        handler = functools.partial(_kill_child_process, proc, int_timeout,
+                                    kill_timeout, cmd, old_sigterm)
+        signal.signal(signal.SIGTERM, handler)
 
         try:
-            (cmd_result.output, cmd_result.error) = proc.communicate(input)
+            (result.stdout, result.stderr) = proc.communicate(input)
         finally:
-            if use_signals:
-                signal.signal(signal.SIGINT, old_sigint)
-                signal.signal(signal.SIGTERM, old_sigterm)
+            signal.signal(signal.SIGINT, old_sigint)
+            signal.signal(signal.SIGTERM, old_sigterm)
 
-            if stdout and not log_stdout_to_file and not stdout_to_pipe:
+            if popen_stdout:
                 # The linter is confused by how stdout is a file & an int.
                 # pylint: disable=maybe-no-member,no-member
-                stdout.seek(0)
-                cmd_result.output = stdout.read()
-                stdout.close()
+                popen_stdout.seek(0)
+                result.stdout = popen_stdout.read()
+                popen_stdout.close()
 
-            if stderr and stderr != subprocess.STDOUT:
+            if popen_stderr and popen_stderr != subprocess.STDOUT:
                 # The linter is confused by how stderr is a file & an int.
                 # pylint: disable=maybe-no-member,no-member
-                stderr.seek(0)
-                cmd_result.error = stderr.read()
-                stderr.close()
+                popen_stderr.seek(0)
+                result.stderr = popen_stderr.read()
+                popen_stderr.close()
 
-        cmd_result.returncode = proc.returncode
+        result.returncode = proc.returncode
 
-        if not error_code_ok and proc.returncode:
+        if check and proc.returncode:
             msg = 'cwd=%s' % cwd
             if extra_env:
                 msg += ', extra env=%s' % extra_env
-            if error_message:
-                msg += '\n%s' % error_message
-            raise RunCommandError(msg, cmd_result)
+            raise CalledProcessError(
+                result.returncode, result.cmd, msg=msg,
+                stdout=ensure_text(result.stdout),
+                stderr=ensure_text(result.stderr))
     except OSError as e:
+        # Avoid leaking tempfiles.
+        if popen_stdout is not None and not isinstance(popen_stdout, int):
+            popen_stdout.close()
+        if popen_stderr is not None and not isinstance(popen_stderr, int):
+            popen_stderr.close()
+
         estr = str(e)
         if e.errno == errno.EACCES:
             estr += '; does the program need `chmod a+x`?'
-        if error_code_ok:
-            cmd_result = CommandResult(cmd=cmd, error=estr, returncode=255)
+        if not check:
+            result = CompletedProcess(args=cmd, stderr=estr, returncode=255)
         else:
-            raise RunCommandError(estr, CommandResult(cmd=cmd), exception=e)
+            raise CalledProcessError(
+                result.returncode, result.cmd, msg=estr, exception=e,
+                stdout=ensure_text(result.stdout),
+                stderr=ensure_text(result.stderr)) from e
     finally:
         if proc is not None:
             # Ensure the process is dead.
+            # Some pylint3 versions are confused here.
+            # pylint: disable=too-many-function-args
             _kill_child_process(proc, int_timeout, kill_timeout, cmd, None,
                                 None, None)
 
-    return cmd_result
-# pylint: enable=redefined-builtin
+    # Make sure output is returned as a string rather than bytes.
+    result.stdout = ensure_text(result.stdout)
+    result.stderr = ensure_text(result.stderr)
+
+    return result
+# pylint: enable=redefined-builtin,input-builtin
diff --git a/rh/utils_unittest.py b/rh/utils_unittest.py
new file mode 100755
index 0000000..ea2ddaa
--- /dev/null
+++ b/rh/utils_unittest.py
@@ -0,0 +1,220 @@
+#!/usr/bin/env python3
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Unittests for the utils module."""
+
+import datetime
+import os
+import sys
+import unittest
+
+_path = os.path.realpath(__file__ + '/../..')
+if sys.path[0] != _path:
+    sys.path.insert(0, _path)
+del _path
+
+# We have to import our local modules after the sys.path tweak.  We can't use
+# relative imports because this is an executable program, not a module.
+# pylint: disable=wrong-import-position
+import rh
+import rh.utils
+
+
+class TimeDeltaStrTests(unittest.TestCase):
+    """Verify behavior of timedelta_str object."""
+
+    def test_same(self):
+        """Check timedelta of 0 seconds."""
+        delta = datetime.timedelta(0)
+        self.assertEqual('0.000s', rh.utils.timedelta_str(delta))
+
+    def test_milliseconds(self):
+        """Check timedelta of milliseconds."""
+        delta = datetime.timedelta(seconds=0.123456)
+        self.assertEqual('0.123s', rh.utils.timedelta_str(delta))
+
+    def test_seconds(self):
+        """Check timedelta of seconds."""
+        delta = datetime.timedelta(seconds=12.3)
+        self.assertEqual('12.300s', rh.utils.timedelta_str(delta))
+
+    def test_minutes(self):
+        """Check timedelta of minutes."""
+        delta = datetime.timedelta(seconds=72.3)
+        self.assertEqual('1m12.300s', rh.utils.timedelta_str(delta))
+
+    def test_hours(self):
+        """Check timedelta of hours."""
+        delta = datetime.timedelta(seconds=4000.3)
+        self.assertEqual('1h6m40.300s', rh.utils.timedelta_str(delta))
+
+
+class CompletedProcessTests(unittest.TestCase):
+    """Verify behavior of CompletedProcess object."""
+
+    def test_empty_cmdstr(self):
+        """Check cmdstr with an empty command."""
+        result = rh.utils.CompletedProcess(args=[])
+        self.assertEqual('', result.cmdstr)
+
+    def test_basic_cmdstr(self):
+        """Check cmdstr with a basic command command."""
+        result = rh.utils.CompletedProcess(args=['ls', 'a b'])
+        self.assertEqual("ls 'a b'", result.cmdstr)
+
+    def test_str(self):
+        """Check str() handling."""
+        # We don't enforce much, just that it doesn't crash.
+        result = rh.utils.CompletedProcess()
+        self.assertNotEqual('', str(result))
+        result = rh.utils.CompletedProcess(args=[])
+        self.assertNotEqual('', str(result))
+
+    def test_repr(self):
+        """Check repr() handling."""
+        # We don't enforce much, just that it doesn't crash.
+        result = rh.utils.CompletedProcess()
+        self.assertNotEqual('', repr(result))
+        result = rh.utils.CompletedProcess(args=[])
+        self.assertNotEqual('', repr(result))
+
+
+class CalledProcessErrorTests(unittest.TestCase):
+    """Verify behavior of CalledProcessError object."""
+
+    def test_basic(self):
+        """Basic test we can create a normal instance."""
+        rh.utils.CalledProcessError(0, ['mycmd'])
+        rh.utils.CalledProcessError(1, ['mycmd'], exception=Exception('bad'))
+
+    def test_stringify(self):
+        """Check stringify() handling."""
+        # We don't assert much so we leave flexibility in changing format.
+        err = rh.utils.CalledProcessError(0, ['mycmd'])
+        self.assertIn('mycmd', err.stringify())
+        err = rh.utils.CalledProcessError(
+            0, ['mycmd'], exception=Exception('bad'))
+        self.assertIn('mycmd', err.stringify())
+
+    def test_str(self):
+        """Check str() handling."""
+        # We don't assert much so we leave flexibility in changing format.
+        err = rh.utils.CalledProcessError(0, ['mycmd'])
+        self.assertIn('mycmd', str(err))
+        err = rh.utils.CalledProcessError(
+            0, ['mycmd'], exception=Exception('bad'))
+        self.assertIn('mycmd', str(err))
+
+    def test_repr(self):
+        """Check repr() handling."""
+        # We don't assert much so we leave flexibility in changing format.
+        err = rh.utils.CalledProcessError(0, ['mycmd'])
+        self.assertNotEqual('', repr(err))
+        err = rh.utils.CalledProcessError(
+            0, ['mycmd'], exception=Exception('bad'))
+        self.assertNotEqual('', repr(err))
+
+
+class RunCommandTests(unittest.TestCase):
+    """Verify behavior of run helper."""
+
+    def test_basic(self):
+        """Simple basic test."""
+        ret = rh.utils.run(['true'])
+        self.assertEqual('true', ret.cmdstr)
+        self.assertIsNone(ret.stdout)
+        self.assertIsNone(ret.stderr)
+
+    def test_stdout_capture(self):
+        """Verify output capturing works."""
+        ret = rh.utils.run(['echo', 'hi'], redirect_stdout=True)
+        self.assertEqual('hi\n', ret.stdout)
+        self.assertIsNone(ret.stderr)
+
+    def test_stderr_capture(self):
+        """Verify stderr capturing works."""
+        ret = rh.utils.run(['sh', '-c', 'echo hi >&2'], redirect_stderr=True)
+        self.assertIsNone(ret.stdout)
+        self.assertEqual('hi\n', ret.stderr)
+
+    def test_stdout_utf8(self):
+        """Verify reading UTF-8 data works."""
+        ret = rh.utils.run(['printf', r'\xc3\x9f'], redirect_stdout=True)
+        self.assertEqual(u'ß', ret.stdout)
+        self.assertIsNone(ret.stderr)
+
+    def test_stdin_utf8(self):
+        """Verify writing UTF-8 data works."""
+        ret = rh.utils.run(['cat'], redirect_stdout=True, input=u'ß')
+        self.assertEqual(u'ß', ret.stdout)
+        self.assertIsNone(ret.stderr)
+
+    def test_check_false(self):
+        """Verify handling of check=False."""
+        ret = rh.utils.run(['false'], check=False)
+        self.assertNotEqual(0, ret.returncode)
+        self.assertIn('false', str(ret))
+
+        ret = rh.utils.run(['true'], check=False)
+        self.assertEqual(0, ret.returncode)
+        self.assertIn('true', str(ret))
+
+    def test_check_true(self):
+        """Verify handling of check=True."""
+        with self.assertRaises(rh.utils.CalledProcessError) as e:
+            rh.utils.run(['false'], check=True)
+        err = e.exception
+        self.assertNotEqual(0, err.returncode)
+        self.assertIn('false', str(err))
+
+        ret = rh.utils.run(['true'], check=True)
+        self.assertEqual(0, ret.returncode)
+        self.assertIn('true', str(ret))
+
+    def test_check_false_output(self):
+        """Verify handling of output capturing w/check=False."""
+        with self.assertRaises(rh.utils.CalledProcessError) as e:
+            rh.utils.run(['sh', '-c', 'echo out; echo err >&2; false'],
+                         check=True, capture_output=True)
+        err = e.exception
+        self.assertNotEqual(0, err.returncode)
+        self.assertIn('false', str(err))
+
+    def test_check_true_missing_prog_output(self):
+        """Verify handling of output capturing w/missing progs."""
+        with self.assertRaises(rh.utils.CalledProcessError) as e:
+            rh.utils.run(['./!~a/b/c/d/'], check=True, capture_output=True)
+        err = e.exception
+        self.assertNotEqual(0, err.returncode)
+        self.assertIn('a/b/c/d', str(err))
+
+    def test_check_false_missing_prog_output(self):
+        """Verify handling of output capturing w/missing progs."""
+        ret = rh.utils.run(['./!~a/b/c/d/'], check=False, capture_output=True)
+        self.assertNotEqual(0, ret.returncode)
+        self.assertIn('a/b/c/d', str(ret))
+
+    def test_check_false_missing_prog_combined_output(self):
+        """Verify handling of combined output capturing w/missing progs."""
+        with self.assertRaises(rh.utils.CalledProcessError) as e:
+            rh.utils.run(['./!~a/b/c/d/'], check=True,
+                         combine_stdout_stderr=True)
+        err = e.exception
+        self.assertNotEqual(0, err.returncode)
+        self.assertIn('a/b/c/d', str(err))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tools/android_test_mapping_format.py b/tools/android_test_mapping_format.py
index 3402632..ae784cf 100755
--- a/tools/android_test_mapping_format.py
+++ b/tools/android_test_mapping_format.py
@@ -1,5 +1,4 @@
-#!/usr/bin/python
-# -*- coding:utf-8 -*-
+#!/usr/bin/env python3
 # Copyright 2018 The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""Validate TEST_MAPPING files in Android source code.
+"""Validates TEST_MAPPING files in Android source code.
 
 The goal of this script is to validate the format of TEST_MAPPING files:
 1. It must be a valid json file.
@@ -23,30 +22,38 @@
    import TEST_MAPPING files.
 """
 
-from __future__ import print_function
-
 import argparse
 import json
 import os
+import re
 import sys
+from typing import Any, Dict
 
-IMPORTS = 'imports'
-NAME = 'name'
-OPTIONS = 'options'
-PATH = 'path'
-HOST = 'host'
-PREFERRED_TARGETS = 'preferred_targets'
-FILE_PATTERNS = 'file_patterns'
-TEST_MAPPING_URL = (
+_path = os.path.realpath(__file__ + '/../..')
+if sys.path[0] != _path:
+    sys.path.insert(0, _path)
+del _path
+
+# We have to import our local modules after the sys.path tweak.  We can't use
+# relative imports because this is an executable program, not a module.
+# pylint: disable=wrong-import-position
+import rh.git
+
+_IMPORTS = 'imports'
+_NAME = 'name'
+_OPTIONS = 'options'
+_PATH = 'path'
+_HOST = 'host'
+_PREFERRED_TARGETS = 'preferred_targets'
+_FILE_PATTERNS = 'file_patterns'
+_INVALID_IMPORT_CONFIG = 'Invalid import config in TEST_MAPPING file'
+_INVALID_TEST_CONFIG = 'Invalid test config in TEST_MAPPING file'
+_TEST_MAPPING_URL = (
     'https://source.android.com/compatibility/tests/development/'
     'test-mapping')
 
-
-if sys.version_info.major < 3:
-    # pylint: disable=basestring-builtin,undefined-variable
-    string_types = basestring
-else:
-    string_types = str
+# Pattern used to identify line-level '//'-format comment in TEST_MAPPING file.
+_COMMENTS_RE = re.compile(r'^\s*//')
 
 
 class Error(Exception):
@@ -57,8 +64,21 @@
     """Exception to raise when detecting an invalid TEST_MAPPING file."""
 
 
-def _validate_import(entry, test_mapping_file):
-    """Validate an import setting.
+def _filter_comments(json_data: str) -> str:
+    """Removes '//'-format comments in TEST_MAPPING file to valid format.
+
+    Args:
+        json_data: TEST_MAPPING file content (as a string).
+
+    Returns:
+        Valid json string without comments.
+    """
+    return ''.join(
+        '\n' if _COMMENTS_RE.match(x) else x for x in json_data.splitlines())
+
+
+def _validate_import(entry: Dict[str, Any], test_mapping_file: str):
+    """Validates an import setting.
 
     Args:
         entry: A dictionary of an import setting.
@@ -69,101 +89,107 @@
     """
     if len(entry) != 1:
         raise InvalidTestMappingError(
-            'Invalid import config in test mapping file %s. each import can '
-            'only have one `path` setting. Failed entry: %s' %
-            (test_mapping_file, entry))
-    if list(entry.keys())[0] != PATH:
+            f'{_INVALID_IMPORT_CONFIG} {test_mapping_file}. Each import can '
+            f'only have one `path` setting. Failed entry: {entry}')
+    if _PATH not in entry:
         raise InvalidTestMappingError(
-            'Invalid import config in test mapping file %s. import can only '
-            'have one `path` setting. Failed entry: %s' %
-            (test_mapping_file, entry))
+            f'{_INVALID_IMPORT_CONFIG} {test_mapping_file}. Import can '
+            f'only have one `path` setting. Failed entry: {entry}')
 
 
-def _validate_test(test, test_mapping_file):
-    """Validate a test declaration.
+def _validate_test(test: Dict[str, Any], test_mapping_file: str):
+    """Validates a test declaration.
 
     Args:
-        entry: A dictionary of a test declaration.
+        test: A dictionary of a test declaration.
         test_mapping_file: Path to the TEST_MAPPING file to be validated.
 
     Raises:
         InvalidTestMappingError: if the test declaration is invalid.
     """
-    if NAME not in test:
+    if _NAME not in test:
         raise InvalidTestMappingError(
-            'Invalid test config in test mapping file %s. test config must '
-            'a `name` setting. Failed test config: %s' %
-            (test_mapping_file, test))
-    if not isinstance(test.get(HOST, False), bool):
+
+            f'{_INVALID_TEST_CONFIG} {test_mapping_file}. Test config must '
+            f'have a `name` setting. Failed test config: {test}')
+
+    if not isinstance(test.get(_HOST, False), bool):
         raise InvalidTestMappingError(
-            'Invalid test config in test mapping file %s. `host` setting in '
-            'test config can only have boolean value of `true` or `false`. '
-            'Failed test config: %s' % (test_mapping_file, test))
-    preferred_targets = test.get(PREFERRED_TARGETS, [])
-    if (not isinstance(preferred_targets, list) or
-            any(not isinstance(t, string_types) for t in preferred_targets)):
-        raise InvalidTestMappingError(
-            'Invalid test config in test mapping file %s. `preferred_targets` '
-            'setting in test config can only be a list of strings. Failed test '
-            'config: %s' % (test_mapping_file, test))
-    file_patterns = test.get(FILE_PATTERNS, [])
-    if (not isinstance(file_patterns, list) or
-            any(not isinstance(p, string_types) for p in file_patterns)):
-        raise InvalidTestMappingError(
-            'Invalid test config in test mapping file %s. `file_patterns` '
-            'setting in test config can only be a list of strings. Failed test '
-            'config: %s' % (test_mapping_file, test))
-    for option in test.get(OPTIONS, []):
+            f'{_INVALID_TEST_CONFIG} {test_mapping_file}. `host` setting in '
+            f'test config can only have boolean value of `true` or `false`. '
+            f'Failed test config: {test}')
+
+    for key in (_PREFERRED_TARGETS, _FILE_PATTERNS):
+        value = test.get(key, [])
+        if (not isinstance(value, list) or
+            any(not isinstance(t, str) for t in value)):
+            raise InvalidTestMappingError(
+                f'{_INVALID_TEST_CONFIG} {test_mapping_file}. `{key}` setting '
+                f'in test config can only be a list of strings. '
+                f'Failed test config: {test}')
+
+    for option in test.get(_OPTIONS, []):
+        if not isinstance(option, dict):
+            raise InvalidTestMappingError(
+                f'{_INVALID_TEST_CONFIG} {test_mapping_file}. Option setting '
+                f'in test config can only be a dictionary of key-val setting. '
+                f'Failed entry: {option}')
         if len(option) != 1:
             raise InvalidTestMappingError(
-                'Invalid option setting in test mapping file %s. each option '
-                'setting can only have one key-val setting. Failed entry: %s' %
-                (test_mapping_file, option))
+                f'{_INVALID_TEST_CONFIG} {test_mapping_file}. Each option '
+                f'setting can only have one key-val setting. '
+                f'Failed entry: {option}')
 
 
-def _load_file(test_mapping_file):
-    """Load a TEST_MAPPING file as a json file."""
+def process_file(test_mapping_file: str):
+    """Validates a TEST_MAPPING file content."""
     try:
-        with open(test_mapping_file) as file_obj:
-            return json.load(file_obj)
-    except ValueError as e:
+        test_mapping_data = json.loads(_filter_comments(test_mapping_file))
+    except ValueError as exception:
         # The file is not a valid JSON file.
         print(
-            'Failed to parse JSON file %s, error: %s' % (test_mapping_file, e),
+            f'Invalid JSON data in TEST_MAPPING file '
+            f'Failed to parse JSON data: {test_mapping_file}, '
+            f'error: {exception}',
             file=sys.stderr)
         raise
 
-
-def process_file(test_mapping_file):
-    """Validate a TEST_MAPPING file."""
-    test_mapping = _load_file(test_mapping_file)
-    # Validate imports.
-    for import_entry in test_mapping.get(IMPORTS, []):
-        _validate_import(import_entry, test_mapping_file)
-    # Validate tests.
-    all_tests = [test for group, tests in test_mapping.items()
-                 if group != IMPORTS for test in tests]
-    for test in all_tests:
-        _validate_test(test, test_mapping_file)
+    for group, value in test_mapping_data.items():
+        if group == _IMPORTS:
+            # Validate imports.
+            for test in value:
+                _validate_import(test, test_mapping_file)
+        else:
+            # Validate tests.
+            for test in value:
+                _validate_test(test, test_mapping_file)
 
 
 def get_parser():
-    """Return a command line parser."""
+    """Returns a command line parser."""
     parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument('--commit', type=str,
+                        help='Specify the commit to validate.')
     parser.add_argument('project_dir')
     parser.add_argument('files', nargs='+')
     return parser
 
 
 def main(argv):
+    """Main function."""
     parser = get_parser()
     opts = parser.parse_args(argv)
     try:
         for filename in opts.files:
-            process_file(os.path.join(opts.project_dir, filename))
+            if opts.commit:
+                json_data = rh.git.get_file_content(opts.commit, filename)
+            else:
+                with open(os.path.join(opts.project_dir, filename)) as file:
+                    json_data = file.read()
+            process_file(json_data)
     except:
         print('Visit %s for details about the format of TEST_MAPPING '
-              'file.' % TEST_MAPPING_URL, file=sys.stderr)
+              'file.' % _TEST_MAPPING_URL, file=sys.stderr)
         raise
 
 
diff --git a/tools/android_test_mapping_format_unittest.py b/tools/android_test_mapping_format_unittest.py
index 6665953..14bae32 100755
--- a/tools/android_test_mapping_format_unittest.py
+++ b/tools/android_test_mapping_format_unittest.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding:utf-8 -*-
 # Copyright 2018 The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+"""Unittests for android_test_mapping_format."""
+
 import os
 import shutil
 import tempfile
@@ -22,7 +23,7 @@
 import android_test_mapping_format
 
 
-VALID_TEST_MAPPING = r"""
+_VALID_TEST_MAPPING = r"""
 {
   "presubmit": [
     {
@@ -53,11 +54,11 @@
 }
 """
 
-BAD_JSON = """
+_BAD_JSON = """
 {wrong format}
 """
 
-BAD_TEST_WRONG_KEY = """
+_BAD_TEST_WRONG_KEY = """
 {
   "presubmit": [
     {
@@ -67,7 +68,7 @@
 }
 """
 
-BAD_TEST_WRONG_HOST_VALUE = """
+_BAD_TEST_WRONG_HOST_VALUE = """
 {
   "presubmit": [
     {
@@ -79,7 +80,7 @@
 """
 
 
-BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_NONE_LIST = """
+_BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_NONE_LIST = """
 {
   "presubmit": [
     {
@@ -90,7 +91,7 @@
 }
 """
 
-BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_WRONG_TYPE = """
+_BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_WRONG_TYPE = """
 {
   "presubmit": [
     {
@@ -101,7 +102,7 @@
 }
 """
 
-BAD_TEST_WRONG_OPTION = """
+_BAD_TEST_WRONG_OPTION = """
 {
   "presubmit": [
     {
@@ -117,7 +118,7 @@
 }
 """
 
-BAD_IMPORT_WRONG_KEY = """
+_BAD_IMPORT_WRONG_KEY = """
 {
   "imports": [
     {
@@ -127,7 +128,7 @@
 }
 """
 
-BAD_IMPORT_WRONG_IMPORT_VALUE = """
+_BAD_IMPORT_WRONG_IMPORT_VALUE = """
 {
   "imports": [
     {
@@ -138,7 +139,7 @@
 }
 """
 
-BAD_FILE_PATTERNS = """
+_BAD_FILE_PATTERNS = """
 {
   "presubmit": [
     {
@@ -149,6 +150,40 @@
 }
 """
 
+_TEST_MAPPING_WITH_SUPPORTED_COMMENTS = r"""
+// supported comment
+{
+  // supported comment!@#$%^&*()_
+  "presubmit": [
+    {
+      "name": "CtsWindowManagerDeviceTestCases\"foo//baz",
+      "options": [
+        // supported comment!@#$%^&*()_
+        {
+          "include-annotation": "android.platform.test.annotations.Presubmit"
+        }
+      ]
+    }
+  ],
+  "imports": [
+    {
+      "path": "path1//path2//path3"
+    }
+  ]
+}
+"""
+
+_TEST_MAPPING_WITH_NON_SUPPORTED_COMMENTS = """
+{ #non-supported comments
+  // supported comments
+  "presubmit": [#non-supported comments
+    {  // non-supported comments
+      "name": "CtsWindowManagerDeviceTestCases",
+    }
+  ]
+}
+"""
+
 
 class AndroidTestMappingFormatTests(unittest.TestCase):
     """Unittest for android_test_mapping_format module."""
@@ -163,86 +198,112 @@
     def test_valid_test_mapping(self):
         """Verify that the check doesn't raise any error for valid test mapping.
         """
-        with open(self.test_mapping_file, 'w') as f:
-            f.write(VALID_TEST_MAPPING)
-        android_test_mapping_format.process_file(self.test_mapping_file)
+        with open(self.test_mapping_file, 'w') as file:
+            file.write(_VALID_TEST_MAPPING)
+        with open(self.test_mapping_file, 'r') as file:
+            android_test_mapping_format.process_file(file.read())
 
     def test_invalid_test_mapping_bad_json(self):
         """Verify that TEST_MAPPING file with bad json can be detected."""
-        with open(self.test_mapping_file, 'w') as f:
-            f.write(BAD_JSON)
-        self.assertRaises(
-            ValueError, android_test_mapping_format.process_file,
-            self.test_mapping_file)
+        with open(self.test_mapping_file, 'w') as file:
+            file.write(_BAD_JSON)
+        with open(self.test_mapping_file, 'r') as file:
+            self.assertRaises(
+                ValueError, android_test_mapping_format.process_file,
+                file.read())
 
     def test_invalid_test_mapping_wrong_test_key(self):
         """Verify that test config using wrong key can be detected."""
-        with open(self.test_mapping_file, 'w') as f:
-            f.write(BAD_TEST_WRONG_KEY)
-        self.assertRaises(
-            android_test_mapping_format.InvalidTestMappingError,
-            android_test_mapping_format.process_file,
-            self.test_mapping_file)
+        with open(self.test_mapping_file, 'w') as file:
+            file.write(_BAD_TEST_WRONG_KEY)
+        with open(self.test_mapping_file, 'r') as file:
+            self.assertRaises(
+                android_test_mapping_format.InvalidTestMappingError,
+                android_test_mapping_format.process_file,
+                file.read())
 
     def test_invalid_test_mapping_wrong_test_value(self):
         """Verify that test config using wrong host value can be detected."""
-        with open(self.test_mapping_file, 'w') as f:
-            f.write(BAD_TEST_WRONG_HOST_VALUE)
-        self.assertRaises(
-            android_test_mapping_format.InvalidTestMappingError,
-            android_test_mapping_format.process_file,
-            self.test_mapping_file)
+        with open(self.test_mapping_file, 'w') as file:
+            file.write(_BAD_TEST_WRONG_HOST_VALUE)
+        with open(self.test_mapping_file, 'r') as file:
+            self.assertRaises(
+                android_test_mapping_format.InvalidTestMappingError,
+                android_test_mapping_format.process_file,
+                file.read())
 
     def test_invalid_test_mapping_wrong_preferred_targets_value(self):
         """Verify invalid preferred_targets are rejected."""
-        with open(self.test_mapping_file, 'w') as f:
-            f.write(BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_NONE_LIST)
-        self.assertRaises(
-            android_test_mapping_format.InvalidTestMappingError,
-            android_test_mapping_format.process_file,
-            self.test_mapping_file)
-        with open(self.test_mapping_file, 'w') as f:
-            f.write(BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_WRONG_TYPE)
-        self.assertRaises(
-            android_test_mapping_format.InvalidTestMappingError,
-            android_test_mapping_format.process_file,
-            self.test_mapping_file)
+        with open(self.test_mapping_file, 'w') as file:
+            file.write(_BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_NONE_LIST)
+        with open(self.test_mapping_file, 'r') as file:
+            self.assertRaises(
+                android_test_mapping_format.InvalidTestMappingError,
+                android_test_mapping_format.process_file,
+                file.read())
+        with open(self.test_mapping_file, 'w') as file:
+            file.write(_BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_WRONG_TYPE)
+        with open(self.test_mapping_file, 'r') as file:
+            self.assertRaises(
+                android_test_mapping_format.InvalidTestMappingError,
+                android_test_mapping_format.process_file,
+                file.read())
 
     def test_invalid_test_mapping_wrong_test_option(self):
         """Verify that test config using wrong option can be detected."""
-        with open(self.test_mapping_file, 'w') as f:
-            f.write(BAD_TEST_WRONG_OPTION)
-        self.assertRaises(
-            android_test_mapping_format.InvalidTestMappingError,
-            android_test_mapping_format.process_file,
-            self.test_mapping_file)
+        with open(self.test_mapping_file, 'w') as file:
+            file.write(_BAD_TEST_WRONG_OPTION)
+        with open(self.test_mapping_file, 'r') as file:
+            self.assertRaises(
+                android_test_mapping_format.InvalidTestMappingError,
+                android_test_mapping_format.process_file,
+                file.read())
 
     def test_invalid_test_mapping_wrong_import_key(self):
         """Verify that import setting using wrong key can be detected."""
-        with open(self.test_mapping_file, 'w') as f:
-            f.write(BAD_IMPORT_WRONG_KEY)
-        self.assertRaises(
-            android_test_mapping_format.InvalidTestMappingError,
-            android_test_mapping_format.process_file,
-            self.test_mapping_file)
+        with open(self.test_mapping_file, 'w') as file:
+            file.write(_BAD_IMPORT_WRONG_KEY)
+        with open(self.test_mapping_file, 'r') as file:
+            self.assertRaises(
+                android_test_mapping_format.InvalidTestMappingError,
+                android_test_mapping_format.process_file,
+                file.read())
 
     def test_invalid_test_mapping_wrong_import_value(self):
         """Verify that import setting using wrong value can be detected."""
-        with open(self.test_mapping_file, 'w') as f:
-            f.write(BAD_IMPORT_WRONG_IMPORT_VALUE)
-        self.assertRaises(
-            android_test_mapping_format.InvalidTestMappingError,
-            android_test_mapping_format.process_file,
-            self.test_mapping_file)
+        with open(self.test_mapping_file, 'w') as file:
+            file.write(_BAD_IMPORT_WRONG_IMPORT_VALUE)
+        with open(self.test_mapping_file, 'r') as file:
+            self.assertRaises(
+                android_test_mapping_format.InvalidTestMappingError,
+                android_test_mapping_format.process_file,
+                file.read())
 
     def test_invalid_test_mapping_file_patterns_value(self):
         """Verify that file_patterns using wrong value can be detected."""
-        with open(self.test_mapping_file, 'w') as f:
-            f.write(BAD_FILE_PATTERNS)
-        self.assertRaises(
-            android_test_mapping_format.InvalidTestMappingError,
-            android_test_mapping_format.process_file,
-            self.test_mapping_file)
+        with open(self.test_mapping_file, 'w') as file:
+            file.write(_BAD_FILE_PATTERNS)
+        with open(self.test_mapping_file, 'r') as file:
+            self.assertRaises(
+                android_test_mapping_format.InvalidTestMappingError,
+                android_test_mapping_format.process_file,
+                file.read())
+
+    def test_valid_test_mapping_file_with_supported_comments(self):
+        """Verify that '//'-format comment can be filtered."""
+        with open(self.test_mapping_file, 'w') as file:
+            file.write(_TEST_MAPPING_WITH_SUPPORTED_COMMENTS)
+        with open(self.test_mapping_file, 'r') as file:
+            android_test_mapping_format.process_file(file.read())
+
+    def test_valid_test_mapping_file_with_non_supported_comments(self):
+        """Verify that non-supported comment can be detected."""
+        with open(self.test_mapping_file, 'w') as file:
+            file.write(_TEST_MAPPING_WITH_NON_SUPPORTED_COMMENTS)
+        with open(self.test_mapping_file, 'r') as file:
+            self.assertRaises(
+                ValueError, android_test_mapping_format.process_file,
+                file.read())
 
 
 if __name__ == '__main__':
diff --git a/tools/clang-format.py b/tools/clang-format.py
index 55b3f3b..2533b15 100755
--- a/tools/clang-format.py
+++ b/tools/clang-format.py
@@ -1,5 +1,4 @@
-#!/usr/bin/python
-# -*- coding:utf-8 -*-
+#!/usr/bin/env python3
 # Copyright 2016 The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,8 +15,6 @@
 
 """Wrapper to run git-clang-format and parse its output."""
 
-from __future__ import print_function
-
 import argparse
 import os
 import sys
@@ -83,13 +80,13 @@
 
     # Fail gracefully if clang-format itself aborts/fails.
     try:
-        result = rh.utils.run_command(cmd, capture_output=True)
-    except rh.utils.RunCommandError as e:
+        result = rh.utils.run(cmd, capture_output=True)
+    except rh.utils.CalledProcessError as e:
         print('clang-format failed:\n%s' % (e,), file=sys.stderr)
         print('\nPlease report this to the clang team.', file=sys.stderr)
         return 1
 
-    stdout = result.output
+    stdout = result.stdout
     if stdout.rstrip('\n') == 'no modified files to format':
         # This is always printed when only files that clang-format does not
         # understand were modified.
@@ -102,12 +99,19 @@
 
     if diff_filenames:
         if opts.fix:
-            rh.utils.run_command(['git', 'apply'], input=stdout)
+            result = rh.utils.run(['git', 'apply'], input=stdout, check=False)
+            if result.returncode:
+                print('Error: Unable to automatically fix things.\n'
+                      '  Make sure your checkout is clean first.\n'
+                      '  If you have multiple commits, you might have to '
+                      'manually rebase your tree first.',
+                      file=sys.stderr)
+                return result.returncode
         else:
             print('The following files have formatting errors:')
             for filename in diff_filenames:
                 print('\t%s' % filename)
-            print('You can run `%s --fix %s` to fix this' %
+            print('You can try to fix this by running:\n%s --fix %s' %
                   (sys.argv[0], rh.shell.cmd_to_str(argv)))
             return 1
 
diff --git a/tools/cpplint.py b/tools/cpplint.py
index c08cf6f..c5db879 100755
--- a/tools/cpplint.py
+++ b/tools/cpplint.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # pylint: skip-file
 #
 # Copyright (c) 2009 Google Inc. All rights reserved.
@@ -45,24 +45,49 @@
 import codecs
 import copy
 import getopt
+import glob
+import itertools
 import math  # for log
 import os
 import re
 import sre_compile
 import string
 import sys
+import sysconfig
 import unicodedata
+import xml.etree.ElementTree
+
+# if empty, use defaults
+_valid_extensions = set([])
+
+__VERSION__ = '1.5.5'
+
+try:
+  xrange          # Python 2
+except NameError:
+  #  -- pylint: disable=redefined-builtin
+  xrange = range  # Python 3
 
 
 _USAGE = """
-Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
+Syntax: cpplint.py [--verbose=#] [--output=emacs|eclipse|vs7|junit|sed|gsed]
+                   [--filter=-x,+y,...]
                    [--counting=total|toplevel|detailed] [--root=subdir]
+                   [--repository=path]
                    [--linelength=digits] [--headers=x,y,...]
+                   [--recursive]
+                   [--exclude=path]
+                   [--extensions=hpp,cpp,...]
+                   [--includeorder=default|standardcfirst]
                    [--quiet]
+                   [--version]
         <file> [file] ...
 
+  Style checker for C/C++ source files.
+  This is a fork of the Google style checker with minor extensions.
+
   The style guidelines this tries to follow are those in
-    https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
+    https://google.github.io/styleguide/cppguide.html
 
   Every problem is given a confidence score from 1-5, with 5 meaning we are
   certain of the problem, and 1 meaning it could be a legitimate construct.
@@ -73,17 +98,27 @@
   suppresses errors of all categories on that line.
 
   The files passed in will be linted; at least one file must be provided.
-  Default linted extensions are .cc, .cpp, .cu, .cuh and .h.  Change the
-  extensions with the --extensions flag.
+  Default linted extensions are %s.
+  Other file types will be ignored.
+  Change the extensions with the --extensions flag.
 
   Flags:
 
-    output=vs7
+    output=emacs|eclipse|vs7|junit|sed|gsed
       By default, the output is formatted to ease emacs parsing.  Visual Studio
-      compatible output (vs7) may also be used.  Other formats are unsupported.
+      compatible output (vs7) may also be used.  Further support exists for
+      eclipse (eclipse), and JUnit (junit). XML parsers such as those used
+      in Jenkins and Bamboo may also be used.
+      The sed format outputs sed commands that should fix some of the errors.
+      Note that this requires gnu sed. If that is installed as gsed on your
+      system (common e.g. on macOS with homebrew) you can use the gsed output
+      format. Sed commands are written to stdout, not stderr, so you should be
+      able to pipe output straight to a shell to run the fixes.
 
     verbose=#
       Specify a number 0-5 to restrict errors to certain verbosity levels.
+      Errors with lower verbosity levels have lower confidence and are more
+      likely to be false positives.
 
     quiet
       Don't print anything if no errors are found.
@@ -93,11 +128,11 @@
       error messages whose category names pass the filters will be printed.
       (Category names are printed with the message and look like
       "[whitespace/indent]".)  Filters are evaluated left to right.
-      "-FOO" and "FOO" means "do not print categories that start with FOO".
+      "-FOO" means "do not print categories that start with FOO".
       "+FOO" means "do print categories that start with FOO".
 
       Examples: --filter=-whitespace,+whitespace/braces
-                --filter=whitespace,runtime/printf,+runtime/printf_format
+                --filter=-whitespace,-runtime/printf,+runtime/printf_format
                 --filter=-,+build/include_what_you_use
 
       To see a list of all the categories used in cpplint, pass no arg:
@@ -110,17 +145,41 @@
       also be printed. If 'detailed' is provided, then a count
       is provided for each category like 'build/class'.
 
-    root=subdir
-      The root directory used for deriving header guard CPP variable.
-      By default, the header guard CPP variable is calculated as the relative
-      path to the directory that contains .git, .hg, or .svn.  When this flag
-      is specified, the relative path is calculated from the specified
-      directory. If the specified directory does not exist, this flag is
-      ignored.
+    repository=path
+      The top level directory of the repository, used to derive the header
+      guard CPP variable. By default, this is determined by searching for a
+      path that contains .git, .hg, or .svn. When this flag is specified, the
+      given path is used instead. This option allows the header guard CPP
+      variable to remain consistent even if members of a team have different
+      repository root directories (such as when checking out a subdirectory
+      with SVN). In addition, users of non-mainstream version control systems
+      can use this flag to ensure readable header guard CPP variables.
 
       Examples:
-        Assuming that top/src/.git exists (and cwd=top/src), the header guard
-        CPP variables for top/src/chrome/browser/ui/browser.h are:
+        Assuming that Alice checks out ProjectName and Bob checks out
+        ProjectName/trunk and trunk contains src/chrome/ui/browser.h, then
+        with no --repository flag, the header guard CPP variable will be:
+
+        Alice => TRUNK_SRC_CHROME_BROWSER_UI_BROWSER_H_
+        Bob   => SRC_CHROME_BROWSER_UI_BROWSER_H_
+
+        If Alice uses the --repository=trunk flag and Bob omits the flag or
+        uses --repository=. then the header guard CPP variable will be:
+
+        Alice => SRC_CHROME_BROWSER_UI_BROWSER_H_
+        Bob   => SRC_CHROME_BROWSER_UI_BROWSER_H_
+
+    root=subdir
+      The root directory used for deriving header guard CPP variable.
+      This directory is relative to the top level directory of the repository
+      which by default is determined by searching for a directory that contains
+      .git, .hg, or .svn but can also be controlled with the --repository flag.
+      If the specified directory does not exist, this flag is ignored.
+
+      Examples:
+        Assuming that src is the top level directory of the repository (and
+        cwd=top/src), the header guard CPP variables for
+        src/chrome/browser/ui/browser.h are:
 
         No flag => CHROME_BROWSER_UI_BROWSER_H_
         --root=chrome => BROWSER_UI_BROWSER_H_
@@ -134,17 +193,45 @@
       Examples:
         --linelength=120
 
+    recursive
+      Search for files to lint recursively. Each directory given in the list
+      of files to be linted is replaced by all files that descend from that
+      directory. Files with extensions not in the valid extensions list are
+      excluded.
+
+    exclude=path
+      Exclude the given path from the list of files to be linted. Relative
+      paths are evaluated relative to the current directory and shell globbing
+      is performed. This flag can be provided multiple times to exclude
+      multiple files.
+
+      Examples:
+        --exclude=one.cc
+        --exclude=src/*.cc
+        --exclude=src/*.cc --exclude=test/*.cc
+
     extensions=extension,extension,...
       The allowed file extensions that cpplint will check
 
       Examples:
-        --extensions=hpp,cpp
+        --extensions=%s
+
+    includeorder=default|standardcfirst
+      For the build/include_order rule, the default is to blindly assume angle
+      bracket includes with file extension are c-system-headers (default),
+      even knowing this will have false classifications.
+      The default is established at google.
+      standardcfirst means to instead use an allow-list of known c headers and
+      treat all others as separate group of "other system headers". The C headers
+      included are those of the C-standard lib and closely related ones.
 
     headers=x,y,...
       The header extensions that cpplint will treat as .h in checks. Values are
       automatically added to --extensions list.
+     (by default, only files with extensions %s will be assumed to be headers)
 
       Examples:
+        --headers=%s
         --headers=hpp,hxx
         --headers=hpp
 
@@ -169,7 +256,7 @@
 
     "exclude_files" allows to specify a regular expression to be matched against
     a file name. If the expression matches, the file is skipped and not run
-    through liner.
+    through the linter.
 
     "linelength" allows to specify the allowed line length for the project.
 
@@ -184,7 +271,7 @@
 
       Example file:
         filter=-build/include_order,+build/include_alpha
-        exclude_files=.*\.cc
+        exclude_files=.*\\.cc
 
     The above example disables build/include_order warning and enables
     build/include_alpha as well as excludes all .cc from being
@@ -207,9 +294,12 @@
     'build/forward_decl',
     'build/header_guard',
     'build/include',
+    'build/include_subdir',
     'build/include_alpha',
     'build/include_order',
     'build/include_what_you_use',
+    'build/namespaces_headers',
+    'build/namespaces_literals',
     'build/namespaces',
     'build/printf_format',
     'build/storage_class',
@@ -265,6 +355,13 @@
     'whitespace/todo',
     ]
 
+# keywords to use with --outputs which generate stdout for machine processing
+_MACHINE_OUTPUTS = [
+  'junit',
+  'sed',
+  'gsed'
+]
+
 # These error categories are no longer enforced by cpplint, but for backwards-
 # compatibility they may still appear in NOLINT comments.
 _LEGACY_ERROR_CATEGORIES = [
@@ -400,6 +497,18 @@
     'utility',
     'valarray',
     'vector',
+    # 17.6.1.2 C++14 headers
+    'shared_mutex',
+    # 17.6.1.2 C++17 headers
+    'any',
+    'charconv',
+    'codecvt',
+    'execution',
+    'filesystem',
+    'memory_resource',
+    'optional',
+    'string_view',
+    'variant',
     # 17.6.1.2 C++ headers for C library facilities
     'cassert',
     'ccomplex',
@@ -429,6 +538,186 @@
     'cwctype',
     ])
 
+# C headers
+_C_HEADERS = frozenset([
+    # System C headers
+    'assert.h',
+    'complex.h',
+    'ctype.h',
+    'errno.h',
+    'fenv.h',
+    'float.h',
+    'inttypes.h',
+    'iso646.h',
+    'limits.h',
+    'locale.h',
+    'math.h',
+    'setjmp.h',
+    'signal.h',
+    'stdalign.h',
+    'stdarg.h',
+    'stdatomic.h',
+    'stdbool.h',
+    'stddef.h',
+    'stdint.h',
+    'stdio.h',
+    'stdlib.h',
+    'stdnoreturn.h',
+    'string.h',
+    'tgmath.h',
+    'threads.h',
+    'time.h',
+    'uchar.h',
+    'wchar.h',
+    'wctype.h',
+    # additional POSIX C headers
+    'aio.h',
+    'arpa/inet.h',
+    'cpio.h',
+    'dirent.h',
+    'dlfcn.h',
+    'fcntl.h',
+    'fmtmsg.h',
+    'fnmatch.h',
+    'ftw.h',
+    'glob.h',
+    'grp.h',
+    'iconv.h',
+    'langinfo.h',
+    'libgen.h',
+    'monetary.h',
+    'mqueue.h',
+    'ndbm.h',
+    'net/if.h',
+    'netdb.h',
+    'netinet/in.h',
+    'netinet/tcp.h',
+    'nl_types.h',
+    'poll.h',
+    'pthread.h',
+    'pwd.h',
+    'regex.h',
+    'sched.h',
+    'search.h',
+    'semaphore.h',
+    'setjmp.h',
+    'signal.h',
+    'spawn.h',
+    'strings.h',
+    'stropts.h',
+    'syslog.h',
+    'tar.h',
+    'termios.h',
+    'trace.h',
+    'ulimit.h',
+    'unistd.h',
+    'utime.h',
+    'utmpx.h',
+    'wordexp.h',
+    # additional GNUlib headers
+    'a.out.h',
+    'aliases.h',
+    'alloca.h',
+    'ar.h',
+    'argp.h',
+    'argz.h',
+    'byteswap.h',
+    'crypt.h',
+    'endian.h',
+    'envz.h',
+    'err.h',
+    'error.h',
+    'execinfo.h',
+    'fpu_control.h',
+    'fstab.h',
+    'fts.h',
+    'getopt.h',
+    'gshadow.h',
+    'ieee754.h',
+    'ifaddrs.h',
+    'libintl.h',
+    'mcheck.h',
+    'mntent.h',
+    'obstack.h',
+    'paths.h',
+    'printf.h',
+    'pty.h',
+    'resolv.h',
+    'shadow.h',
+    'sysexits.h',
+    'ttyent.h',
+    # Additional linux glibc headers
+    'dlfcn.h',
+    'elf.h',
+    'features.h',
+    'gconv.h',
+    'gnu-versions.h',
+    'lastlog.h',
+    'libio.h',
+    'link.h',
+    'malloc.h',
+    'memory.h',
+    'netash/ash.h',
+    'netatalk/at.h',
+    'netax25/ax25.h',
+    'neteconet/ec.h',
+    'netipx/ipx.h',
+    'netiucv/iucv.h',
+    'netpacket/packet.h',
+    'netrom/netrom.h',
+    'netrose/rose.h',
+    'nfs/nfs.h',
+    'nl_types.h',
+    'nss.h',
+    're_comp.h',
+    'regexp.h',
+    'sched.h',
+    'sgtty.h',
+    'stab.h',
+    'stdc-predef.h',
+    'stdio_ext.h',
+    'syscall.h',
+    'termio.h',
+    'thread_db.h',
+    'ucontext.h',
+    'ustat.h',
+    'utmp.h',
+    'values.h',
+    'wait.h',
+    'xlocale.h',
+    # Hardware specific headers
+    'arm_neon.h',
+    'emmintrin.h',
+    'xmmintin.h',
+    ])
+
+# Folders of C libraries so commonly used in C++,
+# that they have parity with standard C libraries.
+C_STANDARD_HEADER_FOLDERS = frozenset([
+    # standard C library
+    "sys",
+    # glibc for linux
+    "arpa",
+    "asm-generic",
+    "bits",
+    "gnu",
+    "net",
+    "netinet",
+    "protocols",
+    "rpc",
+    "rpcsvc",
+    "scsi",
+    # linux kernel header
+    "drm",
+    "linux",
+    "misc",
+    "mtd",
+    "rdma",
+    "sound",
+    "video",
+    "xen",
+  ])
+
 # Type names
 _TYPES = re.compile(
     r'^(?:'
@@ -452,7 +741,8 @@
     r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
 
 # Pattern for matching FileInfo.BaseName() against test file name
-_TEST_FILE_SUFFIX = r'(_test|_unittest|_regtest)$'
+_test_suffixes = ['_test', '_regtest', '_unittest']
+_TEST_FILE_SUFFIX = '(' + '|'.join(_test_suffixes) + r')$'
 
 # Pattern that matches only complete whitespace, possibly across multiple lines.
 _EMPTY_CONDITIONAL_BODY_PATTERN = re.compile(r'^\s*$', re.DOTALL)
@@ -466,7 +756,7 @@
     ]
 
 # Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
-_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
+_CHECK_REPLACEMENT = dict([(macro_var, {}) for macro_var in _CHECK_MACROS])
 
 for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
                         ('>=', 'GE'), ('>', 'GT'),
@@ -514,9 +804,10 @@
 # _IncludeState.CheckNextIncludeOrder().
 _C_SYS_HEADER = 1
 _CPP_SYS_HEADER = 2
-_LIKELY_MY_HEADER = 3
-_POSSIBLE_MY_HEADER = 4
-_OTHER_HEADER = 5
+_OTHER_SYS_HEADER = 3
+_LIKELY_MY_HEADER = 4
+_POSSIBLE_MY_HEADER = 5
+_OTHER_HEADER = 6
 
 # These constants define the current inline assembly state
 _NO_ASM = 0       # Outside of inline assembly block
@@ -536,6 +827,22 @@
 # Match string that indicates we're working on a Linux Kernel file.
 _SEARCH_KERNEL_FILE = re.compile(r'\b(?:LINT_KERNEL_FILE)')
 
+# Commands for sed to fix the problem
+_SED_FIXUPS = {
+  'Remove spaces around =': r's/ = /=/',
+  'Remove spaces around !=': r's/ != /!=/',
+  'Remove space before ( in if (': r's/if (/if(/',
+  'Remove space before ( in for (': r's/for (/for(/',
+  'Remove space before ( in while (': r's/while (/while(/',
+  'Remove space before ( in switch (': r's/switch (/switch(/',
+  'Should have a space between // and comment': r's/\/\//\/\/ /',
+  'Missing space before {': r's/\([^ ]\){/\1 {/',
+  'Tab found, replace by spaces': r's/\t/  /g',
+  'Line ends in whitespace.  Consider deleting these extra spaces.': r's/\s*$//',
+  'You don\'t need a ; after a }': r's/};/}/',
+  'Missing space after ,': r's/,\([^ ]\)/, \1/g',
+}
+
 _regexp_compile_cache = {}
 
 # {str, set(int)}: a map from error categories to sets of linenumbers
@@ -547,17 +854,55 @@
 _root = None
 _root_debug = False
 
+# The top level repository directory. If set, _root is calculated relative to
+# this directory instead of the directory containing version control artifacts.
+# This is set by the --repository flag.
+_repository = None
+
+# Files to exclude from linting. This is set by the --exclude flag.
+_excludes = None
+
+# Whether to suppress all PrintInfo messages, UNRELATED to --quiet flag
+_quiet = False
+
 # The allowed line length of files.
 # This is set by --linelength flag.
 _line_length = 80
 
-# The allowed extensions for file names
-# This is set by --extensions flag.
-_valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh'])
+# This allows to use different include order rule than default
+_include_order = "default"
+
+try:
+  unicode
+except NameError:
+  #  -- pylint: disable=redefined-builtin
+  basestring = unicode = str
+
+try:
+  long
+except NameError:
+  #  -- pylint: disable=redefined-builtin
+  long = int
+
+if sys.version_info < (3,):
+  #  -- pylint: disable=no-member
+  # BINARY_TYPE = str
+  itervalues = dict.itervalues
+  iteritems = dict.iteritems
+else:
+  # BINARY_TYPE = bytes
+  itervalues = dict.values
+  iteritems = dict.items
+
+def unicode_escape_decode(x):
+  if sys.version_info < (3,):
+    return codecs.unicode_escape_decode(x)[0]
+  else:
+    return x
 
 # Treat all headers starting with 'h' equally: .h, .hpp, .hxx etc.
 # This is set by --headers flag.
-_hpp_headers = set(['h'])
+_hpp_headers = set([])
 
 # {str, bool}: a map from error categories to booleans which indicate if the
 # category should be suppressed for every line.
@@ -566,14 +911,47 @@
 def ProcessHppHeadersOption(val):
   global _hpp_headers
   try:
-    _hpp_headers = set(val.split(','))
-    # Automatically append to extensions list so it does not have to be set 2 times
-    _valid_extensions.update(_hpp_headers)
+    _hpp_headers = {ext.strip() for ext in val.split(',')}
   except ValueError:
-    PrintUsage('Header extensions must be comma seperated list.')
+    PrintUsage('Header extensions must be comma separated list.')
+
+def ProcessIncludeOrderOption(val):
+  if val is None or val == "default":
+    pass
+  elif val == "standardcfirst":
+    global _include_order
+    _include_order = val
+  else:
+    PrintUsage('Invalid includeorder value %s. Expected default|standardcfirst')
 
 def IsHeaderExtension(file_extension):
-  return file_extension in _hpp_headers
+  return file_extension in GetHeaderExtensions()
+
+def GetHeaderExtensions():
+  if _hpp_headers:
+    return _hpp_headers
+  if _valid_extensions:
+    return {h for h in _valid_extensions if 'h' in h}
+  return set(['h', 'hh', 'hpp', 'hxx', 'h++', 'cuh'])
+
+# The allowed extensions for file names
+# This is set by --extensions flag
+def GetAllExtensions():
+  return GetHeaderExtensions().union(_valid_extensions or set(
+    ['c', 'cc', 'cpp', 'cxx', 'c++', 'cu']))
+
+def ProcessExtensionsOption(val):
+  global _valid_extensions
+  try:
+    extensions = [ext.strip() for ext in val.split(',')]
+    _valid_extensions = set(extensions)
+  except ValueError:
+    PrintUsage('Extensions should be a comma-separated list of values;'
+               'for example: extensions=hpp,cpp\n'
+               'This could not be parsed: "%s"' % (val,))
+
+def GetNonHeaderExtensions():
+  return GetAllExtensions().difference(GetHeaderExtensions())
 
 def ParseNolintSuppressions(filename, raw_line, linenum, error):
   """Updates the global list of line error-suppressions.
@@ -686,7 +1064,7 @@
 
 def _IsSourceExtension(s):
   """File extension (excluding dot) matches a source file extension."""
-  return s in ('c', 'cc', 'cpp', 'cxx')
+  return s in GetNonHeaderExtensions()
 
 
 class _IncludeState(object):
@@ -707,11 +1085,13 @@
   _MY_H_SECTION = 1
   _C_SECTION = 2
   _CPP_SECTION = 3
-  _OTHER_H_SECTION = 4
+  _OTHER_SYS_SECTION = 4
+  _OTHER_H_SECTION = 5
 
   _TYPE_NAMES = {
       _C_SYS_HEADER: 'C system header',
       _CPP_SYS_HEADER: 'C++ system header',
+      _OTHER_SYS_HEADER: 'other system header',
       _LIKELY_MY_HEADER: 'header this file implements',
       _POSSIBLE_MY_HEADER: 'header this file may implement',
       _OTHER_HEADER: 'other header',
@@ -721,11 +1101,14 @@
       _MY_H_SECTION: 'a header this file implements',
       _C_SECTION: 'C system header',
       _CPP_SECTION: 'C++ system header',
+      _OTHER_SYS_SECTION: 'other system header',
       _OTHER_H_SECTION: 'other header',
       }
 
   def __init__(self):
     self.include_list = [[]]
+    self._section = None
+    self._last_header = None
     self.ResetSection('')
 
   def FindHeader(self, header):
@@ -832,6 +1215,12 @@
       else:
         self._last_header = ''
         return error_message
+    elif header_type == _OTHER_SYS_HEADER:
+      if self._section <= self._OTHER_SYS_SECTION:
+        self._section = self._OTHER_SYS_SECTION
+      else:
+        self._last_header = ''
+        return error_message
     elif header_type == _LIKELY_MY_HEADER:
       if self._section <= self._MY_H_SECTION:
         self._section = self._MY_H_SECTION
@@ -870,9 +1259,18 @@
 
     # output format:
     # "emacs" - format that emacs can parse (default)
+    # "eclipse" - format that eclipse can parse
     # "vs7" - format that Microsoft Visual Studio 7 can parse
+    # "junit" - format that Jenkins, Bamboo, etc can parse
+    # "sed" - returns a gnu sed command to fix the problem
+    # "gsed" - like sed, but names the command gsed, e.g. for macOS homebrew users
     self.output_format = 'emacs'
 
+    # For JUnit output, save errors and failures until the end so that they
+    # can be written into the XML
+    self._junit_errors = []
+    self._junit_failures = []
+
   def SetOutputFormat(self, output_format):
     """Sets the output format for errors."""
     self.output_format = output_format
@@ -947,10 +1345,71 @@
 
   def PrintErrorCounts(self):
     """Print a summary of errors by category, and the total."""
-    for category, count in self.errors_by_category.iteritems():
-      sys.stderr.write('Category \'%s\' errors found: %d\n' %
+    for category, count in sorted(iteritems(self.errors_by_category)):
+      self.PrintInfo('Category \'%s\' errors found: %d\n' %
                        (category, count))
-    sys.stdout.write('Total errors found: %d\n' % self.error_count)
+    if self.error_count > 0:
+      self.PrintInfo('Total errors found: %d\n' % self.error_count)
+
+  def PrintInfo(self, message):
+    # _quiet does not represent --quiet flag.
+    # Hide infos from stdout to keep stdout pure for machine consumption
+    if not _quiet and self.output_format not in _MACHINE_OUTPUTS:
+      sys.stdout.write(message)
+
+  def PrintError(self, message):
+    if self.output_format == 'junit':
+      self._junit_errors.append(message)
+    else:
+      sys.stderr.write(message)
+
+  def AddJUnitFailure(self, filename, linenum, message, category, confidence):
+    self._junit_failures.append((filename, linenum, message, category,
+        confidence))
+
+  def FormatJUnitXML(self):
+    num_errors = len(self._junit_errors)
+    num_failures = len(self._junit_failures)
+
+    testsuite = xml.etree.ElementTree.Element('testsuite')
+    testsuite.attrib['errors'] = str(num_errors)
+    testsuite.attrib['failures'] = str(num_failures)
+    testsuite.attrib['name'] = 'cpplint'
+
+    if num_errors == 0 and num_failures == 0:
+      testsuite.attrib['tests'] = str(1)
+      xml.etree.ElementTree.SubElement(testsuite, 'testcase', name='passed')
+
+    else:
+      testsuite.attrib['tests'] = str(num_errors + num_failures)
+      if num_errors > 0:
+        testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
+        testcase.attrib['name'] = 'errors'
+        error = xml.etree.ElementTree.SubElement(testcase, 'error')
+        error.text = '\n'.join(self._junit_errors)
+      if num_failures > 0:
+        # Group failures by file
+        failed_file_order = []
+        failures_by_file = {}
+        for failure in self._junit_failures:
+          failed_file = failure[0]
+          if failed_file not in failed_file_order:
+            failed_file_order.append(failed_file)
+            failures_by_file[failed_file] = []
+          failures_by_file[failed_file].append(failure)
+        # Create a testcase for each file
+        for failed_file in failed_file_order:
+          failures = failures_by_file[failed_file]
+          testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
+          testcase.attrib['name'] = failed_file
+          failure = xml.etree.ElementTree.SubElement(testcase, 'failure')
+          template = '{0}: {1} [{2}] [{3}]'
+          texts = [template.format(f[1], f[2], f[3], f[4]) for f in failures]
+          failure.text = '\n'.join(texts)
+
+    xml_decl = '<?xml version="1.0" encoding="UTF-8" ?>\n'
+    return xml_decl + xml.etree.ElementTree.tostring(testsuite, 'utf-8').decode('utf-8')
+
 
 _cpplint_state = _CppLintState()
 
@@ -1104,12 +1563,12 @@
     return os.path.abspath(self._filename).replace('\\', '/')
 
   def RepositoryName(self):
-    """FullName after removing the local path to the repository.
+    r"""FullName after removing the local path to the repository.
 
     If we have a real absolute path name here we can try to do something smart:
     detecting the root of the checkout and truncating /path/to/checkout from
     the name so that we get header guards that don't include things like
-    "C:\Documents and Settings\..." or "/home/username/..." in them and thus
+    "C:\\Documents and Settings\\..." or "/home/username/..." in them and thus
     people on different computers who have checked the source out to different
     locations won't see bogus errors.
     """
@@ -1118,6 +1577,20 @@
     if os.path.exists(fullname):
       project_dir = os.path.dirname(fullname)
 
+      # If the user specified a repository path, it exists, and the file is
+      # contained in it, use the specified repository path
+      if _repository:
+        repo = FileInfo(_repository).FullName()
+        root_dir = project_dir
+        while os.path.exists(root_dir):
+          # allow case insensitive compare on Windows
+          if os.path.normcase(root_dir) == os.path.normcase(repo):
+            return os.path.relpath(fullname, root_dir).replace('\\', '/')
+          one_up_dir = os.path.dirname(root_dir)
+          if one_up_dir == root_dir:
+            break
+          root_dir = one_up_dir
+
       if os.path.exists(os.path.join(project_dir, ".svn")):
         # If there's a .svn file in the current directory, we recursively look
         # up the directory tree for the top of the SVN checkout
@@ -1168,7 +1641,7 @@
     return self.Split()[1]
 
   def Extension(self):
-    """File extension - text following the final period."""
+    """File extension - text following the final period, includes that period."""
     return self.Split()[2]
 
   def NoExtension(self):
@@ -1233,15 +1706,25 @@
   if _ShouldPrintError(category, confidence, linenum):
     _cpplint_state.IncrementErrorCount(category)
     if _cpplint_state.output_format == 'vs7':
-      sys.stderr.write('%s(%s): error cpplint: [%s] %s [%d]\n' % (
+      _cpplint_state.PrintError('%s(%s): error cpplint: [%s] %s [%d]\n' % (
           filename, linenum, category, message, confidence))
     elif _cpplint_state.output_format == 'eclipse':
       sys.stderr.write('%s:%s: warning: %s  [%s] [%d]\n' % (
           filename, linenum, message, category, confidence))
+    elif _cpplint_state.output_format == 'junit':
+      _cpplint_state.AddJUnitFailure(filename, linenum, message, category,
+          confidence)
+    elif _cpplint_state.output_format in ['sed', 'gsed']:
+      if message in _SED_FIXUPS:
+        sys.stdout.write(_cpplint_state.output_format + " -i '%s%s' %s # %s  [%s] [%d]\n" % (
+            linenum, _SED_FIXUPS[message], filename, message, category, confidence))
+      else:
+        sys.stderr.write('# %s:%s:  "%s"  [%s] [%d]\n' % (
+            filename, linenum, message, category, confidence))
     else:
-      sys.stderr.write('%s:%s:  %s  [%s] [%d]\n' % (
-          filename, linenum, message, category, confidence))
-
+      final_message = '%s:%s:  %s  [%s] [%d]\n' % (
+          filename, linenum, message, category, confidence)
+      sys.stderr.write(final_message)
 
 # Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
 _RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
@@ -1378,7 +1861,7 @@
 
 def RemoveMultiLineCommentsFromRange(lines, begin, end):
   """Clears a range of lines for multi-line comments."""
-  # Having // dummy comments makes the lines non-empty, so we will not get
+  # Having // <empty> comments makes the lines non-empty, so we will not get
   # unnecessary blank line warnings later in the code.
   for i in range(begin, end):
     lines[i] = '/**/'
@@ -1752,7 +2235,7 @@
   """Logs an error if no Copyright message appears at the top of the file."""
 
   # We'll say it should occur by line 10. Don't forget there's a
-  # dummy line at the front.
+  # placeholder line at the front.
   for line in xrange(1, min(len(lines), 11)):
     if re.search(r'Copyright', lines[line], re.I): break
   else:                       # means no copyright line was found
@@ -1788,10 +2271,10 @@
   lst = []
   while True:
     (head, tail) = os.path.split(path)
-    if head == path: # absolute paths end
+    if head == path:  # absolute paths end
       lst.append(head)
       break
-    if tail == path: # relative paths end
+    if tail == path:  # relative paths end
       lst.append(tail)
       break
 
@@ -1826,7 +2309,7 @@
   def FixupPathFromRoot():
     if _root_debug:
       sys.stderr.write("\n_root fixup, _root = '%s', repository name = '%s'\n"
-          %(_root, fileinfo.RepositoryName()))
+          % (_root, fileinfo.RepositoryName()))
 
     # Process the file path with the --root flag if it was set.
     if not _root:
@@ -1847,28 +2330,29 @@
                                  PathSplitToList(_root))
 
     if _root_debug:
-      sys.stderr.write("_root lstrip (maybe_path=%s, file_path_from_root=%s," +
-          " _root=%s)\n" %(maybe_path, file_path_from_root, _root))
+      sys.stderr.write(("_root lstrip (maybe_path=%s, file_path_from_root=%s," +
+          " _root=%s)\n") % (maybe_path, file_path_from_root, _root))
 
     if maybe_path:
       return os.path.join(*maybe_path)
 
     #   --root=.. , will prepend the outer directory to the header guard
     full_path = fileinfo.FullName()
-    root_abspath = os.path.abspath(_root)
+    # adapt slashes for windows
+    root_abspath = os.path.abspath(_root).replace('\\', '/')
 
     maybe_path = StripListPrefix(PathSplitToList(full_path),
                                  PathSplitToList(root_abspath))
 
     if _root_debug:
-      sys.stderr.write("_root prepend (maybe_path=%s, full_path=%s, " +
-          "root_abspath=%s)\n" %(maybe_path, full_path, root_abspath))
+      sys.stderr.write(("_root prepend (maybe_path=%s, full_path=%s, " +
+          "root_abspath=%s)\n") % (maybe_path, full_path, root_abspath))
 
     if maybe_path:
       return os.path.join(*maybe_path)
 
     if _root_debug:
-      sys.stderr.write("_root ignore, returning %s\n" %(file_path_from_root))
+      sys.stderr.write("_root ignore, returning %s\n" % (file_path_from_root))
 
     #   --root=FAKE_DIR is ignored
     return file_path_from_root
@@ -1900,6 +2384,11 @@
     if Search(r'//\s*NOLINT\(build/header_guard\)', i):
       return
 
+  # Allow pragma once instead of header guards
+  for i in raw_lines:
+    if Search(r'^\s*#pragma\s+once', i):
+      return
+
   cppvar = GetHeaderGuardCPPVariable(filename)
 
   ifndef = ''
@@ -1976,28 +2465,36 @@
 
 
 def CheckHeaderFileIncluded(filename, include_state, error):
-  """Logs an error if a .cc file does not include its header."""
+  """Logs an error if a source file does not include its header."""
 
   # Do not check test files
   fileinfo = FileInfo(filename)
   if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()):
     return
 
-  headerfile = filename[0:len(filename) - len(fileinfo.Extension())] + '.h'
-  if not os.path.exists(headerfile):
-    return
-  headername = FileInfo(headerfile).RepositoryName()
-  first_include = 0
-  for section_list in include_state.include_list:
-    for f in section_list:
-      if headername in f[0] or f[0] in headername:
-        return
-      if not first_include:
-        first_include = f[1]
+  for ext in GetHeaderExtensions():
+    basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
+    headerfile = basefilename + '.' + ext
+    if not os.path.exists(headerfile):
+      continue
+    headername = FileInfo(headerfile).RepositoryName()
+    first_include = None
+    include_uses_unix_dir_aliases = False
+    for section_list in include_state.include_list:
+      for f in section_list:
+        include_text = f[0]
+        if "./" in include_text:
+          include_uses_unix_dir_aliases = True
+        if headername in include_text or include_text in headername:
+          return
+        if not first_include:
+          first_include = f[1]
 
-  error(filename, first_include, 'build/include', 5,
-        '%s should include its header file %s' % (fileinfo.RepositoryName(),
-                                                  headername))
+    message = '%s should include its header file %s' % (fileinfo.RepositoryName(), headername)
+    if include_uses_unix_dir_aliases:
+      message += ". Relative paths like . and .. are not allowed."
+
+    error(filename, first_include, 'build/include', 5, message)
 
 
 def CheckForBadCharacters(filename, lines, error):
@@ -2018,7 +2515,7 @@
     error: The function to call with any errors found.
   """
   for linenum, line in enumerate(lines):
-    if u'\ufffd' in line:
+    if unicode_escape_decode('\ufffd') in line:
       error(filename, linenum, 'readability/utf8', 5,
             'Line contains invalid UTF-8 (or Unicode replacement character).')
     if '\0' in line:
@@ -2647,8 +3144,8 @@
     #   class LOCKABLE API Object {
     #   };
     class_decl_match = Match(
-        r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
-        r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
+        r'^(\s*(?:template\s*<[\w\s<>,:=]*>\s*)?'
+        r'(class|struct)\s+(?:[a-zA-Z0-9_]+\s+)*(\w+(?:::\w+)*))'
         r'(.*)$', line)
     if (class_decl_match and
         (not self.stack or self.stack[-1].open_parentheses == 0)):
@@ -2896,6 +3393,7 @@
       constructor_args[i] = constructor_arg
       i += 1
 
+    variadic_args = [arg for arg in constructor_args if '&&...' in arg]
     defaulted_args = [arg for arg in constructor_args if '=' in arg]
     noarg_constructor = (not constructor_args or  # empty arg list
                          # 'void' arg specifier
@@ -2906,20 +3404,24 @@
                           # all but at most one arg defaulted
                           (len(constructor_args) >= 1 and
                            not noarg_constructor and
-                           len(defaulted_args) >= len(constructor_args) - 1))
+                           len(defaulted_args) >= len(constructor_args) - 1) or
+                          # variadic arguments with zero or one argument
+                          (len(constructor_args) <= 2 and
+                           len(variadic_args) >= 1))
     initializer_list_constructor = bool(
         onearg_constructor and
         Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
     copy_constructor = bool(
         onearg_constructor and
-        Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
+        Match(r'((const\s+(volatile\s+)?)?|(volatile\s+(const\s+)?))?'
+              r'%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
               % re.escape(base_classname), constructor_args[0].strip()))
 
     if (not is_marked_explicit and
         onearg_constructor and
         not initializer_list_constructor and
         not copy_constructor):
-      if defaulted_args:
+      if defaulted_args or variadic_args:
         error(filename, linenum, 'runtime/explicit', 5,
               'Constructors callable with one argument '
               'should be marked explicit.')
@@ -2971,7 +3473,7 @@
   # Note that we assume the contents of [] to be short enough that
   # they'll never need to wrap.
   if (  # Ignore control structures.
-      not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
+      not Search(r'\b(if|elif|for|while|switch|return|new|delete|catch|sizeof)\b',
                  fncall) and
       # Ignore pointers/references to functions.
       not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
@@ -3084,7 +3586,7 @@
       if Search(r'(;|})', start_line):  # Declarations and trivial functions
         body_found = True
         break                              # ... ignore
-      elif Search(r'{', start_line):
+      if Search(r'{', start_line):
         body_found = True
         function = Search(r'((\w|:)*)\(', line).group(1)
         if Match(r'TEST', function):    # Handle TEST... macros
@@ -3277,9 +3779,10 @@
   # get rid of comments and strings
   line = clean_lines.elided[linenum]
 
-  # You shouldn't have spaces before your brackets, except maybe after
-  # 'delete []' or 'return []() {};'
-  if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
+  # You shouldn't have spaces before your brackets, except for C++11 attributes
+  # or maybe after 'delete []', 'return []() {};', or 'auto [abc, ...] = ...;'.
+  if (Search(r'\w\s+\[(?!\[)', line) and
+      not Search(r'(?:auto&?|delete|return)\s+\[', line)):
     error(filename, linenum, 'whitespace/braces', 5,
           'Extra space before [')
 
@@ -3649,7 +4152,6 @@
     return True
   return False
 
-
 def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
   """Checks for additional blank line issues related to sections.
 
@@ -3798,11 +4300,11 @@
   # its line, and the line after that should have an indent level equal to or
   # lower than the if. We also check for ambiguous if/else nesting without
   # braces.
-  if_else_match = Search(r'\b(if\s*\(|else\b)', line)
+  if_else_match = Search(r'\b(if\s*(|constexpr)\s*\(|else\b)', line)
   if if_else_match and not Match(r'\s*#', line):
     if_indent = GetIndentLevel(line)
     endline, endlinenum, endpos = line, linenum, if_else_match.end()
-    if_match = Search(r'\bif\s*\(', line)
+    if_match = Search(r'\bif\s*(|constexpr)\s*\(', line)
     if if_match:
       # This could be a multiline if condition, so find the end first.
       pos = if_match.end() - 1
@@ -3861,9 +4363,9 @@
 
   # Block bodies should not be followed by a semicolon.  Due to C++11
   # brace initialization, there are more places where semicolons are
-  # required than not, so we use a whitelist approach to check these
-  # rather than a blacklist.  These are the places where "};" should
-  # be replaced by just "}":
+  # required than not, so we explicitly list the allowed rules rather
+  # than listing the disallowed ones.  These are the places where "};"
+  # should be replaced by just "}":
   # 1. Some flavor of block following closing parenthesis:
   #    for (;;) {};
   #    while (...) {};
@@ -3919,11 +4421,11 @@
     #  - INTERFACE_DEF
     #  - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
     #
-    # We implement a whitelist of safe macros instead of a blacklist of
+    # We implement a list of safe macros instead of a list of
     # unsafe macros, even though the latter appears less frequently in
     # google code and would have been easier to implement.  This is because
-    # the downside for getting the whitelist wrong means some extra
-    # semicolons, while the downside for getting the blacklist wrong
+    # the downside for getting the allowed checks wrong means some extra
+    # semicolons, while the downside for getting disallowed checks wrong
     # would result in compile errors.
     #
     # In addition to macros, we also don't want to warn on
@@ -4067,12 +4569,12 @@
         return
       if closing_linenum > opening_linenum:
         # Opening line after the {. Ignore comments here since we checked above.
-        body = list(opening_line[opening_pos+1:])
+        bodylist = list(opening_line[opening_pos+1:])
         # All lines until closing line, excluding closing line, with comments.
-        body.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
+        bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
         # Closing line before the }. Won't (and can't) have comments.
-        body.append(clean_lines.elided[closing_linenum][:closing_pos-1])
-        body = '\n'.join(body)
+        bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1])
+        body = '\n'.join(bodylist)
       else:
         # If statement has brackets and fits on a single line.
         body = opening_line[opening_pos+1:closing_pos-1]
@@ -4287,6 +4789,16 @@
       if unicodedata.east_asian_width(uc) in ('W', 'F'):
         width += 2
       elif not unicodedata.combining(uc):
+        # Issue 337
+        # https://mail.python.org/pipermail/python-list/2012-August/628809.html
+        if (sys.version_info.major, sys.version_info.minor) <= (3, 2):
+          # https://github.com/python/cpython/blob/2.7/Include/unicodeobject.h#L81
+          is_wide_build = sysconfig.get_config_var("Py_UNICODE_SIZE") >= 4
+          # https://github.com/python/cpython/blob/2.7/Objects/unicodeobject.c#L564
+          is_low_surrogate = 0xDC00 <= ord(uc) <= 0xDFFF
+          if not is_wide_build and is_low_surrogate:
+            width -= 1
+
         width += 1
     return width
   else:
@@ -4334,7 +4846,7 @@
   # if(match($0, " <<")) complain = 0;
   # if(match(prev, " +for \\(")) complain = 0;
   # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
-  scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
+  scope_or_label_pattern = r'\s*(?:public|private|protected|signals)(?:\s+(?:slots\s*)?)?:\s*\\?$'
   classinfo = nesting_state.InnermostClass()
   initial_spaces = 0
   cleansed_line = clean_lines.elided[linenum]
@@ -4374,16 +4886,23 @@
   #
   # The "$Id:...$" comment may also get very long without it being the
   # developers fault.
+  #
+  # Doxygen documentation copying can get pretty long when using an overloaded
+  # function declaration
   if (not line.startswith('#include') and not is_header_guard and
       not Match(r'^\s*//.*http(s?)://\S*$', line) and
       not Match(r'^\s*//\s*[^\s]*$', line) and
-      not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
+      not Match(r'^// \$Id:.*#[0-9]+ \$$', line) and
+      not Match(r'^\s*/// [@\\](copydoc|copydetails|copybrief) .*$', line)):
     line_width = GetLineWidth(line)
     if line_width > _line_length:
       error(filename, linenum, 'whitespace/line_length', 2,
             'Lines should be <= %i characters long' % _line_length)
 
   if (cleansed_line.count(';') > 1 and
+      # allow simple single line lambdas
+      not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}\n\r]*\}',
+                line) and
       # for loops are allowed two ;'s (and may run over two lines).
       cleansed_line.find('for') == -1 and
       (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
@@ -4440,21 +4959,25 @@
   Returns:
     The filename with the common suffix removed.
   """
-  for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
-                 'inl.h', 'impl.h', 'internal.h'):
+  for suffix in itertools.chain(
+      ('%s.%s' % (test_suffix.lstrip('_'), ext)
+       for test_suffix, ext in itertools.product(_test_suffixes, GetNonHeaderExtensions())),
+      ('%s.%s' % (suffix, ext)
+       for suffix, ext in itertools.product(['inl', 'imp', 'internal'], GetHeaderExtensions()))):
     if (filename.endswith(suffix) and len(filename) > len(suffix) and
         filename[-len(suffix) - 1] in ('-', '_')):
       return filename[:-len(suffix) - 1]
   return os.path.splitext(filename)[0]
 
 
-def _ClassifyInclude(fileinfo, include, is_system):
+def _ClassifyInclude(fileinfo, include, used_angle_brackets, include_order="default"):
   """Figures out what kind of header 'include' is.
 
   Args:
     fileinfo: The current file cpplint is running over. A FileInfo instance.
     include: The path to a #included file.
-    is_system: True if the #include used <> rather than "".
+    used_angle_brackets: True if the #include used <> rather than "".
+    include_order: "default" or other value allowed in program arguments
 
   Returns:
     One of the _XXX_HEADER constants.
@@ -4464,6 +4987,8 @@
     _C_SYS_HEADER
     >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
     _CPP_SYS_HEADER
+    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', True, "standardcfirst")
+    _OTHER_SYS_HEADER
     >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
     _LIKELY_MY_HEADER
     >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
@@ -4474,13 +4999,23 @@
   """
   # This is a list of all standard c++ header files, except
   # those already checked for above.
-  is_cpp_h = include in _CPP_HEADERS
+  is_cpp_header = include in _CPP_HEADERS
+
+  # Mark include as C header if in list or in a known folder for standard-ish C headers.
+  is_std_c_header = (include_order == "default") or (include in _C_HEADERS
+            # additional linux glibc header folders
+            or Search(r'(?:%s)\/.*\.h' % "|".join(C_STANDARD_HEADER_FOLDERS), include))
+
+  # Headers with C++ extensions shouldn't be considered C system headers
+  is_system = used_angle_brackets and not os.path.splitext(include)[1] in ['.hpp', '.hxx', '.h++']
 
   if is_system:
-    if is_cpp_h:
+    if is_cpp_header:
       return _CPP_SYS_HEADER
-    else:
+    if is_std_c_header:
       return _C_SYS_HEADER
+    else:
+      return _OTHER_SYS_HEADER
 
   # If the target file and the include we're checking share a
   # basename when we drop common extensions, and the include
@@ -4488,9 +5023,11 @@
   target_dir, target_base = (
       os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
   include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
+  target_dir_pub = os.path.normpath(target_dir + '/../public')
+  target_dir_pub = target_dir_pub.replace('\\', '/')
   if target_base == include_base and (
       include_dir == target_dir or
-      include_dir == os.path.normpath(target_dir + '/../public')):
+      include_dir == target_dir_pub):
     return _LIKELY_MY_HEADER
 
   # If the target and include share some initial basename
@@ -4534,7 +5071,7 @@
   # naming convention but not the include convention.
   match = Match(r'#include\s*"([^/]+\.h)"', line)
   if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
-    error(filename, linenum, 'build/include', 4,
+    error(filename, linenum, 'build/include_subdir', 4,
           'Include the directory when naming .h files')
 
   # we shouldn't include a file more than once. actually, there are a
@@ -4543,17 +5080,34 @@
   match = _RE_PATTERN_INCLUDE.search(line)
   if match:
     include = match.group(2)
-    is_system = (match.group(1) == '<')
+    used_angle_brackets = (match.group(1) == '<')
     duplicate_line = include_state.FindHeader(include)
     if duplicate_line >= 0:
       error(filename, linenum, 'build/include', 4,
             '"%s" already included at %s:%s' %
             (include, filename, duplicate_line))
-    elif (include.endswith('.cc') and
+      return
+
+    for extension in GetNonHeaderExtensions():
+      if (include.endswith('.' + extension) and
           os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
-      error(filename, linenum, 'build/include', 4,
-            'Do not include .cc files from other packages')
-    elif not _THIRD_PARTY_HEADERS_PATTERN.match(include):
+        error(filename, linenum, 'build/include', 4,
+              'Do not include .' + extension + ' files from other packages')
+        return
+
+    # We DO want to include a 3rd party looking header if it matches the
+    # filename. Otherwise we get an erroneous error "...should include its
+    # header" error later.
+    third_src_header = False
+    for ext in GetHeaderExtensions():
+      basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
+      headerfile = basefilename + '.' + ext
+      headername = FileInfo(headerfile).RepositoryName()
+      if headername in include or include in headername:
+        third_src_header = True
+        break
+
+    if third_src_header or not _THIRD_PARTY_HEADERS_PATTERN.match(include):
       include_state.include_list[-1].append((include, linenum))
 
       # We want to ensure that headers appear in the right order:
@@ -4568,7 +5122,7 @@
       # track of the highest type seen, and complains if we see a
       # lower type after that.
       error_message = include_state.CheckNextIncludeOrder(
-          _ClassifyInclude(fileinfo, include, is_system))
+          _ClassifyInclude(fileinfo, include, used_angle_brackets, _include_order))
       if error_message:
         error(filename, linenum, 'build/include_order', 4,
               '%s. Should be: %s.h, c system, c++ system, other.' %
@@ -4607,7 +5161,7 @@
 
   # Give opening punctuations to get the matching close-punctuations.
   matching_punctuation = {'(': ')', '{': '}', '[': ']'}
-  closing_punctuation = set(matching_punctuation.itervalues())
+  closing_punctuation = set(itervalues(matching_punctuation))
 
   # Find the position to start extracting text.
   match = re.search(start_pattern, text, re.M)
@@ -4701,8 +5255,6 @@
   if match:
     include_state.ResetSection(match.group(1))
 
-  # Make Windows paths like Unix.
-  fullname = os.path.abspath(filename).replace('\\', '/')
 
   # Perform other checks now that we are sure that this is not an include line
   CheckCasts(filename, clean_lines, linenum, error)
@@ -4770,9 +5322,14 @@
           % (match.group(1), match.group(2)))
 
   if Search(r'\busing namespace\b', line):
-    error(filename, linenum, 'build/namespaces', 5,
-          'Do not use namespace using-directives.  '
-          'Use using-declarations instead.')
+    if Search(r'\bliterals\b', line):
+      error(filename, linenum, 'build/namespaces_literals', 5,
+            'Do not use namespace using-directives.  '
+            'Use using-declarations instead.')
+    else:
+      error(filename, linenum, 'build/namespaces', 5,
+            'Do not use namespace using-directives.  '
+            'Use using-declarations instead.')
 
   # Detect variable-length arrays.
   match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
@@ -4819,7 +5376,7 @@
   if (IsHeaderExtension(file_extension)
       and Search(r'\bnamespace\s*{', line)
       and line[-1] != '\\'):
-    error(filename, linenum, 'build/namespaces', 4,
+    error(filename, linenum, 'build/namespaces_headers', 4,
           'Do not use unnamed namespaces in header files.  See '
           'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
           ' for more information.')
@@ -5109,19 +5666,19 @@
   #
   # We also accept & in static_assert, which looks like a function but
   # it's actually a declaration expression.
-  whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
+  allowed_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
                            r'operator\s*[<>][<>]|'
                            r'static_assert|COMPILE_ASSERT'
                            r')\s*\(')
-  if Search(whitelisted_functions, line):
+  if Search(allowed_functions, line):
     return
   elif not Search(r'\S+\([^)]*$', line):
-    # Don't see a whitelisted function on this line.  Actually we
+    # Don't see an allowed function on this line.  Actually we
     # didn't see any function name on this line, so this is likely a
     # multi-line parameter list.  Try a bit harder to catch this case.
     for i in xrange(2):
       if (linenum > i and
-          Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
+          Search(allowed_functions, clean_lines.elided[linenum - i - 1])):
         return
 
   decls = ReplaceAll(r'{[^}]*}', ' ', line)  # exclude function body
@@ -5196,7 +5753,7 @@
 
   if not expecting_function:
     CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
-                    r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
+                    r'\((int|float|double|bool|char|u?int(16|32|64)|size_t)\)', error)
 
   # This doesn't catch all cases. Consider (const char * const)"hello".
   #
@@ -5349,11 +5906,11 @@
                      )),
     ('<limits>', ('numeric_limits',)),
     ('<list>', ('list',)),
-    ('<map>', ('map', 'multimap',)),
+    ('<map>', ('multimap',)),
     ('<memory>', ('allocator', 'make_shared', 'make_unique', 'shared_ptr',
                   'unique_ptr', 'weak_ptr')),
     ('<queue>', ('queue', 'priority_queue',)),
-    ('<set>', ('set', 'multiset',)),
+    ('<set>', ('multiset',)),
     ('<stack>', ('stack',)),
     ('<string>', ('char_traits', 'basic_string',)),
     ('<tuple>', ('tuple',)),
@@ -5382,11 +5939,21 @@
 for _header, _templates in _HEADERS_MAYBE_TEMPLATES:
   for _template in _templates:
     # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
-    # type::max().
+    # 'type::max()'.
     _re_pattern_headers_maybe_templates.append(
         (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
             _template,
             _header))
+# Match set<type>, but not foo->set<type>, foo.set<type>
+_re_pattern_headers_maybe_templates.append(
+    (re.compile(r'[^>.]\bset\s*\<'),
+        'set<>',
+        '<set>'))
+# Match 'map<type> var' and 'std::map<type>(...)', but not 'map<type>(...)''
+_re_pattern_headers_maybe_templates.append(
+    (re.compile(r'(std\b::\bmap\s*\<)|(^(std\b::\b)map\b\(\s*\<)'),
+        'map<>',
+        '<map>'))
 
 # Other scripts may reach in and modify this pattern.
 _re_pattern_templates = []
@@ -5419,7 +5986,7 @@
   some false positives. This should be sufficiently rare in practice.
 
   Args:
-    filename_cc: is the path for the .cc file
+    filename_cc: is the path for the source (e.g. .cc) file
     filename_h: is the path for the header path
 
   Returns:
@@ -5427,20 +5994,23 @@
     bool: True if filename_cc and filename_h belong to the same module.
     string: the additional prefix needed to open the header file.
   """
-
-  fileinfo = FileInfo(filename_cc)
-  if not fileinfo.IsSource():
+  fileinfo_cc = FileInfo(filename_cc)
+  if not fileinfo_cc.Extension().lstrip('.') in GetNonHeaderExtensions():
     return (False, '')
-  filename_cc = filename_cc[:-len(fileinfo.Extension())]
-  matched_test_suffix = Search(_TEST_FILE_SUFFIX, fileinfo.BaseName())
+
+  fileinfo_h = FileInfo(filename_h)
+  if not IsHeaderExtension(fileinfo_h.Extension().lstrip('.')):
+    return (False, '')
+
+  filename_cc = filename_cc[:-(len(fileinfo_cc.Extension()))]
+  matched_test_suffix = Search(_TEST_FILE_SUFFIX, fileinfo_cc.BaseName())
   if matched_test_suffix:
     filename_cc = filename_cc[:-len(matched_test_suffix.group(1))]
+
   filename_cc = filename_cc.replace('/public/', '/')
   filename_cc = filename_cc.replace('/internal/', '/')
 
-  if not filename_h.endswith('.h'):
-    return (False, '')
-  filename_h = filename_h[:-len('.h')]
+  filename_h = filename_h[:-(len(fileinfo_h.Extension()))]
   if filename_h.endswith('-inl'):
     filename_h = filename_h[:-len('-inl')]
   filename_h = filename_h.replace('/public/', '/')
@@ -5466,18 +6036,19 @@
   """
   headerfile = None
   try:
-    headerfile = io.open(filename, 'r', 'utf8', 'replace')
+    with io.open(filename, 'r', 'utf8', 'replace') as headerfile:
+      linenum = 0
+      for line in headerfile:
+        linenum += 1
+        clean_line = CleanseComments(line)
+        match = _RE_PATTERN_INCLUDE.search(clean_line)
+        if match:
+          include = match.group(2)
+          include_dict.setdefault(include, linenum)
+    return True
   except IOError:
     return False
-  linenum = 0
-  for line in headerfile:
-    linenum += 1
-    clean_line = CleanseComments(line)
-    match = _RE_PATTERN_INCLUDE.search(clean_line)
-    if match:
-      include = match.group(2)
-      include_dict.setdefault(include, linenum)
-  return True
+
 
 
 def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
@@ -5555,7 +6126,7 @@
 
   # include_dict is modified during iteration, so we iterate over a copy of
   # the keys.
-  header_keys = include_dict.keys()
+  header_keys = list(include_dict.keys())
   for header in header_keys:
     (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
     fullpath = common_path + header
@@ -5567,11 +6138,13 @@
   # didn't include it in the .h file.
   # TODO(unknown): Do a better job of finding .h files so we are confident that
   # not having the .h file means there isn't one.
-  if filename.endswith('.cc') and not header_found:
-    return
+  if not header_found:
+    for extension in GetNonHeaderExtensions():
+      if filename.endswith('.' + extension):
+        return
 
   # All the lines have been processed, report the errors found.
-  for required_header_unstripped in required:
+  for required_header_unstripped in sorted(required, key=required.__getitem__):
     template = required[required_header_unstripped][1]
     if required_header_unstripped.strip('<>"') not in include_dict:
       error(filename, required[required_header_unstripped][0],
@@ -5710,11 +6283,9 @@
     Whether or not the new block is directly in a namespace.
   """
   if is_forward_declaration:
-    if len(nesting_state.stack) >= 1 and (
-        isinstance(nesting_state.stack[-1], _NamespaceInfo)):
-      return True
-    else:
-      return False
+    return len(nesting_state.stack) >= 1 and (
+      isinstance(nesting_state.stack[-1], _NamespaceInfo))
+
 
   return (len(nesting_state.stack) > 1 and
           nesting_state.stack[-1].check_namespace_indentation and
@@ -5764,7 +6335,7 @@
 
 def ProcessLine(filename, file_extension, clean_lines, line,
                 include_state, function_state, nesting_state, error,
-                extra_check_functions=[]):
+                extra_check_functions=None):
   """Processes a single line in the file.
 
   Args:
@@ -5803,8 +6374,9 @@
   CheckMakePairUsesDeduction(filename, clean_lines, line, error)
   CheckRedundantVirtual(filename, clean_lines, line, error)
   CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
-  for check_fn in extra_check_functions:
-    check_fn(filename, clean_lines, line, error)
+  if extra_check_functions:
+    for check_fn in extra_check_functions:
+      check_fn(filename, clean_lines, line, error)
 
 def FlagCxx11Features(filename, clean_lines, linenum, error):
   """Flag those c++11 features that we only allow in certain places.
@@ -5878,7 +6450,7 @@
 
 
 def ProcessFileData(filename, file_extension, lines, error,
-                    extra_check_functions=[]):
+                    extra_check_functions=None):
   """Performs lint checks and reports any errors to the given error function.
 
   Args:
@@ -5978,7 +6550,7 @@
                 if _cpplint_state.quiet:
                   # Suppress "Ignoring file" warning when using --quiet.
                   return False
-                sys.stderr.write('Ignoring "%s": file excluded by "%s". '
+                _cpplint_state.PrintInfo('Ignoring "%s": file excluded by "%s". '
                                  'File path component "%s" matches '
                                  'pattern "%s"\n' %
                                  (filename, cfg_file, base_name, val))
@@ -5986,34 +6558,38 @@
           elif name == 'linelength':
             global _line_length
             try:
-                _line_length = int(val)
+              _line_length = int(val)
             except ValueError:
-                sys.stderr.write('Line length must be numeric.')
+              _cpplint_state.PrintError('Line length must be numeric.')
+          elif name == 'extensions':
+            ProcessExtensionsOption(val)
           elif name == 'root':
             global _root
             # root directories are specified relative to CPPLINT.cfg dir.
             _root = os.path.join(os.path.dirname(cfg_file), val)
           elif name == 'headers':
             ProcessHppHeadersOption(val)
+          elif name == 'includeorder':
+            ProcessIncludeOrderOption(val)
           else:
-            sys.stderr.write(
+            _cpplint_state.PrintError(
                 'Invalid configuration option (%s) in file %s\n' %
                 (name, cfg_file))
 
     except IOError:
-      sys.stderr.write(
+      _cpplint_state.PrintError(
           "Skipping config file '%s': Can't open for reading\n" % cfg_file)
       keep_looking = False
 
   # Apply all the accumulated filters in reverse order (top-level directory
   # config options having the least priority).
-  for filter in reversed(cfg_filters):
-     _AddFilters(filter)
+  for cfg_filter in reversed(cfg_filters):
+    _AddFilters(cfg_filter)
 
   return True
 
 
-def ProcessFile(filename, vlevel, extra_check_functions=[]):
+def ProcessFile(filename, vlevel, extra_check_functions=None):
   """Does google-lint on a single file.
 
   Args:
@@ -6051,7 +6627,8 @@
                                         codecs.getwriter('utf8'),
                                         'replace').read().split('\n')
     else:
-      lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
+      with codecs.open(filename, 'r', 'utf8', 'replace') as target_file:
+        lines = target_file.read().split('\n')
 
     # Remove trailing '\r'.
     # The -1 accounts for the extra trailing blank line we get from split()
@@ -6063,7 +6640,7 @@
         lf_lines.append(linenum + 1)
 
   except IOError:
-    sys.stderr.write(
+    _cpplint_state.PrintError(
         "Skipping input '%s': Can't open for reading\n" % filename)
     _RestoreFilters()
     return
@@ -6073,9 +6650,9 @@
 
   # When reading from stdin, the extension is unknown, so no cpplint tests
   # should rely on the extension.
-  if filename != '-' and file_extension not in _valid_extensions:
-    sys.stderr.write('Ignoring %s; not a valid file name '
-                     '(%s)\n' % (filename, ', '.join(_valid_extensions)))
+  if filename != '-' and file_extension not in GetAllExtensions():
+    _cpplint_state.PrintError('Ignoring %s; not a valid file name '
+                     '(%s)\n' % (filename, ', '.join(GetAllExtensions())))
   else:
     ProcessFileData(filename, file_extension, lines, Error,
                     extra_check_functions)
@@ -6101,7 +6678,7 @@
   # Suppress printing anything if --quiet was passed unless the error
   # count has increased after processing this file.
   if not _cpplint_state.quiet or old_errors != _cpplint_state.error_count:
-    sys.stdout.write('Done processing %s\n' % filename)
+    _cpplint_state.PrintInfo('Done processing %s\n' % filename)
   _RestoreFilters()
 
 
@@ -6111,12 +6688,21 @@
   Args:
     message: The optional error message.
   """
-  sys.stderr.write(_USAGE)
+  sys.stderr.write(_USAGE  % (sorted(list(GetAllExtensions())),
+       ','.join(sorted(list(GetAllExtensions()))),
+       sorted(GetHeaderExtensions()),
+       ','.join(sorted(GetHeaderExtensions()))))
+
   if message:
     sys.exit('\nFATAL ERROR: ' + message)
   else:
-    sys.exit(1)
+    sys.exit(0)
 
+def PrintVersion():
+  sys.stdout.write('Cpplint fork (https://github.com/cpplint/cpplint)\n')
+  sys.stdout.write('cpplint ' + __VERSION__ + '\n')
+  sys.stdout.write('Python ' + sys.version + '\n')
+  sys.exit(0)
 
 def PrintCategories():
   """Prints a list of all the error-categories used by error messages.
@@ -6140,12 +6726,18 @@
   """
   try:
     (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
+                                                 'v=',
+                                                 'version',
                                                  'counting=',
                                                  'filter=',
                                                  'root=',
+                                                 'repository=',
                                                  'linelength=',
                                                  'extensions=',
+                                                 'exclude=',
+                                                 'recursive',
                                                  'headers=',
+                                                 'includeorder=',
                                                  'quiet'])
   except getopt.GetoptError:
     PrintUsage('Invalid arguments.')
@@ -6155,17 +6747,21 @@
   filters = ''
   quiet = _Quiet()
   counting_style = ''
+  recursive = False
 
   for (opt, val) in opts:
     if opt == '--help':
       PrintUsage(None)
+    if opt == '--version':
+      PrintVersion()
     elif opt == '--output':
-      if val not in ('emacs', 'vs7', 'eclipse'):
-        PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
+      if val not in ('emacs', 'vs7', 'eclipse', 'junit', 'sed', 'gsed'):
+        PrintUsage('The only allowed output formats are emacs, vs7, eclipse '
+                   'sed, gsed and junit.')
       output_format = val
     elif opt == '--quiet':
       quiet = True
-    elif opt == '--verbose':
+    elif opt == '--verbose' or opt == '--v':
       verbosity = int(val)
     elif opt == '--filter':
       filters = val
@@ -6178,49 +6774,126 @@
     elif opt == '--root':
       global _root
       _root = val
+    elif opt == '--repository':
+      global _repository
+      _repository = val
     elif opt == '--linelength':
       global _line_length
       try:
-          _line_length = int(val)
+        _line_length = int(val)
       except ValueError:
-          PrintUsage('Line length must be digits.')
+        PrintUsage('Line length must be digits.')
+    elif opt == '--exclude':
+      global _excludes
+      if not _excludes:
+        _excludes = set()
+      _excludes.update(glob.glob(val))
     elif opt == '--extensions':
-      global _valid_extensions
-      try:
-          _valid_extensions = set(val.split(','))
-      except ValueError:
-          PrintUsage('Extensions must be comma seperated list.')
+      ProcessExtensionsOption(val)
     elif opt == '--headers':
       ProcessHppHeadersOption(val)
+    elif opt == '--recursive':
+      recursive = True
+    elif opt == '--includeorder':
+      ProcessIncludeOrderOption(val)
 
   if not filenames:
     PrintUsage('No files were specified.')
 
+  if recursive:
+    filenames = _ExpandDirectories(filenames)
+
+  if _excludes:
+    filenames = _FilterExcludedFiles(filenames)
+
   _SetOutputFormat(output_format)
   _SetQuiet(quiet)
   _SetVerboseLevel(verbosity)
   _SetFilters(filters)
   _SetCountingStyle(counting_style)
 
+  filenames.sort()
   return filenames
 
+def _ExpandDirectories(filenames):
+  """Searches a list of filenames and replaces directories in the list with
+  all files descending from those directories. Files with extensions not in
+  the valid extensions list are excluded.
+
+  Args:
+    filenames: A list of files or directories
+
+  Returns:
+    A list of all files that are members of filenames or descended from a
+    directory in filenames
+  """
+  expanded = set()
+  for filename in filenames:
+    if not os.path.isdir(filename):
+      expanded.add(filename)
+      continue
+
+    for root, _, files in os.walk(filename):
+      for loopfile in files:
+        fullname = os.path.join(root, loopfile)
+        if fullname.startswith('.' + os.path.sep):
+          fullname = fullname[len('.' + os.path.sep):]
+        expanded.add(fullname)
+
+  filtered = []
+  for filename in expanded:
+    if os.path.splitext(filename)[1][1:] in GetAllExtensions():
+      filtered.append(filename)
+  return filtered
+
+def _FilterExcludedFiles(fnames):
+  """Filters out files listed in the --exclude command line switch. File paths
+  in the switch are evaluated relative to the current working directory
+  """
+  exclude_paths = [os.path.abspath(f) for f in _excludes]
+  # because globbing does not work recursively, exclude all subpath of all excluded entries
+  return [f for f in fnames
+          if not any(e for e in exclude_paths
+                  if _IsParentOrSame(e, os.path.abspath(f)))]
+
+def _IsParentOrSame(parent, child):
+  """Return true if child is subdirectory of parent.
+  Assumes both paths are absolute and don't contain symlinks.
+  """
+  parent = os.path.normpath(parent)
+  child = os.path.normpath(child)
+  if parent == child:
+    return True
+
+  prefix = os.path.commonprefix([parent, child])
+  if prefix != parent:
+    return False
+  # Note: os.path.commonprefix operates on character basis, so
+  # take extra care of situations like '/foo/ba' and '/foo/bar/baz'
+  child_suffix = child[len(prefix):]
+  child_suffix = child_suffix.lstrip(os.sep)
+  return child == os.path.join(prefix, child_suffix)
 
 def main():
   filenames = ParseArguments(sys.argv[1:])
+  backup_err = sys.stderr
+  try:
+    # Change stderr to write with replacement characters so we don't die
+    # if we try to print something containing non-ASCII characters.
+    sys.stderr = codecs.StreamReader(sys.stderr, 'replace')
 
-  # Change stderr to write with replacement characters so we don't die
-  # if we try to print something containing non-ASCII characters.
-  sys.stderr = codecs.StreamReaderWriter(sys.stderr,
-                                         codecs.getreader('utf8'),
-                                         codecs.getwriter('utf8'),
-                                         'replace')
+    _cpplint_state.ResetErrorCounts()
+    for filename in filenames:
+      ProcessFile(filename, _cpplint_state.verbose_level)
+    # If --quiet is passed, suppress printing error count unless there are errors.
+    if not _cpplint_state.quiet or _cpplint_state.error_count > 0:
+      _cpplint_state.PrintErrorCounts()
 
-  _cpplint_state.ResetErrorCounts()
-  for filename in filenames:
-    ProcessFile(filename, _cpplint_state.verbose_level)
-  # If --quiet is passed, suppress printing error count unless there are errors.
-  if not _cpplint_state.quiet or _cpplint_state.error_count > 0:
-    _cpplint_state.PrintErrorCounts()
+    if _cpplint_state.output_format == 'junit':
+      sys.stderr.write(_cpplint_state.FormatJUnitXML())
+
+  finally:
+    sys.stderr = backup_err
 
   sys.exit(_cpplint_state.error_count > 0)
 
diff --git a/tools/cpplint.py-update b/tools/cpplint.py-update
index 4af4389..3d32330 100755
--- a/tools/cpplint.py-update
+++ b/tools/cpplint.py-update
@@ -15,7 +15,10 @@
 
 set -eu
 
-GITHUB_URL="https://github.com/google/styleguide/raw/gh-pages"
+# The outdated Google version that only supports Python 2.
+GITHUB_URL="https://github.com/google/styleguide/raw/gh-pages/cpplint"
+# The forked version with Python 3 support.
+GITHUB_URL="https://github.com/cpplint/cpplint/raw/develop"
 SCRIPT_DIR="$(dirname "$(readlink -f -- "$0")")"
 
 usage() {
@@ -46,8 +49,11 @@
 
   # Download cpplint.py from upstream.
   local cpplint_py="${SCRIPT_DIR}/cpplint.py"
-  wget "${GITHUB_URL}/cpplint/cpplint.py" -O "${cpplint_py}"
-  sed -i '2i# pylint: skip-file' "${cpplint_py}"
+  wget "${GITHUB_URL}/cpplint.py" -O "${cpplint_py}"
+  sed -i \
+    -e '1s|python$|python3|' \
+    -e '2i# pylint: skip-file' \
+    "${cpplint_py}"
   chmod +x "${cpplint_py}"
 }
 
diff --git a/tools/google-java-format.py b/tools/google-java-format.py
index a9ac4d1..6659511 100755
--- a/tools/google-java-format.py
+++ b/tools/google-java-format.py
@@ -1,5 +1,4 @@
-#!/usr/bin/python
-# -*- coding:utf-8 -*-
+#!/usr/bin/env python3
 # Copyright 2016 The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,8 +15,6 @@
 
 """Wrapper to run google-java-format to check for any malformatted changes."""
 
-from __future__ import print_function
-
 import argparse
 import os
 import sys
@@ -83,7 +80,7 @@
     # https://github.com/google/google-java-format/issues/107
     diff_cmd = ['git', 'diff', '--no-ext-diff', '-U0', '%s^!' % opts.commit]
     diff_cmd.extend(['--'] + opts.files)
-    diff = rh.utils.run_command(diff_cmd, capture_output=True).output
+    diff = rh.utils.run(diff_cmd, capture_output=True).stdout
 
     cmd = [opts.google_java_format_diff, '-p1', '--aosp']
     if opts.fix:
@@ -91,10 +88,8 @@
     if not opts.sort_imports:
         cmd.extend(['--skip-sorting-imports'])
 
-    stdout = rh.utils.run_command(cmd,
-                                  input=diff,
-                                  capture_output=True,
-                                  extra_env=extra_env).output
+    stdout = rh.utils.run(cmd, input=diff, capture_output=True,
+                          extra_env=extra_env).stdout
     if stdout:
         print('One or more files in your commit have Java formatting errors.')
         print('You can run `%s --fix %s` to fix this' %
diff --git a/tools/pylint.py b/tools/pylint.py
index b890b9e..570f055 100755
--- a/tools/pylint.py
+++ b/tools/pylint.py
@@ -1,5 +1,4 @@
-#!/usr/bin/python
-# -*- coding:utf-8 -*-
+#!/usr/bin/env python3
 # Copyright 2016 The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,25 +15,61 @@
 
 """Wrapper to run pylint with the right settings."""
 
-from __future__ import print_function
-
 import argparse
 import errno
 import os
+import shutil
 import sys
+import subprocess
+
+
+assert (sys.version_info.major, sys.version_info.minor) >= (3, 6), (
+    'Python 3.6 or newer is required; found %s' % (sys.version,))
 
 
 DEFAULT_PYLINTRC_PATH = os.path.join(
     os.path.dirname(os.path.realpath(__file__)), 'pylintrc')
 
 
+def is_pylint3(pylint):
+    """See whether |pylint| supports Python 3."""
+    # Make sure pylint is using Python 3.
+    result = subprocess.run([pylint, '--version'], stdout=subprocess.PIPE,
+                            check=True)
+    if b'Python 3' not in result.stdout:
+        print('%s: unable to locate a Python 3 version of pylint; Python 3 '
+              'support cannot be guaranteed' % (__file__,), file=sys.stderr)
+        return False
+
+    return True
+
+
+def find_pylint3():
+    """Figure out the name of the pylint tool for Python 3.
+
+    It keeps changing with Python 2->3 migrations.  Fun.
+    """
+    # Prefer pylint3 as that's what we want.
+    if shutil.which('pylint3'):
+        return 'pylint3'
+
+    # If there's no pylint, give up.
+    if not shutil.which('pylint'):
+        print('%s: unable to locate pylint; please install:\n'
+              'sudo apt-get install pylint' % (__file__,), file=sys.stderr)
+        sys.exit(1)
+
+    return 'pylint'
+
+
 def get_parser():
     """Return a command line parser."""
     parser = argparse.ArgumentParser(description=__doc__)
     parser.add_argument('--init-hook', help='Init hook commands to run.')
+    parser.add_argument('--py3', action='store_true',
+                        help='Force Python 3 mode')
     parser.add_argument('--executable-path',
-                        help='The path of the pylint executable.',
-                        default='pylint')
+                        help='The path of the pylint executable.')
     parser.add_argument('--no-rcfile',
                         help='Specify to use the executable\'s default '
                         'configuration.',
@@ -48,7 +83,18 @@
     parser = get_parser()
     opts, unknown = parser.parse_known_args(argv)
 
-    cmd = [opts.executable_path]
+    pylint = opts.executable_path
+    if pylint is None:
+        if opts.py3:
+            pylint = find_pylint3()
+        else:
+            pylint = 'pylint'
+
+    # Make sure pylint is using Python 3.
+    if opts.py3:
+        is_pylint3(pylint)
+
+    cmd = [pylint]
     if not opts.no_rcfile:
         # We assume pylint is running in the top directory of the project,
         # so load the pylintrc file from there if it's available.
@@ -75,8 +121,8 @@
             print('%s: Try installing pylint: sudo apt-get install %s' %
                   (__file__, os.path.basename(cmd[0])), file=sys.stderr)
             return 1
-        else:
-            raise
+
+        raise
 
 
 if __name__ == '__main__':
diff --git a/tools/pylintrc b/tools/pylintrc
index f1ea6b2..68c74ef 100644
--- a/tools/pylintrc
+++ b/tools/pylintrc
@@ -106,6 +106,7 @@
     intern-builtin,
     invalid-str-codec,
     long-builtin,
+    long-suffix,
     map-builtin-not-iterating,
     metaclass-assignment,
     next-method-called,
@@ -113,6 +114,8 @@
     nonzero-method,
     oct-method,
     old-division,
+    old-ne-operator,
+    old-octal-literal,
     old-raise-syntax,
     parameter-unpacking,
     print-statement,
@@ -144,6 +147,7 @@
 # no Warning level messages displayed, use"--disable=all --enable=classes
 # --disable=W"
 # We leave many of the style warnings to judgement/peer review.
+# useless-object-inheritance: We disable this for Python 2 compatibility.
 disable=
     fixme,
     file-ignored,
@@ -162,6 +166,7 @@
     too-many-public-methods,
     too-many-return-statements,
     too-many-statements,
+    useless-object-inheritance,
 
 
 [REPORTS]
@@ -179,6 +184,9 @@
 # Tells whether to display a full report or only the messages
 reports=no
 
+# Activate the evaluation score.
+score=no
+
 # Python expression which should return a note less than 10 (10 is the highest
 # note). You have access to the variables errors warning, statement which
 # respectively contain the number of errors / warnings messages and the total