[PATCH 1/3] Add patman patch automation script

From: Simon Glass
Date: Sun May 03 2015 - 14:31:57 EST


This tool is a Python script which:
- Creates patches directly from your branch
- Cleans them up by removing unwanted tags
- Inserts a cover letter with change lists
- Runs the patches through checkpatch.pl and its own checks
- Optionally emails them out to selected people

Add the main part of the code, excluding tests and documentation.
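
As a rough guide (the flag shown is only an example; see the tool's
--help for the full list), a dry run on the current branch looks like:

  ./tools/patman/patman -n

which creates and checks the patches and shows what would be emailed,
without sending anything.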

Signed-off-by: Simon Glass <sjg@xxxxxxxxxxxx>
---

tools/patman/.gitignore | 1 +
tools/patman/checkpatch.py | 173 ++++++++++++
tools/patman/command.py | 123 +++++++++
tools/patman/commit.py | 88 ++++++
tools/patman/cros_subprocess.py | 397 +++++++++++++++++++++++++++
tools/patman/get_maintainer.py | 47 ++++
tools/patman/gitutil.py | 582 ++++++++++++++++++++++++++++++++++++++++
tools/patman/patchstream.py | 488 +++++++++++++++++++++++++++++++++
tools/patman/patman | 1 +
tools/patman/patman.py | 167 ++++++++++++
tools/patman/project.py | 27 ++
tools/patman/series.py | 271 +++++++++++++++++++
tools/patman/settings.py | 295 ++++++++++++++++++++
tools/patman/terminal.py | 158 +++++++++++
14 files changed, 2818 insertions(+)
create mode 100644 tools/patman/.gitignore
create mode 100644 tools/patman/checkpatch.py
create mode 100644 tools/patman/command.py
create mode 100644 tools/patman/commit.py
create mode 100644 tools/patman/cros_subprocess.py
create mode 100644 tools/patman/get_maintainer.py
create mode 100644 tools/patman/gitutil.py
create mode 100644 tools/patman/patchstream.py
create mode 120000 tools/patman/patman
create mode 100755 tools/patman/patman.py
create mode 100644 tools/patman/project.py
create mode 100644 tools/patman/series.py
create mode 100644 tools/patman/settings.py
create mode 100644 tools/patman/terminal.py

diff --git a/tools/patman/.gitignore b/tools/patman/.gitignore
new file mode 100644
index 0000000..0d20b64
--- /dev/null
+++ b/tools/patman/.gitignore
@@ -0,0 +1 @@
+*.pyc
diff --git a/tools/patman/checkpatch.py b/tools/patman/checkpatch.py
new file mode 100644
index 0000000..34a3bd2
--- /dev/null
+++ b/tools/patman/checkpatch.py
@@ -0,0 +1,173 @@
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+import collections
+import command
+import gitutil
+import os
+import re
+import sys
+import terminal
+
+def FindCheckPatch():
+ top_level = gitutil.GetTopLevel()
+ try_list = [
+ os.getcwd(),
+ os.path.join(os.getcwd(), '..', '..'),
+ os.path.join(top_level, 'tools'),
+ os.path.join(top_level, 'scripts'),
+ '%s/bin' % os.getenv('HOME'),
+ ]
+ # Look in each of the possible locations
+ for path in try_list:
+ fname = os.path.join(path, 'checkpatch.pl')
+ if os.path.isfile(fname):
+ return fname
+
+ # Look upwards for a Chrome OS tree
+ while not os.path.ismount(path):
+ fname = os.path.join(path, 'src', 'third_party', 'kernel', 'files',
+ 'scripts', 'checkpatch.pl')
+ if os.path.isfile(fname):
+ return fname
+ path = os.path.dirname(path)
+
+ sys.exit('Cannot find checkpatch.pl - please put it in your ' +
+ '~/bin directory or use --no-check')
+
+def CheckPatch(fname, verbose=False):
+ """Run checkpatch.pl on a file.
+
+ Returns:
+ namedtuple containing:
+ ok: False=failure, True=ok
+ problems: List of problems, each a dict:
+ 'type': 'error', 'warning' or 'check'
+ 'msg': text message
+ 'file' : filename
+ 'line': line number
+ errors: Number of errors
+ warnings: Number of warnings
+ checks: Number of checks
+ lines: Number of lines
+ stdout: Full output of checkpatch
+ """
+ fields = ['ok', 'problems', 'errors', 'warnings', 'checks', 'lines',
+ 'stdout']
+ result = collections.namedtuple('CheckPatchResult', fields)
+ result.ok = False
+ result.errors, result.warnings, result.checks = 0, 0, 0
+ result.lines = 0
+ result.problems = []
+ chk = FindCheckPatch()
+ item = {}
+ result.stdout = command.Output(chk, '--no-tree', fname)
+ #pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ #stdout, stderr = pipe.communicate()
+
+ # total: 0 errors, 0 warnings, 159 lines checked
+ # or:
+ # total: 0 errors, 2 warnings, 7 checks, 473 lines checked
+ re_stats = re.compile(r'total: (\d+) errors, (\d+) warnings, (\d+)')
+ re_stats_full = re.compile(r'total: (\d+) errors, (\d+) warnings, (\d+)'
+ r' checks, (\d+)')
+ re_ok = re.compile('.*has no obvious style problems')
+ re_bad = re.compile('.*has style problems, please review')
+ re_error = re.compile('ERROR: (.*)')
+ re_warning = re.compile('WARNING: (.*)')
+ re_check = re.compile('CHECK: (.*)')
+ re_file = re.compile('#\d+: FILE: ([^:]*):(\d+):')
+
+ for line in result.stdout.splitlines():
+ if verbose:
+ print line
+
+ # A blank line indicates the end of a message
+ if not line and item:
+ result.problems.append(item)
+ item = {}
+ match = re_stats_full.match(line)
+ if not match:
+ match = re_stats.match(line)
+ if match:
+ result.errors = int(match.group(1))
+ result.warnings = int(match.group(2))
+ if len(match.groups()) == 4:
+ result.checks = int(match.group(3))
+ result.lines = int(match.group(4))
+ else:
+ result.lines = int(match.group(3))
+ elif re_ok.match(line):
+ result.ok = True
+ elif re_bad.match(line):
+ result.ok = False
+ err_match = re_error.match(line)
+ warn_match = re_warning.match(line)
+ file_match = re_file.match(line)
+ check_match = re_check.match(line)
+ if err_match:
+ item['msg'] = err_match.group(1)
+ item['type'] = 'error'
+ elif warn_match:
+ item['msg'] = warn_match.group(1)
+ item['type'] = 'warning'
+ elif check_match:
+ item['msg'] = check_match.group(1)
+ item['type'] = 'check'
+ elif file_match:
+ item['file'] = file_match.group(1)
+ item['line'] = int(file_match.group(2))
+
+ return result
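+
+# Callers such as CheckPatches() below use CheckPatch() roughly like this
+# (a sketch only; the patch filename is made up):
+#
+#   result = CheckPatch('0001-example.patch')
+#   if not result.ok:
+#       for item in result.problems:
+#           print GetWarningMsg(terminal.Color(), item.get('type', 'error'),
+#                               item.get('file', '?'), item.get('line', 0),
+#                               item.get('msg', ''))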
+
+def GetWarningMsg(col, msg_type, fname, line, msg):
+ '''Create a message for a given file/line
+
+ Args:
+ col: terminal.Color() object used to color the message
+ msg_type: Message type ('error', 'warning' or 'check')
+ fname: Filename which reports the problem
+ line: Line number where it was noticed
+ msg: Message to report
+ '''
+ if msg_type == 'warning':
+ msg_type = col.Color(col.YELLOW, msg_type)
+ elif msg_type == 'error':
+ msg_type = col.Color(col.RED, msg_type)
+ elif msg_type == 'check':
+ msg_type = col.Color(col.MAGENTA, msg_type)
+ return '%s: %s,%d: %s' % (msg_type, fname, line, msg)
+
+def CheckPatches(verbose, args):
+ '''Run the checkpatch.pl script on each patch'''
+ error_count, warning_count, check_count = 0, 0, 0
+ col = terminal.Color()
+
+ for fname in args:
+ result = CheckPatch(fname, verbose)
+ if not result.ok:
+ error_count += result.errors
+ warning_count += result.warnings
+ check_count += result.checks
+ print '%d errors, %d warnings, %d checks for %s:' % (result.errors,
+ result.warnings, result.checks, col.Color(col.BLUE, fname))
+ if (len(result.problems) != result.errors + result.warnings +
+ result.checks):
+ print "Internal error: some problems lost"
+ for item in result.problems:
+ print GetWarningMsg(col, item.get('type', '<unknown>'),
+ item.get('file', '<unknown>'),
+ item.get('line', 0), item.get('msg', 'message'))
+ print
+ #print stdout
+ if error_count or warning_count or check_count:
+ str = 'checkpatch.pl found %d error(s), %d warning(s), %d check(s)'
+ color = col.GREEN
+ if warning_count:
+ color = col.YELLOW
+ if error_count:
+ color = col.RED
+ print col.Color(color, str % (error_count, warning_count, check_count))
+ return False
+ return True
diff --git a/tools/patman/command.py b/tools/patman/command.py
new file mode 100644
index 0000000..d586f11
--- /dev/null
+++ b/tools/patman/command.py
@@ -0,0 +1,123 @@
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+import os
+import cros_subprocess
+
+"""Shell command ease-ups for Python."""
+
+class CommandResult:
+ """A class which captures the result of executing a command.
+
+ Members:
+ stdout: stdout obtained from command, as a string
+ stderr: stderr obtained from command, as a string
+ return_code: Return code from command
+ exception: Exception received, or None if all ok
+ """
+
+ def __init__(self, stdout='', stderr='', combined='', return_code=0,
+ exception=None):
+ self.stdout = stdout
+ self.stderr = stderr
+ self.combined = combined
+ self.return_code = return_code
+ self.exception = exception
+
+
+# This permits interception of RunPipe for test purposes. If it is set to
+# a function, then that function is called with the pipe list being
+# executed. Otherwise, it is assumed to be a CommandResult object, and is
+# returned as the result for every RunPipe() call.
+# When this value is None, commands are executed as normal.
+test_result = None
+
+def RunPipe(pipe_list, infile=None, outfile=None,
+ capture=False, capture_stderr=False, oneline=False,
+ raise_on_error=True, cwd=None, **kwargs):
+ """
+ Perform a command pipeline, with optional input/output filenames.
+
+ Args:
+ pipe_list: List of command lines to execute. Each command line is
+ piped into the next, and is itself a list of strings. For
+ example [ ['ls', '.git'], ['wc'] ] will pipe the output of
+ 'ls .git' into 'wc'.
+ infile: File to provide stdin to the pipeline
+ outfile: File to store stdout
+ capture: True to capture output
+ capture_stderr: True to capture stderr
+ oneline: True to strip newline chars from output
+ kwargs: Additional keyword arguments to cros_subprocess.Popen()
+ Returns:
+ CommandResult object
+ """
+ if test_result:
+ if hasattr(test_result, '__call__'):
+ return test_result(pipe_list=pipe_list)
+ return test_result
+ result = CommandResult()
+ last_pipe = None
+ pipeline = list(pipe_list)
+ user_pipestr = '|'.join([' '.join(pipe) for pipe in pipe_list])
+ kwargs['stdout'] = None
+ kwargs['stderr'] = None
+ while pipeline:
+ cmd = pipeline.pop(0)
+ if last_pipe is not None:
+ kwargs['stdin'] = last_pipe.stdout
+ elif infile:
+ kwargs['stdin'] = open(infile, 'rb')
+ if pipeline or capture:
+ kwargs['stdout'] = cros_subprocess.PIPE
+ elif outfile:
+ kwargs['stdout'] = open(outfile, 'wb')
+ if capture_stderr:
+ kwargs['stderr'] = cros_subprocess.PIPE
+
+ try:
+ last_pipe = cros_subprocess.Popen(cmd, cwd=cwd, **kwargs)
+ except Exception, err:
+ result.exception = err
+ if raise_on_error:
+ raise Exception("Error running '%s': %s" % (user_pipestr, str))
+ result.return_code = 255
+ return result
+
+ if capture:
+ result.stdout, result.stderr, result.combined = (
+ last_pipe.CommunicateFilter(None))
+ if result.stdout and oneline:
+ result.output = result.stdout.rstrip('\r\n')
+ result.return_code = last_pipe.wait()
+ else:
+ result.return_code = os.waitpid(last_pipe.pid, 0)[1]
+ if raise_on_error and result.return_code:
+ raise Exception("Error running '%s'" % user_pipestr)
+ return result
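+
+# Illustrative use of RunPipe() (a sketch; the commands are just examples):
+#
+#   result = RunPipe([['ls', '.git'], ['wc', '-l']], capture=True)
+#   print result.stdout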
+
+def Output(*cmd):
+ return RunPipe([cmd], capture=True, raise_on_error=False).stdout
+
+def OutputOneLine(*cmd, **kwargs):
+ raise_on_error = kwargs.pop('raise_on_error', True)
+ return (RunPipe([cmd], capture=True, oneline=True,
+ raise_on_error=raise_on_error,
+ **kwargs).stdout.strip())
+
+def Run(*cmd, **kwargs):
+ return RunPipe([cmd], **kwargs).stdout
+
+def RunList(cmd):
+ return RunPipe([cmd], capture=True).stdout
+
+def StopAll():
+ cros_subprocess.stay_alive = False
diff --git a/tools/patman/commit.py b/tools/patman/commit.py
new file mode 100644
index 0000000..3e0adb8
--- /dev/null
+++ b/tools/patman/commit.py
@@ -0,0 +1,88 @@
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+import re
+
+# Separates a tag: at the beginning of the subject from the rest of it
+re_subject_tag = re.compile('([^:\s]*):\s*(.*)')
+
+class Commit:
+ """Holds information about a single commit/patch in the series.
+
+ Args:
+ hash: Commit hash (as a string)
+
+ Variables:
+ hash: Commit hash
+ subject: Subject line
+ tags: List of maintainer tag strings
+ changes: Dict containing a list of changes (single line strings).
+ The dict is indexed by change version (an integer)
+ cc_list: List of people to aliases/emails to cc on this commit
+ notes: List of lines in the commit (not series) notes
+ """
+ def __init__(self, hash):
+ self.hash = hash
+ self.subject = None
+ self.tags = []
+ self.changes = {}
+ self.cc_list = []
+ self.signoff_set = set()
+ self.notes = []
+
+ def AddChange(self, version, info):
+ """Add a new change line to the change list for a version.
+
+ Args:
+ version: Patch set version (integer: 1, 2, 3)
+ info: Description of change in this version
+ """
+ if not self.changes.get(version):
+ self.changes[version] = []
+ self.changes[version].append(info)
+
+ def CheckTags(self):
+ """Create a list of subject tags in the commit
+
+ Subject tags look like this:
+
+ propounder: fort: Change the widget to propound correctly
+
+ Here the tags are propounder and fort. Multiple tags are supported.
+ The list is updated in self.tag.
+
+ Returns:
+ None if ok, else the name of a tag with no email alias
+ """
+ str = self.subject
+ m = True
+ while m:
+ m = re_subject_tag.match(str)
+ if m:
+ tag = m.group(1)
+ self.tags.append(tag)
+ str = m.group(2)
+ return None
+
+ def AddCc(self, cc_list):
+ """Add a list of people to Cc when we send this patch.
+
+ Args:
+ cc_list: List of aliases or email addresses
+ """
+ self.cc_list += cc_list
+
+ def CheckDuplicateSignoff(self, signoff):
+ """Check a list of signoffs we have send for this patch
+
+ Args:
+ signoff: Signoff line
+ Returns:
+ True if this signoff is new, False if we have already seen it.
+ """
+ if signoff in self.signoff_set:
+ return False
+ self.signoff_set.add(signoff)
+ return True
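+
+# Sketch of how patchstream typically builds up a Commit (the hash, subject
+# and change text here are invented):
+#
+#   cmt = Commit('1a2b3c4d')
+#   cmt.subject = 'arm: Tidy up the widget'
+#   cmt.AddChange(2, 'Fix the widget timing')
+#   cmt.AddCc(['fred@example.com'])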
diff --git a/tools/patman/cros_subprocess.py b/tools/patman/cros_subprocess.py
new file mode 100644
index 0000000..0fc4a06
--- /dev/null
+++ b/tools/patman/cros_subprocess.py
@@ -0,0 +1,397 @@
+# Copyright (c) 2012 The Chromium OS Authors.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Copyright (c) 2003-2005 by Peter Astrand <astrand@xxxxxxxxxxxxxx>
+# Licensed to PSF under a Contributor Agreement.
+# See http://www.python.org/2.4/license for licensing details.
+
+"""Subprocress execution
+
+This module holds a subclass of subprocess.Popen with our own required
+features, mainly that we get access to the subprocess output while it
+is running rather than just at the end. This makes it easier to show
+progress information and filter output in real time.
+"""
+
+import errno
+import os
+import pty
+import select
+import subprocess
+import sys
+import unittest
+
+
+# Import these here so the caller does not need to import subprocess also.
+PIPE = subprocess.PIPE
+STDOUT = subprocess.STDOUT
+PIPE_PTY = -3 # Pipe output through a pty
+stay_alive = True
+
+
+class Popen(subprocess.Popen):
+ """Like subprocess.Popen with ptys and incremental output
+
+ This class deals with running a child process and filtering its output on
+ both stdout and stderr while it is running. We do this so we can monitor
+ progress, and possibly relay the output to the user if requested.
+
+ The class is similar to subprocess.Popen, the equivalent is something like:
+
+ Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ But this class has many fewer features, and two enhancements:
+
+ 1. Rather than getting the output data only at the end, this class sends it
+ to a provided operation as it arrives.
+ 2. We use pseudo terminals so that the child will hopefully flush its output
+ to us as soon as it is produced, rather than waiting for the end of a
+ line.
+
+ Use CommunicateFilter() to handle output from the subprocess.
+
+ """
+
+ def __init__(self, args, stdin=None, stdout=PIPE_PTY, stderr=PIPE_PTY,
+ shell=False, cwd=None, env=None, **kwargs):
+ """Cut-down constructor
+
+ Args:
+ args: Program and arguments for subprocess to execute.
+ stdin: See subprocess.Popen()
+ stdout: See subprocess.Popen(), except that we support the sentinel
+ value of cros_subprocess.PIPE_PTY.
+ stderr: See subprocess.Popen(), except that we support the sentinel
+ value of cros_subprocess.PIPE_PTY.
+ shell: See subprocess.Popen()
+ cwd: Working directory to change to for subprocess, or None if none.
+ env: Environment to use for this subprocess, or None to inherit parent.
+ kwargs: No other arguments are supported at the moment. Passing other
+ arguments will cause a ValueError to be raised.
+ """
+ stdout_pty = None
+ stderr_pty = None
+
+ if stdout == PIPE_PTY:
+ stdout_pty = pty.openpty()
+ stdout = os.fdopen(stdout_pty[1])
+ if stderr == PIPE_PTY:
+ stderr_pty = pty.openpty()
+ stderr = os.fdopen(stderr_pty[1])
+
+ super(Popen, self).__init__(args, stdin=stdin,
+ stdout=stdout, stderr=stderr, shell=shell, cwd=cwd, env=env,
+ **kwargs)
+
+ # If we're on a PTY, we passed the slave half of the PTY to the subprocess.
+ # We want to use the master half on our end from now on. Setting this here
+ # does make some assumptions about the implementation of subprocess, but
+ # those assumptions are pretty minor.
+
+ # Note that if stderr is STDOUT, then self.stderr will be set to None by
+ # this constructor.
+ if stdout_pty is not None:
+ self.stdout = os.fdopen(stdout_pty[0])
+ if stderr_pty is not None:
+ self.stderr = os.fdopen(stderr_pty[0])
+
+ # Insist that unit tests exist for other arguments we don't support.
+ if kwargs:
+ raise ValueError("Unit tests do not test extra args - please add tests")
+
+ def CommunicateFilter(self, output):
+ """Interact with process: Read data from stdout and stderr.
+
+ This method runs until end-of-file is reached, then waits for the
+ subprocess to terminate.
+
+ The output function is sent all output from the subprocess and must be
+ defined like this:
+
+ def Output([self,] stream, data)
+ Args:
+ stream: the stream the output was received on, which will be
+ sys.stdout or sys.stderr.
+ data: a string containing the data
+
+ Note: The data read is buffered in memory, so do not use this
+ method if the data size is large or unlimited.
+
+ Args:
+ output: Function to call with each fragment of output.
+
+ Returns:
+ A tuple (stdout, stderr, combined) which is the data received on
+ stdout, stderr and the combined data (interleaved stdout and stderr).
+
+ Note that the interleaved output will only be sensible if you have
+ set both stdout and stderr to PIPE or PIPE_PTY. Even then it depends on
+ the timing of the output in the subprocess. If a subprocess flips
+ between stdout and stderr quickly in succession, by the time we come to
+ read the output from each we may see several lines in each, and will read
+ all the stdout lines, then all the stderr lines. So the interleaving
+ may not be correct. In this case you might want to pass
+ stderr=cros_subprocess.STDOUT to the constructor.
+
+ This feature is still useful for subprocesses where stderr is
+ rarely used and indicates an error.
+
+ Note also that if you set stderr to STDOUT, then stderr will be empty
+ and the combined output will just be the same as stdout.
+ """
+
+ read_set = []
+ write_set = []
+ stdout = None # Return
+ stderr = None # Return
+
+ if self.stdin:
+ # Flush stdio buffer. This might block, if the user has
+ # been writing to .stdin in an uncontrolled fashion.
+ self.stdin.flush()
+ if input:
+ write_set.append(self.stdin)
+ else:
+ self.stdin.close()
+ if self.stdout:
+ read_set.append(self.stdout)
+ stdout = []
+ if self.stderr and self.stderr != self.stdout:
+ read_set.append(self.stderr)
+ stderr = []
+ combined = []
+
+ input_offset = 0
+ while read_set or write_set:
+ try:
+ rlist, wlist, _ = select.select(read_set, write_set, [], 0.2)
+ except select.error, e:
+ if e.args[0] == errno.EINTR:
+ continue
+ raise
+
+ if not stay_alive:
+ self.terminate()
+
+ if self.stdin in wlist:
+ # When select has indicated that the file is writable,
+ # we can write up to PIPE_BUF bytes without risk of
+ # blocking. POSIX defines PIPE_BUF >= 512
+ chunk = input[input_offset : input_offset + 512]
+ bytes_written = os.write(self.stdin.fileno(), chunk)
+ input_offset += bytes_written
+ if input_offset >= len(input):
+ self.stdin.close()
+ write_set.remove(self.stdin)
+
+ if self.stdout in rlist:
+ data = ""
+ # We will get an error on read if the pty is closed
+ try:
+ data = os.read(self.stdout.fileno(), 1024)
+ except OSError:
+ pass
+ if data == "":
+ self.stdout.close()
+ read_set.remove(self.stdout)
+ else:
+ stdout.append(data)
+ combined.append(data)
+ if output:
+ output(sys.stdout, data)
+ if self.stderr in rlist:
+ data = ""
+ # We will get an error on read if the pty is closed
+ try:
+ data = os.read(self.stderr.fileno(), 1024)
+ except OSError:
+ pass
+ if data == "":
+ self.stderr.close()
+ read_set.remove(self.stderr)
+ else:
+ stderr.append(data)
+ combined.append(data)
+ if output:
+ output(sys.stderr, data)
+
+ # All data exchanged. Translate lists into strings.
+ if stdout is not None:
+ stdout = ''.join(stdout)
+ else:
+ stdout = ''
+ if stderr is not None:
+ stderr = ''.join(stderr)
+ else:
+ stderr = ''
+ combined = ''.join(combined)
+
+ # Translate newlines, if requested. We cannot let the file
+ # object do the translation: It is based on stdio, which is
+ # impossible to combine with select (unless forcing no
+ # buffering).
+ if self.universal_newlines and hasattr(file, 'newlines'):
+ if stdout:
+ stdout = self._translate_newlines(stdout)
+ if stderr:
+ stderr = self._translate_newlines(stderr)
+
+ self.wait()
+ return (stdout, stderr, combined)
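+
+# Sketch of typical use (TestSubprocess below has real examples); the output
+# function is called with each fragment of output as it arrives:
+#
+#   def Output(stream, data):
+#       sys.stdout.write(data)
+#
+#   stdout, stderr, combined = Popen(['ps']).CommunicateFilter(Output)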
+
+
+# Just being a unittest.TestCase gives us 14 public methods. Unless we
+# disable this, we can only have 6 tests in a TestCase. That's not enough.
+#
+# pylint: disable=R0904
+
+class TestSubprocess(unittest.TestCase):
+ """Our simple unit test for this module"""
+
+ class MyOperation:
+ """Provides a operation that we can pass to Popen"""
+ def __init__(self, input_to_send=None):
+ """Constructor to set up the operation and possible input.
+
+ Args:
+ input_to_send: a text string to send when we first get input. We will
+ add \r\n to the string.
+ """
+ self.stdout_data = ''
+ self.stderr_data = ''
+ self.combined_data = ''
+ self.stdin_pipe = None
+ self._input_to_send = input_to_send
+ if input_to_send:
+ pipe = os.pipe()
+ self.stdin_read_pipe = pipe[0]
+ self._stdin_write_pipe = os.fdopen(pipe[1], 'w')
+
+ def Output(self, stream, data):
+ """Output handler for Popen. Stores the data for later comparison"""
+ if stream == sys.stdout:
+ self.stdout_data += data
+ if stream == sys.stderr:
+ self.stderr_data += data
+ self.combined_data += data
+
+ # Output the input string if we have one.
+ if self._input_to_send:
+ self._stdin_write_pipe.write(self._input_to_send + '\r\n')
+ self._stdin_write_pipe.flush()
+
+ def _BasicCheck(self, plist, oper):
+ """Basic checks that the output looks sane."""
+ self.assertEqual(plist[0], oper.stdout_data)
+ self.assertEqual(plist[1], oper.stderr_data)
+ self.assertEqual(plist[2], oper.combined_data)
+
+ # The total length of stdout and stderr should equal the combined length
+ self.assertEqual(len(plist[0]) + len(plist[1]), len(plist[2]))
+
+ def test_simple(self):
+ """Simple redirection: Get process list"""
+ oper = TestSubprocess.MyOperation()
+ plist = Popen(['ps']).CommunicateFilter(oper.Output)
+ self._BasicCheck(plist, oper)
+
+ def test_stderr(self):
+ """Check stdout and stderr"""
+ oper = TestSubprocess.MyOperation()
+ cmd = 'echo fred >/dev/stderr && false || echo bad'
+ plist = Popen([cmd], shell=True).CommunicateFilter(oper.Output)
+ self._BasicCheck(plist, oper)
+ self.assertEqual(plist [0], 'bad\r\n')
+ self.assertEqual(plist [1], 'fred\r\n')
+
+ def test_shell(self):
+ """Check with and without shell works"""
+ oper = TestSubprocess.MyOperation()
+ cmd = 'echo test >/dev/stderr'
+ self.assertRaises(OSError, Popen, [cmd], shell=False)
+ plist = Popen([cmd], shell=True).CommunicateFilter(oper.Output)
+ self._BasicCheck(plist, oper)
+ self.assertEqual(len(plist [0]), 0)
+ self.assertEqual(plist [1], 'test\r\n')
+
+ def test_list_args(self):
+ """Check with and without shell works using list arguments"""
+ oper = TestSubprocess.MyOperation()
+ cmd = ['echo', 'test', '>/dev/stderr']
+ plist = Popen(cmd, shell=False).CommunicateFilter(oper.Output)
+ self._BasicCheck(plist, oper)
+ self.assertEqual(plist [0], ' '.join(cmd[1:]) + '\r\n')
+ self.assertEqual(len(plist [1]), 0)
+
+ oper = TestSubprocess.MyOperation()
+
+ # this should be interpreted as 'echo' with the other args dropped
+ cmd = ['echo', 'test', '>/dev/stderr']
+ plist = Popen(cmd, shell=True).CommunicateFilter(oper.Output)
+ self._BasicCheck(plist, oper)
+ self.assertEqual(plist [0], '\r\n')
+
+ def test_cwd(self):
+ """Check we can change directory"""
+ for shell in (False, True):
+ oper = TestSubprocess.MyOperation()
+ plist = Popen('pwd', shell=shell, cwd='/tmp').CommunicateFilter(oper.Output)
+ self._BasicCheck(plist, oper)
+ self.assertEqual(plist [0], '/tmp\r\n')
+
+ def test_env(self):
+ """Check we can change environment"""
+ for add in (False, True):
+ oper = TestSubprocess.MyOperation()
+ env = os.environ
+ if add:
+ env ['FRED'] = 'fred'
+ cmd = 'echo $FRED'
+ plist = Popen(cmd, shell=True, env=env).CommunicateFilter(oper.Output)
+ self._BasicCheck(plist, oper)
+ self.assertEqual(plist [0], add and 'fred\r\n' or '\r\n')
+
+ def test_extra_args(self):
+ """Check we can't add extra arguments"""
+ self.assertRaises(ValueError, Popen, 'true', close_fds=False)
+
+ def test_basic_input(self):
+ """Check that incremental input works
+
+ We set up a subprocess which will prompt for name. When we see this prompt
+ we send the name as input to the process. It should then print the name
+ properly to stdout.
+ """
+ oper = TestSubprocess.MyOperation('Flash')
+ prompt = 'What is your name?: '
+ cmd = 'echo -n "%s"; read name; echo Hello $name' % prompt
+ plist = Popen([cmd], stdin=oper.stdin_read_pipe,
+ shell=True).CommunicateFilter(oper.Output)
+ self._BasicCheck(plist, oper)
+ self.assertEqual(len(plist [1]), 0)
+ self.assertEqual(plist [0], prompt + 'Hello Flash\r\r\n')
+
+ def test_isatty(self):
+ """Check that ptys appear as terminals to the subprocess"""
+ oper = TestSubprocess.MyOperation()
+ cmd = ('if [ -t %d ]; then echo "terminal %d" >&%d; '
+ 'else echo "not %d" >&%d; fi;')
+ both_cmds = ''
+ for fd in (1, 2):
+ both_cmds += cmd % (fd, fd, fd, fd, fd)
+ plist = Popen(both_cmds, shell=True).CommunicateFilter(oper.Output)
+ self._BasicCheck(plist, oper)
+ self.assertEqual(plist [0], 'terminal 1\r\n')
+ self.assertEqual(plist [1], 'terminal 2\r\n')
+
+ # Now try with PIPE and make sure it is not a terminal
+ oper = TestSubprocess.MyOperation()
+ plist = Popen(both_cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ shell=True).CommunicateFilter(oper.Output)
+ self._BasicCheck(plist, oper)
+ self.assertEqual(plist [0], 'not 1\n')
+ self.assertEqual(plist [1], 'not 2\n')
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tools/patman/get_maintainer.py b/tools/patman/get_maintainer.py
new file mode 100644
index 0000000..00b4939
--- /dev/null
+++ b/tools/patman/get_maintainer.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2012 The Chromium OS Authors.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+import command
+import gitutil
+import os
+
+def FindGetMaintainer():
+ """Look for the get_maintainer.pl script.
+
+ Returns:
+ If the script is found we'll return a path to it; else None.
+ """
+ try_list = [
+ os.path.join(gitutil.GetTopLevel(), 'scripts'),
+ ]
+ # Look in the list
+ for path in try_list:
+ fname = os.path.join(path, 'get_maintainer.pl')
+ if os.path.isfile(fname):
+ return fname
+
+ return None
+
+def GetMaintainer(fname, verbose=False):
+ """Run get_maintainer.pl on a file if we find it.
+
+ We look for get_maintainer.pl in the 'scripts' directory at the top of
+ git. If we find it we'll run it. If we don't find get_maintainer.pl
+ then we fail silently.
+
+ Args:
+ fname: Path to the patch file to run get_maintainer.pl on.
+
+ Returns:
+ A list of email addresses to CC to.
+ """
+ get_maintainer = FindGetMaintainer()
+ if not get_maintainer:
+ if verbose:
+ print "WARNING: Couldn't find get_maintainer.pl"
+ return []
+
+ stdout = command.Output(get_maintainer, '--norolestats', fname)
+ return stdout.splitlines()
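+
+# Typical use (a sketch; the patch filename is made up):
+#
+#   cc_list = GetMaintainer('0001-example.patch')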
diff --git a/tools/patman/gitutil.py b/tools/patman/gitutil.py
new file mode 100644
index 0000000..9e739d8
--- /dev/null
+++ b/tools/patman/gitutil.py
@@ -0,0 +1,582 @@
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+import command
+import re
+import os
+import series
+import subprocess
+import sys
+import terminal
+
+import checkpatch
+import settings
+
+# True to use --no-decorate - we check this in Setup()
+use_no_decorate = True
+
+def LogCmd(commit_range, git_dir=None, oneline=False, reverse=False,
+ count=None):
+ """Create a command to perform a 'git log'
+
+ Args:
+ commit_range: Range expression to use for log, None for none
+ git_dir: Path to git repository (None to use default)
+ oneline: True to use --oneline, else False
+ reverse: True to reverse the log (--reverse)
+ count: Number of commits to list, or None for no limit
+ Return:
+ List containing command and arguments to run
+ """
+ cmd = ['git']
+ if git_dir:
+ cmd += ['--git-dir', git_dir]
+ cmd += ['--no-pager', 'log', '--no-color']
+ if oneline:
+ cmd.append('--oneline')
+ if use_no_decorate:
+ cmd.append('--no-decorate')
+ if reverse:
+ cmd.append('--reverse')
+ if count is not None:
+ cmd.append('-n%d' % count)
+ if commit_range:
+ cmd.append(commit_range)
+ return cmd
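+
+# For example, assuming --no-decorate is supported, LogCmd('HEAD~3..',
+# oneline=True, count=5) returns:
+#
+#   ['git', '--no-pager', 'log', '--no-color', '--oneline', '--no-decorate',
+#    '-n5', 'HEAD~3..']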
+
+def CountCommitsToBranch():
+ """Returns number of commits between HEAD and the tracking branch.
+
+ This looks back to the tracking branch and works out the number of commits
+ since then.
+
+ Return:
+ Number of patches that exist on top of the branch
+ """
+ pipe = [LogCmd('@{upstream}..', oneline=True),
+ ['wc', '-l']]
+ stdout = command.RunPipe(pipe, capture=True, oneline=True).stdout
+ patch_count = int(stdout)
+ return patch_count
+
+def NameRevision(commit_hash):
+ """Gets the revision name for a commit
+
+ Args:
+ commit_hash: Commit hash to look up
+
+ Return:
+ Name of revision, if any, else None
+ """
+ pipe = ['git', 'name-rev', commit_hash]
+ stdout = command.RunPipe([pipe], capture=True, oneline=True).stdout
+
+ # We expect a commit, a space, then a revision name
+ name = stdout.split(' ')[1].strip()
+ return name
+
+def GuessUpstream(git_dir, branch):
+ """Tries to guess the upstream for a branch
+
+ This lists out top commits on a branch and tries to find a suitable
+ upstream. It does this by looking for the first commit where
+ 'git name-rev' returns a plain branch name, with no ~ or ^ modifiers.
+
+ Args:
+ git_dir: Git directory containing repo
+ branch: Name of branch
+
+ Returns:
+ Tuple:
+ Name of upstream branch (e.g. 'upstream/master') or None if none
+ Warning/error message, or None if none
+ """
+ pipe = [LogCmd(branch, git_dir=git_dir, oneline=True, count=100)]
+ result = command.RunPipe(pipe, capture=True, capture_stderr=True,
+ raise_on_error=False)
+ if result.return_code:
+ return None, "Branch '%s' not found" % branch
+ for line in result.stdout.splitlines()[1:]:
+ commit_hash = line.split(' ')[0]
+ name = NameRevision(commit_hash)
+ if '~' not in name and '^' not in name:
+ if name.startswith('remotes/'):
+ name = name[8:]
+ return name, "Guessing upstream as '%s'" % name
+ return None, "Cannot find a suitable upstream for branch '%s'" % branch
+
+def GetUpstream(git_dir, branch):
+ """Returns the name of the upstream for a branch
+
+ Args:
+ git_dir: Git directory containing repo
+ branch: Name of branch
+
+ Returns:
+ Tuple:
+ Name of upstream branch (e.g. 'upstream/master') or None if none
+ Warning/error message, or None if none
+ """
+ try:
+ remote = command.OutputOneLine('git', '--git-dir', git_dir, 'config',
+ 'branch.%s.remote' % branch)
+ merge = command.OutputOneLine('git', '--git-dir', git_dir, 'config',
+ 'branch.%s.merge' % branch)
+ except:
+ upstream, msg = GuessUpstream(git_dir, branch)
+ return upstream, msg
+
+ if remote == '.':
+ return merge, None
+ elif remote and merge:
+ leaf = merge.split('/')[-1]
+ return '%s/%s' % (remote, leaf), None
+ else:
+ raise ValueError, ("Cannot determine upstream branch for branch "
+ "'%s' remote='%s', merge='%s'" % (branch, remote, merge))
+
+
+def GetRangeInBranch(git_dir, branch, include_upstream=False):
+ """Returns an expression for the commits in the given branch.
+
+ Args:
+ git_dir: Directory containing git repo
+ branch: Name of branch
+ Return:
+ Expression in the form 'upstream..branch' which can be used to
+ access the commits. If the branch does not exist, returns None.
+ """
+ upstream, msg = GetUpstream(git_dir, branch)
+ if not upstream:
+ return None, msg
+ rstr = '%s%s..%s' % (upstream, '~' if include_upstream else '', branch)
+ return rstr, msg
+
+def CountCommitsInRange(git_dir, range_expr):
+ """Returns the number of commits in the given range.
+
+ Args:
+ git_dir: Directory containing git repo
+ range_expr: Range to check
+ Return:
+ Number of patches that exist in the supplied range, or None if none
+ were found
+ """
+ pipe = [LogCmd(range_expr, git_dir=git_dir, oneline=True)]
+ result = command.RunPipe(pipe, capture=True, capture_stderr=True,
+ raise_on_error=False)
+ if result.return_code:
+ return None, "Range '%s' not found or is invalid" % range_expr
+ patch_count = len(result.stdout.splitlines())
+ return patch_count, None
+
+def CountCommitsInBranch(git_dir, branch, include_upstream=False):
+ """Returns the number of commits in the given branch.
+
+ Args:
+ git_dir: Directory containing git repo
+ branch: Name of branch
+ Return:
+ Number of patches that exist on top of the branch, or None if the
+ branch does not exist.
+ """
+ range_expr, msg = GetRangeInBranch(git_dir, branch, include_upstream)
+ if not range_expr:
+ return None, msg
+ return CountCommitsInRange(git_dir, range_expr)
+
+def CountCommits(commit_range):
+ """Returns the number of commits in the given range.
+
+ Args:
+ commit_range: Range of commits to count (e.g. 'HEAD..base')
+ Return:
+ Number of patches that exist on top of the branch
+ """
+ pipe = [LogCmd(commit_range, oneline=True),
+ ['wc', '-l']]
+ stdout = command.RunPipe(pipe, capture=True, oneline=True).stdout
+ patch_count = int(stdout)
+ return patch_count
+
+def Checkout(commit_hash, git_dir=None, work_tree=None, force=False):
+ """Checkout the selected commit for this build
+
+ Args:
+ commit_hash: Commit hash to check out
+ """
+ pipe = ['git']
+ if git_dir:
+ pipe.extend(['--git-dir', git_dir])
+ if work_tree:
+ pipe.extend(['--work-tree', work_tree])
+ pipe.append('checkout')
+ if force:
+ pipe.append('-f')
+ pipe.append(commit_hash)
+ result = command.RunPipe([pipe], capture=True, raise_on_error=False,
+ capture_stderr=True)
+ if result.return_code != 0:
+ raise OSError, 'git checkout (%s): %s' % (pipe, result.stderr)
+
+def Clone(git_dir, output_dir):
+ """Checkout the selected commit for this build
+
+ Args:
+ commit_hash: Commit hash to check out
+ """
+ pipe = ['git', 'clone', git_dir, '.']
+ result = command.RunPipe([pipe], capture=True, cwd=output_dir,
+ capture_stderr=True)
+ if result.return_code != 0:
+ raise OSError, 'git clone: %s' % result.stderr
+
+def Fetch(git_dir=None, work_tree=None):
+ """Fetch from the origin repo
+
+ Args:
+ git_dir: Path to git repository (None to use default)
+ work_tree: Path to the working tree (None to use default)
+ """
+ pipe = ['git']
+ if git_dir:
+ pipe.extend(['--git-dir', git_dir])
+ if work_tree:
+ pipe.extend(['--work-tree', work_tree])
+ pipe.append('fetch')
+ result = command.RunPipe([pipe], capture=True, capture_stderr=True)
+ if result.return_code != 0:
+ raise OSError, 'git fetch: %s' % result.stderr
+
+def CreatePatches(start, count, series):
+ """Create a series of patches from the top of the current branch.
+
+ The patch files are written to the current directory using
+ git format-patch.
+
+ Args:
+ start: Commit to start from: 0=HEAD, 1=next one, etc.
+ count: number of commits to include
+ Return:
+ Filename of cover letter
+ List of filenames of patch files
+ """
+ if series.get('version'):
+ version = '%s ' % series['version']
+ cmd = ['git', 'format-patch', '-M', '--signoff']
+ if series.get('cover'):
+ cmd.append('--cover-letter')
+ prefix = series.GetPatchPrefix()
+ if prefix:
+ cmd += ['--subject-prefix=%s' % prefix]
+ cmd += ['HEAD~%d..HEAD~%d' % (start + count, start)]
+
+ stdout = command.RunList(cmd)
+ files = stdout.splitlines()
+
+ # We have an extra file if there is a cover letter
+ if series.get('cover'):
+ return files[0], files[1:]
+ else:
+ return None, files
+
+def BuildEmailList(in_list, tag=None, alias=None, raise_on_error=True):
+ """Build a list of email addresses based on an input list.
+
+ Takes a list of email addresses and aliases, and turns this into a list
+ of only email address, by resolving any aliases that are present.
+
+ If the tag is given, then each email address is prepended with this
+ tag and a space. If the tag starts with a minus sign (indicating a
+ command line parameter) then the email address is quoted.
+
+ Args:
+ in_list: List of aliases/email addresses
+ tag: Text to put before each address
+ alias: Alias dictionary
+ raise_on_error: True to raise an error when an alias fails to match,
+ False to just print a message.
+
+ Returns:
+ List of email addresses
+
+ >>> alias = {}
+ >>> alias['fred'] = ['f.bloggs@xxxxxxxxxxxx']
+ >>> alias['john'] = ['j.bloggs@xxxxxxxxxxxx']
+ >>> alias['mary'] = ['Mary Poppins <m.poppins@xxxxxxxxx>']
+ >>> alias['boys'] = ['fred', ' john']
+ >>> alias['all'] = ['fred ', 'john', ' mary ']
+ >>> BuildEmailList(['john', 'mary'], None, alias)
+ ['j.bloggs@xxxxxxxxxxxx', 'Mary Poppins <m.poppins@xxxxxxxxx>']
+ >>> BuildEmailList(['john', 'mary'], '--to', alias)
+ ['--to "j.bloggs@xxxxxxxxxxxx"', \
+'--to "Mary Poppins <m.poppins@xxxxxxxxx>"']
+ >>> BuildEmailList(['john', 'mary'], 'Cc', alias)
+ ['Cc j.bloggs@xxxxxxxxxxxx', 'Cc Mary Poppins <m.poppins@xxxxxxxxx>']
+ """
+ quote = '"' if tag and tag[0] == '-' else ''
+ raw = []
+ for item in in_list:
+ raw += LookupEmail(item, alias, raise_on_error=raise_on_error)
+ result = []
+ for item in raw:
+ if not item in result:
+ result.append(item)
+ if tag:
+ return ['%s %s%s%s' % (tag, quote, email, quote) for email in result]
+ return result
+
+def EmailPatches(series, cover_fname, args, dry_run, raise_on_error, cc_fname,
+ self_only=False, alias=None, in_reply_to=None):
+ """Email a patch series.
+
+ Args:
+ series: Series object containing destination info
+ cover_fname: filename of cover letter
+ args: list of filenames of patch files
+ dry_run: Just return the command that would be run
+ raise_on_error: True to raise an error when an alias fails to match,
+ False to just print a message.
+ cc_fname: Filename of Cc file for per-commit Cc
+ self_only: True to just email to yourself as a test
+ in_reply_to: If set we'll pass this to git as --in-reply-to.
+ Should be a message ID that this is in reply to.
+
+ Returns:
+ Git command that was/would be run
+
+ # For the duration of this doctest pretend that we ran patman with ./patman
+ >>> _old_argv0 = sys.argv[0]
+ >>> sys.argv[0] = './patman'
+
+ >>> alias = {}
+ >>> alias['fred'] = ['f.bloggs@xxxxxxxxxxxx']
+ >>> alias['john'] = ['j.bloggs@xxxxxxxxxxxx']
+ >>> alias['mary'] = ['m.poppins@xxxxxxxxx']
+ >>> alias['boys'] = ['fred', ' john']
+ >>> alias['all'] = ['fred ', 'john', ' mary ']
+ >>> alias[os.getenv('USER')] = ['this-is-me@xxxxxx']
+ >>> series = series.Series()
+ >>> series.to = ['fred']
+ >>> series.cc = ['mary']
+ >>> EmailPatches(series, 'cover', ['p1', 'p2'], True, True, 'cc-fname', \
+ False, alias)
+ 'git send-email --annotate --to "f.bloggs@xxxxxxxxxxxx" --cc \
+"m.poppins@xxxxxxxxx" --cc-cmd "./patman --cc-cmd cc-fname" cover p1 p2'
+ >>> EmailPatches(series, None, ['p1'], True, True, 'cc-fname', False, \
+ alias)
+ 'git send-email --annotate --to "f.bloggs@xxxxxxxxxxxx" --cc \
+"m.poppins@xxxxxxxxx" --cc-cmd "./patman --cc-cmd cc-fname" p1'
+ >>> series.cc = ['all']
+ >>> EmailPatches(series, 'cover', ['p1', 'p2'], True, True, 'cc-fname', \
+ True, alias)
+ 'git send-email --annotate --to "this-is-me@xxxxxx" --cc-cmd "./patman \
+--cc-cmd cc-fname" cover p1 p2'
+ >>> EmailPatches(series, 'cover', ['p1', 'p2'], True, True, 'cc-fname', \
+ False, alias)
+ 'git send-email --annotate --to "f.bloggs@xxxxxxxxxxxx" --cc \
+"f.bloggs@xxxxxxxxxxxx" --cc "j.bloggs@xxxxxxxxxxxx" --cc \
+"m.poppins@xxxxxxxxx" --cc-cmd "./patman --cc-cmd cc-fname" cover p1 p2'
+
+ # Restore argv[0] since we clobbered it.
+ >>> sys.argv[0] = _old_argv0
+ """
+ to = BuildEmailList(series.get('to'), '--to', alias, raise_on_error)
+ if not to:
+ git_config_to = command.Output('git', 'config', 'sendemail.to')
+ if not git_config_to:
+ print ("No recipient.\n"
+ "Please add something like this to a commit\n"
+ "Series-to: Fred Bloggs <f.blogs@xxxxxxxxxxxx>\n"
+ "Or do something like this\n"
+ "git config sendemail.to u-boot@xxxxxxxxxxxxx")
+ return
+ cc = BuildEmailList(list(set(series.get('cc')) - set(series.get('to'))),
+ '--cc', alias, raise_on_error)
+ if self_only:
+ to = BuildEmailList([os.getenv('USER')], '--to', alias, raise_on_error)
+ cc = []
+ cmd = ['git', 'send-email', '--annotate']
+ if in_reply_to:
+ cmd.append('--in-reply-to="%s"' % in_reply_to)
+
+ cmd += to
+ cmd += cc
+ cmd += ['--cc-cmd', '"%s --cc-cmd %s"' % (sys.argv[0], cc_fname)]
+ if cover_fname:
+ cmd.append(cover_fname)
+ cmd += args
+ str = ' '.join(cmd)
+ if not dry_run:
+ os.system(str)
+ return str
+
+
+def LookupEmail(lookup_name, alias=None, raise_on_error=True, level=0):
+ """If an email address is an alias, look it up and return the full name
+
+ TODO: Why not just use git's own alias feature?
+
+ Args:
+ lookup_name: Alias or email address to look up
+ alias: Dictionary containing aliases (None to use settings default)
+ raise_on_error: True to raise an error when an alias fails to match,
+ False to just print a message.
+
+ Returns:
+ List of email addresses
+
+ Raises:
+ OSError if a recursive alias reference was found
+ ValueError if an alias was not found
+
+ >>> alias = {}
+ >>> alias['fred'] = ['f.bloggs@xxxxxxxxxxxx']
+ >>> alias['john'] = ['j.bloggs@xxxxxxxxxxxx']
+ >>> alias['mary'] = ['m.poppins@xxxxxxxxx']
+ >>> alias['boys'] = ['fred', ' john', 'f.bloggs@xxxxxxxxxxxx']
+ >>> alias['all'] = ['fred ', 'john', ' mary ']
+ >>> alias['loop'] = ['other', 'john', ' mary ']
+ >>> alias['other'] = ['loop', 'john', ' mary ']
+ >>> LookupEmail('mary', alias)
+ ['m.poppins@xxxxxxxxx']
+ >>> LookupEmail('arthur.wellesley@xxxxxxxxxx', alias)
+ ['arthur.wellesley@xxxxxxxxxx']
+ >>> LookupEmail('boys', alias)
+ ['f.bloggs@xxxxxxxxxxxx', 'j.bloggs@xxxxxxxxxxxx']
+ >>> LookupEmail('all', alias)
+ ['f.bloggs@xxxxxxxxxxxx', 'j.bloggs@xxxxxxxxxxxx', 'm.poppins@xxxxxxxxx']
+ >>> LookupEmail('odd', alias)
+ Traceback (most recent call last):
+ ...
+ ValueError: Alias 'odd' not found
+ >>> LookupEmail('loop', alias)
+ Traceback (most recent call last):
+ ...
+ OSError: Recursive email alias at 'other'
+ >>> LookupEmail('odd', alias, raise_on_error=False)
+ Alias 'odd' not found
+ []
+ >>> # In this case the loop part will effectively be ignored.
+ >>> LookupEmail('loop', alias, raise_on_error=False)
+ Recursive email alias at 'other'
+ Recursive email alias at 'john'
+ Recursive email alias at 'mary'
+ ['j.bloggs@xxxxxxxxxxxx', 'm.poppins@xxxxxxxxx']
+ """
+ if not alias:
+ alias = settings.alias
+ lookup_name = lookup_name.strip()
+ if '@' in lookup_name: # Perhaps a real email address
+ return [lookup_name]
+
+ lookup_name = lookup_name.lower()
+ col = terminal.Color()
+
+ out_list = []
+ if level > 10:
+ msg = "Recursive email alias at '%s'" % lookup_name
+ if raise_on_error:
+ raise OSError, msg
+ else:
+ print col.Color(col.RED, msg)
+ return out_list
+
+ if lookup_name:
+ if not lookup_name in alias:
+ msg = "Alias '%s' not found" % lookup_name
+ if raise_on_error:
+ raise ValueError, msg
+ else:
+ print col.Color(col.RED, msg)
+ return out_list
+ for item in alias[lookup_name]:
+ todo = LookupEmail(item, alias, raise_on_error, level + 1)
+ for new_item in todo:
+ if not new_item in out_list:
+ out_list.append(new_item)
+
+ #print "No match for alias '%s'" % lookup_name
+ return out_list
+
+def GetTopLevel():
+ """Return name of top-level directory for this git repo.
+
+ Returns:
+ Full path to git top-level directory
+
+ This test makes sure that we are running tests in the right subdir
+
+ >>> os.path.realpath(os.path.dirname(__file__)) == \
+ os.path.join(GetTopLevel(), 'tools', 'patman')
+ True
+ """
+ return command.OutputOneLine('git', 'rev-parse', '--show-toplevel')
+
+def GetAliasFile():
+ """Gets the name of the git alias file.
+
+ Returns:
+ Filename of git alias file, or None if none
+ """
+ fname = command.OutputOneLine('git', 'config', 'sendemail.aliasesfile',
+ raise_on_error=False)
+ if fname:
+ fname = os.path.join(GetTopLevel(), fname.strip())
+ return fname
+
+def GetDefaultUserName():
+ """Gets the user.name from .gitconfig file.
+
+ Returns:
+ User name found in .gitconfig file, or None if none
+ """
+ uname = command.OutputOneLine('git', 'config', '--global', 'user.name')
+ return uname
+
+def GetDefaultUserEmail():
+ """Gets the user.email from the global .gitconfig file.
+
+ Returns:
+ User's email found in .gitconfig file, or None if none
+ """
+ uemail = command.OutputOneLine('git', 'config', '--global', 'user.email')
+ return uemail
+
+def GetDefaultSubjectPrefix():
+ """Gets the format.subjectprefix from local .git/config file.
+
+ Returns:
+ Subject prefix found in local .git/config file, or None if none
+ """
+ sub_prefix = command.OutputOneLine('git', 'config', 'format.subjectprefix',
+ raise_on_error=False)
+
+ return sub_prefix
+
+def Setup():
+ """Set up git utils, by reading the alias files."""
+ # Check for a git alias file also
+ global use_no_decorate
+
+ alias_fname = GetAliasFile()
+ if alias_fname:
+ settings.ReadGitAliases(alias_fname)
+ cmd = LogCmd(None, count=0)
+ use_no_decorate = (command.RunPipe([cmd], raise_on_error=False)
+ .return_code == 0)
+
+def GetHead():
+ """Get the hash of the current HEAD
+
+ Returns:
+ Hash of HEAD
+ """
+ return command.OutputOneLine('git', 'show', '-s', '--pretty=format:%H')
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/tools/patman/patchstream.py b/tools/patman/patchstream.py
new file mode 100644
index 0000000..6d3c41f
--- /dev/null
+++ b/tools/patman/patchstream.py
@@ -0,0 +1,488 @@
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+import math
+import os
+import re
+import shutil
+import tempfile
+
+import command
+import commit
+import gitutil
+from series import Series
+
+# Tags that we detect and remove
+re_remove = re.compile('^BUG=|^TEST=|^BRANCH=|^Change-Id:|^Review URL:'
+ '|Reviewed-on:|Commit-\w*:')
+
+# Lines which are allowed after a TEST= line
+re_allowed_after_test = re.compile('^Signed-off-by:')
+
+# Signoffs
+re_signoff = re.compile('^Signed-off-by: *(.*)')
+
+# The start of the cover letter
+re_cover = re.compile('^Cover-letter:')
+
+# A cover letter Cc
+re_cover_cc = re.compile('^Cover-letter-cc: *(.*)')
+
+# Patch series tag
+re_series_tag = re.compile('^Series-([a-z-]*): *(.*)')
+
+# Commit series tag
+re_commit_tag = re.compile('^Commit-([a-z-]*): *(.*)')
+
+# Commit tags that we want to collect and keep
+re_tag = re.compile('^(Tested-by|Acked-by|Reviewed-by|Patch-cc): (.*)')
+
+# The start of a new commit in the git log
+re_commit = re.compile('^commit ([0-9a-f]*)$')
+
+# We detect these since checkpatch doesn't always do it
+re_space_before_tab = re.compile('^[+].* \t')
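+
+# As an illustration, a commit message using the tags above might look like
+# this (all details invented):
+#
+#   sandbox: Improve widget support
+#
+#   Series-to: u-boot
+#   Series-cc: fred
+#   Series-changes: 2
+#   - Fix a typo in the widget name
+#
+#   Cover-letter:
+#   sandbox: Widget improvements
+#   END
+#
+#   Signed-off-by: A Developer <dev@example.com>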
+
+# States we can be in - can we use range() and still have comments?
+STATE_MSG_HEADER = 0 # Still in the message header
+STATE_PATCH_SUBJECT = 1 # In patch subject (first line of log for a commit)
+STATE_PATCH_HEADER = 2 # In patch header (after the subject)
+STATE_DIFFS = 3 # In the diff part (past --- line)
+
+class PatchStream:
+ """Class for detecting/injecting tags in a patch or series of patches
+
+ We support processing the output of 'git log' to read out the tags we
+ are interested in. We can also process a patch file in order to remove
+ unwanted tags or inject additional ones. These correspond to the two
+ phases of processing.
+ """
+ def __init__(self, series, name=None, is_log=False):
+ self.skip_blank = False # True to skip a single blank line
+ self.found_test = False # Found a TEST= line
+ self.lines_after_test = 0 # Number of lines found after TEST=
+ self.warn = [] # List of warnings we have collected
+ self.linenum = 1 # Output line number we are up to
+ self.in_section = None # Name of start...END section we are in
+ self.notes = [] # Series notes
+ self.section = [] # The current section...END section
+ self.series = series # Info about the patch series
+ self.is_log = is_log # True if indent like git log
+ self.in_change = 0 # Non-zero if we are in a change list
+ self.blank_count = 0 # Number of blank lines stored up
+ self.state = STATE_MSG_HEADER # What state are we in?
+ self.signoff = [] # Contents of signoff line
+ self.commit = None # Current commit
+
+ def AddToSeries(self, line, name, value):
+ """Add a new Series-xxx tag.
+
+ When a Series-xxx tag is detected, we come here to record it, if we
+ are scanning a 'git log'.
+
+ Args:
+ line: Source line containing tag (useful for debug/error messages)
+ name: Tag name (part after 'Series-')
+ value: Tag value (part after 'Series-xxx: ')
+ """
+ if name == 'notes':
+ self.in_section = name
+ self.skip_blank = False
+ if self.is_log:
+ self.series.AddTag(self.commit, line, name, value)
+
+ def AddToCommit(self, line, name, value):
+ """Add a new Commit-xxx tag.
+
+ When a Commit-xxx tag is detected, we come here to record it.
+
+ Args:
+ line: Source line containing tag (useful for debug/error messages)
+ name: Tag name (part after 'Commit-')
+ value: Tag value (part after 'Commit-xxx: ')
+ """
+ if name == 'notes':
+ self.in_section = 'commit-' + name
+ self.skip_blank = False
+
+ def CloseCommit(self):
+ """Save the current commit into our commit list, and reset our state"""
+ if self.commit and self.is_log:
+ self.series.AddCommit(self.commit)
+ self.commit = None
+
+ def ProcessLine(self, line):
+ """Process a single line of a patch file or commit log
+
+ This process a line and returns a list of lines to output. The list
+ may be empty or may contain multiple output lines.
+
+ This is where all the complicated logic is located. The class's
+ state is used to move between different states and detect things
+ properly.
+
+ We can be in one of two modes:
+ self.is_log == True: This is 'git log' mode, where most output is
+ indented by 4 characters and we are scanning for tags
+
+ self.is_log == False: This is 'patch' mode, where we already have
+ all the tags, and are processing patches to remove junk we
+ don't want, and add things we think are required.
+
+ Args:
+ line: text line to process
+
+ Returns:
+ list of output lines, or [] if nothing should be output
+ """
+ # Initially we have no output. Prepare the input line string
+ out = []
+ line = line.rstrip('\n')
+
+ commit_match = re_commit.match(line) if self.is_log else None
+
+ if self.is_log:
+ if line[:4] == '    ':
+ line = line[4:]
+
+ # Handle state transition and skipping blank lines
+ series_tag_match = re_series_tag.match(line)
+ commit_tag_match = re_commit_tag.match(line)
+ cover_cc_match = re_cover_cc.match(line)
+ signoff_match = re_signoff.match(line)
+ tag_match = None
+ if self.state == STATE_PATCH_HEADER:
+ tag_match = re_tag.match(line)
+ is_blank = not line.strip()
+ if is_blank:
+ if (self.state == STATE_MSG_HEADER
+ or self.state == STATE_PATCH_SUBJECT):
+ self.state += 1
+
+ # We don't have a subject in the text stream of patch files
+ # It has its own line with a Subject: tag
+ if not self.is_log and self.state == STATE_PATCH_SUBJECT:
+ self.state += 1
+ elif commit_match:
+ self.state = STATE_MSG_HEADER
+
+ # If we are in a section, keep collecting lines until we see END
+ if self.in_section:
+ if line == 'END':
+ if self.in_section == 'cover':
+ self.series.cover = self.section
+ elif self.in_section == 'notes':
+ if self.is_log:
+ self.series.notes += self.section
+ elif self.in_section == 'commit-notes':
+ if self.is_log:
+ self.commit.notes += self.section
+ else:
+ self.warn.append("Unknown section '%s'" % self.in_section)
+ self.in_section = None
+ self.skip_blank = True
+ self.section = []
+ else:
+ self.section.append(line)
+
+ # Detect the commit subject
+ elif not is_blank and self.state == STATE_PATCH_SUBJECT:
+ self.commit.subject = line
+
+ # Detect the tags we want to remove, and skip blank lines
+ elif re_remove.match(line) and not commit_tag_match:
+ self.skip_blank = True
+
+ # TEST= should be the last thing in the commit, so remove
+ # everything after it
+ if line.startswith('TEST='):
+ self.found_test = True
+ elif self.skip_blank and is_blank:
+ self.skip_blank = False
+
+ # Detect the start of a cover letter section
+ elif re_cover.match(line):
+ self.in_section = 'cover'
+ self.skip_blank = False
+
+ elif cover_cc_match:
+ value = cover_cc_match.group(1)
+ self.AddToSeries(line, 'cover-cc', value)
+
+ # If we are in a change list, keep collecting lines until a blank one
+ elif self.in_change:
+ if is_blank:
+ # Blank line ends this change list
+ self.in_change = 0
+ elif line == '---':
+ self.in_change = 0
+ out = self.ProcessLine(line)
+ else:
+ if self.is_log:
+ self.series.AddChange(self.in_change, self.commit, line)
+ self.skip_blank = False
+
+ # Detect Series-xxx tags
+ elif series_tag_match:
+ name = series_tag_match.group(1)
+ value = series_tag_match.group(2)
+ if name == 'changes':
+ # value is the version number: e.g. 1, or 2
+ try:
+ value = int(value)
+ except ValueError:
+ raise ValueError("%s: Cannot decode version info '%s'" %
+ (self.commit.hash, line))
+ self.in_change = int(value)
+ else:
+ self.AddToSeries(line, name, value)
+ self.skip_blank = True
+
+ # Detect Commit-xxx tags
+ elif commit_tag_match:
+ name = commit_tag_match.group(1)
+ value = commit_tag_match.group(2)
+ if name == 'notes':
+ self.AddToCommit(line, name, value)
+ self.skip_blank = True
+
+ # Detect the start of a new commit
+ elif commit_match:
+ self.CloseCommit()
+ self.commit = commit.Commit(commit_match.group(1))
+
+ # Detect tags in the commit message
+ elif tag_match:
+ # Remove Tested-by self, since few will take much notice
+ if (tag_match.group(1) == 'Tested-by' and
+ tag_match.group(2).find(os.getenv('USER') + '@') != -1):
+ self.warn.append("Ignoring %s" % line)
+ elif tag_match.group(1) == 'Patch-cc':
+ self.commit.AddCc(tag_match.group(2).split(','))
+ else:
+ out = [line]
+
+ # Suppress duplicate signoffs
+ elif signoff_match:
+ if (self.is_log or not self.commit or
+ self.commit.CheckDuplicateSignoff(signoff_match.group(1))):
+ out = [line]
+
+ # Well that means this is an ordinary line
+ else:
+ pos = 1
+ # Look for ugly ASCII characters
+ for ch in line:
+ # TODO: Would be nicer to report source filename and line
+ if ord(ch) > 0x80:
+ self.warn.append("Line %d/%d ('%s') has funny ascii char" %
+ (self.linenum, pos, line))
+ pos += 1
+
+ # Look for space before tab
+ m = re_space_before_tab.match(line)
+ if m:
+ self.warn.append('Line %d/%d has space before tab' %
+ (self.linenum, m.start()))
+
+ # OK, we have a valid non-blank line
+ out = [line]
+ self.linenum += 1
+ self.skip_blank = False
+ if self.state == STATE_DIFFS:
+ pass
+
+ # If this is the start of the diffs section, emit our tags and
+ # change log
+ elif line == '---':
+ self.state = STATE_DIFFS
+
+ # Output the tags (signoff first), then change list
+ out = []
+ log = self.series.MakeChangeLog(self.commit)
+ out += [line]
+ if self.commit:
+ out += self.commit.notes
+ out += [''] + log
+ elif self.found_test:
+ if not re_allowed_after_test.match(line):
+ self.lines_after_test += 1
+
+ return out
+
+ def Finalize(self):
+ """Close out processing of this patch stream"""
+ self.CloseCommit()
+ if self.lines_after_test:
+ self.warn.append('Found %d lines after TEST=' %
+ self.lines_after_test)
+
+ def ProcessStream(self, infd, outfd):
+ """Copy a stream from infd to outfd, filtering out unwanting things.
+
+ This is used to process patch files one at a time.
+
+ Args:
+ infd: Input stream file object
+ outfd: Output stream file object
+ """
+ # Extract the filename from each diff, for nice warnings
+ fname = None
+ last_fname = None
+ re_fname = re.compile('diff --git a/(.*) b/.*')
+ while True:
+ line = infd.readline()
+ if not line:
+ break
+ out = self.ProcessLine(line)
+
+ # Try to detect blank lines at EOF
+ for line in out:
+ match = re_fname.match(line)
+ if match:
+ last_fname = fname
+ fname = match.group(1)
+ if line == '+':
+ self.blank_count += 1
+ else:
+ if self.blank_count and (line == '-- ' or match):
+ self.warn.append("Found possible blank line(s) at "
+ "end of file '%s'" % last_fname)
+ outfd.write('+\n' * self.blank_count)
+ outfd.write(line + '\n')
+ self.blank_count = 0
+ self.Finalize()
+
+
+def GetMetaDataForList(commit_range, git_dir=None, count=None,
+ series = None, allow_overwrite=False):
+ """Reads out patch series metadata from the commits
+
+ This does a 'git log' on the relevant commits and pulls out the tags we
+ are interested in.
+
+ Args:
+ commit_range: Range of commits to count (e.g. 'HEAD..base')
+ git_dir: Path to git repository (None to use default)
+ count: Number of commits to list, or None for no limit
+ series: Series object to add information into. By default a new series
+ is started.
+ allow_overwrite: Allow tags to overwrite an existing tag
+ Returns:
+ A Series object containing information about the commits.
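+
+ Example (a sketch; assumes the current branch has commits above
+ 'upstream/master'):
+ series = GetMetaDataForList('upstream/master..HEAD')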
+ """
+ if not series:
+ series = Series()
+ series.allow_overwrite = allow_overwrite
+ params = gitutil.LogCmd(commit_range, reverse=True, count=count,
+ git_dir=git_dir)
+ stdout = command.RunPipe([params], capture=True).stdout
+ ps = PatchStream(series, is_log=True)
+ for line in stdout.splitlines():
+ ps.ProcessLine(line)
+ ps.Finalize()
+ return series
+
+def GetMetaData(start, count):
+ """Reads out patch series metadata from the commits
+
+ This does a 'git log' on the relevant commits and pulls out the tags we
+ are interested in.
+
+ Args:
+ start: Commit to start from: 0=HEAD, 1=next one, etc.
+ count: Number of commits to list
+ """
+ return GetMetaDataForList('HEAD~%d' % start, None, count)
+
+def FixPatch(backup_dir, fname, series, commit):
+ """Fix up a patch file, by adding/removing as required.
+
+ We remove our tags from the patch file, insert change lists, etc.
+ The patch file is processed in place, and overwritten.
+
+ A backup file is put into backup_dir (if not None).
+
+ Args:
+ backup_dir: Directory for a backup copy of the original file, or None
+ for no backup
+ fname: Filename of patch file to process
+ series: Series information about this patch set
+ commit: Commit object for this patch file
+ Return:
+ A list of errors, or [] if all ok.
+ """
+ handle, tmpname = tempfile.mkstemp()
+ outfd = os.fdopen(handle, 'w')
+ infd = open(fname, 'r')
+ ps = PatchStream(series)
+ ps.commit = commit
+ ps.ProcessStream(infd, outfd)
+ infd.close()
+ outfd.close()
+
+ # Create a backup file if required
+ if backup_dir:
+ shutil.copy(fname, os.path.join(backup_dir, os.path.basename(fname)))
+ shutil.move(tmpname, fname)
+ return ps.warn
+
+def FixPatches(series, fnames):
+ """Fix up a list of patches identified by filenames
+
+ The patch files are processed in place, and overwritten.
+
+ Args:
+ series: The series object
+ fnames: List of patch files to process
+ """
+ # Current workflow creates patches, so we shouldn't need a backup
+ backup_dir = None #tempfile.mkdtemp('clean-patch')
+ count = 0
+ for fname in fnames:
+ commit = series.commits[count]
+ commit.patch = fname
+ result = FixPatch(backup_dir, fname, series, commit)
+ if result:
+ print '%d warnings for %s:' % (len(result), fname)
+ for warn in result:
+ print '\t', warn
+ print
+ count += 1
+ print 'Cleaned %d patches' % count
+ return series
+
+def InsertCoverLetter(fname, series, count):
+ """Inserts a cover letter with the required info into patch 0
+
+ Args:
+ fname: Input / output filename of the cover letter file
+ series: Series object
+ count: Number of patches in the series
+ """
+ fd = open(fname, 'r')
+ lines = fd.readlines()
+ fd.close()
+
+ fd = open(fname, 'w')
+ text = series.cover
+ prefix = series.GetPatchPrefix()
+ for line in lines:
+ if line.startswith('Subject:'):
+ # if more than 10 or 100 patches, it should say 00/xx, 000/xxx, etc
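+ # (e.g. a 12-patch series gives zero_repeat=2, so the subject becomes
+ # '[PREFIX 00/12] ...', where PREFIX comes from GetPatchPrefix())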
+ zero_repeat = int(math.log10(count)) + 1
+ zero = '0' * zero_repeat
+ line = 'Subject: [%s %s/%d] %s\n' % (prefix, zero, count, text[0])
+
+ # Insert our cover letter
+ elif line.startswith('*** BLURB HERE ***'):
+ # First the blurb text
+ line = '\n'.join(text[1:]) + '\n'
+ if series.get('notes'):
+ line += '\n'.join(series.notes) + '\n'
+
+ # Now the change list
+ out = series.MakeChangeLog(None)
+ line += '\n' + '\n'.join(out)
+ fd.write(line)
+ fd.close()
diff --git a/tools/patman/patman b/tools/patman/patman
new file mode 120000
index 0000000..6cc3d7a
--- /dev/null
+++ b/tools/patman/patman
@@ -0,0 +1 @@
+patman.py
\ No newline at end of file
diff --git a/tools/patman/patman.py b/tools/patman/patman.py
new file mode 100755
index 0000000..6c6473e
--- /dev/null
+++ b/tools/patman/patman.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+"""See README for more information"""
+
+from optparse import OptionParser
+import os
+import re
+import sys
+import unittest
+
+# Our modules
+import checkpatch
+import command
+import gitutil
+import patchstream
+import project
+import settings
+import terminal
+import test
+
+
+parser = OptionParser()
+parser.add_option('-H', '--full-help', action='store_true', dest='full_help',
+ default=False, help='Display the README file')
+parser.add_option('-c', '--count', dest='count', type='int',
+ default=-1, help='Automatically create patches from top n commits')
+parser.add_option('-i', '--ignore-errors', action='store_true',
+ dest='ignore_errors', default=False,
+ help='Send patches email even if patch errors are found')
+parser.add_option('-m', '--no-maintainers', action='store_false',
+ dest='add_maintainers', default=True,
+ help="Don't cc the file maintainers automatically")
+parser.add_option('-n', '--dry-run', action='store_true', dest='dry_run',
+ default=False, help="Do a dry run (create but don't email patches)")
+parser.add_option('-p', '--project', default=project.DetectProject(),
+ help="Project name; affects default option values and "
+ "aliases [default: %default]")
+parser.add_option('-r', '--in-reply-to', type='string', action='store',
+ help="Message ID that this series is in reply to")
+parser.add_option('-s', '--start', dest='start', type='int',
+ default=0, help='Commit to start creating patches from (0 = HEAD)')
+parser.add_option('-t', '--ignore-bad-tags', action='store_true',
+ default=False, help='Ignore bad tags / aliases')
+parser.add_option('--test', action='store_true', dest='test',
+ default=False, help='run tests')
+parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
+ default=False, help='Verbose output of errors and warnings')
+parser.add_option('--cc-cmd', dest='cc_cmd', type='string', action='store',
+ default=None, help='Output cc list for patch file (used by git)')
+parser.add_option('--no-check', action='store_false', dest='check_patch',
+ default=True,
+ help="Don't check for patch compliance")
+parser.add_option('--no-tags', action='store_false', dest='process_tags',
+ default=True, help="Don't process subject tags as aliases")
+
+parser.usage += """
+
+Create patches from commits in a branch, check them and email them as
+specified by tags you place in the commits. Use -n to do a dry run first."""
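+
+# A couple of typical invocations (a sketch; both use options defined above):
+# ./patman -n -c 5 Create and check patches for the top 5 commits (dry run)
+# ./patman -c 5 Create, check and email patches for the top 5 commits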
+
+
+# Parse options twice: first to get the project and second to handle
+# defaults properly (which depends on project).
+(options, args) = parser.parse_args()
+settings.Setup(parser, options.project, '')
+(options, args) = parser.parse_args()
+
+# Run our meagre tests
+if options.test:
+ import doctest
+
+ sys.argv = [sys.argv[0]]
+ suite = unittest.TestLoader().loadTestsFromTestCase(test.TestPatch)
+ result = unittest.TestResult()
+ suite.run(result)
+
+ for module in ['gitutil', 'settings']:
+ suite = doctest.DocTestSuite(module)
+ suite.run(result)
+
+ # TODO: Surely we can just 'print' result?
+ print result
+ for test, err in result.errors:
+ print err
+ for test, err in result.failures:
+ print err
+
+# Called from git with a patch filename as argument
+# Print out a list of additional CC recipients for this patch
+elif options.cc_cmd:
+ fd = open(options.cc_cmd, 'r')
+ re_line = re.compile('(\S*) (.*)')
+ for line in fd.readlines():
+ match = re_line.match(line)
+ if match and match.group(1) == args[0]:
+ for cc in match.group(2).split(', '):
+ cc = cc.strip()
+ if cc:
+ print cc
+ fd.close()
+
+elif options.full_help:
+ pager = os.getenv('PAGER')
+ if not pager:
+ pager = 'more'
+ fname = os.path.join(os.path.dirname(sys.argv[0]), 'README')
+ command.Run(pager, fname)
+
+# Process commits, produce patches files, check them, email them
+else:
+ gitutil.Setup()
+
+ if options.count == -1:
+ # Work out how many patches to send if we can
+ options.count = gitutil.CountCommitsToBranch() - options.start
+
+ col = terminal.Color()
+ if not options.count:
+ str = 'No commits found to process - please use -c flag'
+ sys.exit(col.Color(col.RED, str))
+
+ # Read the metadata from the commits
+ if options.count:
+ series = patchstream.GetMetaData(options.start, options.count)
+ cover_fname, args = gitutil.CreatePatches(options.start, options.count,
+ series)
+
+ # Fix up the patch files to our liking, and insert the cover letter
+ series = patchstream.FixPatches(series, args)
+ if series and cover_fname and series.get('cover'):
+ patchstream.InsertCoverLetter(cover_fname, series, options.count)
+
+ # Do a few checks on the series
+ series.DoChecks()
+
+ # Check the patches, and run them through 'git am' just to be sure
+ if options.check_patch:
+ ok = checkpatch.CheckPatches(options.verbose, args)
+ else:
+ ok = True
+
+ cc_file = series.MakeCcFile(options.process_tags, cover_fname,
+ not options.ignore_bad_tags,
+ options.add_maintainers)
+
+ # Email the patches out (giving the user time to check / cancel)
+ cmd = ''
+ its_a_go = ok or options.ignore_errors
+ if its_a_go:
+ cmd = gitutil.EmailPatches(series, cover_fname, args,
+ options.dry_run, not options.ignore_bad_tags, cc_file,
+ in_reply_to=options.in_reply_to)
+ else:
+ print col.Color(col.RED, "Not sending emails due to errors/warnings")
+
+ # For a dry run, just show our actions as a sanity check
+ if options.dry_run:
+ series.ShowActions(args, cmd, options.process_tags)
+ if not its_a_go:
+ print col.Color(col.RED, "Email would not be sent")
+
+ os.remove(cc_file)
diff --git a/tools/patman/project.py b/tools/patman/project.py
new file mode 100644
index 0000000..e05ff11
--- /dev/null
+++ b/tools/patman/project.py
@@ -0,0 +1,27 @@
+# Copyright (c) 2012 The Chromium OS Authors.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+import os.path
+
+import gitutil
+
+def DetectProject():
+ """Autodetect the name of the current project.
+
+ This looks for signature files/directories that are unlikely to exist except
+ in the given project.
+
+ Returns:
+ The name of the project, like "linux" or "u-boot". Returns "unknown"
+ if we can't detect the project.
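+
+ Example (a sketch): run from within a U-Boot tree this would return
+ 'u-boot', and from a kernel tree 'linux'.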
+ """
+ top_level = gitutil.GetTopLevel()
+
+ if os.path.exists(os.path.join(top_level, "include", "u-boot")):
+ return "u-boot"
+ elif os.path.exists(os.path.join(top_level, "kernel")):
+ return "linux"
+
+ return "unknown"
diff --git a/tools/patman/series.py b/tools/patman/series.py
new file mode 100644
index 0000000..a17a7d1
--- /dev/null
+++ b/tools/patman/series.py
@@ -0,0 +1,271 @@
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+import itertools
+import os
+
+import get_maintainer
+import gitutil
+import terminal
+
+# Series-xxx tags that we understand
+valid_series = ['to', 'cc', 'version', 'changes', 'prefix', 'notes', 'name',
+ 'cover-cc', 'process_log']
+
+class Series(dict):
+ """Holds information about a patch series, including all tags.
+
+ Vars:
+ cc: List of aliases/emails to Cc all patches to
+ commits: List of Commit objects, one for each patch
+ cover: List of lines in the cover letter
+ notes: List of lines in the notes
+ changes: (dict) List of changes for each version. The key is the
+ integer version number
+ allow_overwrite: Allow tags to overwrite an existing tag
+ """
+ def __init__(self):
+ self.cc = []
+ self.to = []
+ self.cover_cc = []
+ self.commits = []
+ self.cover = None
+ self.notes = []
+ self.changes = {}
+ self.allow_overwrite = False
+
+ # Written in MakeCcFile()
+ # key: name of patch file
+ # value: list of email addresses
+ self._generated_cc = {}
+
+ # These make us more like a dictionary
+ def __setattr__(self, name, value):
+ self[name] = value
+
+ def __getattr__(self, name):
+ return self[name]
+
+ def AddTag(self, commit, line, name, value):
+ """Add a new Series-xxx tag along with its value.
+
+ Args:
+ commit: Commit object containing this tag (used in error messages)
+ line: Source line containing tag (useful for debug/error messages)
+ name: Tag name (part after 'Series-')
+ value: Tag value (part after 'Series-xxx: ')
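+
+ For example (addresses are illustrative only), a commit line
+ 'Series-cc: alice@example.com, bob@example.com' gives name='cc' and
+ value='alice@example.com, bob@example.com'.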
+ """
+ # If we already have it, then add to our list
+ name = name.replace('-', '_')
+ if name in self and not self.allow_overwrite:
+ values = value.split(',')
+ values = [str.strip() for str in values]
+ if type(self[name]) != type([]):
+ raise ValueError("In %s: line '%s': Cannot add another value "
+ "'%s' to series '%s'" %
+ (commit.hash, line, values, self[name]))
+ self[name] += values
+
+ # Otherwise just set the value
+ elif name in valid_series:
+ self[name] = value
+ else:
+ raise ValueError("In %s: line '%s': Unknown 'Series-%s': valid "
+ "options are %s" % (commit.hash, line, name,
+ ', '.join(valid_series)))
+
+ def AddCommit(self, commit):
+ """Add a commit into our list of commits
+
+ We create a list of tags in the commit subject also.
+
+ Args:
+ commit: Commit object to add
+ """
+ commit.CheckTags()
+ self.commits.append(commit)
+
+ def ShowActions(self, args, cmd, process_tags):
+ """Show what actions we will/would perform
+
+ Args:
+ args: List of patch files we created
+ cmd: The git command we would have run
+ process_tags: Process tags as if they were aliases
+ """
+ to_set = set(gitutil.BuildEmailList(self.to))
+ cc_set = set(gitutil.BuildEmailList(self.cc))
+
+ col = terminal.Color()
+ print 'Dry run, so not doing much. But I would do this:'
+ print
+ print 'Send a total of %d patch%s with %scover letter.' % (
+ len(args), '' if len(args) == 1 else 'es',
+ self.get('cover') and 'a ' or 'no ')
+
+ # TODO: Colour the patches according to whether they passed checks
+ for upto in range(len(args)):
+ commit = self.commits[upto]
+ print col.Color(col.GREEN, ' %s' % args[upto])
+ cc_list = list(self._generated_cc[commit.patch])
+ for email in set(cc_list) - to_set - cc_set:
+ if email == None:
+ email = col.Color(col.YELLOW, "<alias not found>")
+ if email:
+ print ' Cc: ',email
+ print
+ for item in to_set:
+ print 'To:\t ', item
+ for item in cc_set - to_set:
+ print 'Cc:\t ', item
+ print 'Version: ', self.get('version')
+ print 'Prefix:\t ', self.get('prefix')
+ if self.cover:
+ print 'Cover: %d lines' % len(self.cover)
+ cover_cc = gitutil.BuildEmailList(self.get('cover_cc', ''))
+ all_ccs = itertools.chain(cover_cc, *self._generated_cc.values())
+ for email in set(all_ccs) - to_set - cc_set:
+ print ' Cc: ',email
+ if cmd:
+ print 'Git command: %s' % cmd
+
+ def MakeChangeLog(self, commit):
+ """Create a list of changes for each version.
+
+ Args:
+ commit: Commit to list changes for, or None to list changes for all
+ commits (as used for the cover letter)
+
+ Return:
+ The change log as a list of strings, one per line
+
+ Changes in v4:
+ - Jog the dial back closer to the widget
+
+ Changes in v3: None
+ Changes in v2:
+ - Fix the widget
+ - Jog the dial
+
+ etc.
+ """
+ final = []
+ process_it = self.get('process_log', '').split(',')
+ process_it = [item.strip() for item in process_it]
+ need_blank = False
+ for change in sorted(self.changes, reverse=True):
+ out = []
+ for this_commit, text in self.changes[change]:
+ if commit and this_commit != commit:
+ continue
+ if 'uniq' not in process_it or text not in out:
+ out.append(text)
+ line = 'Changes in v%d:' % change
+ have_changes = len(out) > 0
+ if 'sort' in process_it:
+ out = sorted(out)
+ if have_changes:
+ out.insert(0, line)
+ else:
+ out = [line + ' None']
+ if need_blank:
+ out.insert(0, '')
+ final += out
+ need_blank = have_changes
+ if self.changes:
+ final.append('')
+ return final
+
+ def DoChecks(self):
+ """Check that each version has a change log
+
+ Print an error if something is wrong.
+ """
+ col = terminal.Color()
+ if self.get('version'):
+ changes_copy = dict(self.changes)
+ for version in range(1, int(self.version) + 1):
+ if self.changes.get(version):
+ del changes_copy[version]
+ else:
+ if version > 1:
+ str = 'Change log missing for v%d' % version
+ print col.Color(col.RED, str)
+ for version in changes_copy:
+ str = 'Change log for unknown version v%d' % version
+ print col.Color(col.RED, str)
+ elif self.changes:
+ str = 'Change log exists, but no version is set'
+ print col.Color(col.RED, str)
+
+ def MakeCcFile(self, process_tags, cover_fname, raise_on_error,
+ add_maintainers):
+ """Make a cc file for us to use for per-commit Cc automation
+
+ Also stores in self._generated_cc to make ShowActions() faster.
+
+ Args:
+ process_tags: Process tags as if they were aliases
+ cover_fname: If non-None the name of the cover letter.
+ raise_on_error: True to raise an error when an alias fails to match,
+ False to just print a message.
+ add_maintainers: Run the get_maintainer script to Cc the file maintainers
+ Return:
+ Filename of temp file created
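+
+ Each line of the file is a patch filename followed by its Cc list, for
+ example (addresses are illustrative only):
+ 0001-some-change.patch alice@example.com, bob@example.com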
+ """
+ # Look for commit tags (of the form 'xxx:' at the start of the subject)
+ fname = '/tmp/patman.%d' % os.getpid()
+ fd = open(fname, 'w')
+ all_ccs = []
+ for commit in self.commits:
+ list = []
+ if process_tags:
+ list += gitutil.BuildEmailList(commit.tags,
+ raise_on_error=raise_on_error)
+ list += gitutil.BuildEmailList(commit.cc_list,
+ raise_on_error=raise_on_error)
+ if add_maintainers:
+ list += get_maintainer.GetMaintainer(commit.patch)
+ all_ccs += list
+ print >>fd, commit.patch, ', '.join(set(list))
+ self._generated_cc[commit.patch] = list
+
+ if cover_fname:
+ cover_cc = gitutil.BuildEmailList(self.get('cover_cc', ''))
+ print >>fd, cover_fname, ', '.join(set(cover_cc + all_ccs))
+
+ fd.close()
+ return fname
+
+ def AddChange(self, version, commit, info):
+ """Add a new change line to a version.
+
+ This will later appear in the change log.
+
+ Args:
+ version: Version number to add the change to
+ commit: Commit object this change belongs to
+ info: Change line for this version
+ """
+ if not self.changes.get(version):
+ self.changes[version] = []
+ self.changes[version].append([commit, info])
+
+ def GetPatchPrefix(self):
+ """Get the patch version string
+
+ Return:
+ Patch string, like 'RFC PATCH v5' or just 'PATCH'
+ """
+ git_prefix = gitutil.GetDefaultSubjectPrefix()
+ if git_prefix:
+ git_prefix = '%s][' % git_prefix
+ else:
+ git_prefix = ''
+
+ version = ''
+ if self.get('version'):
+ version = ' v%s' % self['version']
+
+ # Get patch name prefix
+ prefix = ''
+ if self.get('prefix'):
+ prefix = '%s ' % self['prefix']
+ return '%s%sPATCH%s' % (git_prefix, prefix, version)
diff --git a/tools/patman/settings.py b/tools/patman/settings.py
new file mode 100644
index 0000000..ba2a68f
--- /dev/null
+++ b/tools/patman/settings.py
@@ -0,0 +1,295 @@
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+import ConfigParser
+import os
+import re
+
+import command
+import gitutil
+
+"""Default settings per-project.
+
+These are used by _ProjectConfigParser. Settings names should match
+the "dest" of the option parser from patman.py.
+"""
+_default_settings = {
+ "u-boot": {},
+ "linux": {
+ "process_tags": "False",
+ }
+}
+
+class _ProjectConfigParser(ConfigParser.SafeConfigParser):
+ """ConfigParser that handles projects.
+
+ There are two main goals of this class:
+ - Load project-specific default settings.
+ - Merge general default settings/aliases with project-specific ones.
+
+ # Sample config used for tests below...
+ >>> import StringIO
+ >>> sample_config = '''
+ ... [alias]
+ ... me: Peter P. <likesspiders@xxxxxxxxxxx>
+ ... enemies: Evil <evil@xxxxxxxxxxx>
+ ...
+ ... [sm_alias]
+ ... enemies: Green G. <ugly@xxxxxxxxxxx>
+ ...
+ ... [sm2_alias]
+ ... enemies: Doc O. <pus@xxxxxxxxxxx>
+ ...
+ ... [settings]
+ ... am_hero: True
+ ... '''
+
+ # Check to make sure that bogus project gets general alias.
+ >>> config = _ProjectConfigParser("zzz")
+ >>> config.readfp(StringIO.StringIO(sample_config))
+ >>> config.get("alias", "enemies")
+ 'Evil <evil@xxxxxxxxxxx>'
+
+ # Check to make sure that alias gets overridden by project.
+ >>> config = _ProjectConfigParser("sm")
+ >>> config.readfp(StringIO.StringIO(sample_config))
+ >>> config.get("alias", "enemies")
+ 'Green G. <ugly@xxxxxxxxxxx>'
+
+ # Check to make sure that settings get merged with project.
+ >>> config = _ProjectConfigParser("linux")
+ >>> config.readfp(StringIO.StringIO(sample_config))
+ >>> sorted(config.items("settings"))
+ [('am_hero', 'True'), ('process_tags', 'False')]
+
+ # Check to make sure that settings works with unknown project.
+ >>> config = _ProjectConfigParser("unknown")
+ >>> config.readfp(StringIO.StringIO(sample_config))
+ >>> sorted(config.items("settings"))
+ [('am_hero', 'True')]
+ """
+ def __init__(self, project_name):
+ """Construct _ProjectConfigParser.
+
+ In addition to standard SafeConfigParser initialization, this also loads
+ project defaults.
+
+ Args:
+ project_name: The name of the project.
+ """
+ self._project_name = project_name
+ ConfigParser.SafeConfigParser.__init__(self)
+
+ # Update the project settings in the config based on
+ # the _default_settings global.
+ project_settings = "%s_settings" % project_name
+ if not self.has_section(project_settings):
+ self.add_section(project_settings)
+ project_defaults = _default_settings.get(project_name, {})
+ for setting_name, setting_value in project_defaults.iteritems():
+ self.set(project_settings, setting_name, setting_value)
+
+ def get(self, section, option, *args, **kwargs):
+ """Extend SafeConfigParser to try project_section before section.
+
+ Args:
+ See SafeConfigParser.
+ Returns:
+ See SafeConfigParser.
+ """
+ try:
+ return ConfigParser.SafeConfigParser.get(
+ self, "%s_%s" % (self._project_name, section), option,
+ *args, **kwargs
+ )
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ return ConfigParser.SafeConfigParser.get(
+ self, section, option, *args, **kwargs
+ )
+
+ def items(self, section, *args, **kwargs):
+ """Extend SafeConfigParser to add project_section to section.
+
+ Args:
+ See SafeConfigParser.
+ Returns:
+ See SafeConfigParser.
+ """
+ project_items = []
+ has_project_section = False
+ top_items = []
+
+ # Get items from the project section
+ try:
+ project_items = ConfigParser.SafeConfigParser.items(
+ self, "%s_%s" % (self._project_name, section), *args, **kwargs
+ )
+ has_project_section = True
+ except ConfigParser.NoSectionError:
+ pass
+
+ # Get top-level items
+ try:
+ top_items = ConfigParser.SafeConfigParser.items(
+ self, section, *args, **kwargs
+ )
+ except ConfigParser.NoSectionError:
+ # If neither section exists, re-raise the error
+ if not has_project_section:
+ raise
+
+ item_dict = dict(top_items)
+ item_dict.update(project_items)
+ return item_dict.items()
+
+def ReadGitAliases(fname):
+ """Read a git alias file. This is in the form used by git:
+
+ alias uboot u-boot@xxxxxxxxxxxxx
+ alias wd Wolfgang Denk <wd@xxxxxxx>
+
+ Args:
+ fname: Filename to read
+ """
+ try:
+ fd = open(fname, 'r')
+ except IOError:
+ print "Warning: Cannot find alias file '%s'" % fname
+ return
+
+ re_line = re.compile('alias\s+(\S+)\s+(.*)')
+ for line in fd.readlines():
+ line = line.strip()
+ if not line or line[0] == '#':
+ continue
+
+ m = re_line.match(line)
+ if not m:
+ print "Warning: Alias file line '%s' not understood" % line
+ continue
+
+ list = alias.get(m.group(1), [])
+ for item in m.group(2).split(','):
+ item = item.strip()
+ if item:
+ list.append(item)
+ alias[m.group(1)] = list
+
+ fd.close()
+
+def CreatePatmanConfigFile(config_fname):
+ """Creates a config file under $(HOME)/.patman if it can't find one.
+
+ Args:
+ config_fname: Default config filename i.e., $(HOME)/.patman
+
+ Returns:
+ None
+ """
+ name = gitutil.GetDefaultUserName()
+ if name == None:
+ name = raw_input("Enter name: ")
+
+ email = gitutil.GetDefaultUserEmail()
+
+ if email == None:
+ email = raw_input("Enter email: ")
+
+ try:
+ f = open(config_fname, 'w')
+ except IOError:
+ print "Couldn't create patman config file\n"
+ raise
+
+ print >>f, "[alias]\nme: %s <%s>" % (name, email)
+ f.close()
+
+def _UpdateDefaults(parser, config):
+ """Update the given OptionParser defaults based on config.
+
+ We'll walk through all of the settings in the config file. For each
+ setting we'll look for a default in the option parser; if it's found,
+ we'll update the option parser default.
+
+ The idea here is that the .patman file should be able to update
+ defaults but that command line flags should still have the final
+ say.
+
+ Args:
+ parser: An instance of an OptionParser whose defaults will be
+ updated.
+ config: An instance of _ProjectConfigParser that we will query
+ for settings.
+ """
+ defaults = parser.get_default_values()
+ for name, val in config.items('settings'):
+ if hasattr(defaults, name):
+ default_val = getattr(defaults, name)
+ if isinstance(default_val, bool):
+ val = config.getboolean('settings', name)
+ elif isinstance(default_val, int):
+ val = config.getint('settings', name)
+ parser.set_default(name, val)
+ else:
+ print "WARNING: Unknown setting %s" % name
+
+def _ReadAliasFile(fname):
+ """Read in the U-Boot git alias file if it exists.
+
+ Args:
+ fname: Filename to read.
+ """
+ if os.path.exists(fname):
+ bad_line = None
+ with open(fname) as fd:
+ linenum = 0
+ for line in fd:
+ linenum += 1
+ line = line.strip()
+ if not line or line.startswith('#'):
+ continue
+ words = line.split(' ', 2)
+ if len(words) < 3 or words[0] != 'alias':
+ if not bad_line:
+ bad_line = "%s:%d:Invalid line '%s'" % (fname, linenum,
+ line)
+ continue
+ alias[words[1]] = [s.strip() for s in words[2].split(',')]
+ if bad_line:
+ print bad_line
+
+def Setup(parser, project_name, config_fname=''):
+ """Set up the settings module by reading config files.
+
+ Args:
+ parser: The parser to update
+ project_name: Name of project that we're working on; we'll look
+ for sections named "<project_name>_<section>" as well.
+ config_fname: Config filename to read ('' for default)
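+
+ A ~/.patman file might look something like this (illustrative only;
+ setting names follow the option 'dest' names in patman.py):
+
+ [settings]
+ ignore_errors: True
+ process_tags: False
+
+ [alias]
+ me: A Developer <dev@example.com>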
+ """
+ # First read the git alias file if available
+ _ReadAliasFile('doc/git-mailrc')
+ config = _ProjectConfigParser(project_name)
+ if config_fname == '':
+ config_fname = '%s/.patman' % os.getenv('HOME')
+
+ if not os.path.exists(config_fname):
+ print "No config file found at '%s'\nCreating one...\n" % config_fname
+ CreatePatmanConfigFile(config_fname)
+
+ config.read(config_fname)
+
+ for name, value in config.items('alias'):
+ alias[name] = value.split(',')
+
+ _UpdateDefaults(parser, config)
+
+# These are the aliases we understand, indexed by alias. Each member is a list.
+alias = {}
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/tools/patman/terminal.py b/tools/patman/terminal.py
new file mode 100644
index 0000000..e78a7c1
--- /dev/null
+++ b/tools/patman/terminal.py
@@ -0,0 +1,158 @@
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+"""Terminal utilities
+
+This module handles terminal interaction including ANSI color codes.
+"""
+
+import os
+import sys
+
+# Selection of when we want our output to be colored
+COLOR_IF_TERMINAL, COLOR_ALWAYS, COLOR_NEVER = range(3)
+
+# Initially, we are set up to print to the terminal
+print_test_mode = False
+print_test_list = []
+
+class PrintLine:
+ """A line of text output
+
+ Members:
+ text: Text line that was printed
+ newline: True to output a newline after the text
+ colour: Text colour to use
+ """
+ def __init__(self, text, newline, colour):
+ self.text = text
+ self.newline = newline
+ self.colour = colour
+
+ def __str__(self):
+ return 'newline=%s, colour=%s, text=%s' % (self.newline, self.colour,
+ self.text)
+
+def Print(text='', newline=True, colour=None):
+ """Handle a line of output to the terminal.
+
+ In test mode this is recorded in a list. Otherwise it is output to the
+ terminal.
+
+ Args:
+ text: Text to print
+ newline: True to add a new line at the end of the text
+ colour: Colour to use for the text
+ """
+ if print_test_mode:
+ print_test_list.append(PrintLine(text, newline, colour))
+ else:
+ if colour:
+ col = Color()
+ text = col.Color(colour, text)
+ print text,
+ if newline:
+ print
+
+def SetPrintTestMode():
+ """Go into test mode, where all printing is recorded"""
+ global print_test_mode
+
+ print_test_mode = True
+
+def GetPrintTestLines():
+ """Get a list of all lines output through Print()
+
+ Returns:
+ A list of PrintLine objects
+ """
+ global print_test_list
+
+ ret = print_test_list
+ print_test_list = []
+ return ret
+
+def EchoPrintTestLines():
+ """Print out the text lines collected"""
+ for line in print_test_list:
+ if line.colour:
+ col = Color()
+ print col.Color(line.colour, line.text),
+ else:
+ print line.text,
+ if line.newline:
+ print
+
+
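+# Typical usage (a sketch):
+# col = Color()
+# print col.Color(col.RED, 'something went wrong')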
+class Color(object):
+ """Conditionally wraps text in ANSI color escape sequences."""
+ BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
+ BOLD = -1
+ BRIGHT_START = '\033[1;%dm'
+ NORMAL_START = '\033[22;%dm'
+ BOLD_START = '\033[1m'
+ RESET = '\033[0m'
+
+ def __init__(self, colored=COLOR_IF_TERMINAL):
+ """Create a new Color object, optionally disabling color output.
+
+ Args:
+ colored: COLOR_ALWAYS to always add color codes, COLOR_NEVER to never
+ add them, or COLOR_IF_TERMINAL (the default) to add them only
+ when stdout is a terminal.
+ """
+ try:
+ self._enabled = (colored == COLOR_ALWAYS or
+ (colored == COLOR_IF_TERMINAL and
+ os.isatty(sys.stdout.fileno())))
+ except:
+ self._enabled = False
+
+ def Start(self, color, bright=True):
+ """Returns a start color code.
+
+ Args:
+ color: Color to use, e.g. BLACK, RED, etc.
+ bright: True (default) to return the bright variant of the color
+
+ Returns:
+ If color is enabled, returns an ANSI sequence to start the given
+ color, otherwise returns empty string
+ """
+ if self._enabled:
+ base = self.BRIGHT_START if bright else self.NORMAL_START
+ return base % (color + 30)
+ return ''
+
+ def Stop(self):
+ """Returns a stop color code.
+
+ Returns:
+ If color is enabled, returns an ANSI color reset sequence,
+ otherwise returns empty string
+ """
+ if self._enabled:
+ return self.RESET
+ return ''
+
+ def Color(self, color, text, bright=True):
+ """Returns text with conditionally added color escape sequences.
+
+ Args:
+ color: Text color -- one of the color constants defined in this
+ class.
+ text: The text to color.
+ bright: True (default) to use the bright variant of the color.
+
+ Returns:
+ If self._enabled is False, returns the original text. If it's True,
+ returns text with color escape sequences based on the value of
+ color.
+ """
+ if not self._enabled:
+ return text
+ if color == self.BOLD:
+ start = self.BOLD_START
+ else:
+ base = self.BRIGHT_START if bright else self.NORMAL_START
+ start = base % (color + 30)
+ return start + text + self.RESET
--
2.2.0.rc0.207.ga3a616c
